Commit 112202d9098aae2c36436e5178c0cf3ced423c7b

Authored by Tejun Heo
1 parent 8d03ecfe47

workqueue: rename cpu_workqueue to pool_workqueue

workqueue has moved away from global_cwqs to worker_pools and with the
scheduled custom worker pools, workqueues will be associated with
pools which don't have anything to do with CPUs.  The workqueue code
went through a significant amount of change recently and mass renaming
isn't likely to hurt much additionally.  Let's replace 'cpu' with
'pool' so that it reflects the current design.

* s/struct cpu_workqueue_struct/struct pool_workqueue/
* s/cpu_wq/pool_wq/
* s/cwq/pwq/

This patch is purely cosmetic.

Signed-off-by: Tejun Heo <tj@kernel.org>

Showing 4 changed files with 228 additions and 229 deletions

include/linux/workqueue.h
... ... @@ -27,7 +27,7 @@
27 27 enum {
28 28 WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */
29 29 WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */
30   - WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */
  30 + WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */
31 31 WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */
32 32 #ifdef CONFIG_DEBUG_OBJECTS_WORK
33 33 WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */
... ... @@ -40,7 +40,7 @@
40 40  
41 41 WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
42 42 WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT,
43   - WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT,
  43 + WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT,
44 44 WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
45 45 #ifdef CONFIG_DEBUG_OBJECTS_WORK
46 46 WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
47 47  
... ... @@ -60,14 +60,14 @@
60 60 WORK_CPU_END = NR_CPUS + 1,
61 61  
62 62 /*
63   - * Reserve 7 bits off of cwq pointer w/ debugobjects turned
64   - * off. This makes cwqs aligned to 256 bytes and allows 15
65   - * workqueue flush colors.
  63 + * Reserve 7 bits off of pwq pointer w/ debugobjects turned off.
  64 + * This makes pwqs aligned to 256 bytes and allows 15 workqueue
  65 + * flush colors.
66 66 */
67 67 WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
68 68 WORK_STRUCT_COLOR_BITS,
69 69  
70   - /* data contains off-queue information when !WORK_STRUCT_CWQ */
  70 + /* data contains off-queue information when !WORK_STRUCT_PWQ */
71 71 WORK_OFFQ_FLAG_BASE = WORK_STRUCT_FLAG_BITS,
72 72  
73 73 WORK_OFFQ_CANCELING = (1 << WORK_OFFQ_FLAG_BASE),
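
The flag-bits comment above is the reason pwqs must be aligned to 256 bytes: while a work item is queued, the low WORK_STRUCT_FLAG_BITS of work_struct->data carry flags such as WORK_STRUCT_PWQ and the remaining high bits hold the pwq pointer itself. The following is a minimal, standalone sketch of that tagged-pointer scheme in plain userspace C (illustrative only; the constants, names and widths are simplified stand-ins, not the kernel's):

/* Sketch: pack flag bits into the low bits of a sufficiently aligned pointer,
 * the way work_struct->data packs WORK_STRUCT_* flags below the pwq pointer. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_BITS	8			/* stand-in for WORK_STRUCT_FLAG_BITS */
#define FLAG_MASK	((1UL << FLAG_BITS) - 1)
#define DATA_MASK	(~FLAG_MASK)		/* stand-in for WORK_STRUCT_WQ_DATA_MASK */
#define FLAG_PWQ	(1UL << 2)		/* stand-in for WORK_STRUCT_PWQ */

struct pwq { int id; };				/* stand-in for struct pool_workqueue */

int main(void)
{
	/* allocating with 1 << FLAG_BITS alignment guarantees the flag bits are zero */
	struct pwq *pwq = aligned_alloc(1UL << FLAG_BITS, 1UL << FLAG_BITS);
	uintptr_t data;

	assert(pwq && ((uintptr_t)pwq & FLAG_MASK) == 0);
	pwq->id = 42;

	/* "set_work_pwq": pointer in the high bits, flags in the low bits */
	data = (uintptr_t)pwq | FLAG_PWQ;

	/* "get_work_pwq": dereference only after masking the flag bits off */
	if (data & FLAG_PWQ)
		printf("pwq id = %d\n", ((struct pwq *)(data & DATA_MASK))->id);

	free(pwq);
	return 0;
}
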
include/trace/events/workqueue.h
... ... @@ -27,7 +27,7 @@
27 27 /**
28 28 * workqueue_queue_work - called when a work gets queued
29 29 * @req_cpu: the requested cpu
30   - * @cwq: pointer to struct cpu_workqueue_struct
  30 + * @pwq: pointer to struct pool_workqueue
31 31 * @work: pointer to struct work_struct
32 32 *
33 33 * This event occurs when a work is queued immediately or once a
34 34  
... ... @@ -36,10 +36,10 @@
36 36 */
37 37 TRACE_EVENT(workqueue_queue_work,
38 38  
39   - TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
  39 + TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
40 40 struct work_struct *work),
41 41  
42   - TP_ARGS(req_cpu, cwq, work),
  42 + TP_ARGS(req_cpu, pwq, work),
43 43  
44 44 TP_STRUCT__entry(
45 45 __field( void *, work )
46 46  
... ... @@ -52,9 +52,9 @@
52 52 TP_fast_assign(
53 53 __entry->work = work;
54 54 __entry->function = work->func;
55   - __entry->workqueue = cwq->wq;
  55 + __entry->workqueue = pwq->wq;
56 56 __entry->req_cpu = req_cpu;
57   - __entry->cpu = cwq->pool->cpu;
  57 + __entry->cpu = pwq->pool->cpu;
58 58 ),
59 59  
60 60 TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
kernel/workqueue.c
... ... @@ -154,11 +154,12 @@
154 154 } ____cacheline_aligned_in_smp;
155 155  
156 156 /*
157   - * The per-CPU workqueue. The lower WORK_STRUCT_FLAG_BITS of
158   - * work_struct->data are used for flags and thus cwqs need to be
159   - * aligned at two's power of the number of flag bits.
  157 + * The per-pool workqueue. While queued, the lower WORK_STRUCT_FLAG_BITS
  158 + * of work_struct->data are used for flags and the remaining high bits
  159 + * point to the pwq; thus, pwqs need to be aligned at two's power of the
  160 + * number of flag bits.
160 161 */
161   -struct cpu_workqueue_struct {
  162 +struct pool_workqueue {
162 163 struct worker_pool *pool; /* I: the associated pool */
163 164 struct workqueue_struct *wq; /* I: the owning workqueue */
164 165 int work_color; /* L: current color */
165 166  
166 167  
... ... @@ -207,16 +208,16 @@
207 208 struct workqueue_struct {
208 209 unsigned int flags; /* W: WQ_* flags */
209 210 union {
210   - struct cpu_workqueue_struct __percpu *pcpu;
211   - struct cpu_workqueue_struct *single;
  211 + struct pool_workqueue __percpu *pcpu;
  212 + struct pool_workqueue *single;
212 213 unsigned long v;
213   - } cpu_wq; /* I: cwq's */
  214 + } pool_wq; /* I: pwq's */
214 215 struct list_head list; /* W: list of all workqueues */
215 216  
216 217 struct mutex flush_mutex; /* protects wq flushing */
217 218 int work_color; /* F: current work color */
218 219 int flush_color; /* F: current flush color */
219   - atomic_t nr_cwqs_to_flush; /* flush in progress */
  220 + atomic_t nr_pwqs_to_flush; /* flush in progress */
220 221 struct wq_flusher *first_flusher; /* F: first flusher */
221 222 struct list_head flusher_queue; /* F: flush waiters */
222 223 struct list_head flusher_overflow; /* F: flush overflow list */
... ... @@ -225,7 +226,7 @@
225 226 struct worker *rescuer; /* I: rescue worker */
226 227  
227 228 int nr_drainers; /* W: drain in progress */
228   - int saved_max_active; /* W: saved cwq max_active */
  229 + int saved_max_active; /* W: saved pwq max_active */
229 230 #ifdef CONFIG_LOCKDEP
230 231 struct lockdep_map lockdep_map;
231 232 #endif
... ... @@ -268,7 +269,7 @@
268 269 return WORK_CPU_END;
269 270 }
270 271  
271   -static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
  272 +static inline int __next_pwq_cpu(int cpu, const struct cpumask *mask,
272 273 struct workqueue_struct *wq)
273 274 {
274 275 return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
... ... @@ -284,7 +285,7 @@
284 285 *
285 286 * for_each_wq_cpu() : possible CPUs + WORK_CPU_UNBOUND
286 287 * for_each_online_wq_cpu() : online CPUs + WORK_CPU_UNBOUND
287   - * for_each_cwq_cpu() : possible CPUs for bound workqueues,
  288 + * for_each_pwq_cpu() : possible CPUs for bound workqueues,
288 289 * WORK_CPU_UNBOUND for unbound workqueues
289 290 */
290 291 #define for_each_wq_cpu(cpu) \
291 292  
... ... @@ -297,10 +298,10 @@
297 298 (cpu) < WORK_CPU_END; \
298 299 (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
299 300  
300   -#define for_each_cwq_cpu(cpu, wq) \
301   - for ((cpu) = __next_cwq_cpu(-1, cpu_possible_mask, (wq)); \
  301 +#define for_each_pwq_cpu(cpu, wq) \
  302 + for ((cpu) = __next_pwq_cpu(-1, cpu_possible_mask, (wq)); \
302 303 (cpu) < WORK_CPU_END; \
303   - (cpu) = __next_cwq_cpu((cpu), cpu_possible_mask, (wq)))
  304 + (cpu) = __next_pwq_cpu((cpu), cpu_possible_mask, (wq)))
304 305  
305 306 #ifdef CONFIG_DEBUG_OBJECTS_WORK
306 307  
307 308  
308 309  
... ... @@ -479,14 +480,14 @@
479 480 return &pools[highpri];
480 481 }
481 482  
482   -static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
483   - struct workqueue_struct *wq)
  483 +static struct pool_workqueue *get_pwq(unsigned int cpu,
  484 + struct workqueue_struct *wq)
484 485 {
485 486 if (!(wq->flags & WQ_UNBOUND)) {
486 487 if (likely(cpu < nr_cpu_ids))
487   - return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
  488 + return per_cpu_ptr(wq->pool_wq.pcpu, cpu);
488 489 } else if (likely(cpu == WORK_CPU_UNBOUND))
489   - return wq->cpu_wq.single;
  490 + return wq->pool_wq.single;
490 491 return NULL;
491 492 }
492 493  
... ... @@ -507,18 +508,18 @@
507 508 }
508 509  
509 510 /*
510   - * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
511   - * contain the pointer to the queued cwq. Once execution starts, the flag
  511 + * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
  512 + * contain the pointer to the queued pwq. Once execution starts, the flag
512 513 * is cleared and the high bits contain OFFQ flags and pool ID.
513 514 *
514   - * set_work_cwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
515   - * and clear_work_data() can be used to set the cwq, pool or clear
  515 + * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
  516 + * and clear_work_data() can be used to set the pwq, pool or clear
516 517 * work->data. These functions should only be called while the work is
517 518 * owned - ie. while the PENDING bit is set.
518 519 *
519   - * get_work_pool() and get_work_cwq() can be used to obtain the pool or cwq
  520 + * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
520 521 * corresponding to a work. Pool is available once the work has been
521   - * queued anywhere after initialization until it is sync canceled. cwq is
  522 + * queued anywhere after initialization until it is sync canceled. pwq is
522 523 * available only while the work item is queued.
523 524 *
524 525 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
525 526  
... ... @@ -533,12 +534,11 @@
533 534 atomic_long_set(&work->data, data | flags | work_static(work));
534 535 }
535 536  
536   -static void set_work_cwq(struct work_struct *work,
537   - struct cpu_workqueue_struct *cwq,
  537 +static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
538 538 unsigned long extra_flags)
539 539 {
540   - set_work_data(work, (unsigned long)cwq,
541   - WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
  540 + set_work_data(work, (unsigned long)pwq,
  541 + WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
542 542 }
543 543  
544 544 static void set_work_pool_and_keep_pending(struct work_struct *work,
545 545  
... ... @@ -567,11 +567,11 @@
567 567 set_work_data(work, WORK_STRUCT_NO_POOL, 0);
568 568 }
569 569  
570   -static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
  570 +static struct pool_workqueue *get_work_pwq(struct work_struct *work)
571 571 {
572 572 unsigned long data = atomic_long_read(&work->data);
573 573  
574   - if (data & WORK_STRUCT_CWQ)
  574 + if (data & WORK_STRUCT_PWQ)
575 575 return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
576 576 else
577 577 return NULL;
... ... @@ -589,8 +589,8 @@
589 589 struct worker_pool *pool;
590 590 int pool_id;
591 591  
592   - if (data & WORK_STRUCT_CWQ)
593   - return ((struct cpu_workqueue_struct *)
  592 + if (data & WORK_STRUCT_PWQ)
  593 + return ((struct pool_workqueue *)
594 594 (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
595 595  
596 596 pool_id = data >> WORK_OFFQ_POOL_SHIFT;
... ... @@ -613,8 +613,8 @@
613 613 {
614 614 unsigned long data = atomic_long_read(&work->data);
615 615  
616   - if (data & WORK_STRUCT_CWQ)
617   - return ((struct cpu_workqueue_struct *)
  616 + if (data & WORK_STRUCT_PWQ)
  617 + return ((struct pool_workqueue *)
618 618 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
619 619  
620 620 return data >> WORK_OFFQ_POOL_SHIFT;
... ... @@ -632,7 +632,7 @@
632 632 {
633 633 unsigned long data = atomic_long_read(&work->data);
634 634  
635   - return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
  635 + return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
636 636 }
637 637  
638 638 /*
... ... @@ -961,67 +961,67 @@
961 961 *nextp = n;
962 962 }
963 963  
964   -static void cwq_activate_delayed_work(struct work_struct *work)
  964 +static void pwq_activate_delayed_work(struct work_struct *work)
965 965 {
966   - struct cpu_workqueue_struct *cwq = get_work_cwq(work);
  966 + struct pool_workqueue *pwq = get_work_pwq(work);
967 967  
968 968 trace_workqueue_activate_work(work);
969   - move_linked_works(work, &cwq->pool->worklist, NULL);
  969 + move_linked_works(work, &pwq->pool->worklist, NULL);
970 970 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
971   - cwq->nr_active++;
  971 + pwq->nr_active++;
972 972 }
973 973  
974   -static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  974 +static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
975 975 {
976   - struct work_struct *work = list_first_entry(&cwq->delayed_works,
  976 + struct work_struct *work = list_first_entry(&pwq->delayed_works,
977 977 struct work_struct, entry);
978 978  
979   - cwq_activate_delayed_work(work);
  979 + pwq_activate_delayed_work(work);
980 980 }
981 981  
982 982 /**
983   - * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
984   - * @cwq: cwq of interest
  983 + * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
  984 + * @pwq: pwq of interest
985 985 * @color: color of work which left the queue
986 986 *
987 987 * A work either has completed or is removed from pending queue,
988   - * decrement nr_in_flight of its cwq and handle workqueue flushing.
  988 + * decrement nr_in_flight of its pwq and handle workqueue flushing.
989 989 *
990 990 * CONTEXT:
991 991 * spin_lock_irq(pool->lock).
992 992 */
993   -static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
  993 +static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
994 994 {
995 995 /* ignore uncolored works */
996 996 if (color == WORK_NO_COLOR)
997 997 return;
998 998  
999   - cwq->nr_in_flight[color]--;
  999 + pwq->nr_in_flight[color]--;
1000 1000  
1001   - cwq->nr_active--;
1002   - if (!list_empty(&cwq->delayed_works)) {
  1001 + pwq->nr_active--;
  1002 + if (!list_empty(&pwq->delayed_works)) {
1003 1003 /* one down, submit a delayed one */
1004   - if (cwq->nr_active < cwq->max_active)
1005   - cwq_activate_first_delayed(cwq);
  1004 + if (pwq->nr_active < pwq->max_active)
  1005 + pwq_activate_first_delayed(pwq);
1006 1006 }
1007 1007  
1008 1008 /* is flush in progress and are we at the flushing tip? */
1009   - if (likely(cwq->flush_color != color))
  1009 + if (likely(pwq->flush_color != color))
1010 1010 return;
1011 1011  
1012 1012 /* are there still in-flight works? */
1013   - if (cwq->nr_in_flight[color])
  1013 + if (pwq->nr_in_flight[color])
1014 1014 return;
1015 1015  
1016   - /* this cwq is done, clear flush_color */
1017   - cwq->flush_color = -1;
  1016 + /* this pwq is done, clear flush_color */
  1017 + pwq->flush_color = -1;
1018 1018  
1019 1019 /*
1020   - * If this was the last cwq, wake up the first flusher. It
  1020 + * If this was the last pwq, wake up the first flusher. It
1021 1021 * will handle the rest.
1022 1022 */
1023   - if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1024   - complete(&cwq->wq->first_flusher->done);
  1023 + if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
  1024 + complete(&pwq->wq->first_flusher->done);
1025 1025 }
1026 1026  
1027 1027 /**
... ... @@ -1053,7 +1053,7 @@
1053 1053 unsigned long *flags)
1054 1054 {
1055 1055 struct worker_pool *pool;
1056   - struct cpu_workqueue_struct *cwq;
  1056 + struct pool_workqueue *pwq;
1057 1057  
1058 1058 local_irq_save(*flags);
1059 1059  
... ... @@ -1084,31 +1084,31 @@
1084 1084  
1085 1085 spin_lock(&pool->lock);
1086 1086 /*
1087   - * work->data is guaranteed to point to cwq only while the work
1088   - * item is queued on cwq->wq, and both updating work->data to point
1089   - * to cwq on queueing and to pool on dequeueing are done under
1090   - * cwq->pool->lock. This in turn guarantees that, if work->data
1091   - * points to cwq which is associated with a locked pool, the work
  1087 + * work->data is guaranteed to point to pwq only while the work
  1088 + * item is queued on pwq->wq, and both updating work->data to point
  1089 + * to pwq on queueing and to pool on dequeueing are done under
  1090 + * pwq->pool->lock. This in turn guarantees that, if work->data
  1091 + * points to pwq which is associated with a locked pool, the work
1092 1092 * item is currently queued on that pool.
1093 1093 */
1094   - cwq = get_work_cwq(work);
1095   - if (cwq && cwq->pool == pool) {
  1094 + pwq = get_work_pwq(work);
  1095 + if (pwq && pwq->pool == pool) {
1096 1096 debug_work_deactivate(work);
1097 1097  
1098 1098 /*
1099 1099 * A delayed work item cannot be grabbed directly because
1100 1100 * it might have linked NO_COLOR work items which, if left
1101   - * on the delayed_list, will confuse cwq->nr_active
  1101 + * on the delayed_list, will confuse pwq->nr_active
1102 1102 * management later on and cause stall. Make sure the work
1103 1103 * item is activated before grabbing.
1104 1104 */
1105 1105 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1106   - cwq_activate_delayed_work(work);
  1106 + pwq_activate_delayed_work(work);
1107 1107  
1108 1108 list_del_init(&work->entry);
1109   - cwq_dec_nr_in_flight(get_work_cwq(work), get_work_color(work));
  1109 + pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
1110 1110  
1111   - /* work->data points to cwq iff queued, point to pool */
  1111 + /* work->data points to pwq iff queued, point to pool */
1112 1112 set_work_pool_and_keep_pending(work, pool->id);
1113 1113  
1114 1114 spin_unlock(&pool->lock);
... ... @@ -1125,25 +1125,24 @@
1125 1125  
1126 1126 /**
1127 1127 * insert_work - insert a work into a pool
1128   - * @cwq: cwq @work belongs to
  1128 + * @pwq: pwq @work belongs to
1129 1129 * @work: work to insert
1130 1130 * @head: insertion point
1131 1131 * @extra_flags: extra WORK_STRUCT_* flags to set
1132 1132 *
1133   - * Insert @work which belongs to @cwq after @head. @extra_flags is or'd to
  1133 + * Insert @work which belongs to @pwq after @head. @extra_flags is or'd to
1134 1134 * work_struct flags.
1135 1135 *
1136 1136 * CONTEXT:
1137 1137 * spin_lock_irq(pool->lock).
1138 1138 */
1139   -static void insert_work(struct cpu_workqueue_struct *cwq,
1140   - struct work_struct *work, struct list_head *head,
1141   - unsigned int extra_flags)
  1139 +static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
  1140 + struct list_head *head, unsigned int extra_flags)
1142 1141 {
1143   - struct worker_pool *pool = cwq->pool;
  1142 + struct worker_pool *pool = pwq->pool;
1144 1143  
1145 1144 /* we own @work, set data and link */
1146   - set_work_cwq(work, cwq, extra_flags);
  1145 + set_work_pwq(work, pwq, extra_flags);
1147 1146 list_add_tail(&work->entry, head);
1148 1147  
1149 1148 /*
1150 1149  
... ... @@ -1170,13 +1169,13 @@
1170 1169 * Return %true iff I'm a worker execuing a work item on @wq. If
1171 1170 * I'm @worker, it's safe to dereference it without locking.
1172 1171 */
1173   - return worker && worker->current_cwq->wq == wq;
  1172 + return worker && worker->current_pwq->wq == wq;
1174 1173 }
1175 1174  
1176 1175 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1177 1176 struct work_struct *work)
1178 1177 {
1179   - struct cpu_workqueue_struct *cwq;
  1178 + struct pool_workqueue *pwq;
1180 1179 struct list_head *worklist;
1181 1180 unsigned int work_flags;
1182 1181 unsigned int req_cpu = cpu;
... ... @@ -1196,7 +1195,7 @@
1196 1195 WARN_ON_ONCE(!is_chained_work(wq)))
1197 1196 return;
1198 1197  
1199   - /* determine the cwq to use */
  1198 + /* determine the pwq to use */
1200 1199 if (!(wq->flags & WQ_UNBOUND)) {
1201 1200 struct worker_pool *last_pool;
1202 1201  
... ... @@ -1209,54 +1208,54 @@
1209 1208 * work needs to be queued on that cpu to guarantee
1210 1209 * non-reentrancy.
1211 1210 */
1212   - cwq = get_cwq(cpu, wq);
  1211 + pwq = get_pwq(cpu, wq);
1213 1212 last_pool = get_work_pool(work);
1214 1213  
1215   - if (last_pool && last_pool != cwq->pool) {
  1214 + if (last_pool && last_pool != pwq->pool) {
1216 1215 struct worker *worker;
1217 1216  
1218 1217 spin_lock(&last_pool->lock);
1219 1218  
1220 1219 worker = find_worker_executing_work(last_pool, work);
1221 1220  
1222   - if (worker && worker->current_cwq->wq == wq) {
1223   - cwq = get_cwq(last_pool->cpu, wq);
  1221 + if (worker && worker->current_pwq->wq == wq) {
  1222 + pwq = get_pwq(last_pool->cpu, wq);
1224 1223 } else {
1225 1224 /* meh... not running there, queue here */
1226 1225 spin_unlock(&last_pool->lock);
1227   - spin_lock(&cwq->pool->lock);
  1226 + spin_lock(&pwq->pool->lock);
1228 1227 }
1229 1228 } else {
1230   - spin_lock(&cwq->pool->lock);
  1229 + spin_lock(&pwq->pool->lock);
1231 1230 }
1232 1231 } else {
1233   - cwq = get_cwq(WORK_CPU_UNBOUND, wq);
1234   - spin_lock(&cwq->pool->lock);
  1232 + pwq = get_pwq(WORK_CPU_UNBOUND, wq);
  1233 + spin_lock(&pwq->pool->lock);
1235 1234 }
1236 1235  
1237   - /* cwq determined, queue */
1238   - trace_workqueue_queue_work(req_cpu, cwq, work);
  1236 + /* pwq determined, queue */
  1237 + trace_workqueue_queue_work(req_cpu, pwq, work);
1239 1238  
1240 1239 if (WARN_ON(!list_empty(&work->entry))) {
1241   - spin_unlock(&cwq->pool->lock);
  1240 + spin_unlock(&pwq->pool->lock);
1242 1241 return;
1243 1242 }
1244 1243  
1245   - cwq->nr_in_flight[cwq->work_color]++;
1246   - work_flags = work_color_to_flags(cwq->work_color);
  1244 + pwq->nr_in_flight[pwq->work_color]++;
  1245 + work_flags = work_color_to_flags(pwq->work_color);
1247 1246  
1248   - if (likely(cwq->nr_active < cwq->max_active)) {
  1247 + if (likely(pwq->nr_active < pwq->max_active)) {
1249 1248 trace_workqueue_activate_work(work);
1250   - cwq->nr_active++;
1251   - worklist = &cwq->pool->worklist;
  1249 + pwq->nr_active++;
  1250 + worklist = &pwq->pool->worklist;
1252 1251 } else {
1253 1252 work_flags |= WORK_STRUCT_DELAYED;
1254   - worklist = &cwq->delayed_works;
  1253 + worklist = &pwq->delayed_works;
1255 1254 }
1256 1255  
1257   - insert_work(cwq, work, worklist, work_flags);
  1256 + insert_work(pwq, work, worklist, work_flags);
1258 1257  
1259   - spin_unlock(&cwq->pool->lock);
  1258 + spin_unlock(&pwq->pool->lock);
1260 1259 }
1261 1260  
1262 1261 /**
1263 1262  
... ... @@ -1661,14 +1660,14 @@
1661 1660  
1662 1661 /*
1663 1662 * wq doesn't really matter but let's keep @worker->pool
1664   - * and @cwq->pool consistent for sanity.
  1663 + * and @pwq->pool consistent for sanity.
1665 1664 */
1666 1665 if (std_worker_pool_pri(worker->pool))
1667 1666 wq = system_highpri_wq;
1668 1667 else
1669 1668 wq = system_wq;
1670 1669  
1671   - insert_work(get_cwq(pool->cpu, wq), rebind_work,
  1670 + insert_work(get_pwq(pool->cpu, wq), rebind_work,
1672 1671 worker->scheduled.next,
1673 1672 work_color_to_flags(WORK_NO_COLOR));
1674 1673 }
1675 1674  
... ... @@ -1845,15 +1844,15 @@
1845 1844  
1846 1845 static bool send_mayday(struct work_struct *work)
1847 1846 {
1848   - struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1849   - struct workqueue_struct *wq = cwq->wq;
  1847 + struct pool_workqueue *pwq = get_work_pwq(work);
  1848 + struct workqueue_struct *wq = pwq->wq;
1850 1849 unsigned int cpu;
1851 1850  
1852 1851 if (!(wq->flags & WQ_RESCUER))
1853 1852 return false;
1854 1853  
1855 1854 /* mayday mayday mayday */
1856   - cpu = cwq->pool->cpu;
  1855 + cpu = pwq->pool->cpu;
1857 1856 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1858 1857 if (cpu == WORK_CPU_UNBOUND)
1859 1858 cpu = 0;
1860 1859  
... ... @@ -2082,9 +2081,9 @@
2082 2081 __releases(&pool->lock)
2083 2082 __acquires(&pool->lock)
2084 2083 {
2085   - struct cpu_workqueue_struct *cwq = get_work_cwq(work);
  2084 + struct pool_workqueue *pwq = get_work_pwq(work);
2086 2085 struct worker_pool *pool = worker->pool;
2087   - bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
  2086 + bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2088 2087 int work_color;
2089 2088 struct worker *collision;
2090 2089 #ifdef CONFIG_LOCKDEP
... ... @@ -2125,7 +2124,7 @@
2125 2124 hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2126 2125 worker->current_work = work;
2127 2126 worker->current_func = work->func;
2128   - worker->current_cwq = cwq;
  2127 + worker->current_pwq = pwq;
2129 2128 work_color = get_work_color(work);
2130 2129  
2131 2130 list_del_init(&work->entry);
... ... @@ -2154,7 +2153,7 @@
2154 2153  
2155 2154 spin_unlock_irq(&pool->lock);
2156 2155  
2157   - lock_map_acquire_read(&cwq->wq->lockdep_map);
  2156 + lock_map_acquire_read(&pwq->wq->lockdep_map);
2158 2157 lock_map_acquire(&lockdep_map);
2159 2158 trace_workqueue_execute_start(work);
2160 2159 worker->current_func(work);
... ... @@ -2164,7 +2163,7 @@
2164 2163 */
2165 2164 trace_workqueue_execute_end(work);
2166 2165 lock_map_release(&lockdep_map);
2167   - lock_map_release(&cwq->wq->lockdep_map);
  2166 + lock_map_release(&pwq->wq->lockdep_map);
2168 2167  
2169 2168 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2170 2169 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
... ... @@ -2185,8 +2184,8 @@
2185 2184 hash_del(&worker->hentry);
2186 2185 worker->current_work = NULL;
2187 2186 worker->current_func = NULL;
2188   - worker->current_cwq = NULL;
2189   - cwq_dec_nr_in_flight(cwq, work_color);
  2187 + worker->current_pwq = NULL;
  2188 + pwq_dec_nr_in_flight(pwq, work_color);
2190 2189 }
2191 2190  
2192 2191 /**
... ... @@ -2353,8 +2352,8 @@
2353 2352 */
2354 2353 for_each_mayday_cpu(cpu, wq->mayday_mask) {
2355 2354 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2356   - struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2357   - struct worker_pool *pool = cwq->pool;
  2355 + struct pool_workqueue *pwq = get_pwq(tcpu, wq);
  2356 + struct worker_pool *pool = pwq->pool;
2358 2357 struct work_struct *work, *n;
2359 2358  
2360 2359 __set_current_state(TASK_RUNNING);
... ... @@ -2370,7 +2369,7 @@
2370 2369 */
2371 2370 BUG_ON(!list_empty(&rescuer->scheduled));
2372 2371 list_for_each_entry_safe(work, n, &pool->worklist, entry)
2373   - if (get_work_cwq(work) == cwq)
  2372 + if (get_work_pwq(work) == pwq)
2374 2373 move_linked_works(work, scheduled, &n);
2375 2374  
2376 2375 process_scheduled_works(rescuer);
... ... @@ -2405,7 +2404,7 @@
2405 2404  
2406 2405 /**
2407 2406 * insert_wq_barrier - insert a barrier work
2408   - * @cwq: cwq to insert barrier into
  2407 + * @pwq: pwq to insert barrier into
2409 2408 * @barr: wq_barrier to insert
2410 2409 * @target: target work to attach @barr to
2411 2410 * @worker: worker currently executing @target, NULL if @target is not executing
2412 2411  
... ... @@ -2422,12 +2421,12 @@
2422 2421 * after a work with LINKED flag set.
2423 2422 *
2424 2423 * Note that when @worker is non-NULL, @target may be modified
2425   - * underneath us, so we can't reliably determine cwq from @target.
  2424 + * underneath us, so we can't reliably determine pwq from @target.
2426 2425 *
2427 2426 * CONTEXT:
2428 2427 * spin_lock_irq(pool->lock).
2429 2428 */
2430   -static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
  2429 +static void insert_wq_barrier(struct pool_workqueue *pwq,
2431 2430 struct wq_barrier *barr,
2432 2431 struct work_struct *target, struct worker *worker)
2433 2432 {
... ... @@ -2460,23 +2459,23 @@
2460 2459 }
2461 2460  
2462 2461 debug_work_activate(&barr->work);
2463   - insert_work(cwq, &barr->work, head,
  2462 + insert_work(pwq, &barr->work, head,
2464 2463 work_color_to_flags(WORK_NO_COLOR) | linked);
2465 2464 }
2466 2465  
2467 2466 /**
2468   - * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
  2467 + * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2469 2468 * @wq: workqueue being flushed
2470 2469 * @flush_color: new flush color, < 0 for no-op
2471 2470 * @work_color: new work color, < 0 for no-op
2472 2471 *
2473   - * Prepare cwqs for workqueue flushing.
  2472 + * Prepare pwqs for workqueue flushing.
2474 2473 *
2475   - * If @flush_color is non-negative, flush_color on all cwqs should be
2476   - * -1. If no cwq has in-flight commands at the specified color, all
2477   - * cwq->flush_color's stay at -1 and %false is returned. If any cwq
2478   - * has in flight commands, its cwq->flush_color is set to
2479   - * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
  2474 + * If @flush_color is non-negative, flush_color on all pwqs should be
  2475 + * -1. If no pwq has in-flight commands at the specified color, all
  2476 + * pwq->flush_color's stay at -1 and %false is returned. If any pwq
  2477 + * has in flight commands, its pwq->flush_color is set to
  2478 + * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2480 2479 * wakeup logic is armed and %true is returned.
2481 2480 *
2482 2481 * The caller should have initialized @wq->first_flusher prior to
... ... @@ -2484,7 +2483,7 @@
2484 2483 * @flush_color is negative, no flush color update is done and %false
2485 2484 * is returned.
2486 2485 *
2487   - * If @work_color is non-negative, all cwqs should have the same
  2486 + * If @work_color is non-negative, all pwqs should have the same
2488 2487 * work_color which is previous to @work_color and all will be
2489 2488 * advanced to @work_color.
2490 2489 *
... ... @@ -2495,42 +2494,42 @@
2495 2494 * %true if @flush_color >= 0 and there's something to flush. %false
2496 2495 * otherwise.
2497 2496 */
2498   -static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
  2497 +static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2499 2498 int flush_color, int work_color)
2500 2499 {
2501 2500 bool wait = false;
2502 2501 unsigned int cpu;
2503 2502  
2504 2503 if (flush_color >= 0) {
2505   - BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2506   - atomic_set(&wq->nr_cwqs_to_flush, 1);
  2504 + BUG_ON(atomic_read(&wq->nr_pwqs_to_flush));
  2505 + atomic_set(&wq->nr_pwqs_to_flush, 1);
2507 2506 }
2508 2507  
2509   - for_each_cwq_cpu(cpu, wq) {
2510   - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2511   - struct worker_pool *pool = cwq->pool;
  2508 + for_each_pwq_cpu(cpu, wq) {
  2509 + struct pool_workqueue *pwq = get_pwq(cpu, wq);
  2510 + struct worker_pool *pool = pwq->pool;
2512 2511  
2513 2512 spin_lock_irq(&pool->lock);
2514 2513  
2515 2514 if (flush_color >= 0) {
2516   - BUG_ON(cwq->flush_color != -1);
  2515 + BUG_ON(pwq->flush_color != -1);
2517 2516  
2518   - if (cwq->nr_in_flight[flush_color]) {
2519   - cwq->flush_color = flush_color;
2520   - atomic_inc(&wq->nr_cwqs_to_flush);
  2517 + if (pwq->nr_in_flight[flush_color]) {
  2518 + pwq->flush_color = flush_color;
  2519 + atomic_inc(&wq->nr_pwqs_to_flush);
2521 2520 wait = true;
2522 2521 }
2523 2522 }
2524 2523  
2525 2524 if (work_color >= 0) {
2526   - BUG_ON(work_color != work_next_color(cwq->work_color));
2527   - cwq->work_color = work_color;
  2525 + BUG_ON(work_color != work_next_color(pwq->work_color));
  2526 + pwq->work_color = work_color;
2528 2527 }
2529 2528  
2530 2529 spin_unlock_irq(&pool->lock);
2531 2530 }
2532 2531  
2533   - if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
  2532 + if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2534 2533 complete(&wq->first_flusher->done);
2535 2534  
2536 2535 return wait;
... ... @@ -2581,7 +2580,7 @@
2581 2580  
2582 2581 wq->first_flusher = &this_flusher;
2583 2582  
2584   - if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
  2583 + if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2585 2584 wq->work_color)) {
2586 2585 /* nothing to flush, done */
2587 2586 wq->flush_color = next_color;
... ... @@ -2592,7 +2591,7 @@
2592 2591 /* wait in queue */
2593 2592 BUG_ON(wq->flush_color == this_flusher.flush_color);
2594 2593 list_add_tail(&this_flusher.list, &wq->flusher_queue);
2595   - flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
  2594 + flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2596 2595 }
2597 2596 } else {
2598 2597 /*
... ... @@ -2659,7 +2658,7 @@
2659 2658  
2660 2659 list_splice_tail_init(&wq->flusher_overflow,
2661 2660 &wq->flusher_queue);
2662   - flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
  2661 + flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2663 2662 }
2664 2663  
2665 2664 if (list_empty(&wq->flusher_queue)) {
... ... @@ -2669,7 +2668,7 @@
2669 2668  
2670 2669 /*
2671 2670 * Need to flush more colors. Make the next flusher
2672   - * the new first flusher and arm cwqs.
  2671 + * the new first flusher and arm pwqs.
2673 2672 */
2674 2673 BUG_ON(wq->flush_color == wq->work_color);
2675 2674 BUG_ON(wq->flush_color != next->flush_color);
... ... @@ -2677,7 +2676,7 @@
2677 2676 list_del_init(&next->list);
2678 2677 wq->first_flusher = next;
2679 2678  
2680   - if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
  2679 + if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2681 2680 break;
2682 2681  
2683 2682 /*
2684 2683  
... ... @@ -2720,13 +2719,13 @@
2720 2719 reflush:
2721 2720 flush_workqueue(wq);
2722 2721  
2723   - for_each_cwq_cpu(cpu, wq) {
2724   - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
  2722 + for_each_pwq_cpu(cpu, wq) {
  2723 + struct pool_workqueue *pwq = get_pwq(cpu, wq);
2725 2724 bool drained;
2726 2725  
2727   - spin_lock_irq(&cwq->pool->lock);
2728   - drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
2729   - spin_unlock_irq(&cwq->pool->lock);
  2726 + spin_lock_irq(&pwq->pool->lock);
  2727 + drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
  2728 + spin_unlock_irq(&pwq->pool->lock);
2730 2729  
2731 2730 if (drained)
2732 2731 continue;
... ... @@ -2749,7 +2748,7 @@
2749 2748 {
2750 2749 struct worker *worker = NULL;
2751 2750 struct worker_pool *pool;
2752   - struct cpu_workqueue_struct *cwq;
  2751 + struct pool_workqueue *pwq;
2753 2752  
2754 2753 might_sleep();
2755 2754 pool = get_work_pool(work);
2756 2755  
2757 2756  
... ... @@ -2758,18 +2757,18 @@
2758 2757  
2759 2758 spin_lock_irq(&pool->lock);
2760 2759 /* see the comment in try_to_grab_pending() with the same code */
2761   - cwq = get_work_cwq(work);
2762   - if (cwq) {
2763   - if (unlikely(cwq->pool != pool))
  2760 + pwq = get_work_pwq(work);
  2761 + if (pwq) {
  2762 + if (unlikely(pwq->pool != pool))
2764 2763 goto already_gone;
2765 2764 } else {
2766 2765 worker = find_worker_executing_work(pool, work);
2767 2766 if (!worker)
2768 2767 goto already_gone;
2769   - cwq = worker->current_cwq;
  2768 + pwq = worker->current_pwq;
2770 2769 }
2771 2770  
2772   - insert_wq_barrier(cwq, barr, work, worker);
  2771 + insert_wq_barrier(pwq, barr, work, worker);
2773 2772 spin_unlock_irq(&pool->lock);
2774 2773  
2775 2774 /*
2776 2775  
... ... @@ -2778,11 +2777,11 @@
2778 2777 * flusher is not running on the same workqueue by verifying write
2779 2778 * access.
2780 2779 */
2781   - if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
2782   - lock_map_acquire(&cwq->wq->lockdep_map);
  2780 + if (pwq->wq->saved_max_active == 1 || pwq->wq->flags & WQ_RESCUER)
  2781 + lock_map_acquire(&pwq->wq->lockdep_map);
2783 2782 else
2784   - lock_map_acquire_read(&cwq->wq->lockdep_map);
2785   - lock_map_release(&cwq->wq->lockdep_map);
  2783 + lock_map_acquire_read(&pwq->wq->lockdep_map);
  2784 + lock_map_release(&pwq->wq->lockdep_map);
2786 2785  
2787 2786 return true;
2788 2787 already_gone:
... ... @@ -3092,46 +3091,46 @@
3092 3091 return system_wq != NULL;
3093 3092 }
3094 3093  
3095   -static int alloc_cwqs(struct workqueue_struct *wq)
  3094 +static int alloc_pwqs(struct workqueue_struct *wq)
3096 3095 {
3097 3096 /*
3098   - * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
  3097 + * pwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
3099 3098 * Make sure that the alignment isn't lower than that of
3100 3099 * unsigned long long.
3101 3100 */
3102   - const size_t size = sizeof(struct cpu_workqueue_struct);
  3101 + const size_t size = sizeof(struct pool_workqueue);
3103 3102 const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
3104 3103 __alignof__(unsigned long long));
3105 3104  
3106 3105 if (!(wq->flags & WQ_UNBOUND))
3107   - wq->cpu_wq.pcpu = __alloc_percpu(size, align);
  3106 + wq->pool_wq.pcpu = __alloc_percpu(size, align);
3108 3107 else {
3109 3108 void *ptr;
3110 3109  
3111 3110 /*
3112   - * Allocate enough room to align cwq and put an extra
  3111 + * Allocate enough room to align pwq and put an extra
3113 3112 * pointer at the end pointing back to the originally
3114 3113 * allocated pointer which will be used for free.
3115 3114 */
3116 3115 ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
3117 3116 if (ptr) {
3118   - wq->cpu_wq.single = PTR_ALIGN(ptr, align);
3119   - *(void **)(wq->cpu_wq.single + 1) = ptr;
  3117 + wq->pool_wq.single = PTR_ALIGN(ptr, align);
  3118 + *(void **)(wq->pool_wq.single + 1) = ptr;
3120 3119 }
3121 3120 }
3122 3121  
3123 3122 /* just in case, make sure it's actually aligned */
3124   - BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
3125   - return wq->cpu_wq.v ? 0 : -ENOMEM;
  3123 + BUG_ON(!IS_ALIGNED(wq->pool_wq.v, align));
  3124 + return wq->pool_wq.v ? 0 : -ENOMEM;
3126 3125 }
3127 3126  
3128   -static void free_cwqs(struct workqueue_struct *wq)
  3127 +static void free_pwqs(struct workqueue_struct *wq)
3129 3128 {
3130 3129 if (!(wq->flags & WQ_UNBOUND))
3131   - free_percpu(wq->cpu_wq.pcpu);
3132   - else if (wq->cpu_wq.single) {
3133   - /* the pointer to free is stored right after the cwq */
3134   - kfree(*(void **)(wq->cpu_wq.single + 1));
  3130 + free_percpu(wq->pool_wq.pcpu);
  3131 + else if (wq->pool_wq.single) {
  3132 + /* the pointer to free is stored right after the pwq */
  3133 + kfree(*(void **)(wq->pool_wq.single + 1));
3135 3134 }
3136 3135 }
3137 3136  
... ... @@ -3185,25 +3184,25 @@
3185 3184 wq->flags = flags;
3186 3185 wq->saved_max_active = max_active;
3187 3186 mutex_init(&wq->flush_mutex);
3188   - atomic_set(&wq->nr_cwqs_to_flush, 0);
  3187 + atomic_set(&wq->nr_pwqs_to_flush, 0);
3189 3188 INIT_LIST_HEAD(&wq->flusher_queue);
3190 3189 INIT_LIST_HEAD(&wq->flusher_overflow);
3191 3190  
3192 3191 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
3193 3192 INIT_LIST_HEAD(&wq->list);
3194 3193  
3195   - if (alloc_cwqs(wq) < 0)
  3194 + if (alloc_pwqs(wq) < 0)
3196 3195 goto err;
3197 3196  
3198   - for_each_cwq_cpu(cpu, wq) {
3199   - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
  3197 + for_each_pwq_cpu(cpu, wq) {
  3198 + struct pool_workqueue *pwq = get_pwq(cpu, wq);
3200 3199  
3201   - BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
3202   - cwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
3203   - cwq->wq = wq;
3204   - cwq->flush_color = -1;
3205   - cwq->max_active = max_active;
3206   - INIT_LIST_HEAD(&cwq->delayed_works);
  3200 + BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
  3201 + pwq->pool = get_std_worker_pool(cpu, flags & WQ_HIGHPRI);
  3202 + pwq->wq = wq;
  3203 + pwq->flush_color = -1;
  3204 + pwq->max_active = max_active;
  3205 + INIT_LIST_HEAD(&pwq->delayed_works);
3207 3206 }
3208 3207  
3209 3208 if (flags & WQ_RESCUER) {
... ... @@ -3234,8 +3233,8 @@
3234 3233 spin_lock(&workqueue_lock);
3235 3234  
3236 3235 if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
3237   - for_each_cwq_cpu(cpu, wq)
3238   - get_cwq(cpu, wq)->max_active = 0;
  3236 + for_each_pwq_cpu(cpu, wq)
  3237 + get_pwq(cpu, wq)->max_active = 0;
3239 3238  
3240 3239 list_add(&wq->list, &workqueues);
3241 3240  
... ... @@ -3244,7 +3243,7 @@
3244 3243 return wq;
3245 3244 err:
3246 3245 if (wq) {
3247   - free_cwqs(wq);
  3246 + free_pwqs(wq);
3248 3247 free_mayday_mask(wq->mayday_mask);
3249 3248 kfree(wq->rescuer);
3250 3249 kfree(wq);
3251 3250  
... ... @@ -3275,14 +3274,14 @@
3275 3274 spin_unlock(&workqueue_lock);
3276 3275  
3277 3276 /* sanity check */
3278   - for_each_cwq_cpu(cpu, wq) {
3279   - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
  3277 + for_each_pwq_cpu(cpu, wq) {
  3278 + struct pool_workqueue *pwq = get_pwq(cpu, wq);
3280 3279 int i;
3281 3280  
3282 3281 for (i = 0; i < WORK_NR_COLORS; i++)
3283   - BUG_ON(cwq->nr_in_flight[i]);
3284   - BUG_ON(cwq->nr_active);
3285   - BUG_ON(!list_empty(&cwq->delayed_works));
  3282 + BUG_ON(pwq->nr_in_flight[i]);
  3283 + BUG_ON(pwq->nr_active);
  3284 + BUG_ON(!list_empty(&pwq->delayed_works));
3286 3285 }
3287 3286  
3288 3287 if (wq->flags & WQ_RESCUER) {
... ... @@ -3291,29 +3290,29 @@
3291 3290 kfree(wq->rescuer);
3292 3291 }
3293 3292  
3294   - free_cwqs(wq);
  3293 + free_pwqs(wq);
3295 3294 kfree(wq);
3296 3295 }
3297 3296 EXPORT_SYMBOL_GPL(destroy_workqueue);
3298 3297  
3299 3298 /**
3300   - * cwq_set_max_active - adjust max_active of a cwq
3301   - * @cwq: target cpu_workqueue_struct
  3299 + * pwq_set_max_active - adjust max_active of a pwq
  3300 + * @pwq: target pool_workqueue
3302 3301 * @max_active: new max_active value.
3303 3302 *
3304   - * Set @cwq->max_active to @max_active and activate delayed works if
  3303 + * Set @pwq->max_active to @max_active and activate delayed works if
3305 3304 * increased.
3306 3305 *
3307 3306 * CONTEXT:
3308 3307 * spin_lock_irq(pool->lock).
3309 3308 */
3310   -static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
  3309 +static void pwq_set_max_active(struct pool_workqueue *pwq, int max_active)
3311 3310 {
3312   - cwq->max_active = max_active;
  3311 + pwq->max_active = max_active;
3313 3312  
3314   - while (!list_empty(&cwq->delayed_works) &&
3315   - cwq->nr_active < cwq->max_active)
3316   - cwq_activate_first_delayed(cwq);
  3313 + while (!list_empty(&pwq->delayed_works) &&
  3314 + pwq->nr_active < pwq->max_active)
  3315 + pwq_activate_first_delayed(pwq);
3317 3316 }
3318 3317  
3319 3318 /**
3320 3319  
... ... @@ -3336,15 +3335,15 @@
3336 3335  
3337 3336 wq->saved_max_active = max_active;
3338 3337  
3339   - for_each_cwq_cpu(cpu, wq) {
3340   - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3341   - struct worker_pool *pool = cwq->pool;
  3338 + for_each_pwq_cpu(cpu, wq) {
  3339 + struct pool_workqueue *pwq = get_pwq(cpu, wq);
  3340 + struct worker_pool *pool = pwq->pool;
3342 3341  
3343 3342 spin_lock_irq(&pool->lock);
3344 3343  
3345 3344 if (!(wq->flags & WQ_FREEZABLE) ||
3346 3345 !(pool->flags & POOL_FREEZING))
3347   - cwq_set_max_active(cwq, max_active);
  3346 + pwq_set_max_active(pwq, max_active);
3348 3347  
3349 3348 spin_unlock_irq(&pool->lock);
3350 3349 }
3351 3350  
... ... @@ -3367,9 +3366,9 @@
3367 3366 */
3368 3367 bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3369 3368 {
3370   - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
  3369 + struct pool_workqueue *pwq = get_pwq(cpu, wq);
3371 3370  
3372   - return !list_empty(&cwq->delayed_works);
  3371 + return !list_empty(&pwq->delayed_works);
3373 3372 }
3374 3373 EXPORT_SYMBOL_GPL(workqueue_congested);
3375 3374  
... ... @@ -3408,7 +3407,7 @@
3408 3407 * CPU hotplug.
3409 3408 *
3410 3409 * There are two challenges in supporting CPU hotplug. Firstly, there
3411   - * are a lot of assumptions on strong associations among work, cwq and
  3410 + * are a lot of assumptions on strong associations among work, pwq and
3412 3411 * pool which make migrating pending and scheduled works very
3413 3412 * difficult to implement without impacting hot paths. Secondly,
3414 3413 * worker pools serve mix of short, long and very long running works making
3415 3414  
3416 3415  
... ... @@ -3612,11 +3611,11 @@
3612 3611 pool->flags |= POOL_FREEZING;
3613 3612  
3614 3613 list_for_each_entry(wq, &workqueues, list) {
3615   - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
  3614 + struct pool_workqueue *pwq = get_pwq(cpu, wq);
3616 3615  
3617   - if (cwq && cwq->pool == pool &&
  3616 + if (pwq && pwq->pool == pool &&
3618 3617 (wq->flags & WQ_FREEZABLE))
3619   - cwq->max_active = 0;
  3618 + pwq->max_active = 0;
3620 3619 }
3621 3620  
3622 3621 spin_unlock_irq(&pool->lock);
3623 3622  
3624 3623  
... ... @@ -3655,13 +3654,13 @@
3655 3654 * to peek without lock.
3656 3655 */
3657 3656 list_for_each_entry(wq, &workqueues, list) {
3658   - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
  3657 + struct pool_workqueue *pwq = get_pwq(cpu, wq);
3659 3658  
3660   - if (!cwq || !(wq->flags & WQ_FREEZABLE))
  3659 + if (!pwq || !(wq->flags & WQ_FREEZABLE))
3661 3660 continue;
3662 3661  
3663   - BUG_ON(cwq->nr_active < 0);
3664   - if (cwq->nr_active) {
  3662 + BUG_ON(pwq->nr_active < 0);
  3663 + if (pwq->nr_active) {
3665 3664 busy = true;
3666 3665 goto out_unlock;
3667 3666 }
3668 3667  
3669 3668  
... ... @@ -3701,14 +3700,14 @@
3701 3700 pool->flags &= ~POOL_FREEZING;
3702 3701  
3703 3702 list_for_each_entry(wq, &workqueues, list) {
3704   - struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
  3703 + struct pool_workqueue *pwq = get_pwq(cpu, wq);
3705 3704  
3706   - if (!cwq || cwq->pool != pool ||
  3705 + if (!pwq || pwq->pool != pool ||
3707 3706 !(wq->flags & WQ_FREEZABLE))
3708 3707 continue;
3709 3708  
3710 3709 /* restore max_active and repopulate worklist */
3711   - cwq_set_max_active(cwq, wq->saved_max_active);
  3710 + pwq_set_max_active(pwq, wq->saved_max_active);
3712 3711 }
3713 3712  
3714 3713 wake_up_worker(pool);
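
For the unbound case, alloc_pwqs() above over-allocates, aligns the pwq inside the buffer with PTR_ALIGN(), and stores the original allocation right after the aligned object so free_pwqs() can hand it back to kfree(). Here is a standalone sketch of the same trick in userspace C (illustrative only; names and the 256-byte alignment are simplified assumptions):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PWQ_ALIGN	256			/* stand-in for 1 << WORK_STRUCT_FLAG_BITS */

struct pwq { long dummy[4]; };			/* stand-in for struct pool_workqueue */

static struct pwq *alloc_single_pwq(void)
{
	/* room for the pwq itself, the alignment slack and the back-pointer */
	void *ptr = calloc(1, sizeof(struct pwq) + PWQ_ALIGN + sizeof(void *));
	struct pwq *pwq;

	if (!ptr)
		return NULL;

	/* round up to the next PWQ_ALIGN boundary, like PTR_ALIGN() */
	pwq = (struct pwq *)(((uintptr_t)ptr + PWQ_ALIGN - 1) & ~(uintptr_t)(PWQ_ALIGN - 1));

	/* the pointer to free is stored right after the pwq */
	*(void **)(pwq + 1) = ptr;
	return pwq;
}

static void free_single_pwq(struct pwq *pwq)
{
	free(*(void **)(pwq + 1));
}

int main(void)
{
	struct pwq *pwq = alloc_single_pwq();

	assert(pwq && ((uintptr_t)pwq & (PWQ_ALIGN - 1)) == 0);
	free_single_pwq(pwq);
	return 0;
}
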
kernel/workqueue_internal.h
... ... @@ -28,7 +28,7 @@
28 28  
29 29 struct work_struct *current_work; /* L: work being processed */
30 30 work_func_t current_func; /* L: current_work's fn */
31   - struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
  31 + struct pool_workqueue *current_pwq; /* L: current_work's pwq */
32 32 struct list_head scheduled; /* L: scheduled works */
33 33 struct task_struct *task; /* I: worker task */
34 34 struct worker_pool *pool; /* I: the associated pool */