Commit dc186ad741c12ae9ecac8b89e317ef706fdaf8f6
Committed by Tejun Heo
1 parent a9366e61b0
Exists in master and in 4 other branches
workqueue: Add debugobjects support
Add debugobject support to track the lifetime of work_structs. While at it, remove the duplicate definition of INIT_DELAYED_WORK_ON_STACK().

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Tejun Heo <tj@kernel.org>
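For context, a minimal sketch of the usage pattern these new helpers support: a work item that lives on the stack is initialized with INIT_WORK_ON_STACK() and removed from debugobjects tracking with destroy_work_on_stack() before its stack frame goes away. Everything below except the workqueue/completion API is illustrative and not part of the commit.

#include <linux/workqueue.h>
#include <linux/completion.h>

struct my_stack_ctx {				/* illustrative context struct */
	struct work_struct	work;
	struct completion	done;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_stack_ctx *ctx = container_of(work, struct my_stack_ctx, work);

	/* ... do the actual work ... */
	complete(&ctx->done);
}

static void run_work_on_stack(void)
{
	struct my_stack_ctx ctx;

	/* Tells debugobjects that this work item lives on the stack. */
	INIT_WORK_ON_STACK(&ctx.work, my_work_fn);
	init_completion(&ctx.done);

	schedule_work(&ctx.work);
	wait_for_completion(&ctx.done);

	/* Stop tracking before the stack frame disappears. */
	destroy_work_on_stack(&ctx.work);
}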
Showing 4 changed files with 166 additions and 15 deletions (side-by-side diff)
arch/x86/kernel/smpboot.c
... | ... | @@ -687,7 +687,7 @@ |
687 | 687 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), |
688 | 688 | }; |
689 | 689 | |
690 | - INIT_WORK(&c_idle.work, do_fork_idle); | |
690 | + INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle); | |
691 | 691 | |
692 | 692 | alternatives_smp_switch(1); |
693 | 693 | |
... | ... | @@ -713,6 +713,7 @@ |
713 | 713 | |
714 | 714 | if (IS_ERR(c_idle.idle)) { |
715 | 715 | printk("failed fork for CPU %d\n", cpu); |
716 | + destroy_work_on_stack(&c_idle.work); | |
716 | 717 | return PTR_ERR(c_idle.idle); |
717 | 718 | } |
718 | 719 | |
... | ... | @@ -831,6 +832,7 @@ |
831 | 832 | smpboot_restore_warm_reset_vector(); |
832 | 833 | } |
833 | 834 | |
835 | + destroy_work_on_stack(&c_idle.work); | |
834 | 836 | return boot_error; |
835 | 837 | } |
836 | 838 |
include/linux/workqueue.h
... | ... | @@ -25,6 +25,7 @@ |
25 | 25 | struct work_struct { |
26 | 26 | atomic_long_t data; |
27 | 27 | #define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ |
28 | +#define WORK_STRUCT_STATIC 1 /* static initializer (debugobjects) */ | |
28 | 29 | #define WORK_STRUCT_FLAG_MASK (3UL) |
29 | 30 | #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) |
30 | 31 | struct list_head entry; |
... | ... | @@ -35,6 +36,7 @@ |
35 | 36 | }; |
36 | 37 | |
37 | 38 | #define WORK_DATA_INIT() ATOMIC_LONG_INIT(0) |
39 | +#define WORK_DATA_STATIC_INIT() ATOMIC_LONG_INIT(2) | |
38 | 40 | |
39 | 41 | struct delayed_work { |
40 | 42 | struct work_struct work; |
... | ... | @@ -63,7 +65,7 @@ |
63 | 65 | #endif |
64 | 66 | |
65 | 67 | #define __WORK_INITIALIZER(n, f) { \ |
66 | - .data = WORK_DATA_INIT(), \ | |
68 | + .data = WORK_DATA_STATIC_INIT(), \ | |
67 | 69 | .entry = { &(n).entry, &(n).entry }, \ |
68 | 70 | .func = (f), \ |
69 | 71 | __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ |
... | ... | @@ -91,6 +93,14 @@ |
91 | 93 | #define PREPARE_DELAYED_WORK(_work, _func) \ |
92 | 94 | PREPARE_WORK(&(_work)->work, (_func)) |
93 | 95 | |
96 | +#ifdef CONFIG_DEBUG_OBJECTS_WORK | |
97 | +extern void __init_work(struct work_struct *work, int onstack); | |
98 | +extern void destroy_work_on_stack(struct work_struct *work); | |
99 | +#else | |
100 | +static inline void __init_work(struct work_struct *work, int onstack) { } | |
101 | +static inline void destroy_work_on_stack(struct work_struct *work) { } | |
102 | +#endif | |
103 | + | |
94 | 104 | /* |
95 | 105 | * initialize all of a work item in one go |
96 | 106 | * |
97 | 107 | |
98 | 108 | |
99 | 109 | |
100 | 110 | |
... | ... | @@ -99,24 +109,36 @@ |
99 | 109 | * to generate better code. |
100 | 110 | */ |
101 | 111 | #ifdef CONFIG_LOCKDEP |
102 | -#define INIT_WORK(_work, _func) \ | |
112 | +#define __INIT_WORK(_work, _func, _onstack) \ | |
103 | 113 | do { \ |
104 | 114 | static struct lock_class_key __key; \ |
105 | 115 | \ |
116 | + __init_work((_work), _onstack); \ | |
106 | 117 | (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ |
107 | 118 | lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\ |
108 | 119 | INIT_LIST_HEAD(&(_work)->entry); \ |
109 | 120 | PREPARE_WORK((_work), (_func)); \ |
110 | 121 | } while (0) |
111 | 122 | #else |
112 | -#define INIT_WORK(_work, _func) \ | |
123 | +#define __INIT_WORK(_work, _func, _onstack) \ | |
113 | 124 | do { \ |
125 | + __init_work((_work), _onstack); \ | |
114 | 126 | (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \ |
115 | 127 | INIT_LIST_HEAD(&(_work)->entry); \ |
116 | 128 | PREPARE_WORK((_work), (_func)); \ |
117 | 129 | } while (0) |
118 | 130 | #endif |
119 | 131 | |
132 | +#define INIT_WORK(_work, _func) \ | |
133 | + do { \ | |
134 | + __INIT_WORK((_work), (_func), 0); \ | |
135 | + } while (0) | |
136 | + | |
137 | +#define INIT_WORK_ON_STACK(_work, _func) \ | |
138 | + do { \ | |
139 | + __INIT_WORK((_work), (_func), 1); \ | |
140 | + } while (0) | |
141 | + | |
120 | 142 | #define INIT_DELAYED_WORK(_work, _func) \ |
121 | 143 | do { \ |
122 | 144 | INIT_WORK(&(_work)->work, (_func)); \ |
123 | 145 | |
124 | 146 | |
... | ... | @@ -125,20 +147,14 @@ |
125 | 147 | |
126 | 148 | #define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ |
127 | 149 | do { \ |
128 | - INIT_WORK(&(_work)->work, (_func)); \ | |
150 | + INIT_WORK_ON_STACK(&(_work)->work, (_func)); \ | |
129 | 151 | init_timer_on_stack(&(_work)->timer); \ |
130 | 152 | } while (0) |
131 | 153 | |
132 | -#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ | |
154 | +#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func) \ | |
133 | 155 | do { \ |
134 | 156 | INIT_WORK(&(_work)->work, (_func)); \ |
135 | 157 | init_timer_deferrable(&(_work)->timer); \ |
136 | - } while (0) | |
137 | - | |
138 | -#define INIT_DELAYED_WORK_ON_STACK(_work, _func) \ | |
139 | - do { \ | |
140 | - INIT_WORK(&(_work)->work, (_func)); \ | |
141 | - init_timer_on_stack(&(_work)->timer); \ | |
142 | 158 | } while (0) |
143 | 159 | |
144 | 160 | /** |
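One detail worth spelling out from the header changes above: __WORK_INITIALIZER() now uses WORK_DATA_STATIC_INIT(), so a statically initialized work item starts with data == 2, i.e. bit 1 (WORK_STRUCT_STATIC) set. That is what lets the debugobjects fixup code in kernel/workqueue.c below tell such an object apart from uninitialized memory. A small illustrative check (the helper name is made up; DECLARE_WORK() and work_data_bits() are existing workqueue.h macros):

static void example_fn(struct work_struct *work) { }
static DECLARE_WORK(static_work, example_fn);	/* data == 2, WORK_STRUCT_STATIC set */

/* Illustrative helper: mirrors the test used by work_fixup_activate() below. */
static bool work_was_statically_initialized(struct work_struct *work)
{
	return test_bit(WORK_STRUCT_STATIC, work_data_bits(work));
}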
kernel/workqueue.c
... | ... | @@ -68,6 +68,116 @@ |
68 | 68 | #endif |
69 | 69 | }; |
70 | 70 | |
71 | +#ifdef CONFIG_DEBUG_OBJECTS_WORK | |
72 | + | |
73 | +static struct debug_obj_descr work_debug_descr; | |
74 | + | |
75 | +/* | |
76 | + * fixup_init is called when: | |
77 | + * - an active object is initialized | |
78 | + */ | |
79 | +static int work_fixup_init(void *addr, enum debug_obj_state state) | |
80 | +{ | |
81 | + struct work_struct *work = addr; | |
82 | + | |
83 | + switch (state) { | |
84 | + case ODEBUG_STATE_ACTIVE: | |
85 | + cancel_work_sync(work); | |
86 | + debug_object_init(work, &work_debug_descr); | |
87 | + return 1; | |
88 | + default: | |
89 | + return 0; | |
90 | + } | |
91 | +} | |
92 | + | |
93 | +/* | |
94 | + * fixup_activate is called when: | |
95 | + * - an active object is activated | |
96 | + * - an unknown object is activated (might be a statically initialized object) | |
97 | + */ | |
98 | +static int work_fixup_activate(void *addr, enum debug_obj_state state) | |
99 | +{ | |
100 | + struct work_struct *work = addr; | |
101 | + | |
102 | + switch (state) { | |
103 | + | |
104 | + case ODEBUG_STATE_NOTAVAILABLE: | |
105 | + /* | |
106 | + * This is not really a fixup. The work struct was | |
107 | + * statically initialized. We just make sure that it | |
108 | + * is tracked in the object tracker. | |
109 | + */ | |
110 | + if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) { | |
111 | + debug_object_init(work, &work_debug_descr); | |
112 | + debug_object_activate(work, &work_debug_descr); | |
113 | + return 0; | |
114 | + } | |
115 | + WARN_ON_ONCE(1); | |
116 | + return 0; | |
117 | + | |
118 | + case ODEBUG_STATE_ACTIVE: | |
119 | + WARN_ON(1); | |
120 | + | |
121 | + default: | |
122 | + return 0; | |
123 | + } | |
124 | +} | |
125 | + | |
126 | +/* | |
127 | + * fixup_free is called when: | |
128 | + * - an active object is freed | |
129 | + */ | |
130 | +static int work_fixup_free(void *addr, enum debug_obj_state state) | |
131 | +{ | |
132 | + struct work_struct *work = addr; | |
133 | + | |
134 | + switch (state) { | |
135 | + case ODEBUG_STATE_ACTIVE: | |
136 | + cancel_work_sync(work); | |
137 | + debug_object_free(work, &work_debug_descr); | |
138 | + return 1; | |
139 | + default: | |
140 | + return 0; | |
141 | + } | |
142 | +} | |
143 | + | |
144 | +static struct debug_obj_descr work_debug_descr = { | |
145 | + .name = "work_struct", | |
146 | + .fixup_init = work_fixup_init, | |
147 | + .fixup_activate = work_fixup_activate, | |
148 | + .fixup_free = work_fixup_free, | |
149 | +}; | |
150 | + | |
151 | +static inline void debug_work_activate(struct work_struct *work) | |
152 | +{ | |
153 | + debug_object_activate(work, &work_debug_descr); | |
154 | +} | |
155 | + | |
156 | +static inline void debug_work_deactivate(struct work_struct *work) | |
157 | +{ | |
158 | + debug_object_deactivate(work, &work_debug_descr); | |
159 | +} | |
160 | + | |
161 | +void __init_work(struct work_struct *work, int onstack) | |
162 | +{ | |
163 | + if (onstack) | |
164 | + debug_object_init_on_stack(work, &work_debug_descr); | |
165 | + else | |
166 | + debug_object_init(work, &work_debug_descr); | |
167 | +} | |
168 | +EXPORT_SYMBOL_GPL(__init_work); | |
169 | + | |
170 | +void destroy_work_on_stack(struct work_struct *work) | |
171 | +{ | |
172 | + debug_object_free(work, &work_debug_descr); | |
173 | +} | |
174 | +EXPORT_SYMBOL_GPL(destroy_work_on_stack); | |
175 | + | |
176 | +#else | |
177 | +static inline void debug_work_activate(struct work_struct *work) { } | |
178 | +static inline void debug_work_deactivate(struct work_struct *work) { } | |
179 | +#endif | |
180 | + | |
71 | 181 | /* Serializes the accesses to the list of workqueues. */ |
72 | 182 | static DEFINE_SPINLOCK(workqueue_lock); |
73 | 183 | static LIST_HEAD(workqueues); |
... | ... | @@ -145,6 +255,7 @@ |
145 | 255 | { |
146 | 256 | unsigned long flags; |
147 | 257 | |
258 | + debug_work_activate(work); | |
148 | 259 | spin_lock_irqsave(&cwq->lock, flags); |
149 | 260 | insert_work(cwq, work, &cwq->worklist); |
150 | 261 | spin_unlock_irqrestore(&cwq->lock, flags); |
... | ... | @@ -280,6 +391,7 @@ |
280 | 391 | struct lockdep_map lockdep_map = work->lockdep_map; |
281 | 392 | #endif |
282 | 393 | trace_workqueue_execution(cwq->thread, work); |
394 | + debug_work_deactivate(work); | |
283 | 395 | cwq->current_work = work; |
284 | 396 | list_del_init(cwq->worklist.next); |
285 | 397 | spin_unlock_irq(&cwq->lock); |
286 | 398 | |
... | ... | @@ -350,11 +462,18 @@ |
350 | 462 | static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, |
351 | 463 | struct wq_barrier *barr, struct list_head *head) |
352 | 464 | { |
353 | - INIT_WORK(&barr->work, wq_barrier_func); | |
465 | + /* | |
466 | + * debugobject calls are safe here even with cwq->lock locked | |
467 | + * as we know for sure that this will not trigger any of the | |
468 | + * checks and call back into the fixup functions where we | |
469 | + * might deadlock. | |
470 | + */ | |
471 | + INIT_WORK_ON_STACK(&barr->work, wq_barrier_func); | |
354 | 472 | __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); |
355 | 473 | |
356 | 474 | init_completion(&barr->done); |
357 | 475 | |
476 | + debug_work_activate(&barr->work); | |
358 | 477 | insert_work(cwq, &barr->work, head); |
359 | 478 | } |
360 | 479 | |
361 | 480 | |
... | ... | @@ -372,8 +491,10 @@ |
372 | 491 | } |
373 | 492 | spin_unlock_irq(&cwq->lock); |
374 | 493 | |
375 | - if (active) | |
494 | + if (active) { | |
376 | 495 | wait_for_completion(&barr.done); |
496 | + destroy_work_on_stack(&barr.work); | |
497 | + } | |
377 | 498 | |
378 | 499 | return active; |
379 | 500 | } |
... | ... | @@ -451,6 +572,7 @@ |
451 | 572 | return 0; |
452 | 573 | |
453 | 574 | wait_for_completion(&barr.done); |
575 | + destroy_work_on_stack(&barr.work); | |
454 | 576 | return 1; |
455 | 577 | } |
456 | 578 | EXPORT_SYMBOL_GPL(flush_work); |
... | ... | @@ -485,6 +607,7 @@ |
485 | 607 | */ |
486 | 608 | smp_rmb(); |
487 | 609 | if (cwq == get_wq_data(work)) { |
610 | + debug_work_deactivate(work); | |
488 | 611 | list_del_init(&work->entry); |
489 | 612 | ret = 1; |
490 | 613 | } |
491 | 614 | |
... | ... | @@ -507,8 +630,10 @@ |
507 | 630 | } |
508 | 631 | spin_unlock_irq(&cwq->lock); |
509 | 632 | |
510 | - if (unlikely(running)) | |
633 | + if (unlikely(running)) { | |
511 | 634 | wait_for_completion(&barr.done); |
635 | + destroy_work_on_stack(&barr.work); | |
636 | + } | |
512 | 637 | } |
513 | 638 | |
514 | 639 | static void wait_on_work(struct work_struct *work) |
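To make the fixup_free path above concrete, the class of bug it guards against looks roughly like the sketch below (assuming the allocator free hooks, CONFIG_DEBUG_OBJECTS_FREE, are also enabled so the kfree() is checked; all names other than the workqueue/slab API are made up). With work tracking enabled, freeing a still-active object reaches work_fixup_free(), which cancel_work_sync()s the item instead of leaving a dangling work_struct on the queue.

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {				/* made-up structure for this example */
	struct work_struct work;	/* assume INIT_WORK() was done at setup time */
};

static void my_dev_teardown(struct my_dev *dev)
{
	schedule_work(&dev->work);
	kfree(dev);			/* bug: the work item may still be pending here */
}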
lib/Kconfig.debug
... | ... | @@ -298,6 +298,14 @@ |
298 | 298 | timer routines to track the life time of timer objects and |
299 | 299 | validate the timer operations. |
300 | 300 | |
301 | +config DEBUG_OBJECTS_WORK | |
302 | + bool "Debug work objects" | |
303 | + depends on DEBUG_OBJECTS | |
304 | + help | |
305 | + If you say Y here, additional code will be inserted into the | |
306 | + work queue routines to track the life time of work objects and | |
307 | + validate the work operations. | |
308 | + | |
301 | 309 | config DEBUG_OBJECTS_ENABLE_DEFAULT |
302 | 310 | int "debug_objects bootup default value (0-1)" |
303 | 311 | range 0 1 |
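The new option only depends on the core debugobjects infrastructure, so a minimal kernel configuration fragment to exercise the checks (not part of this diff) would be:

CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_WORK=y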