kernel/workqueue.c

/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;
	struct task_struct *thread;
} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;
	struct list_head list;
	const char *name;
	int singlethread;
	int freezeable;		/* Freeze threads during suspend */
	int rt;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif

/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;

/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->singlethread;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static
struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

/*
 * Set the workqueue on which a work item is to be run
 * - Must *only* be called if the pending flag is set
 */
static inline void set_wq_data(struct work_struct *work,
				struct cpu_workqueue_struct *cwq)
{
	unsigned long new;

	BUG_ON(!work_pending(work));

	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
	atomic_long_set(&work->data, new);
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	trace_workqueue_insertion(cwq->thread, work);

	set_wq_data(work, cwq);
	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}

static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	debug_work_activate(work);
	spin_lock_irqsave(&cwq->lock, flags);
	insert_work(cwq, work, &cwq->worklist);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq_per_cpu(wq, cpu), work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
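
/*
 * Usage sketch for the queueing primitives above (illustrative only; the
 * workqueue "my_wq", the work item and my_work_func() are hypothetical
 * caller-side names, not part of this file).  A caller creates a workqueue,
 * initializes a work item with its handler, then queues it from any context;
 * the handler runs later in a workqueue thread, in process context:
 *
 *	static struct workqueue_struct *my_wq;
 *	static struct work_struct my_work;
 *
 *	static void my_work_func(struct work_struct *work)
 *	{
 *		printk(KERN_INFO "running in process context, may sleep\n");
 *	}
 *
 *	my_wq = create_workqueue("my_wq");
 *	INIT_WORK(&my_work, my_work_func);
 *	queue_work(my_wq, &my_work);
 */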

static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
	struct workqueue_struct *wq = cwq->wq;

	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
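
/*
 * Usage sketch for delayed work (illustrative only; "my_wq", "my_dwork" and
 * my_poll_func() are hypothetical caller-side names).  The handler receives
 * the embedded work_struct and can recover the delayed_work with
 * container_of(); the delay is expressed in jiffies:
 *
 *	static struct delayed_work my_dwork;
 *
 *	static void my_poll_func(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork =
 *			container_of(work, struct delayed_work, work);
 *		printk(KERN_INFO "periodic poll\n");
 *		queue_delayed_work(my_wq, dwork, msecs_to_jiffies(100));
 *	}
 *
 *	INIT_DELAYED_WORK(&my_dwork, my_poll_func);
 *	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
 */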

static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif
		trace_workqueue_execution(cwq->thread, work);
		debug_work_deactivate(work);
		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	spin_unlock_irq(&cwq->lock);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}

static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			struct wq_barrier *barr, struct list_head *head)
{
	/*
	 * debugobject calls are safe here even with cwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head);
}

static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return active;
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for the
 * helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
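
/*
 * Usage sketch (illustrative only; "my_wq", "my_work" and "my_state" are
 * hypothetical caller-side names).  A typical caller flushes its workqueue
 * before tearing down state that queued work items might still touch, for
 * example in a driver remove path; once flush_workqueue() returns, every
 * work item that was queued before the call has finished running:
 *
 *	queue_work(my_wq, &my_work);
 *	flush_workqueue(my_wq);
 *	kfree(my_state);
 */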

/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	prev = NULL;
	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto out;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto out;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);
out:
	spin_unlock_irq(&cwq->lock);
	if (!prev)
		return 0;

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
}
EXPORT_SYMBOL_GPL(flush_work);

/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */

	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}

/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that workqueue_struct on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
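
/*
 * Usage sketch (illustrative only; "priv" is a hypothetical driver structure
 * embedding a work_struct and a delayed_work).  A common shutdown pattern is
 * to cancel both kinds of work before freeing the object they operate on:
 *
 *	cancel_delayed_work_sync(&priv->poll_dwork);
 *	cancel_work_sync(&priv->reset_work);
 *	kfree(priv);
 *
 * Once the two calls return, the callbacks are neither running nor queued,
 * so freeing "priv" is safe, provided nothing re-queues the work afterwards.
 */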

static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
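
/*
 * Usage sketch (illustrative only; DECLARE_WORK() comes from
 * <linux/workqueue.h>, my_event_func() is a hypothetical handler).  Code that
 * does not need its own workqueue can push work onto the shared "events"
 * queue, e.g. from an interrupt handler; the handler then runs later in
 * keventd's process context:
 *
 *	static DECLARE_WORK(my_event_work, my_event_func);
 *
 *	schedule_work(&my_event_work);
 */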

/*
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
					unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		struct cpu_workqueue_struct *cwq;
		cwq = wq_per_cpu(keventd_wq, get_cpu());
		__queue_work(cwq, &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);

/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
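
/*
 * Usage sketch (illustrative only; "struct my_obj" and obj_release() are
 * hypothetical).  An object whose final teardown may be triggered from
 * interrupt context can embed a struct execute_work and route its release
 * through execute_in_process_context(); the release runs immediately when
 * the caller already has process context and is deferred otherwise:
 *
 *	static void obj_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(obj_release, &obj->ew);
 */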

int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}

static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}

struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
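
/*
 * Usage sketch (illustrative only): callers normally reach this function
 * through wrappers in <linux/workqueue.h> such as create_workqueue(),
 * create_singlethread_workqueue(), create_freezeable_workqueue() and
 * create_rt_workqueue(), which supply the singlethread/freezeable/rt flags
 * and the lockdep key, rather than calling it directly:
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = create_singlethread_workqueue("my_single_wq");
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	destroy_workqueue(wq);
 */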

static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}

/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);

static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return ret;
}

#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */
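
/*
 * Usage sketch for work_on_cpu() above, on SMP builds (illustrative only;
 * my_read_msr() and "msr_ctx" are hypothetical caller-side names).  The
 * function runs on the chosen CPU and its return value is passed back; the
 * caller keeps the CPU online for the duration, as the kernel-doc requires:
 *
 *	long err;
 *
 *	get_online_cpus();
 *	err = work_on_cpu(cpu, my_read_msr, &msr_ctx);
 *	put_online_cpus();
 */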

void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}