Blame view

kernel/workqueue.c 28.8 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
2
3
4
5
6
7
8
9
10
11
/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */
  
  #include <linux/module.h>
  #include <linux/kernel.h>
  #include <linux/sched.h>
  #include <linux/init.h>
  #include <linux/signal.h>
  #include <linux/completion.h>
  #include <linux/workqueue.h>
  #include <linux/slab.h>
  #include <linux/cpu.h>
  #include <linux/notifier.h>
  #include <linux/kthread.h>
1fa44ecad   James Bottomley   [SCSI] add execut...
30
  #include <linux/hardirq.h>
469340236   Christoph Lameter   [PATCH] mm: keven...
31
  #include <linux/mempolicy.h>
341a59585   Rafael J. Wysocki   [PATCH] Support f...
32
  #include <linux/freezer.h>
d5abe6691   Peter Zijlstra   [PATCH] debug: wo...
33
34
  #include <linux/kallsyms.h>
  #include <linux/debug_locks.h>
4e6045f13   Johannes Berg   workqueue: debug ...
35
  #include <linux/lockdep.h>
fb39125fd   Zhaolei   ftrace, workqueue...
36
37
  #define CREATE_TRACE_POINTS
  #include <trace/events/workqueue.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
38
39
  
  /*
f756d5e25   Nathan Lynch   [PATCH] fix workq...
40
41
   * The per-CPU workqueue (if single thread, we always use the first
   * possible cpu).
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
42
43
44
45
   */
  struct cpu_workqueue_struct {
  
  	spinlock_t lock;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
46
47
  	struct list_head worklist;
  	wait_queue_head_t more_work;
3af24433e   Oleg Nesterov   workqueue: don't ...
48
  	struct work_struct *current_work;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
49
50
  
  	struct workqueue_struct *wq;
36c8b5868   Ingo Molnar   [PATCH] sched: cl...
51
  	struct task_struct *thread;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
52
53
54
55
56
57
58
  } ____cacheline_aligned;
  
  /*
   * The externally visible workqueue abstraction is an array of
   * per-CPU workqueues:
   */
  struct workqueue_struct {
89ada6791   Christoph Lameter   [PATCH] Use alloc...
59
  	struct cpu_workqueue_struct *cpu_wq;
cce1a1656   Oleg Nesterov   workqueue: introd...
60
  	struct list_head list;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
61
  	const char *name;
cce1a1656   Oleg Nesterov   workqueue: introd...
62
  	int singlethread;
319c2a986   Oleg Nesterov   workqueue: fix fr...
63
  	int freezeable;		/* Freeze threads during suspend */
0d557dc97   Heiko Carstens   workqueue: introd...
64
  	int rt;
4e6045f13   Johannes Berg   workqueue: debug ...
65
66
67
  #ifdef CONFIG_LOCKDEP
  	struct lockdep_map lockdep_map;
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
68
  };
dc186ad74   Thomas Gleixner   workqueue: Add de...
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
  #ifdef CONFIG_DEBUG_OBJECTS_WORK
  
  static struct debug_obj_descr work_debug_descr;
  
  /*
   * fixup_init is called when:
   * - an active object is initialized
   */
  static int work_fixup_init(void *addr, enum debug_obj_state state)
  {
  	struct work_struct *work = addr;
  
  	switch (state) {
  	case ODEBUG_STATE_ACTIVE:
  		cancel_work_sync(work);
  		debug_object_init(work, &work_debug_descr);
  		return 1;
  	default:
  		return 0;
  	}
  }
  
  /*
   * fixup_activate is called when:
   * - an active object is activated
   * - an unknown object is activated (might be a statically initialized object)
   */
  static int work_fixup_activate(void *addr, enum debug_obj_state state)
  {
  	struct work_struct *work = addr;
  
  	switch (state) {
  
  	case ODEBUG_STATE_NOTAVAILABLE:
  		/*
  		 * This is not really a fixup. The work struct was
  		 * statically initialized. We just make sure that it
  		 * is tracked in the object tracker.
  		 */
  		if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
  			debug_object_init(work, &work_debug_descr);
  			debug_object_activate(work, &work_debug_descr);
  			return 0;
  		}
  		WARN_ON_ONCE(1);
  		return 0;
  
  	case ODEBUG_STATE_ACTIVE:
  		WARN_ON(1);
  
  	default:
  		return 0;
  	}
  }
  
  /*
   * fixup_free is called when:
   * - an active object is freed
   */
  static int work_fixup_free(void *addr, enum debug_obj_state state)
  {
  	struct work_struct *work = addr;
  
  	switch (state) {
  	case ODEBUG_STATE_ACTIVE:
  		cancel_work_sync(work);
  		debug_object_free(work, &work_debug_descr);
  		return 1;
  	default:
  		return 0;
  	}
  }
  
/* Debugobjects descriptor tying work_structs to the fixup handlers above. */
static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};
  
/* Mark @work as active in the debugobjects tracker. */
static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}
  
/* Mark @work as no longer active in the debugobjects tracker. */
static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}
  
/*
 * Register a work_struct with the debugobjects tracker; @onstack selects
 * the on-stack variant so the tracker knows the storage lifetime.
 * NOTE(review): presumably invoked from the INIT_WORK*() macros — confirm
 * against include/linux/workqueue.h.
 */
void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);
  
/* Drop the tracker entry for an on-stack work item before it goes out of scope. */
void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);
  
#else
/* Without CONFIG_DEBUG_OBJECTS_WORK the debug hooks compile away to nothing. */
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
95402b382   Gautham R Shenoy   cpu-hotplug: repl...
178
179
  /* Serializes the accesses to the list of workqueues. */
  static DEFINE_SPINLOCK(workqueue_lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
180
  static LIST_HEAD(workqueues);
3af24433e   Oleg Nesterov   workqueue: don't ...
181
  static int singlethread_cpu __read_mostly;
e7577c50f   Rusty Russell   cpumask: convert ...
182
  static const struct cpumask *cpu_singlethread_map __read_mostly;
14441960e   Oleg Nesterov   simplify cleanup_...
183
184
185
186
187
188
189
  /*
   * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
   * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
   * which comes in between can't use for_each_online_cpu(). We could
   * use cpu_possible_map, the cpumask below is more a documentation
   * than optimization.
   */
e7577c50f   Rusty Russell   cpumask: convert ...
190
  static cpumask_var_t cpu_populated_map __read_mostly;
f756d5e25   Nathan Lynch   [PATCH] fix workq...
191

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
192
  /* If it's single threaded, it isn't in the list of workqueues. */
6cc88bc45   David Howells   CRED: Rename is_s...
193
  static inline int is_wq_single_threaded(struct workqueue_struct *wq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
194
  {
cce1a1656   Oleg Nesterov   workqueue: introd...
195
  	return wq->singlethread;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
196
  }
e7577c50f   Rusty Russell   cpumask: convert ...
197
  static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
b1f4ec172   Oleg Nesterov   workqueue: introd...
198
  {
6cc88bc45   David Howells   CRED: Rename is_s...
199
  	return is_wq_single_threaded(wq)
e7577c50f   Rusty Russell   cpumask: convert ...
200
  		? cpu_singlethread_map : cpu_populated_map;
b1f4ec172   Oleg Nesterov   workqueue: introd...
201
  }
a848e3b67   Oleg Nesterov   workqueue: introd...
202
203
204
  static
  struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu)
  {
6cc88bc45   David Howells   CRED: Rename is_s...
205
  	if (unlikely(is_wq_single_threaded(wq)))
a848e3b67   Oleg Nesterov   workqueue: introd...
206
207
208
  		cpu = singlethread_cpu;
  	return per_cpu_ptr(wq->cpu_wq, cpu);
  }
4594bf159   David Howells   [PATCH] WorkStruc...
209
210
211
212
  /*
   * Set the workqueue on which a work item is to be run
   * - Must *only* be called if the pending flag is set
   */
ed7c0feed   Oleg Nesterov   make queue_delaye...
213
214
  static inline void set_wq_data(struct work_struct *work,
  				struct cpu_workqueue_struct *cwq)
365970a1e   David Howells   WorkStruct: Merge...
215
  {
4594bf159   David Howells   [PATCH] WorkStruc...
216
217
218
  	unsigned long new;
  
  	BUG_ON(!work_pending(work));
365970a1e   David Howells   WorkStruct: Merge...
219

ed7c0feed   Oleg Nesterov   make queue_delaye...
220
  	new = (unsigned long) cwq | (1UL << WORK_STRUCT_PENDING);
a08727bae   Linus Torvalds   Make workqueue bi...
221
222
  	new |= WORK_STRUCT_FLAG_MASK & *work_data_bits(work);
  	atomic_long_set(&work->data, new);
365970a1e   David Howells   WorkStruct: Merge...
223
  }
ed7c0feed   Oleg Nesterov   make queue_delaye...
224
225
  static inline
  struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
365970a1e   David Howells   WorkStruct: Merge...
226
  {
a08727bae   Linus Torvalds   Make workqueue bi...
227
  	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
365970a1e   David Howells   WorkStruct: Merge...
228
  }
b89deed32   Oleg Nesterov   implement flush_w...
229
  static void insert_work(struct cpu_workqueue_struct *cwq,
1a4d9b0aa   Oleg Nesterov   workqueues: inser...
230
  			struct work_struct *work, struct list_head *head)
b89deed32   Oleg Nesterov   implement flush_w...
231
  {
e1d8aa9f1   Frederic Weisbecker   tracing: add a ne...
232
  	trace_workqueue_insertion(cwq->thread, work);
b89deed32   Oleg Nesterov   implement flush_w...
233
  	set_wq_data(work, cwq);
6e84d644b   Oleg Nesterov   make cancel_rearm...
234
235
236
237
238
  	/*
  	 * Ensure that we get the right work->data if we see the
  	 * result of list_add() below, see try_to_grab_pending().
  	 */
  	smp_wmb();
1a4d9b0aa   Oleg Nesterov   workqueues: inser...
239
  	list_add_tail(&work->entry, head);
b89deed32   Oleg Nesterov   implement flush_w...
240
241
  	wake_up(&cwq->more_work);
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
242
243
244
245
  static void __queue_work(struct cpu_workqueue_struct *cwq,
  			 struct work_struct *work)
  {
  	unsigned long flags;
dc186ad74   Thomas Gleixner   workqueue: Add de...
246
  	debug_work_activate(work);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
247
  	spin_lock_irqsave(&cwq->lock, flags);
1a4d9b0aa   Oleg Nesterov   workqueues: inser...
248
  	insert_work(cwq, work, &cwq->worklist);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
249
250
  	spin_unlock_irqrestore(&cwq->lock, flags);
  }
0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
251
252
253
254
255
  /**
   * queue_work - queue work on a workqueue
   * @wq: workqueue to use
   * @work: work to queue
   *
057647fc4   Alan Stern   [PATCH] workqueue...
256
   * Returns 0 if @work was already on a queue, non-zero otherwise.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
257
   *
00dfcaf74   Oleg Nesterov   workqueues: shrin...
258
259
   * We queue the work to the CPU on which it was submitted, but if the CPU dies
   * it can be processed by another CPU.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
260
   */
7ad5b3a50   Harvey Harrison   kernel: remove fa...
261
  int queue_work(struct workqueue_struct *wq, struct work_struct *work)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
262
  {
ef1ca236b   Oleg Nesterov   workqueues: queue...
263
264
265
266
  	int ret;
  
  	ret = queue_work_on(get_cpu(), wq, work);
  	put_cpu();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
267
268
  	return ret;
  }
ae90dd5db   Dave Jones   Move workqueue ex...
269
  EXPORT_SYMBOL_GPL(queue_work);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
270

c1a220e7a   Zhang Rui   pm: introduce new...
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
  /**
   * queue_work_on - queue work on specific cpu
   * @cpu: CPU number to execute work on
   * @wq: workqueue to use
   * @work: work to queue
   *
   * Returns 0 if @work was already on a queue, non-zero otherwise.
   *
   * We queue the work to a specific CPU, the caller must ensure it
   * can't go away.
   */
  int
  queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
  {
  	int ret = 0;
  
  	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
  		BUG_ON(!list_empty(&work->entry));
  		__queue_work(wq_per_cpu(wq, cpu), work);
  		ret = 1;
  	}
  	return ret;
  }
  EXPORT_SYMBOL_GPL(queue_work_on);
6d141c3ff   Li Zefan   workqueue: make d...
295
  static void delayed_work_timer_fn(unsigned long __data)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
296
  {
52bad64d9   David Howells   WorkStruct: Separ...
297
  	struct delayed_work *dwork = (struct delayed_work *)__data;
ed7c0feed   Oleg Nesterov   make queue_delaye...
298
299
  	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
  	struct workqueue_struct *wq = cwq->wq;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
300

a848e3b67   Oleg Nesterov   workqueue: introd...
301
  	__queue_work(wq_per_cpu(wq, smp_processor_id()), &dwork->work);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
302
  }
0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
303
304
305
  /**
   * queue_delayed_work - queue work on a workqueue after delay
   * @wq: workqueue to use
af9997e42   Randy Dunlap   [PATCH] fix kerne...
306
   * @dwork: delayable work to queue
0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
307
308
   * @delay: number of jiffies to wait before queueing
   *
057647fc4   Alan Stern   [PATCH] workqueue...
309
   * Returns 0 if @work was already on a queue, non-zero otherwise.
0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
310
   */
7ad5b3a50   Harvey Harrison   kernel: remove fa...
311
  int queue_delayed_work(struct workqueue_struct *wq,
52bad64d9   David Howells   WorkStruct: Separ...
312
  			struct delayed_work *dwork, unsigned long delay)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
313
  {
52bad64d9   David Howells   WorkStruct: Separ...
314
  	if (delay == 0)
63bc03625   Oleg Nesterov   unify queue_delay...
315
  		return queue_work(wq, &dwork->work);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
316

63bc03625   Oleg Nesterov   unify queue_delay...
317
  	return queue_delayed_work_on(-1, wq, dwork, delay);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
318
  }
ae90dd5db   Dave Jones   Move workqueue ex...
319
  EXPORT_SYMBOL_GPL(queue_delayed_work);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
320

0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
321
322
323
324
  /**
   * queue_delayed_work_on - queue work on specific CPU after delay
   * @cpu: CPU number to execute work on
   * @wq: workqueue to use
af9997e42   Randy Dunlap   [PATCH] fix kerne...
325
   * @dwork: work to queue
0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
326
327
   * @delay: number of jiffies to wait before queueing
   *
057647fc4   Alan Stern   [PATCH] workqueue...
328
   * Returns 0 if @work was already on a queue, non-zero otherwise.
0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
329
   */
7a6bc1cdd   Venkatesh Pallipadi   [CPUFREQ] Add que...
330
  int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
52bad64d9   David Howells   WorkStruct: Separ...
331
  			struct delayed_work *dwork, unsigned long delay)
7a6bc1cdd   Venkatesh Pallipadi   [CPUFREQ] Add que...
332
333
  {
  	int ret = 0;
52bad64d9   David Howells   WorkStruct: Separ...
334
335
  	struct timer_list *timer = &dwork->timer;
  	struct work_struct *work = &dwork->work;
7a6bc1cdd   Venkatesh Pallipadi   [CPUFREQ] Add que...
336

a08727bae   Linus Torvalds   Make workqueue bi...
337
  	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
7a6bc1cdd   Venkatesh Pallipadi   [CPUFREQ] Add que...
338
339
  		BUG_ON(timer_pending(timer));
  		BUG_ON(!list_empty(&work->entry));
8a3e77cc2   Andrew Liu   workqueue: remove...
340
  		timer_stats_timer_set_start_info(&dwork->timer);
ed7c0feed   Oleg Nesterov   make queue_delaye...
341
  		/* This stores cwq for the moment, for the timer_fn */
a848e3b67   Oleg Nesterov   workqueue: introd...
342
  		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
7a6bc1cdd   Venkatesh Pallipadi   [CPUFREQ] Add que...
343
  		timer->expires = jiffies + delay;
52bad64d9   David Howells   WorkStruct: Separ...
344
  		timer->data = (unsigned long)dwork;
7a6bc1cdd   Venkatesh Pallipadi   [CPUFREQ] Add que...
345
  		timer->function = delayed_work_timer_fn;
63bc03625   Oleg Nesterov   unify queue_delay...
346
347
348
349
350
  
  		if (unlikely(cpu >= 0))
  			add_timer_on(timer, cpu);
  		else
  			add_timer(timer);
7a6bc1cdd   Venkatesh Pallipadi   [CPUFREQ] Add que...
351
352
353
354
  		ret = 1;
  	}
  	return ret;
  }
ae90dd5db   Dave Jones   Move workqueue ex...
355
  EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
356

858119e15   Arjan van de Ven   [PATCH] Unlinline...
357
  static void run_workqueue(struct cpu_workqueue_struct *cwq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
358
  {
f293ea920   Oleg Nesterov   workqueue: don't ...
359
  	spin_lock_irq(&cwq->lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
360
361
362
  	while (!list_empty(&cwq->worklist)) {
  		struct work_struct *work = list_entry(cwq->worklist.next,
  						struct work_struct, entry);
6bb49e596   David Howells   WorkStruct: Typed...
363
  		work_func_t f = work->func;
4e6045f13   Johannes Berg   workqueue: debug ...
364
365
366
367
368
369
370
371
372
373
374
  #ifdef CONFIG_LOCKDEP
  		/*
  		 * It is permissible to free the struct work_struct
  		 * from inside the function that is called from it,
  		 * this we need to take into account for lockdep too.
  		 * To avoid bogus "held lock freed" warnings as well
  		 * as problems when looking into work->lockdep_map,
  		 * make a copy and use that here.
  		 */
  		struct lockdep_map lockdep_map = work->lockdep_map;
  #endif
e1d8aa9f1   Frederic Weisbecker   tracing: add a ne...
375
  		trace_workqueue_execution(cwq->thread, work);
dc186ad74   Thomas Gleixner   workqueue: Add de...
376
  		debug_work_deactivate(work);
b89deed32   Oleg Nesterov   implement flush_w...
377
  		cwq->current_work = work;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
378
  		list_del_init(cwq->worklist.next);
f293ea920   Oleg Nesterov   workqueue: don't ...
379
  		spin_unlock_irq(&cwq->lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
380

365970a1e   David Howells   WorkStruct: Merge...
381
  		BUG_ON(get_wq_data(work) != cwq);
23b2e5991   Oleg Nesterov   workqueue: kill N...
382
  		work_clear_pending(work);
3295f0ef9   Ingo Molnar   lockdep: rename m...
383
384
  		lock_map_acquire(&cwq->wq->lockdep_map);
  		lock_map_acquire(&lockdep_map);
65f27f384   David Howells   WorkStruct: Pass ...
385
  		f(work);
3295f0ef9   Ingo Molnar   lockdep: rename m...
386
387
  		lock_map_release(&lockdep_map);
  		lock_map_release(&cwq->wq->lockdep_map);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
388

d5abe6691   Peter Zijlstra   [PATCH] debug: wo...
389
390
391
392
393
  		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
  			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
  					"%s/0x%08x/%d
  ",
  					current->comm, preempt_count(),
ba25f9dcc   Pavel Emelyanov   Use helpers to ob...
394
  				       	task_pid_nr(current));
d5abe6691   Peter Zijlstra   [PATCH] debug: wo...
395
396
397
398
399
400
  			printk(KERN_ERR "    last function: ");
  			print_symbol("%s
  ", (unsigned long)f);
  			debug_show_held_locks(current);
  			dump_stack();
  		}
f293ea920   Oleg Nesterov   workqueue: don't ...
401
  		spin_lock_irq(&cwq->lock);
b89deed32   Oleg Nesterov   implement flush_w...
402
  		cwq->current_work = NULL;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
403
  	}
f293ea920   Oleg Nesterov   workqueue: don't ...
404
  	spin_unlock_irq(&cwq->lock);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
405
406
407
408
409
  }
  
  static int worker_thread(void *__cwq)
  {
  	struct cpu_workqueue_struct *cwq = __cwq;
3af24433e   Oleg Nesterov   workqueue: don't ...
410
  	DEFINE_WAIT(wait);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
411

831441862   Rafael J. Wysocki   Freezer: make ker...
412
413
  	if (cwq->wq->freezeable)
  		set_freezable();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
414

3af24433e   Oleg Nesterov   workqueue: don't ...
415
  	for (;;) {
3af24433e   Oleg Nesterov   workqueue: don't ...
416
  		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
14441960e   Oleg Nesterov   simplify cleanup_...
417
418
419
  		if (!freezing(current) &&
  		    !kthread_should_stop() &&
  		    list_empty(&cwq->worklist))
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
420
  			schedule();
3af24433e   Oleg Nesterov   workqueue: don't ...
421
  		finish_wait(&cwq->more_work, &wait);
85f4186af   Oleg Nesterov   worker_thread: fi...
422
  		try_to_freeze();
14441960e   Oleg Nesterov   simplify cleanup_...
423
  		if (kthread_should_stop())
3af24433e   Oleg Nesterov   workqueue: don't ...
424
  			break;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
425

3af24433e   Oleg Nesterov   workqueue: don't ...
426
  		run_workqueue(cwq);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
427
  	}
3af24433e   Oleg Nesterov   workqueue: don't ...
428

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
429
430
  	return 0;
  }
fc2e4d704   Oleg Nesterov   reimplement flush...
431
432
433
434
435
436
437
438
439
440
/* A dummy work item plus the completion it fires when it gets executed. */
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};
  
/* Barrier work callback: wake whoever is waiting on the barrier. */
static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}
83c22520c   Oleg Nesterov   flush_cpu_workque...
441
  static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
1a4d9b0aa   Oleg Nesterov   workqueues: inser...
442
  			struct wq_barrier *barr, struct list_head *head)
fc2e4d704   Oleg Nesterov   reimplement flush...
443
  {
dc186ad74   Thomas Gleixner   workqueue: Add de...
444
445
446
447
448
449
450
  	/*
  	 * debugobject calls are safe here even with cwq->lock locked
  	 * as we know for sure that this will not trigger any of the
  	 * checks and call back into the fixup functions where we
  	 * might deadlock.
  	 */
  	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
fc2e4d704   Oleg Nesterov   reimplement flush...
451
452
453
  	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
  
  	init_completion(&barr->done);
83c22520c   Oleg Nesterov   flush_cpu_workque...
454

dc186ad74   Thomas Gleixner   workqueue: Add de...
455
  	debug_work_activate(&barr->work);
1a4d9b0aa   Oleg Nesterov   workqueues: inser...
456
  	insert_work(cwq, &barr->work, head);
fc2e4d704   Oleg Nesterov   reimplement flush...
457
  }
14441960e   Oleg Nesterov   simplify cleanup_...
458
  static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
459
  {
2355b70fd   Lai Jiangshan   workqueue: avoid ...
460
461
  	int active = 0;
  	struct wq_barrier barr;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
462

2355b70fd   Lai Jiangshan   workqueue: avoid ...
463
  	WARN_ON(cwq->thread == current);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
464

2355b70fd   Lai Jiangshan   workqueue: avoid ...
465
466
467
468
  	spin_lock_irq(&cwq->lock);
  	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
  		insert_wq_barrier(cwq, &barr, &cwq->worklist);
  		active = 1;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
469
  	}
2355b70fd   Lai Jiangshan   workqueue: avoid ...
470
  	spin_unlock_irq(&cwq->lock);
dc186ad74   Thomas Gleixner   workqueue: Add de...
471
  	if (active) {
2355b70fd   Lai Jiangshan   workqueue: avoid ...
472
  		wait_for_completion(&barr.done);
dc186ad74   Thomas Gleixner   workqueue: Add de...
473
474
  		destroy_work_on_stack(&barr.work);
  	}
14441960e   Oleg Nesterov   simplify cleanup_...
475
476
  
  	return active;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
477
  }
0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
478
  /**
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
479
   * flush_workqueue - ensure that any scheduled work has run to completion.
0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
480
   * @wq: workqueue to flush
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
481
482
483
484
   *
   * Forces execution of the workqueue and blocks until its completion.
   * This is typically used in driver shutdown handlers.
   *
fc2e4d704   Oleg Nesterov   reimplement flush...
485
486
   * We sleep until all works which were queued on entry have been handled,
   * but we are not livelocked by new incoming ones.
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
487
488
489
490
   *
   * This function used to run the workqueues itself.  Now we just wait for the
   * helper threads to do it.
   */
7ad5b3a50   Harvey Harrison   kernel: remove fa...
491
  void flush_workqueue(struct workqueue_struct *wq)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
492
  {
e7577c50f   Rusty Russell   cpumask: convert ...
493
  	const struct cpumask *cpu_map = wq_cpu_map(wq);
cce1a1656   Oleg Nesterov   workqueue: introd...
494
  	int cpu;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
495

b1f4ec172   Oleg Nesterov   workqueue: introd...
496
  	might_sleep();
3295f0ef9   Ingo Molnar   lockdep: rename m...
497
498
  	lock_map_acquire(&wq->lockdep_map);
  	lock_map_release(&wq->lockdep_map);
aa85ea5b8   Rusty Russell   cpumask: use new ...
499
  	for_each_cpu(cpu, cpu_map)
b1f4ec172   Oleg Nesterov   workqueue: introd...
500
  		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
501
  }
ae90dd5db   Dave Jones   Move workqueue ex...
502
  EXPORT_SYMBOL_GPL(flush_workqueue);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
503

db7008972   Oleg Nesterov   workqueues: imple...
504
505
506
507
  /**
   * flush_work - block until a work_struct's callback has terminated
   * @work: the work which is to be flushed
   *
a67da70dc   Oleg Nesterov   workqueues: lockd...
508
509
   * Returns false if @work has already terminated.
   *
db7008972   Oleg Nesterov   workqueues: imple...
510
511
512
513
514
515
516
517
518
519
520
521
522
523
   * It is expected that, prior to calling flush_work(), the caller has
   * arranged for the work to not be requeued, otherwise it doesn't make
   * sense to use this function.
   */
  int flush_work(struct work_struct *work)
  {
  	struct cpu_workqueue_struct *cwq;
  	struct list_head *prev;
  	struct wq_barrier barr;
  
  	might_sleep();
  	cwq = get_wq_data(work);
  	if (!cwq)
  		return 0;
3295f0ef9   Ingo Molnar   lockdep: rename m...
524
525
  	lock_map_acquire(&cwq->wq->lockdep_map);
  	lock_map_release(&cwq->wq->lockdep_map);
a67da70dc   Oleg Nesterov   workqueues: lockd...
526

db7008972   Oleg Nesterov   workqueues: imple...
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
  	prev = NULL;
  	spin_lock_irq(&cwq->lock);
  	if (!list_empty(&work->entry)) {
  		/*
  		 * See the comment near try_to_grab_pending()->smp_rmb().
  		 * If it was re-queued under us we are not going to wait.
  		 */
  		smp_rmb();
  		if (unlikely(cwq != get_wq_data(work)))
  			goto out;
  		prev = &work->entry;
  	} else {
  		if (cwq->current_work != work)
  			goto out;
  		prev = &cwq->worklist;
  	}
  	insert_wq_barrier(cwq, &barr, prev->next);
  out:
  	spin_unlock_irq(&cwq->lock);
  	if (!prev)
  		return 0;
  
  	wait_for_completion(&barr.done);
dc186ad74   Thomas Gleixner   workqueue: Add de...
550
  	destroy_work_on_stack(&barr.work);
db7008972   Oleg Nesterov   workqueues: imple...
551
552
553
  	return 1;
  }
  EXPORT_SYMBOL_GPL(flush_work);
6e84d644b   Oleg Nesterov   make cancel_rearm...
554
  /*
1f1f642e2   Oleg Nesterov   make cancel_xxx_w...
555
   * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
6e84d644b   Oleg Nesterov   make cancel_rearm...
556
557
558
559
560
   * so this work can't be re-armed in any way.
   */
  static int try_to_grab_pending(struct work_struct *work)
  {
  	struct cpu_workqueue_struct *cwq;
1f1f642e2   Oleg Nesterov   make cancel_xxx_w...
561
  	int ret = -1;
6e84d644b   Oleg Nesterov   make cancel_rearm...
562
563
  
  	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work)))
1f1f642e2   Oleg Nesterov   make cancel_xxx_w...
564
  		return 0;
6e84d644b   Oleg Nesterov   make cancel_rearm...
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
  
  	/*
  	 * The queueing is in progress, or it is already queued. Try to
  	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
  	 */
  
  	cwq = get_wq_data(work);
  	if (!cwq)
  		return ret;
  
  	spin_lock_irq(&cwq->lock);
  	if (!list_empty(&work->entry)) {
  		/*
  		 * This work is queued, but perhaps we locked the wrong cwq.
  		 * In that case we must see the new value after rmb(), see
  		 * insert_work()->wmb().
  		 */
  		smp_rmb();
  		if (cwq == get_wq_data(work)) {
dc186ad74   Thomas Gleixner   workqueue: Add de...
584
  			debug_work_deactivate(work);
6e84d644b   Oleg Nesterov   make cancel_rearm...
585
586
587
588
589
590
591
592
593
594
  			list_del_init(&work->entry);
  			ret = 1;
  		}
  	}
  	spin_unlock_irq(&cwq->lock);
  
  	return ret;
  }
  
  static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
b89deed32   Oleg Nesterov   implement flush_w...
595
596
597
598
599
600
601
  				struct work_struct *work)
  {
  	struct wq_barrier barr;
  	int running = 0;
  
  	spin_lock_irq(&cwq->lock);
  	if (unlikely(cwq->current_work == work)) {
1a4d9b0aa   Oleg Nesterov   workqueues: inser...
602
  		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
b89deed32   Oleg Nesterov   implement flush_w...
603
604
605
  		running = 1;
  	}
  	spin_unlock_irq(&cwq->lock);
dc186ad74   Thomas Gleixner   workqueue: Add de...
606
  	if (unlikely(running)) {
b89deed32   Oleg Nesterov   implement flush_w...
607
  		wait_for_completion(&barr.done);
dc186ad74   Thomas Gleixner   workqueue: Add de...
608
609
  		destroy_work_on_stack(&barr.work);
  	}
b89deed32   Oleg Nesterov   implement flush_w...
610
  }
/*
 * wait_on_work - wait until no CPU of the last-used workqueue is
 * executing @work.
 *
 * May sleep.  The acquire/release of work->lockdep_map lets lockdep
 * flag deadlocks where the caller holds a lock that @work's callback
 * also takes.  If the work was never queued (no cwq recorded) there is
 * nothing to wait for.
 */
static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	/* tell lockdep we "wait on" this work, catching flush deadlocks */
	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	/* the work may be running on any CPU of its workqueue; check all */
	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
/*
 * __cancel_work_timer - common implementation of cancel_work_sync() and
 * cancel_delayed_work_sync().
 * @work: the work to cancel
 * @timer: the delayed work's timer, or NULL for plain work
 *
 * First try to deactivate a still-pending timer, else try to steal the
 * pending work off its queue.  A negative try_to_grab_pending() return
 * presumably means we raced with a concurrent re-queueing of @work --
 * TODO confirm against try_to_grab_pending() above -- so retry until we
 * clearly own it.  wait_on_work() runs on every pass so an already
 * executing callback has terminated before we return.
 *
 * Returns nonzero if @work was pending (timer or queue), 0 otherwise.
 */
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	work_clear_pending(work);
	return ret;
}
6e84d644b   Oleg Nesterov   make cancel_rearm...
646
647
648
649
  /**
   * cancel_work_sync - block until a work_struct's callback has terminated
   * @work: the work which is to be flushed
   *
1f1f642e2   Oleg Nesterov   make cancel_xxx_w...
650
651
   * Returns true if @work was pending.
   *
6e84d644b   Oleg Nesterov   make cancel_rearm...
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
   * cancel_work_sync() will cancel the work if it is queued. If the work's
   * callback appears to be running, cancel_work_sync() will block until it
   * has completed.
   *
   * It is possible to use this function if the work re-queues itself. It can
   * cancel the work even if it migrates to another workqueue, however in that
   * case it only guarantees that work->func() has completed on the last queued
   * workqueue.
   *
   * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
   * pending, otherwise it goes into a busy-wait loop until the timer expires.
   *
   * The caller must ensure that workqueue_struct on which this work was last
   * queued can't be destroyed before this function returns.
   */
1f1f642e2   Oleg Nesterov   make cancel_xxx_w...
667
  int cancel_work_sync(struct work_struct *work)
6e84d644b   Oleg Nesterov   make cancel_rearm...
668
  {
1f1f642e2   Oleg Nesterov   make cancel_xxx_w...
669
  	return __cancel_work_timer(work, NULL);
b89deed32   Oleg Nesterov   implement flush_w...
670
  }
28e53bddf   Oleg Nesterov   unify flush_work/...
671
  EXPORT_SYMBOL_GPL(cancel_work_sync);
b89deed32   Oleg Nesterov   implement flush_w...
672

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	/* kill the timer first, then the queued/running work */
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
687

/* The kernel-global ("events") workqueue backing schedule_work() et al. */
static struct workqueue_struct *keventd_wq __read_mostly;
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
689

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	/* thin wrapper: queue on the shared keventd workqueue */
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
706

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
719
720
  /**
   * schedule_delayed_work - put work task in global workqueue after delay
52bad64d9   David Howells   WorkStruct: Separ...
721
722
   * @dwork: job to be done
   * @delay: number of jiffies to wait or 0 for immediate execution
0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
723
724
725
726
   *
   * After waiting for a given time this puts a job in the kernel-global
   * workqueue.
   */
7ad5b3a50   Harvey Harrison   kernel: remove fa...
727
  int schedule_delayed_work(struct delayed_work *dwork,
82f67cd9f   Ingo Molnar   [PATCH] Add debug...
728
  					unsigned long delay)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
729
  {
52bad64d9   David Howells   WorkStruct: Separ...
730
  	return queue_delayed_work(keventd_wq, dwork, delay);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
731
  }
ae90dd5db   Dave Jones   Move workqueue ex...
732
  EXPORT_SYMBOL(schedule_delayed_work);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
733

0fcb78c22   Rolf Eike Beer   [PATCH] Add DocBo...
734
  /**
8c53e4631   Linus Torvalds   workqueue: add 'f...
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
   * flush_delayed_work - block until a dwork_struct's callback has terminated
   * @dwork: the delayed work which is to be flushed
   *
   * Any timeout is cancelled, and any pending work is run immediately.
   */
  void flush_delayed_work(struct delayed_work *dwork)
  {
  	if (del_timer_sync(&dwork->timer)) {
  		struct cpu_workqueue_struct *cwq;
  		cwq = wq_per_cpu(keventd_wq, get_cpu());
  		__queue_work(cwq, &dwork->work);
  		put_cpu();
  	}
  	flush_work(&dwork->work);
  }
  EXPORT_SYMBOL(flush_delayed_work);
  
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
767

/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;		/* CPU whose work we run inline, if any */
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	/* hold off hotplug so the online-cpu set is stable */
	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	/* run our own CPU's callback directly (see comment above) */
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();
	free_percpu(works);
	return 0;
}
/**
 * flush_scheduled_work - ensure that all work scheduled on the
 * kernel-global workqueue has run to completion.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
817
818
  
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	/* process context already? run synchronously */
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	/* in interrupt: defer to the kernel-global workqueue */
	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
/* Has the kernel-global (keventd) workqueue been created yet? */
int keventd_up(void)
{
	return keventd_wq != NULL;
}
  
/*
 * current_is_keventd - is the current task this CPU's keventd thread?
 *
 * Returns 1 if current is the keventd worker for the CPU we are running
 * on, 0 otherwise.  Must only be called once keventd_wq exists.
 */
int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;

}
/* Initialize @wq's per-cpu structure for @cpu and return it. */
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}
/*
 * Create (but do not start) the worker thread for @cwq on @cpu.
 * Single-threaded workqueues get a plain "name" thread; multi-threaded
 * ones get "name/cpu".  If wq->rt is set the thread is made SCHED_FIFO.
 * Returns 0 on success or the kthread_create() error.
 */
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	if (cwq->wq->rt)
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}
06ba38a9a   Oleg Nesterov   workqueues: shift...
898
899
900
901
902
903
904
905
906
907
  static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
  {
  	struct task_struct *p = cwq->thread;
  
  	if (p != NULL) {
  		if (cpu >= 0)
  			kthread_bind(p, cpu);
  		wake_up_process(p);
  	}
  }
/*
 * __create_workqueue_key - allocate and populate a workqueue.
 * @name: thread name (printf'd with the cpu number when multi-threaded)
 * @singlethread: create one thread (on singlethread_cpu) instead of per-cpu
 * @freezeable: threads should freeze during suspend
 * @rt: make worker threads SCHED_FIFO
 * @key/@lock_name: lockdep class for this workqueue's lockdep_map
 *
 * Returns the new workqueue, or NULL on allocation/thread-creation
 * failure (partially-constructed queues are torn down via
 * destroy_workqueue()).
 */
struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		/* one thread only; not registered on the global list */
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		/* serialize against CPU hotplug while we build the threads */
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
974

/*
 * Flush @cwq and stop its worker thread (if one exists).
 * The lockdep acquire/release warns about locks held by the caller
 * that the pending works might also take.
 */
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}
  
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	/* unlink under hotplug + workqueue locks so CPU callbacks skip us */
	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
  
/*
 * CPU hotplug callback: create/start/stop worker threads for every
 * registered (multi-threaded) workqueue as @cpu comes and goes, and keep
 * cpu_populated_map in sync.  On thread-creation failure the action is
 * rewritten to CPU_UP_CANCELED and we loop again to undo the partial
 * bring-up.
 */
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
						unsigned long action,
						void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int ret = NOTIFY_OK;

	/* treat frozen (suspend-time) variants like the normal events */
	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			if (!create_workqueue_thread(cwq, cpu))
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			/* undo the threads created so far */
			action = CPU_UP_CANCELED;
			ret = NOTIFY_BAD;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
			/* fall through: stop the thread we just woke */
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return ret;
}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1075

#ifdef CONFIG_SMP

/* Argument bundle handed to work_on_cpu()'s helper thread. */
struct work_for_cpu {
	struct completion completion;	/* signalled once ->fn has run */
	long (*fn)(void *);		/* function to invoke */
	void *arg;			/* its argument */
	long ret;			/* fn's return value */
};

/* Thread body for work_on_cpu(): run fn(arg), record result, signal done. */
static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}
  
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	/* NOTE(review): uses a dedicated kthread rather than a workqueue;
	 * presumably to avoid interacting with queued works -- confirm. */
	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
#endif /* CONFIG_SMP */
c12920d19   Oleg Nesterov   workqueue: make i...
1121
  void __init init_workqueues(void)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1122
  {
e7577c50f   Rusty Russell   cpumask: convert ...
1123
1124
1125
1126
1127
  	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);
  
  	cpumask_copy(cpu_populated_map, cpu_online_mask);
  	singlethread_cpu = cpumask_first(cpu_possible_mask);
  	cpu_singlethread_map = cpumask_of(singlethread_cpu);
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1128
1129
1130
1131
  	hotcpu_notifier(workqueue_cpu_callback, 0);
  	keventd_wq = create_workqueue("events");
  	BUG_ON(!keventd_wq);
  }