kernel/kthread.c
  /* Kernel thread helper functions.
   *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
   *
   * Creation is done via kthreadd, so that we get a clean environment
   * even if we're invoked from userspace (think modprobe, hotplug cpu,
   * etc.).
   */
  #include <linux/sched.h>
  #include <linux/kthread.h>
  #include <linux/completion.h>
  #include <linux/err.h>
  #include <linux/cpuset.h>
  #include <linux/unistd.h>
  #include <linux/file.h>
  #include <linux/export.h>
  #include <linux/mutex.h>
  #include <linux/slab.h>
  #include <linux/freezer.h>
  #include <linux/ptrace.h>
  #include <linux/uaccess.h>
  #include <trace/events/sched.h>

  static DEFINE_SPINLOCK(kthread_create_lock);
  static LIST_HEAD(kthread_create_list);
  struct task_struct *kthreadd_task;
  
  struct kthread_create_info
  {
  	/* Information passed to kthread() from kthreadd. */
  	int (*threadfn)(void *data);
  	void *data;
  	int node;

  	/* Result passed back to kthread_create() from kthreadd. */
  	struct task_struct *result;
  	struct completion *done;

  	struct list_head list;
  };
  struct kthread {
  	unsigned long flags;
  	unsigned int cpu;
  	void *data;
  	struct completion parked;
  	struct completion exited;
  };
  enum KTHREAD_BITS {
  	KTHREAD_IS_PER_CPU = 0,
  	KTHREAD_SHOULD_STOP,
  	KTHREAD_SHOULD_PARK,
  	KTHREAD_IS_PARKED,
  };
  #define __to_kthread(vfork)	\
  	container_of(vfork, struct kthread, exited)
  
  static inline struct kthread *to_kthread(struct task_struct *k)
  {
  	return __to_kthread(k->vfork_done);
  }
  
  static struct kthread *to_live_kthread(struct task_struct *k)
  {
  	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
  	if (likely(vfork) && try_get_task_stack(k))
  		return __to_kthread(vfork);
  	return NULL;
  }

  /**
   * kthread_should_stop - should this kthread return now?
   *
   * When someone calls kthread_stop() on your kthread, it will be woken
   * and this will return true.  You should then return, and your return
   * value will be passed through to kthread_stop().
   */
  bool kthread_should_stop(void)
  {
  	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
  }
  EXPORT_SYMBOL(kthread_should_stop);
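  
  /*
   * Illustrative sketch (editorial addition, not part of the kernel file):
   * the canonical stop-aware loop.  example_fn and do_unit_of_work() are
   * hypothetical names.  The value returned by the thread function is
   * handed back to kthread_stop().
   *
   *	static int example_fn(void *data)
   *	{
   *		while (!kthread_should_stop()) {
   *			do_unit_of_work(data);
   *			schedule_timeout_interruptible(HZ);
   *		}
   *		return 0;
   *	}
   */
  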
  /**
   * kthread_should_park - should this kthread park now?
   *
   * When someone calls kthread_park() on your kthread, it will be woken
   * and this will return true.  You should then do the necessary
   * cleanup and call kthread_parkme()
   *
   * Similar to kthread_should_stop(), but this keeps the thread alive
   * and in a park position. kthread_unpark() "restarts" the thread and
   * calls the thread function again.
   */
  bool kthread_should_park(void)
  {
  	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
  }
  EXPORT_SYMBOL_GPL(kthread_should_park);
  
  /**
   * kthread_freezable_should_stop - should this freezable kthread return now?
   * @was_frozen: optional out parameter, indicates whether %current was frozen
   *
   * kthread_should_stop() for freezable kthreads, which will enter
   * refrigerator if necessary.  This function is safe from kthread_stop() /
   * freezer deadlock and freezable kthreads should use this function instead
   * of calling try_to_freeze() directly.
   */
  bool kthread_freezable_should_stop(bool *was_frozen)
  {
  	bool frozen = false;
  
  	might_sleep();
  
  	if (unlikely(freezing(current)))
  		frozen = __refrigerator(true);
  
  	if (was_frozen)
  		*was_frozen = frozen;
  
  	return kthread_should_stop();
  }
  EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
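  
  /*
   * Illustrative sketch (editorial addition): a freezable kthread must call
   * set_freezable() first, because kthreads are PF_NOFREEZE by default, and
   * should poll kthread_freezable_should_stop() rather than try_to_freeze().
   * example_freezable_fn and rescan_devices() are hypothetical names.
   *
   *	static int example_freezable_fn(void *data)
   *	{
   *		bool was_frozen;
   *
   *		set_freezable();
   *		while (!kthread_freezable_should_stop(&was_frozen)) {
   *			if (was_frozen)
   *				rescan_devices(data);
   *			schedule_timeout_interruptible(HZ);
   *		}
   *		return 0;
   *	}
   */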
  
  /**
   * kthread_data - return data value specified on kthread creation
   * @task: kthread task in question
   *
   * Return the data value specified when kthread @task was created.
   * The caller is responsible for ensuring the validity of @task when
   * calling this function.
   */
  void *kthread_data(struct task_struct *task)
  {
  	return to_kthread(task)->data;
  }
  /**
   * kthread_probe_data - speculative version of kthread_data()
   * @task: possible kthread task in question
   *
   * @task could be a kthread task.  Return the data value specified when it
   * was created if accessible.  If @task isn't a kthread task or its data is
   * inaccessible for any reason, %NULL is returned.  This function requires
   * that @task itself is safe to dereference.
   */
  void *kthread_probe_data(struct task_struct *task)
  {
  	struct kthread *kthread = to_kthread(task);
  	void *data = NULL;
  
  	probe_kernel_read(&data, &kthread->data, sizeof(data));
  	return data;
  }
  static void __kthread_parkme(struct kthread *self)
  {
  	__set_current_state(TASK_PARKED);
  	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
  		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
  			complete(&self->parked);
  		schedule();
  		__set_current_state(TASK_PARKED);
  	}
  	clear_bit(KTHREAD_IS_PARKED, &self->flags);
  	__set_current_state(TASK_RUNNING);
  }
  
  void kthread_parkme(void)
  {
  	__kthread_parkme(to_kthread(current));
  }
  EXPORT_SYMBOL_GPL(kthread_parkme);
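  
  /*
   * Illustrative sketch (editorial addition): a parkable thread function
   * checks kthread_should_park() and parks itself with kthread_parkme(),
   * much like the smpboot threads do.  Names are hypothetical.
   *
   *	static int example_parkable_fn(void *data)
   *	{
   *		for (;;) {
   *			if (kthread_should_stop())
   *				break;
   *			if (kthread_should_park()) {
   *				kthread_parkme();
   *				continue;
   *			}
   *			do_unit_of_work(data);
   *		}
   *		return 0;
   *	}
   */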

  static int kthread(void *_create)
  {
  	/* Copy data: it's on kthread's stack */
  	struct kthread_create_info *create = _create;
  	int (*threadfn)(void *data) = create->threadfn;
  	void *data = create->data;
  	struct completion *done;
  	struct kthread self;
  	int ret;

  	self.flags = 0;
  	self.data = data;
  	init_completion(&self.exited);
  	init_completion(&self.parked);
  	current->vfork_done = &self.exited;

  	/* If user was SIGKILLed, I release the structure. */
  	done = xchg(&create->done, NULL);
  	if (!done) {
  		kfree(create);
  		do_exit(-EINTR);
  	}
  	/* OK, tell user we're spawned, wait for stop or wakeup */
  	__set_current_state(TASK_UNINTERRUPTIBLE);
  	create->result = current;
  	complete(done);
  	schedule();
  	ret = -EINTR;

  	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
  		__kthread_parkme(&self);
  		ret = threadfn(data);
  	}
  	/* we can't just return, we must preserve "self" on stack */
  	do_exit(ret);
  }
  /* Called from do_fork() to get node information for the task about to be created. */
  int tsk_fork_get_node(struct task_struct *tsk)
  {
  #ifdef CONFIG_NUMA
  	if (tsk == kthreadd_task)
  		return tsk->pref_node_fork;
  #endif
  	return NUMA_NO_NODE;
  }
  static void create_kthread(struct kthread_create_info *create)
  {
  	int pid;
  #ifdef CONFIG_NUMA
  	current->pref_node_fork = create->node;
  #endif
  	/* We want our own signal handler (we take no signals by default). */
  	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
  	if (pid < 0) {
  		/* If user was SIGKILLed, I release the structure. */
  		struct completion *done = xchg(&create->done, NULL);
  
  		if (!done) {
  			kfree(create);
  			return;
  		}
  		create->result = ERR_PTR(pid);
  		complete(done);
  	}
  }
  static struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
  						    void *data, int node,
  						    const char namefmt[],
  						    va_list args)
  {
  	DECLARE_COMPLETION_ONSTACK(done);
  	struct task_struct *task;
  	struct kthread_create_info *create = kmalloc(sizeof(*create),
  						     GFP_KERNEL);
  
  	if (!create)
  		return ERR_PTR(-ENOMEM);
  	create->threadfn = threadfn;
  	create->data = data;
  	create->node = node;
  	create->done = &done;
  
  	spin_lock(&kthread_create_lock);
  	list_add_tail(&create->list, &kthread_create_list);
  	spin_unlock(&kthread_create_lock);
  	wake_up_process(kthreadd_task);
  	/*
  	 * Wait for completion in killable state, for I might be chosen by
  	 * the OOM killer while kthreadd is trying to allocate memory for
  	 * new kernel thread.
  	 */
  	if (unlikely(wait_for_completion_killable(&done))) {
  		/*
  		 * If I was SIGKILLed before kthreadd (or new kernel thread)
  		 * calls complete(), leave the cleanup of this structure to
  		 * that thread.
  		 */
  		if (xchg(&create->done, NULL))
  			return ERR_PTR(-EINTR);
  		/*
  		 * kthreadd (or new kernel thread) will call complete()
  		 * shortly.
  		 */
  		wait_for_completion(&done);
  	}
  	task = create->result;
  	if (!IS_ERR(task)) {
  		static const struct sched_param param = { .sched_priority = 0 };

  		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
  		/*
  		 * root may have changed our (kthreadd's) priority or CPU mask.
  		 * The kernel thread should not inherit these properties.
  		 */
  		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
  		set_cpus_allowed_ptr(task, cpu_all_mask);
  	}
  	kfree(create);
  	return task;
  }
  
  /**
   * kthread_create_on_node - create a kthread.
   * @threadfn: the function to run until signal_pending(current).
   * @data: data ptr for @threadfn.
   * @node: task and thread structures for the thread are allocated on this node
   * @namefmt: printf-style name for the thread.
   *
   * Description: This helper function creates and names a kernel
   * thread.  The thread will be stopped: use wake_up_process() to start
   * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
   * is affine to all CPUs.
   *
   * If thread is going to be bound on a particular cpu, give its node
   * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
   * When woken, the thread will run @threadfn() with @data as its
   * argument. @threadfn() can either call do_exit() directly if it is a
   * standalone thread for which no one will call kthread_stop(), or
   * return when 'kthread_should_stop()' is true (which means
   * kthread_stop() has been called).  The return value should be zero
   * or a negative error number; it will be passed to kthread_stop().
   *
   * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
   */
  struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
  					   void *data, int node,
  					   const char namefmt[],
  					   ...)
  {
  	struct task_struct *task;
  	va_list args;
  
  	va_start(args, namefmt);
  	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
  	va_end(args);
  
  	return task;
  }
  EXPORT_SYMBOL(kthread_create_on_node);
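  
  /*
   * Illustrative sketch (editorial addition): typical creation of an
   * unbound kthread; dev and example_fn are hypothetical.  kthread_stop()
   * later stops the thread and collects its return value.
   *
   *	struct task_struct *t;
   *
   *	t = kthread_create_on_node(example_fn, dev, NUMA_NO_NODE,
   *				   "example/%d", dev->id);
   *	if (IS_ERR(t))
   *		return PTR_ERR(t);
   *	wake_up_process(t);
   */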

  static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
  {
  	unsigned long flags;
  	if (!wait_task_inactive(p, state)) {
  		WARN_ON(1);
  		return;
  	}

  	/* It's safe because the task is inactive. */
  	raw_spin_lock_irqsave(&p->pi_lock, flags);
  	do_set_cpus_allowed(p, mask);
  	p->flags |= PF_NO_SETAFFINITY;
  	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  }
  
  static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
  {
  	__kthread_bind_mask(p, cpumask_of(cpu), state);
  }
  
  void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
  {
  	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
  }
  /**
   * kthread_bind - bind a just-created kthread to a cpu.
   * @p: thread created by kthread_create().
   * @cpu: cpu (might not be online, must be possible) for @k to run on.
   *
   * Description: This function is equivalent to set_cpus_allowed(),
   * except that @cpu doesn't need to be online, and the thread must be
   * stopped (i.e., just returned from kthread_create()).
   */
  void kthread_bind(struct task_struct *p, unsigned int cpu)
  {
  	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
  }
  EXPORT_SYMBOL(kthread_bind);
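  
  /*
   * Illustrative sketch (editorial addition): binding must happen between
   * kthread_create() and the first wake-up, while the new thread still
   * sits in TASK_UNINTERRUPTIBLE.  Names are hypothetical.
   *
   *	t = kthread_create(example_fn, dev, "example/%u", cpu);
   *	if (!IS_ERR(t)) {
   *		kthread_bind(t, cpu);
   *		wake_up_process(t);
   *	}
   */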
  
  /**
   * kthread_create_on_cpu - Create a cpu bound kthread
   * @threadfn: the function to run until signal_pending(current).
   * @data: data ptr for @threadfn.
   * @cpu: The cpu on which the thread should be bound.
   * @namefmt: printf-style name for the thread. Format is restricted
   *	     to "name.*%u". Code fills in cpu number.
   *
   * Description: This helper function creates and names a kernel thread.
   * The thread will be woken and put into park mode.
   */
  struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
  					  void *data, unsigned int cpu,
  					  const char *namefmt)
  {
  	struct task_struct *p;
  	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
  				   cpu);
  	if (IS_ERR(p))
  		return p;
  	kthread_bind(p, cpu);
  	/* CPU hotplug needs to bind once again when unparking the thread. */
  	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
  	to_kthread(p)->cpu = cpu;
  	return p;
  }
  static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
  {
  	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
  	/*
  	 * We clear the IS_PARKED bit here as we don't wait
  	 * until the task has left the park code. So if we'd
  	 * park before that happens we'd see the IS_PARKED bit
  	 * which might be about to be cleared.
  	 */
  	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
  		/*
  		 * Newly created kthread was parked when the CPU was offline.
  		 * The binding was lost and we need to set it again.
  		 */
  		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
  			__kthread_bind(k, kthread->cpu, TASK_PARKED);
  		wake_up_state(k, TASK_PARKED);
  	}
  }
  /**
   * kthread_unpark - unpark a thread created by kthread_create().
   * @k:		thread created by kthread_create().
   *
   * Sets kthread_should_park() for @k to return false, wakes it, and
   * waits for it to return. If the thread is marked percpu then it is
   * bound to the cpu again.
   */
  void kthread_unpark(struct task_struct *k)
  {
  	struct kthread *kthread = to_live_kthread(k);

  	if (kthread) {
  		__kthread_unpark(k, kthread);
  		put_task_stack(k);
  	}
  }
  EXPORT_SYMBOL_GPL(kthread_unpark);
  
  /**
   * kthread_park - park a thread created by kthread_create().
   * @k: thread created by kthread_create().
   *
   * Sets kthread_should_park() for @k to return true, wakes it, and
   * waits for it to return. This can also be called after kthread_create()
   * instead of calling wake_up_process(): the thread will park without
   * calling threadfn().
   *
   * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
   * If called by the kthread itself just the park bit is set.
   */
  int kthread_park(struct task_struct *k)
  {
  	struct kthread *kthread = to_live_kthread(k);
  	int ret = -ENOSYS;
  
  	if (kthread) {
  		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
  			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
  			if (k != current) {
  				wake_up_process(k);
  				wait_for_completion(&kthread->parked);
  			}
  		}
  		put_task_stack(k);
  		ret = 0;
  	}
  	return ret;
  }
  EXPORT_SYMBOL_GPL(kthread_park);
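  
  /*
   * Illustrative sketch (editorial addition): parking a thread across a
   * critical section and letting it run again afterwards.  kthread_park()
   * returns 0 once the thread is parked.
   *
   *	if (!kthread_park(t)) {
   *		update_state_the_thread_must_not_see();
   *		kthread_unpark(t);
   *	}
   */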
  
  /**
   * kthread_stop - stop a thread created by kthread_create().
   * @k: thread created by kthread_create().
   *
   * Sets kthread_should_stop() for @k to return true, wakes it, and
   * waits for it to exit. This can also be called after kthread_create()
   * instead of calling wake_up_process(): the thread will exit without
   * calling threadfn().
   *
   * If threadfn() may call do_exit() itself, the caller must ensure
   * task_struct can't go away.
   *
   * Returns the result of threadfn(), or %-EINTR if wake_up_process()
   * was never called.
   */
  int kthread_stop(struct task_struct *k)
  {
  	struct kthread *kthread;
  	int ret;
  	trace_sched_kthread_stop(k);
  
  	get_task_struct(k);
  	kthread = to_live_kthread(k);
  	if (kthread) {
  		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
  		__kthread_unpark(k, kthread);
  		wake_up_process(k);
  		wait_for_completion(&kthread->exited);
  		put_task_stack(k);
  	}
  	ret = k->exit_code;
  	put_task_struct(k);

  	trace_sched_kthread_stop_ret(ret);
  	return ret;
  }
  EXPORT_SYMBOL(kthread_stop);

  int kthreadd(void *unused)
  {
  	struct task_struct *tsk = current;

  	/* Setup a clean context for our children to inherit. */
  	set_task_comm(tsk, "kthreadd");
  	ignore_signals(tsk);
  	set_cpus_allowed_ptr(tsk, cpu_all_mask);
  	set_mems_allowed(node_states[N_MEMORY]);

  	current->flags |= PF_NOFREEZE;
  
  	for (;;) {
  		set_current_state(TASK_INTERRUPTIBLE);
  		if (list_empty(&kthread_create_list))
  			schedule();
  		__set_current_state(TASK_RUNNING);
  
  		spin_lock(&kthread_create_lock);
  		while (!list_empty(&kthread_create_list)) {
  			struct kthread_create_info *create;
  
  			create = list_entry(kthread_create_list.next,
  					    struct kthread_create_info, list);
  			list_del_init(&create->list);
  			spin_unlock(&kthread_create_lock);
  
  			create_kthread(create);
  
  			spin_lock(&kthread_create_lock);
  		}
  		spin_unlock(&kthread_create_lock);
  	}
  
  	return 0;
  }

  void __kthread_init_worker(struct kthread_worker *worker,
  				const char *name,
  				struct lock_class_key *key)
  {
  	memset(worker, 0, sizeof(struct kthread_worker));
  	spin_lock_init(&worker->lock);
  	lockdep_set_class_and_name(&worker->lock, key, name);
  	INIT_LIST_HEAD(&worker->work_list);
  	INIT_LIST_HEAD(&worker->delayed_work_list);
  }
  EXPORT_SYMBOL_GPL(__kthread_init_worker);

  /**
   * kthread_worker_fn - kthread function to process kthread_worker
   * @worker_ptr: pointer to initialized kthread_worker
   *
   * This function implements the main cycle of a kthread worker. It processes
   * work_list until it is stopped with kthread_stop(). It sleeps when the queue
   * is empty.
   *
   * The works are not allowed to hold any locks or to leave preemption or
   * interrupts disabled when they finish. A safe point for freezing is
   * provided after one work finishes and before a new one is started.
   *
   * Also, the works must not be handled by more than one worker at the same
   * time; see also kthread_queue_work().
   */
  int kthread_worker_fn(void *worker_ptr)
  {
  	struct kthread_worker *worker = worker_ptr;
  	struct kthread_work *work;
  	/*
  	 * FIXME: Update the check and remove the assignment when all kthread
  	 * worker users are created using kthread_create_worker*() functions.
  	 */
  	WARN_ON(worker->task && worker->task != current);
  	worker->task = current;
  
  	if (worker->flags & KTW_FREEZABLE)
  		set_freezable();
  repeat:
  	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
  
  	if (kthread_should_stop()) {
  		__set_current_state(TASK_RUNNING);
  		spin_lock_irq(&worker->lock);
  		worker->task = NULL;
  		spin_unlock_irq(&worker->lock);
  		return 0;
  	}
  
  	work = NULL;
  	spin_lock_irq(&worker->lock);
  	if (!list_empty(&worker->work_list)) {
  		work = list_first_entry(&worker->work_list,
  					struct kthread_work, node);
  		list_del_init(&work->node);
  	}
  	worker->current_work = work;
  	spin_unlock_irq(&worker->lock);
  
  	if (work) {
  		__set_current_state(TASK_RUNNING);
  		work->func(work);
  	} else if (!freezing(current))
  		schedule();
  
  	try_to_freeze();
  	goto repeat;
  }
  EXPORT_SYMBOL_GPL(kthread_worker_fn);
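  
  /*
   * Illustrative sketch (editorial addition): the legacy way to run a
   * worker on a hand-made kthread; kthread_create_worker() below wraps
   * these steps.  Names are hypothetical.
   *
   *	struct kthread_worker worker;
   *	struct task_struct *task;
   *
   *	kthread_init_worker(&worker);
   *	task = kthread_run(kthread_worker_fn, &worker, "example-worker");
   *	if (IS_ERR(task))
   *		return PTR_ERR(task);
   *	...
   *	kthread_flush_worker(&worker);
   *	kthread_stop(task);
   */
  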
  static struct kthread_worker *
  __kthread_create_worker(int cpu, unsigned int flags,
  			const char namefmt[], va_list args)
  {
  	struct kthread_worker *worker;
  	struct task_struct *task;
  
  	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  	if (!worker)
  		return ERR_PTR(-ENOMEM);
  
  	kthread_init_worker(worker);
  
  	if (cpu >= 0) {
  		char name[TASK_COMM_LEN];
  
  		/*
  		 * kthread_create_worker_on_cpu() allows passing a generic
  		 * namefmt, in contrast with kthread_create_on_cpu(). We need
  		 * to format it here.
  		 */
  		vsnprintf(name, sizeof(name), namefmt, args);
  		task = kthread_create_on_cpu(kthread_worker_fn, worker,
  					     cpu, name);
  	} else {
  		task = __kthread_create_on_node(kthread_worker_fn, worker,
  						-1, namefmt, args);
  	}
  
  	if (IS_ERR(task))
  		goto fail_task;
  	worker->flags = flags;
  	worker->task = task;
  	wake_up_process(task);
  	return worker;
  
  fail_task:
  	kfree(worker);
  	return ERR_CAST(task);
  }
  
  /**
   * kthread_create_worker - create a kthread worker
   * @flags: flags modifying the default behavior of the worker
   * @namefmt: printf-style name for the kthread worker (task).
   *
   * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
   * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
   * when the worker was SIGKILLed.
   */
  struct kthread_worker *
  kthread_create_worker(unsigned int flags, const char namefmt[], ...)
  {
  	struct kthread_worker *worker;
  	va_list args;
  
  	va_start(args, namefmt);
  	worker = __kthread_create_worker(-1, flags, namefmt, args);
  	va_end(args);
  
  	return worker;
  }
  EXPORT_SYMBOL(kthread_create_worker);
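  
  /*
   * Illustrative sketch (editorial addition): full lifecycle of a worker
   * created here.  KTW_FREEZABLE is currently the only modifying flag;
   * pass 0 for the default behavior.
   *
   *	struct kthread_worker *w;
   *
   *	w = kthread_create_worker(0, "example/%d", id);
   *	if (IS_ERR(w))
   *		return PTR_ERR(w);
   *	... queue works on w ...
   *	kthread_destroy_worker(w);
   */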
  
  /**
   * kthread_create_worker_on_cpu - create a kthread worker and bind it
   *	to a given CPU and the associated NUMA node.
   * @cpu: CPU number
   * @flags: flags modifying the default behavior of the worker
   * @namefmt: printf-style name for the kthread worker (task).
   *
   * Use a valid CPU number if you want to bind the kthread worker
   * to the given CPU and the associated NUMA node.
   *
   * A good practice is to also include the cpu number in the worker name.
   * For example, use kthread_create_worker_on_cpu(cpu, 0, "helper/%d", cpu).
   *
   * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
   * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
   * when the worker was SIGKILLed.
   */
  struct kthread_worker *
  kthread_create_worker_on_cpu(int cpu, unsigned int flags,
  			     const char namefmt[], ...)
  {
  	struct kthread_worker *worker;
  	va_list args;
  
  	va_start(args, namefmt);
  	worker = __kthread_create_worker(cpu, flags, namefmt, args);
  	va_end(args);
  
  	return worker;
  }
  EXPORT_SYMBOL(kthread_create_worker_on_cpu);
  /*
   * Returns true when the work could not be queued at the moment.
   * It happens when it is already pending in a worker list
   * or when it is being cancelled.
   */
  static inline bool queuing_blocked(struct kthread_worker *worker,
  				   struct kthread_work *work)
  {
  	lockdep_assert_held(&worker->lock);
  
  	return !list_empty(&work->node) || work->canceling;
  }
  static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
  					     struct kthread_work *work)
  {
  	lockdep_assert_held(&worker->lock);
  	WARN_ON_ONCE(!list_empty(&work->node));
  	/* Do not use a work with >1 worker, see kthread_queue_work() */
  	WARN_ON_ONCE(work->worker && work->worker != worker);
  }
  /* insert @work before @pos in @worker */
  static void kthread_insert_work(struct kthread_worker *worker,
  				struct kthread_work *work,
  				struct list_head *pos)
  {
  	kthread_insert_work_sanity_check(worker, work);
  
  	list_add_tail(&work->node, pos);
  	work->worker = worker;
  	if (!worker->current_work && likely(worker->task))
  		wake_up_process(worker->task);
  }
  /**
   * kthread_queue_work - queue a kthread_work
   * @worker: target kthread_worker
   * @work: kthread_work to queue
   *
   * Queue @work to work processor @worker for async execution.  @worker
   * must have been created with kthread_create_worker().  Returns %true
   * if @work was successfully queued, %false if it was already pending.
   *
   * Reinitialize the work if it needs to be used by another worker.
   * For example, when the worker was stopped and started again.
   */
  bool kthread_queue_work(struct kthread_worker *worker,
  			struct kthread_work *work)
  {
  	bool ret = false;
  	unsigned long flags;
  
  	spin_lock_irqsave(&worker->lock, flags);
  	if (!queuing_blocked(worker, work)) {
  		kthread_insert_work(worker, work, &worker->work_list);
  		ret = true;
  	}
  	spin_unlock_irqrestore(&worker->lock, flags);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(kthread_queue_work);
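  
  /*
   * Illustrative sketch (editorial addition): initialize a work item once,
   * queue it, and wait for it to finish.  example_dev and example_work_fn
   * are hypothetical.
   *
   *	static void example_work_fn(struct kthread_work *work)
   *	{
   *		struct example_dev *dev =
   *			container_of(work, struct example_dev, work);
   *		process(dev);
   *	}
   *
   *	kthread_init_work(&dev->work, example_work_fn);
   *	kthread_queue_work(w, &dev->work);
   *	kthread_flush_work(&dev->work);
   */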

  /**
   * kthread_delayed_work_timer_fn - callback that queues the associated kthread
   *	delayed work when the timer expires.
   * @__data: pointer to the data associated with the timer
   *
   * The format of the function is defined by struct timer_list.
   * It is called from an irqsafe timer with interrupts already disabled.
   */
  void kthread_delayed_work_timer_fn(unsigned long __data)
  {
  	struct kthread_delayed_work *dwork =
  		(struct kthread_delayed_work *)__data;
  	struct kthread_work *work = &dwork->work;
  	struct kthread_worker *worker = work->worker;
  
  	/*
  	 * This might happen when a pending work is reinitialized.
  	 * It means that it is being used the wrong way.
  	 */
  	if (WARN_ON_ONCE(!worker))
  		return;
  
  	spin_lock(&worker->lock);
  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
  	WARN_ON_ONCE(work->worker != worker);
  
  	/* Move the work from worker->delayed_work_list. */
  	WARN_ON_ONCE(list_empty(&work->node));
  	list_del_init(&work->node);
  	kthread_insert_work(worker, work, &worker->work_list);
  
  	spin_unlock(&worker->lock);
  }
  EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
  
  void __kthread_queue_delayed_work(struct kthread_worker *worker,
  				  struct kthread_delayed_work *dwork,
  				  unsigned long delay)
  {
  	struct timer_list *timer = &dwork->timer;
  	struct kthread_work *work = &dwork->work;
  
  	WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
  		     timer->data != (unsigned long)dwork);
  
  	/*
  	 * If @delay is 0, queue @dwork->work immediately.  This is for
  	 * both optimization and correctness.  The earliest @timer can
  	 * expire is on the closest next tick and delayed_work users depend
  	 * on that there's no such delay when @delay is 0.
  	 */
  	if (!delay) {
  		kthread_insert_work(worker, work, &worker->work_list);
  		return;
  	}
  
  	/* Be paranoid and try to detect possible races already now. */
  	kthread_insert_work_sanity_check(worker, work);
  
  	list_add(&work->node, &worker->delayed_work_list);
  	work->worker = worker;
  	timer_stats_timer_set_start_info(&dwork->timer);
  	timer->expires = jiffies + delay;
  	add_timer(timer);
  }
  
  /**
   * kthread_queue_delayed_work - queue the associated kthread work
   *	after a delay.
   * @worker: target kthread_worker
   * @dwork: kthread_delayed_work to queue
   * @delay: number of jiffies to wait before queuing
   *
   * If the work has not been pending it starts a timer that will queue
   * the work after the given @delay. If @delay is zero, it queues the
   * work immediately.
   *
   * Return: %false if the @work was already pending, which means that
   * either the timer was running or the work was queued. Returns %true
   * otherwise.
   */
  bool kthread_queue_delayed_work(struct kthread_worker *worker,
  				struct kthread_delayed_work *dwork,
  				unsigned long delay)
  {
  	struct kthread_work *work = &dwork->work;
  	unsigned long flags;
  	bool ret = false;
  
  	spin_lock_irqsave(&worker->lock, flags);
  	if (!queuing_blocked(worker, work)) {
  		__kthread_queue_delayed_work(worker, dwork, delay);
  		ret = true;
  	}
  
  	spin_unlock_irqrestore(&worker->lock, flags);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
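  
  /*
   * Illustrative sketch (editorial addition): run a work on the worker
   * roughly 100ms from now; dev->dwork is a hypothetical
   * struct kthread_delayed_work.
   *
   *	kthread_init_delayed_work(&dev->dwork, example_work_fn);
   *	kthread_queue_delayed_work(w, &dev->dwork, msecs_to_jiffies(100));
   */
  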
  struct kthread_flush_work {
  	struct kthread_work	work;
  	struct completion	done;
  };
  
  static void kthread_flush_work_fn(struct kthread_work *work)
  {
  	struct kthread_flush_work *fwork =
  		container_of(work, struct kthread_flush_work, work);
  	complete(&fwork->done);
  }
  /**
   * kthread_flush_work - flush a kthread_work
   * @work: work to flush
   *
   * If @work is queued or executing, wait for it to finish execution.
   */
  void kthread_flush_work(struct kthread_work *work)
  {
  	struct kthread_flush_work fwork = {
  		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
  		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
  	};
  	struct kthread_worker *worker;
  	bool noop = false;
  	worker = work->worker;
  	if (!worker)
  		return;

  	spin_lock_irq(&worker->lock);
  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
  	WARN_ON_ONCE(work->worker != worker);

  	if (!list_empty(&work->node))
  		kthread_insert_work(worker, &fwork.work, work->node.next);
  	else if (worker->current_work == work)
  		kthread_insert_work(worker, &fwork.work,
  				    worker->work_list.next);
  	else
  		noop = true;

  	spin_unlock_irq(&worker->lock);

  	if (!noop)
  		wait_for_completion(&fwork.done);
  }
  EXPORT_SYMBOL_GPL(kthread_flush_work);

  /*
   * This function removes the work from the worker queue. Also it makes sure
   * that it won't get queued later via the delayed work's timer.
   *
   * The work might still be in use when this function finishes. See the
   * current_work processed by the worker.
   *
   * Return: %true if @work was pending and successfully canceled,
   *	%false if @work was not pending
   */
  static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
  				  unsigned long *flags)
  {
  	/* Try to cancel the timer if exists. */
  	if (is_dwork) {
  		struct kthread_delayed_work *dwork =
  			container_of(work, struct kthread_delayed_work, work);
  		struct kthread_worker *worker = work->worker;
  
  		/*
  		 * del_timer_sync() must be called to make sure that the timer
  		 * callback is not running. The lock must be temporarily released
  		 * to avoid a deadlock with the callback. In the meantime,
  		 * any queuing is blocked by setting the canceling counter.
  		 */
  		work->canceling++;
  		spin_unlock_irqrestore(&worker->lock, *flags);
  		del_timer_sync(&dwork->timer);
  		spin_lock_irqsave(&worker->lock, *flags);
  		work->canceling--;
  	}
  
  	/*
  	 * Try to remove the work from a worker list. It might either
  	 * be from worker->work_list or from worker->delayed_work_list.
  	 */
  	if (!list_empty(&work->node)) {
  		list_del_init(&work->node);
  		return true;
  	}
  
  	return false;
  }
  /**
   * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
   * @worker: kthread worker to use
   * @dwork: kthread delayed work to queue
   * @delay: number of jiffies to wait before queuing
   *
   * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
   * modify @dwork's timer so that it expires after @delay. If @delay is zero,
   * @work is guaranteed to be queued immediately.
   *
   * Return: %true if @dwork was pending and its timer was modified,
   * %false otherwise.
   *
   * A special case is when the work is being canceled in parallel.
   * It might be caused either by the real kthread_cancel_delayed_work_sync()
   * or yet another kthread_mod_delayed_work() call. We let the other command
   * win and return %false here. The caller is supposed to synchronize these
   * operations in a reasonable way.
   *
   * This function is safe to call from any context including IRQ handler.
   * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
   * for details.
   */
  bool kthread_mod_delayed_work(struct kthread_worker *worker,
  			      struct kthread_delayed_work *dwork,
  			      unsigned long delay)
  {
  	struct kthread_work *work = &dwork->work;
  	unsigned long flags;
  	int ret = false;
  
  	spin_lock_irqsave(&worker->lock, flags);
  
  	/* Do not bother with canceling when never queued. */
  	if (!work->worker)
  		goto fast_queue;
  
  	/* Work must not be used with >1 worker, see kthread_queue_work() */
  	WARN_ON_ONCE(work->worker != worker);
  
  	/* Do not fight with another command that is canceling this work. */
  	if (work->canceling)
  		goto out;
  
  	ret = __kthread_cancel_work(work, true, &flags);
  fast_queue:
  	__kthread_queue_delayed_work(worker, dwork, delay);
  out:
  	spin_unlock_irqrestore(&worker->lock, flags);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
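  
  /*
   * Illustrative sketch (editorial addition): a watchdog-style pattern that
   * keeps pushing a deadline back.  Unlike kthread_queue_delayed_work(),
   * this also works when the work is already pending.  Names hypothetical.
   *
   *	void example_heartbeat(struct example_dev *dev)
   *	{
   *		kthread_mod_delayed_work(dev->worker, &dev->timeout_dwork,
   *					 5 * HZ);
   *	}
   */
  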
  static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
  {
  	struct kthread_worker *worker = work->worker;
  	unsigned long flags;
  	int ret = false;
  
  	if (!worker)
  		goto out;
  
  	spin_lock_irqsave(&worker->lock, flags);
  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
  	WARN_ON_ONCE(work->worker != worker);
  
  	ret = __kthread_cancel_work(work, is_dwork, &flags);
  
  	if (worker->current_work != work)
  		goto out_fast;
  
  	/*
  	 * The work is in progress and we need to wait with the lock released.
  	 * In the meantime, block any queuing by setting the canceling counter.
  	 */
  	work->canceling++;
  	spin_unlock_irqrestore(&worker->lock, flags);
  	kthread_flush_work(work);
  	spin_lock_irqsave(&worker->lock, flags);
  	work->canceling--;
  
  out_fast:
  	spin_unlock_irqrestore(&worker->lock, flags);
  out:
  	return ret;
  }
  
  /**
   * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
   * @work: the kthread work to cancel
   *
   * Cancel @work and wait for its execution to finish.  This function
   * can be used even if the work re-queues itself. On return from this
   * function, @work is guaranteed to be not pending or executing on any CPU.
   *
   * kthread_cancel_work_sync(&delayed_work->work) must not be used for
   * delayed works. Use kthread_cancel_delayed_work_sync() instead.
   *
   * The caller must ensure that the worker on which @work was last
   * queued can't be destroyed before this function returns.
   *
   * Return: %true if @work was pending, %false otherwise.
   */
  bool kthread_cancel_work_sync(struct kthread_work *work)
  {
  	return __kthread_cancel_work_sync(work, false);
  }
  EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
  
  /**
   * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
   *	wait for it to finish.
   * @dwork: the kthread delayed work to cancel
   *
   * This is kthread_cancel_work_sync() for delayed works.
   *
   * Return: %true if @dwork was pending, %false otherwise.
   */
  bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
  {
  	return __kthread_cancel_work_sync(&dwork->work, true);
  }
  EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
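  
  /*
   * Illustrative sketch (editorial addition): typical teardown ordering;
   * cancel outstanding works before freeing the data they use, then
   * destroy the worker.  Names are hypothetical.
   *
   *	kthread_cancel_delayed_work_sync(&dev->timeout_dwork);
   *	kthread_cancel_work_sync(&dev->work);
   *	kthread_destroy_worker(dev->worker);
   */
  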
  /**
   * kthread_flush_worker - flush all current works on a kthread_worker
   * @worker: worker to flush
   *
   * Wait until all currently executing or pending works on @worker are
   * finished.
   */
  void kthread_flush_worker(struct kthread_worker *worker)
  {
  	struct kthread_flush_work fwork = {
  		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
  		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
  	};
  	kthread_queue_work(worker, &fwork.work);
  	wait_for_completion(&fwork.done);
  }
  EXPORT_SYMBOL_GPL(kthread_flush_worker);
  
  /**
   * kthread_destroy_worker - destroy a kthread worker
   * @worker: worker to be destroyed
   *
   * Flush and destroy @worker.  The simple flush is enough because the kthread
   * worker API is used only in trivial scenarios.  There are no multi-step state
   * machines needed.
   */
  void kthread_destroy_worker(struct kthread_worker *worker)
  {
  	struct task_struct *task;
  
  	task = worker->task;
  	if (WARN_ON(!task))
  		return;
  
  	kthread_flush_worker(worker);
  	kthread_stop(task);
  	WARN_ON(!list_empty(&worker->work_list));
  	kfree(worker);
  }
  EXPORT_SYMBOL(kthread_destroy_worker);