kernel/kthread.c

/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
  EXPORT_SYMBOL(kthread_should_stop);
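/*
 * Example (editor's sketch, not part of the original file): a minimal
 * thread function that honours kthread_should_stop().  The loop body is
 * a placeholder for the thread's real workload.
 */
static int __maybe_unused example_stop_aware_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... do one unit of work on @data here ... */
		schedule_timeout_interruptible(HZ);
	}
	/* this value is what kthread_stop() returns to the caller */
	return 0;
}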
/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position. kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}
  EXPORT_SYMBOL_GPL(kthread_should_park);
  
  /**
   * kthread_freezable_should_stop - should this freezable kthread return now?
   * @was_frozen: optional out parameter, indicates whether %current was frozen
   *
   * kthread_should_stop() for freezable kthreads, which will enter
 * the refrigerator if necessary.  This function is safe from kthread_stop() /
   * freezer deadlock and freezable kthreads should use this function instead
   * of calling try_to_freeze() directly.
   */
  bool kthread_freezable_should_stop(bool *was_frozen)
  {
  	bool frozen = false;
  
  	might_sleep();
  
  	if (unlikely(freezing(current)))
  		frozen = __refrigerator(true);
  
  	if (was_frozen)
  		*was_frozen = frozen;
  
  	return kthread_should_stop();
  }
  EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
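/*
 * Example (editor's sketch, not part of the original file): main loop of a
 * freezable kthread.  set_freezable() clears PF_NOFREEZE so the freezer
 * considers this thread; kthread_freezable_should_stop() then enters the
 * refrigerator safely with respect to kthread_stop().
 */
static int __maybe_unused example_freezable_fn(void *data)
{
	bool was_frozen;

	set_freezable();
	while (!kthread_freezable_should_stop(&was_frozen)) {
		if (was_frozen)
			pr_debug("thawed, revalidating cached state\n");
		/* ... do one unit of work on @data here ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}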
  
  /**
   * kthread_data - return data value specified on kthread creation
   * @task: kthread task in question
   *
   * Return the data value specified when kthread @task was created.
   * The caller is responsible for ensuring the validity of @task when
   * calling this function.
   */
  void *kthread_data(struct task_struct *task)
  {
  	return to_kthread(task)->data;
  }
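/*
 * Example (editor's sketch, not part of the original file): inside a thread
 * function started with kthread_create(), kthread_data(current) returns the
 * same pointer that was passed as @data at creation time.
 */
static int __maybe_unused example_data_fn(void *data)
{
	WARN_ON(kthread_data(current) != data);
	return 0;
}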
  /**
   * probe_kthread_data - speculative version of kthread_data()
   * @task: possible kthread task in question
   *
   * @task could be a kthread task.  Return the data value specified when it
   * was created if accessible.  If @task isn't a kthread task or its data is
   * inaccessible for any reason, %NULL is returned.  This function requires
   * that @task itself is safe to dereference.
   */
  void *probe_kthread_data(struct task_struct *task)
  {
  	struct kthread *kthread = to_kthread(task);
  	void *data = NULL;
  
  	probe_kernel_read(&data, &kthread->data, sizeof(data));
  	return data;
  }
static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}
  EXPORT_SYMBOL_GPL(kthread_parkme);
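/*
 * Example (editor's sketch, not part of the original file): a thread function
 * that supports both stopping and parking, similar in shape to what the
 * smpboot threads do.
 */
static int __maybe_unused example_parkable_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* sleep in TASK_PARKED until kthread_unpark() or kthread_stop() */
			kthread_parkme();
			continue;
		}
		/* ... do one unit of work on @data here ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}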

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}
	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();
	ret = -EINTR;

	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}
/* called from do_fork() to get node information for the about-to-be-created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}
/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If the thread is going to be bound to a particular cpu, give its node
 * in @node, to get NUMA affinity for the kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
  EXPORT_SYMBOL(kthread_create_on_node);
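/*
 * Example (editor's sketch, not part of the original file): creating, naming
 * and starting a thread.  The kthread_run() macro from <linux/kthread.h>
 * combines the same call with the wake_up_process().
 */
static int __maybe_unused example_spawn(void *data)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(example_stop_aware_fn, data,
				     NUMA_NO_NODE, "example/%d", 0);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	wake_up_process(tsk);	/* the thread now enters example_stop_aware_fn() */
	return 0;
}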

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
	unsigned long flags;

	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}

	/* It's safe because the task is inactive. */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	do_set_cpus_allowed(p, mask);
	p->flags |= PF_NO_SETAFFINITY;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	__kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}
/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
  EXPORT_SYMBOL(kthread_bind);
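/*
 * Example (editor's sketch, not part of the original file): binding a
 * just-created, still-stopped thread to a CPU before waking it.
 */
static int __maybe_unused example_bound_spawn(void *data, unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create_on_node(example_stop_aware_fn, data,
				     cpu_to_node(cpu), "example_bound/%u", cpu);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	kthread_bind(tsk, cpu);	/* must be done while the thread is still stopped */
	wake_up_process(tsk);
	return 0;
}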
  
  /**
   * kthread_create_on_cpu - Create a cpu bound kthread
   * @threadfn: the function to run until signal_pending(current).
   * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread. Format is restricted
 *	     to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
   */
  struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
  					  void *data, unsigned int cpu,
  					  const char *namefmt)
  {
  	struct task_struct *p;
  	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
  				   cpu);
  	if (IS_ERR(p))
  		return p;
  	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
  	to_kthread(p)->cpu = cpu;
  	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
  	kthread_park(p);
  	return p;
  }
  static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
  {
  	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
  	/*
  	 * We clear the IS_PARKED bit here as we don't wait
  	 * until the task has left the park code. So if we'd
  	 * park before that happens we'd see the IS_PARKED bit
  	 * which might be about to be cleared.
  	 */
  	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
  		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
  			__kthread_bind(k, kthread->cpu, TASK_PARKED);
  		wake_up_state(k, TASK_PARKED);
  	}
  }
/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k:		thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread)
		__kthread_unpark(k, kthread);
}
  EXPORT_SYMBOL_GPL(kthread_unpark);
  
  /**
   * kthread_park - park a thread created by kthread_create().
   * @k: thread created by kthread_create().
   *
   * Sets kthread_should_park() for @k to return true, wakes it, and
   * waits for it to return. This can also be called after kthread_create()
   * instead of calling wake_up_process(): the thread will park without
   * calling threadfn().
   *
   * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
   * If called by the kthread itself just the park bit is set.
   */
  int kthread_park(struct task_struct *k)
  {
  	struct kthread *kthread = to_live_kthread(k);
  	int ret = -ENOSYS;
  
  	if (kthread) {
  		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
  			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
  			if (k != current) {
  				wake_up_process(k);
  				wait_for_completion(&kthread->parked);
  			}
  		}
  		ret = 0;
  	}
  	return ret;
  }
  EXPORT_SYMBOL_GPL(kthread_park);
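/*
 * Example (editor's sketch, not part of the original file): temporarily
 * quiescing a running kthread and resuming it later, e.g. around a
 * reconfiguration of shared state.
 */
static void __maybe_unused example_park_unpark(struct task_struct *tsk)
{
	if (!kthread_park(tsk)) {	/* 0 means the thread is now parked */
		/* ... safely touch state the thread also uses ... */
		kthread_unpark(tsk);	/* lets the thread run again */
	}
}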
  
/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
  EXPORT_SYMBOL(kthread_stop);
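/*
 * Example (editor's sketch, not part of the original file): shutting a thread
 * down and collecting its return value.
 */
static int __maybe_unused example_shutdown(struct task_struct *tsk)
{
	int ret;

	ret = kthread_stop(tsk);	/* wakes the thread and waits for it to exit */
	if (ret == -EINTR)
		pr_debug("thread was never woken up before being stopped\n");
	return ret;
}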

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;
  
  	for (;;) {
  		set_current_state(TASK_INTERRUPTIBLE);
  		if (list_empty(&kthread_create_list))
  			schedule();
  		__set_current_state(TASK_RUNNING);
  
  		spin_lock(&kthread_create_lock);
  		while (!list_empty(&kthread_create_list)) {
  			struct kthread_create_info *create;
  
  			create = list_entry(kthread_create_list.next,
  					    struct kthread_create_info, list);
  			list_del_init(&create->list);
  			spin_unlock(&kthread_create_lock);
  
  			create_kthread(create);
  
  			spin_lock(&kthread_create_lock);
  		}
  		spin_unlock(&kthread_create_lock);
  	}
  
  	return 0;
  }

  void __init_kthread_worker(struct kthread_worker *worker,
  				const char *name,
  				struct lock_class_key *key)
  {
  	spin_lock_init(&worker->lock);
  	lockdep_set_class_and_name(&worker->lock, key, name);
  	INIT_LIST_HEAD(&worker->work_list);
  	worker->task = NULL;
  }
  EXPORT_SYMBOL_GPL(__init_kthread_worker);
  /**
   * kthread_worker_fn - kthread function to process kthread_worker
   * @worker_ptr: pointer to initialized kthread_worker
   *
   * This function can be used as @threadfn to kthread_create() or
   * kthread_run() with @worker_ptr argument pointing to an initialized
   * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
   * this function directly after extra initialization.
   *
   * Different kthreads can be used for the same kthread_worker as long
   * as there's only one kthread attached to it at any given time.  A
   * kthread_worker without an attached kthread simply collects queued
   * kthread_works.
   */
  int kthread_worker_fn(void *worker_ptr)
  {
  	struct kthread_worker *worker = worker_ptr;
  	struct kthread_work *work;
  
  	WARN_ON(worker->task);
  	worker->task = current;
  repeat:
  	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
  
  	if (kthread_should_stop()) {
  		__set_current_state(TASK_RUNNING);
  		spin_lock_irq(&worker->lock);
  		worker->task = NULL;
  		spin_unlock_irq(&worker->lock);
  		return 0;
  	}
  
  	work = NULL;
  	spin_lock_irq(&worker->lock);
  	if (!list_empty(&worker->work_list)) {
  		work = list_first_entry(&worker->work_list,
  					struct kthread_work, node);
  		list_del_init(&work->node);
  	}
  	worker->current_work = work;
  	spin_unlock_irq(&worker->lock);
  
  	if (work) {
  		__set_current_state(TASK_RUNNING);
  		work->func(work);
  	} else if (!freezing(current))
  		schedule();
  
  	try_to_freeze();
  	goto repeat;
  }
  EXPORT_SYMBOL_GPL(kthread_worker_fn);
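/*
 * Example (editor's sketch, not part of the original file): setting up a
 * kthread_worker, handing it to a thread running kthread_worker_fn(), and
 * queueing one work item.  example_work_fn is a hypothetical callback.
 */
static void __maybe_unused example_work_fn(struct kthread_work *work)
{
	pr_debug("kthread_work %p executed\n", work);
}

static int __maybe_unused example_worker_setup(void)
{
	struct kthread_worker worker;
	struct kthread_work work;
	struct task_struct *tsk;

	init_kthread_worker(&worker);
	init_kthread_work(&work, example_work_fn);

	tsk = kthread_run(kthread_worker_fn, &worker, "example_worker");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	queue_kthread_work(&worker, &work);	/* example_work_fn() runs in @tsk */
	flush_kthread_work(&work);		/* wait for the work to finish */
	return kthread_stop(tsk);		/* kthread_worker_fn() returns 0 */
}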
/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
			       struct kthread_work *work,
			       struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}
  /**
   * queue_kthread_work - queue a kthread_work
   * @worker: target kthread_worker
   * @work: kthread_work to queue
   *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been initialized with init_kthread_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
   */
  bool queue_kthread_work(struct kthread_worker *worker,
  			struct kthread_work *work)
  {
  	bool ret = false;
  	unsigned long flags;
  
  	spin_lock_irqsave(&worker->lock, flags);
  	if (list_empty(&work->node)) {
  		insert_kthread_work(worker, work, &worker->work_list);
  		ret = true;
  	}
  	spin_unlock_irqrestore(&worker->lock, flags);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(queue_kthread_work);
  struct kthread_flush_work {
  	struct kthread_work	work;
  	struct completion	done;
  };
  
  static void kthread_flush_work_fn(struct kthread_work *work)
  {
  	struct kthread_flush_work *fwork =
  		container_of(work, struct kthread_flush_work, work);
  	complete(&fwork->done);
  }
/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
  }
  EXPORT_SYMBOL_GPL(flush_kthread_work);
  /**
   * flush_kthread_worker - flush all current works on a kthread_worker
   * @worker: worker to flush
   *
   * Wait until all currently executing or pending works on @worker are
   * finished.
   */
  void flush_kthread_worker(struct kthread_worker *worker)
  {
  	struct kthread_flush_work fwork = {
  		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
  		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
  	};
  
  	queue_kthread_work(worker, &fwork.work);
  	wait_for_completion(&fwork.done);
  }
  EXPORT_SYMBOL_GPL(flush_kthread_worker);
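/*
 * Example (editor's sketch, not part of the original file): draining a worker
 * before stopping its thread, the usual teardown order.
 */
static void __maybe_unused example_worker_teardown(struct kthread_worker *worker,
						   struct task_struct *tsk)
{
	flush_kthread_worker(worker);	/* all queued work has now finished */
	kthread_stop(tsk);		/* the thread running kthread_worker_fn() exits */
}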