kernel/kthread.c

  // SPDX-License-Identifier: GPL-2.0-only
  /* Kernel thread helper functions.
   *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
   *   Copyright (C) 2009 Red Hat, Inc.
   *
   * Creation is done via kthreadd, so that we get a clean environment
   * even if we're invoked from userspace (think modprobe, hotplug cpu,
   * etc.).
   */
  #include <uapi/linux/sched/types.h>
  #include <linux/mm.h>
  #include <linux/mmu_context.h>
  #include <linux/sched.h>
  #include <linux/sched/mm.h>
  #include <linux/sched/task.h>
  #include <linux/kthread.h>
  #include <linux/completion.h>
  #include <linux/err.h>
  #include <linux/cgroup.h>
  #include <linux/cpuset.h>
  #include <linux/unistd.h>
  #include <linux/file.h>
  #include <linux/export.h>
  #include <linux/mutex.h>
  #include <linux/slab.h>
  #include <linux/freezer.h>
  #include <linux/ptrace.h>
  #include <linux/uaccess.h>
  #include <linux/numa.h>
  #include <linux/sched/isolation.h>
  #include <trace/events/sched.h>

  static DEFINE_SPINLOCK(kthread_create_lock);
  static LIST_HEAD(kthread_create_list);
  struct task_struct *kthreadd_task;
  
  struct kthread_create_info
  {
  	/* Information passed to kthread() from kthreadd. */
  	int (*threadfn)(void *data);
  	void *data;
  	int node;

  	/* Result passed back to kthread_create() from kthreadd. */
  	struct task_struct *result;
  	struct completion *done;

  	struct list_head list;
  };
  struct kthread {
  	unsigned long flags;
  	unsigned int cpu;
  	int (*threadfn)(void *);
  	void *data;
  	mm_segment_t oldfs;
  	struct completion parked;
  	struct completion exited;
  #ifdef CONFIG_BLK_CGROUP
  	struct cgroup_subsys_state *blkcg_css;
  #endif
  };
  enum KTHREAD_BITS {
  	KTHREAD_IS_PER_CPU = 0,
  	KTHREAD_SHOULD_STOP,
  	KTHREAD_SHOULD_PARK,
  };
  static inline void set_kthread_struct(void *kthread)
  {
  	/*
  	 * We abuse ->set_child_tid to avoid the new member and because it
	 * can't be wrongly copied by copy_process(). We also rely on the fact
  	 * that the caller can't exec, so PF_KTHREAD can't be cleared.
  	 */
  	current->set_child_tid = (__force void __user *)kthread;
  }
  
  static inline struct kthread *to_kthread(struct task_struct *k)
  {
  	WARN_ON(!(k->flags & PF_KTHREAD));
  	return (__force void *)k->set_child_tid;
  }
  void free_kthread_struct(struct task_struct *k)
  {
  	struct kthread *kthread;
  	/*
  	 * Can be NULL if this kthread was created by kernel_thread()
  	 * or if kmalloc() in kthread() failed.
  	 */
  	kthread = to_kthread(k);
  #ifdef CONFIG_BLK_CGROUP
  	WARN_ON_ONCE(kthread && kthread->blkcg_css);
  #endif
  	kfree(kthread);
  }
  /**
   * kthread_should_stop - should this kthread return now?
   *
   * When someone calls kthread_stop() on your kthread, it will be woken
   * and this will return true.  You should then return, and your return
   * value will be passed through to kthread_stop().
   */
  bool kthread_should_stop(void)
  {
  	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
  }
  EXPORT_SYMBOL(kthread_should_stop);
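
/*
 * Illustrative sketch, not part of the original file: the typical main
 * loop of a kthread built around kthread_should_stop(). The names
 * my_loop_fn, my_dev and do_some_work() are hypothetical; the return
 * value is what kthread_stop() eventually reports.
 *
 *	static int my_loop_fn(void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		while (!kthread_should_stop()) {
 *			do_some_work(dev);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */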
  bool __kthread_should_park(struct task_struct *k)
  {
  	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
  }
  EXPORT_SYMBOL_GPL(__kthread_should_park);
  /**
   * kthread_should_park - should this kthread park now?
   *
   * When someone calls kthread_park() on your kthread, it will be woken
   * and this will return true.  You should then do the necessary
   * cleanup and call kthread_parkme()
   *
   * Similar to kthread_should_stop(), but this keeps the thread alive
   * and in a park position. kthread_unpark() "restarts" the thread and
   * calls the thread function again.
   */
  bool kthread_should_park(void)
  {
  	return __kthread_should_park(current);
  }
  EXPORT_SYMBOL_GPL(kthread_should_park);
  
  /**
   * kthread_freezable_should_stop - should this freezable kthread return now?
   * @was_frozen: optional out parameter, indicates whether %current was frozen
   *
   * kthread_should_stop() for freezable kthreads, which will enter
   * refrigerator if necessary.  This function is safe from kthread_stop() /
   * freezer deadlock and freezable kthreads should use this function instead
   * of calling try_to_freeze() directly.
   */
  bool kthread_freezable_should_stop(bool *was_frozen)
  {
  	bool frozen = false;
  
  	might_sleep();
  
  	if (unlikely(freezing(current)))
  		frozen = __refrigerator(true);
  
  	if (was_frozen)
  		*was_frozen = frozen;
  
  	return kthread_should_stop();
  }
  EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
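
/*
 * Illustrative sketch, not part of the original file: a freezable kthread
 * loop using kthread_freezable_should_stop() instead of open-coding
 * try_to_freeze(). my_freezable_fn and my_work_item() are hypothetical;
 * note the thread marks itself freezable first.
 *
 *	static int my_freezable_fn(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			if (was_frozen)
 *				pr_debug("back from the refrigerator\n");
 *			my_work_item(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */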
  
  /**
   * kthread_func - return the function specified on kthread creation
   * @task: kthread task in question
   *
   * Returns NULL if the task is not a kthread.
   */
  void *kthread_func(struct task_struct *task)
  {
  	if (task->flags & PF_KTHREAD)
  		return to_kthread(task)->threadfn;
  	return NULL;
  }
  EXPORT_SYMBOL_GPL(kthread_func);
  
  /**
   * kthread_data - return data value specified on kthread creation
   * @task: kthread task in question
   *
   * Return the data value specified when kthread @task was created.
   * The caller is responsible for ensuring the validity of @task when
   * calling this function.
   */
  void *kthread_data(struct task_struct *task)
  {
  	return to_kthread(task)->data;
  }
  EXPORT_SYMBOL_GPL(kthread_data);

  /**
   * kthread_probe_data - speculative version of kthread_data()
   * @task: possible kthread task in question
   *
   * @task could be a kthread task.  Return the data value specified when it
   * was created if accessible.  If @task isn't a kthread task or its data is
   * inaccessible for any reason, %NULL is returned.  This function requires
   * that @task itself is safe to dereference.
   */
  void *kthread_probe_data(struct task_struct *task)
  {
  	struct kthread *kthread = to_kthread(task);
  	void *data = NULL;
  	copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
  	return data;
  }
  static void __kthread_parkme(struct kthread *self)
  {
  	for (;;) {
  		/*
  		 * TASK_PARKED is a special state; we must serialize against
  		 * possible pending wakeups to avoid store-store collisions on
  		 * task->state.
  		 *
  		 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
  		 * wait_task_inactive() in kthread_park().
  		 */
  		set_special_state(TASK_PARKED);
  		if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
  			break;

  		/*
  		 * Thread is going to call schedule(), do not preempt it,
  		 * or the caller of kthread_park() may spend more time in
  		 * wait_task_inactive().
  		 */
  		preempt_disable();
  		complete(&self->parked);
  		schedule_preempt_disabled();
  		preempt_enable();
  	}
  	__set_current_state(TASK_RUNNING);
  }
  
  void kthread_parkme(void)
  {
  	__kthread_parkme(to_kthread(current));
  }
  EXPORT_SYMBOL_GPL(kthread_parkme);

  static int kthread(void *_create)
  {
  	/* Copy data: it's on kthread's stack */
  	struct kthread_create_info *create = _create;
  	int (*threadfn)(void *data) = create->threadfn;
  	void *data = create->data;
  	struct completion *done;
  	struct kthread *self;
  	int ret;

  	self = kzalloc(sizeof(*self), GFP_KERNEL);
  	set_kthread_struct(self);

  	/* If user was SIGKILLed, I release the structure. */
  	done = xchg(&create->done, NULL);
  	if (!done) {
  		kfree(create);
  		do_exit(-EINTR);
  	}
  
  	if (!self) {
  		create->result = ERR_PTR(-ENOMEM);
  		complete(done);
  		do_exit(-ENOMEM);
  	}
  	self->threadfn = threadfn;
  	self->data = data;
  	init_completion(&self->exited);
  	init_completion(&self->parked);
  	current->vfork_done = &self->exited;
  	/* OK, tell user we're spawned, wait for stop or wakeup */
  	__set_current_state(TASK_UNINTERRUPTIBLE);
  	create->result = current;
  	/*
  	 * Thread is going to call schedule(), do not preempt it,
  	 * or the creator may spend more time in wait_task_inactive().
  	 */
  	preempt_disable();
  	complete(done);
  	schedule_preempt_disabled();
  	preempt_enable();

  	ret = -EINTR;
  	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
  		cgroup_kthread_ready();
  		__kthread_parkme(self);
  		ret = threadfn(data);
  	}
  	do_exit(ret);
  }
/* called from do_fork() to get node information for the task about to be created */
  int tsk_fork_get_node(struct task_struct *tsk)
  {
  #ifdef CONFIG_NUMA
  	if (tsk == kthreadd_task)
  		return tsk->pref_node_fork;
  #endif
  	return NUMA_NO_NODE;
  }
  static void create_kthread(struct kthread_create_info *create)
  {
  	int pid;
  #ifdef CONFIG_NUMA
  	current->pref_node_fork = create->node;
  #endif
  	/* We want our own signal handler (we take no signals by default). */
  	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
  	if (pid < 0) {
  		/* If user was SIGKILLed, I release the structure. */
  		struct completion *done = xchg(&create->done, NULL);
  
  		if (!done) {
  			kfree(create);
  			return;
  		}
  		create->result = ERR_PTR(pid);
  		complete(done);
  	}
  }
  static __printf(4, 0)
  struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
  						    void *data, int node,
  						    const char namefmt[],
  						    va_list args)
  {
  	DECLARE_COMPLETION_ONSTACK(done);
  	struct task_struct *task;
  	struct kthread_create_info *create = kmalloc(sizeof(*create),
  						     GFP_KERNEL);
  
  	if (!create)
  		return ERR_PTR(-ENOMEM);
  	create->threadfn = threadfn;
  	create->data = data;
  	create->node = node;
  	create->done = &done;
  
  	spin_lock(&kthread_create_lock);
  	list_add_tail(&create->list, &kthread_create_list);
  	spin_unlock(&kthread_create_lock);
  	wake_up_process(kthreadd_task);
  	/*
  	 * Wait for completion in killable state, for I might be chosen by
  	 * the OOM killer while kthreadd is trying to allocate memory for
  	 * new kernel thread.
  	 */
  	if (unlikely(wait_for_completion_killable(&done))) {
  		/*
  		 * If I was SIGKILLed before kthreadd (or new kernel thread)
  		 * calls complete(), leave the cleanup of this structure to
  		 * that thread.
  		 */
  		if (xchg(&create->done, NULL))
  			return ERR_PTR(-EINTR);
  		/*
  		 * kthreadd (or new kernel thread) will call complete()
  		 * shortly.
  		 */
  		wait_for_completion(&done);
  	}
  	task = create->result;
  	if (!IS_ERR(task)) {
  		static const struct sched_param param = { .sched_priority = 0 };
  		char name[TASK_COMM_LEN];

  		/*
  		 * task is already visible to other tasks, so updating
  		 * COMM must be protected.
  		 */
  		vsnprintf(name, sizeof(name), namefmt, args);
  		set_task_comm(task, name);
  		/*
  		 * root may have changed our (kthreadd's) priority or CPU mask.
  		 * The kernel thread should not inherit these properties.
  		 */
  		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
  		set_cpus_allowed_ptr(task,
  				     housekeeping_cpumask(HK_FLAG_KTHREAD));
  	}
  	kfree(create);
  	return task;
  }
  
  /**
   * kthread_create_on_node - create a kthread.
   * @threadfn: the function to run until signal_pending(current).
   * @data: data ptr for @threadfn.
   * @node: task and thread structures for the thread are allocated on this node
   * @namefmt: printf-style name for the thread.
   *
   * Description: This helper function creates and names a kernel
   * thread.  The thread will be stopped: use wake_up_process() to start
   * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
   * is affine to all CPUs.
   *
   * If thread is going to be bound on a particular cpu, give its node
   * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
   * When woken, the thread will run @threadfn() with @data as its
   * argument. @threadfn() can either call do_exit() directly if it is a
   * standalone thread for which no one will call kthread_stop(), or
   * return when 'kthread_should_stop()' is true (which means
   * kthread_stop() has been called).  The return value should be zero
   * or a negative error number; it will be passed to kthread_stop().
   *
   * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
   */
  struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
  					   void *data, int node,
  					   const char namefmt[],
  					   ...)
  {
  	struct task_struct *task;
  	va_list args;
  
  	va_start(args, namefmt);
  	task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
  	va_end(args);
  
  	return task;
  }
  EXPORT_SYMBOL(kthread_create_on_node);
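
/*
 * Illustrative sketch, not part of the original file: creating a named
 * kthread with its stack allocated on the NUMA node of a device, then
 * waking it. my_thread_fn, my_dev, pdev and id are hypothetical.
 *
 *	struct task_struct *tsk;
 *
 *	tsk = kthread_create_on_node(my_thread_fn, my_dev,
 *				     dev_to_node(&pdev->dev),
 *				     "mydrv/%d", id);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 */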

  static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
  {
  	unsigned long flags;
  	if (!wait_task_inactive(p, state)) {
  		WARN_ON(1);
  		return;
  	}

  	/* It's safe because the task is inactive. */
  	raw_spin_lock_irqsave(&p->pi_lock, flags);
  	do_set_cpus_allowed(p, mask);
  	p->flags |= PF_NO_SETAFFINITY;
  	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  }
  
  static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
  {
  	__kthread_bind_mask(p, cpumask_of(cpu), state);
  }
  
  void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
  {
  	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
  }
  EXPORT_SYMBOL_GPL(kthread_bind_mask);

  /**
   * kthread_bind - bind a just-created kthread to a cpu.
   * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
   *
   * Description: This function is equivalent to set_cpus_allowed(),
   * except that @cpu doesn't need to be online, and the thread must be
   * stopped (i.e., just returned from kthread_create()).
   */
  void kthread_bind(struct task_struct *p, unsigned int cpu)
  {
  	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
  }
  EXPORT_SYMBOL(kthread_bind);
  
  /**
   * kthread_create_on_cpu - Create a cpu bound kthread
   * @threadfn: the function to run until signal_pending(current).
   * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
   * @namefmt: printf-style name for the thread. Format is restricted
   *	     to "name.*%u". Code fills in cpu number.
   *
 * Description: This helper function creates and names a kernel thread,
 * binds it to the given CPU, and marks it as per-cpu so that the binding
 * can be restored when the thread is unparked after CPU hotplug.
   */
  struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
  					  void *data, unsigned int cpu,
  					  const char *namefmt)
  {
  	struct task_struct *p;
  	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
  				   cpu);
  	if (IS_ERR(p))
  		return p;
  	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind it once again when unparking the thread. */
  	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
  	to_kthread(p)->cpu = cpu;
  	return p;
  }
  /**
   * kthread_unpark - unpark a thread created by kthread_create().
   * @k:		thread created by kthread_create().
   *
   * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
   * bound to the cpu again.
   */
  void kthread_unpark(struct task_struct *k)
  {
  	struct kthread *kthread = to_kthread(k);
  	/*
  	 * Newly created kthread was parked when the CPU was offline.
  	 * The binding was lost and we need to set it again.
  	 */
  	if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
  		__kthread_bind(k, kthread->cpu, TASK_PARKED);
  
  	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
  	/*
  	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
  	 */
  	wake_up_state(k, TASK_PARKED);
  }
  EXPORT_SYMBOL_GPL(kthread_unpark);
  
  /**
   * kthread_park - park a thread created by kthread_create().
   * @k: thread created by kthread_create().
   *
   * Sets kthread_should_park() for @k to return true, wakes it, and
   * waits for it to return. This can also be called after kthread_create()
   * instead of calling wake_up_process(): the thread will park without
   * calling threadfn().
   *
   * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
   * If called by the kthread itself just the park bit is set.
   */
  int kthread_park(struct task_struct *k)
  {
  	struct kthread *kthread = to_kthread(k);
  
  	if (WARN_ON(k->flags & PF_EXITING))
  		return -ENOSYS;
  	if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
  		return -EBUSY;
  	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
  	if (k != current) {
  		wake_up_process(k);
  		/*
  		 * Wait for __kthread_parkme() to complete(), this means we
  		 * _will_ have TASK_PARKED and are about to call schedule().
  		 */
  		wait_for_completion(&kthread->parked);
  		/*
  		 * Now wait for that schedule() to complete and the task to
  		 * get scheduled out.
  		 */
  		WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
  	}
  
  	return 0;
  }
  EXPORT_SYMBOL_GPL(kthread_park);
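
/*
 * Illustrative sketch, not part of the original file: a park-aware thread
 * function and the matching controller calls. When the controller calls
 * kthread_park(), the loop below enters kthread_parkme() and sits in
 * TASK_PARKED until kthread_unpark() is called. my_percpu_fn and
 * do_some_work() are hypothetical.
 *
 *	static int my_percpu_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park()) {
 *				kthread_parkme();
 *				continue;
 *			}
 *			do_some_work(data);
 *		}
 *		return 0;
 *	}
 *
 * Controller side, e.g. around CPU hotplug:
 *
 *	kthread_park(tsk);
 *	... the CPU goes down and comes back up ...
 *	kthread_unpark(tsk);
 */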
  
  /**
   * kthread_stop - stop a thread created by kthread_create().
   * @k: thread created by kthread_create().
   *
   * Sets kthread_should_stop() for @k to return true, wakes it, and
   * waits for it to exit. This can also be called after kthread_create()
   * instead of calling wake_up_process(): the thread will exit without
   * calling threadfn().
   *
   * If threadfn() may call do_exit() itself, the caller must ensure
   * task_struct can't go away.
   *
   * Returns the result of threadfn(), or %-EINTR if wake_up_process()
   * was never called.
   */
  int kthread_stop(struct task_struct *k)
  {
  	struct kthread *kthread;
  	int ret;
  	trace_sched_kthread_stop(k);
  
  	get_task_struct(k);
  	kthread = to_kthread(k);
  	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
  	kthread_unpark(k);
  	wake_up_process(k);
  	wait_for_completion(&kthread->exited);
  	ret = k->exit_code;
  	put_task_struct(k);

  	trace_sched_kthread_stop_ret(ret);
  	return ret;
  }
  EXPORT_SYMBOL(kthread_stop);
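
/*
 * Illustrative sketch, not part of the original file: the usual lifecycle
 * using the kthread_run() macro from <linux/kthread.h>, which combines
 * kthread_create() with wake_up_process(). my_thread_fn and my_data are
 * hypothetical.
 *
 *	struct task_struct *tsk;
 *	int ret;
 *
 *	tsk = kthread_run(my_thread_fn, my_data, "mydrv-worker");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	...
 *	ret = kthread_stop(tsk);
 *
 * kthread_stop() returns whatever my_thread_fn() returned, or -EINTR if
 * the thread was never woken.
 */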

  int kthreadd(void *unused)
  {
  	struct task_struct *tsk = current;

  	/* Setup a clean context for our children to inherit. */
  	set_task_comm(tsk, "kthreadd");
  	ignore_signals(tsk);
  	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_FLAG_KTHREAD));
  	set_mems_allowed(node_states[N_MEMORY]);

  	current->flags |= PF_NOFREEZE;
  	cgroup_init_kthreadd();
  
  	for (;;) {
  		set_current_state(TASK_INTERRUPTIBLE);
  		if (list_empty(&kthread_create_list))
  			schedule();
  		__set_current_state(TASK_RUNNING);
  
  		spin_lock(&kthread_create_lock);
  		while (!list_empty(&kthread_create_list)) {
  			struct kthread_create_info *create;
  
  			create = list_entry(kthread_create_list.next,
  					    struct kthread_create_info, list);
  			list_del_init(&create->list);
  			spin_unlock(&kthread_create_lock);
  
  			create_kthread(create);
  
  			spin_lock(&kthread_create_lock);
  		}
  		spin_unlock(&kthread_create_lock);
  	}
  
  	return 0;
  }

  void __kthread_init_worker(struct kthread_worker *worker,
  				const char *name,
  				struct lock_class_key *key)
  {
  	memset(worker, 0, sizeof(struct kthread_worker));
  	raw_spin_lock_init(&worker->lock);
  	lockdep_set_class_and_name(&worker->lock, key, name);
  	INIT_LIST_HEAD(&worker->work_list);
  	INIT_LIST_HEAD(&worker->delayed_work_list);
  }
  EXPORT_SYMBOL_GPL(__kthread_init_worker);

  /**
   * kthread_worker_fn - kthread function to process kthread_worker
   * @worker_ptr: pointer to initialized kthread_worker
   *
   * This function implements the main cycle of kthread worker. It processes
   * work_list until it is stopped with kthread_stop(). It sleeps when the queue
   * is empty.
   *
 * The works are not allowed to keep any locks, disable preemption or
 * interrupts when they finish. A safe point for freezing is provided
 * when one work finishes and before a new one is started.
   *
 * Also the works must not be handled by more than one worker at the same time;
   * see also kthread_queue_work().
   */
  int kthread_worker_fn(void *worker_ptr)
  {
  	struct kthread_worker *worker = worker_ptr;
  	struct kthread_work *work;
  	/*
  	 * FIXME: Update the check and remove the assignment when all kthread
  	 * worker users are created using kthread_create_worker*() functions.
  	 */
  	WARN_ON(worker->task && worker->task != current);
  	worker->task = current;
  
  	if (worker->flags & KTW_FREEZABLE)
  		set_freezable();
  repeat:
  	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */
  
  	if (kthread_should_stop()) {
  		__set_current_state(TASK_RUNNING);
  		raw_spin_lock_irq(&worker->lock);
  		worker->task = NULL;
  		raw_spin_unlock_irq(&worker->lock);
  		return 0;
  	}
  
  	work = NULL;
  	raw_spin_lock_irq(&worker->lock);
  	if (!list_empty(&worker->work_list)) {
  		work = list_first_entry(&worker->work_list,
  					struct kthread_work, node);
  		list_del_init(&work->node);
  	}
  	worker->current_work = work;
  	raw_spin_unlock_irq(&worker->lock);
  
  	if (work) {
  		__set_current_state(TASK_RUNNING);
  		work->func(work);
  	} else if (!freezing(current))
  		schedule();
  
  	try_to_freeze();
  	cond_resched();
  	goto repeat;
  }
  EXPORT_SYMBOL_GPL(kthread_worker_fn);
  static __printf(3, 0) struct kthread_worker *
  __kthread_create_worker(int cpu, unsigned int flags,
  			const char namefmt[], va_list args)
  {
  	struct kthread_worker *worker;
  	struct task_struct *task;
  	int node = NUMA_NO_NODE;
  
  	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  	if (!worker)
  		return ERR_PTR(-ENOMEM);
  
  	kthread_init_worker(worker);
  	if (cpu >= 0)
  		node = cpu_to_node(cpu);

  	task = __kthread_create_on_node(kthread_worker_fn, worker,
  						node, namefmt, args);
  	if (IS_ERR(task))
  		goto fail_task;
  	if (cpu >= 0)
  		kthread_bind(task, cpu);
  	worker->flags = flags;
  	worker->task = task;
  	wake_up_process(task);
  	return worker;
  
  fail_task:
  	kfree(worker);
  	return ERR_CAST(task);
  }
  
  /**
   * kthread_create_worker - create a kthread worker
   * @flags: flags modifying the default behavior of the worker
   * @namefmt: printf-style name for the kthread worker (task).
   *
   * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
   * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
   * when the worker was SIGKILLed.
   */
  struct kthread_worker *
  kthread_create_worker(unsigned int flags, const char namefmt[], ...)
  {
  	struct kthread_worker *worker;
  	va_list args;
  
  	va_start(args, namefmt);
  	worker = __kthread_create_worker(-1, flags, namefmt, args);
  	va_end(args);
  
  	return worker;
  }
  EXPORT_SYMBOL(kthread_create_worker);
  
  /**
   * kthread_create_worker_on_cpu - create a kthread worker and bind it
   *	to a given CPU and the associated NUMA node.
   * @cpu: CPU number
   * @flags: flags modifying the default behavior of the worker
   * @namefmt: printf-style name for the kthread worker (task).
   *
   * Use a valid CPU number if you want to bind the kthread worker
   * to the given CPU and the associated NUMA node.
   *
   * A good practice is to add the cpu number also into the worker name.
   * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
   *
   * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
   * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
   * when the worker was SIGKILLed.
   */
  struct kthread_worker *
  kthread_create_worker_on_cpu(int cpu, unsigned int flags,
  			     const char namefmt[], ...)
  {
  	struct kthread_worker *worker;
  	va_list args;
  
  	va_start(args, namefmt);
  	worker = __kthread_create_worker(cpu, flags, namefmt, args);
  	va_end(args);
  
  	return worker;
  }
  EXPORT_SYMBOL(kthread_create_worker_on_cpu);
  /*
   * Returns true when the work could not be queued at the moment.
   * It happens when it is already pending in a worker list
   * or when it is being cancelled.
   */
  static inline bool queuing_blocked(struct kthread_worker *worker,
  				   struct kthread_work *work)
  {
  	lockdep_assert_held(&worker->lock);
  
  	return !list_empty(&work->node) || work->canceling;
  }
  static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
  					     struct kthread_work *work)
  {
  	lockdep_assert_held(&worker->lock);
  	WARN_ON_ONCE(!list_empty(&work->node));
  	/* Do not use a work with >1 worker, see kthread_queue_work() */
  	WARN_ON_ONCE(work->worker && work->worker != worker);
  }
  /* insert @work before @pos in @worker */
  static void kthread_insert_work(struct kthread_worker *worker,
  				struct kthread_work *work,
  				struct list_head *pos)
  {
  	kthread_insert_work_sanity_check(worker, work);
  
  	list_add_tail(&work->node, pos);
  	work->worker = worker;
  	if (!worker->current_work && likely(worker->task))
  		wake_up_process(worker->task);
  }
  /**
   * kthread_queue_work - queue a kthread_work
   * @worker: target kthread_worker
   * @work: kthread_work to queue
   *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been created with kthread_create_worker().  Returns %true
   * if @work was successfully queued, %false if it was already pending.
   *
   * Reinitialize the work if it needs to be used by another worker.
   * For example, when the worker was stopped and started again.
   */
  bool kthread_queue_work(struct kthread_worker *worker,
  			struct kthread_work *work)
  {
  	bool ret = false;
  	unsigned long flags;
  	raw_spin_lock_irqsave(&worker->lock, flags);
  	if (!queuing_blocked(worker, work)) {
  		kthread_insert_work(worker, work, &worker->work_list);
  		ret = true;
  	}
  	raw_spin_unlock_irqrestore(&worker->lock, flags);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(kthread_queue_work);
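
/*
 * Illustrative sketch, not part of the original file: a dedicated worker
 * processing queued items in thread context. my_ctx, my_work_fn and
 * process_ctx() are hypothetical.
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		struct my_ctx *ctx = container_of(work, struct my_ctx, work);
 *
 *		process_ctx(ctx);
 *	}
 *
 *	worker = kthread_create_worker(0, "my-worker");
 *	if (IS_ERR(worker))
 *		return PTR_ERR(worker);
 *	kthread_init_work(&ctx->work, my_work_fn);
 *	kthread_queue_work(worker, &ctx->work);
 */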

  /**
   * kthread_delayed_work_timer_fn - callback that queues the associated kthread
   *	delayed work when the timer expires.
   * @t: pointer to the expired timer
   *
   * The format of the function is defined by struct timer_list.
   * It should have been called from irqsafe timer with irq already off.
   */
  void kthread_delayed_work_timer_fn(struct timer_list *t)
  {
  	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
  	struct kthread_work *work = &dwork->work;
  	struct kthread_worker *worker = work->worker;
  	unsigned long flags;
  
  	/*
  	 * This might happen when a pending work is reinitialized.
  	 * It means that it is used a wrong way.
  	 */
  	if (WARN_ON_ONCE(!worker))
  		return;
  	raw_spin_lock_irqsave(&worker->lock, flags);
  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
  	WARN_ON_ONCE(work->worker != worker);
  
  	/* Move the work from worker->delayed_work_list. */
  	WARN_ON_ONCE(list_empty(&work->node));
  	list_del_init(&work->node);
  	if (!work->canceling)
  		kthread_insert_work(worker, work, &worker->work_list);

  	raw_spin_unlock_irqrestore(&worker->lock, flags);
  }
  EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
  static void __kthread_queue_delayed_work(struct kthread_worker *worker,
  					 struct kthread_delayed_work *dwork,
  					 unsigned long delay)
  {
  	struct timer_list *timer = &dwork->timer;
  	struct kthread_work *work = &dwork->work;
  	/*
  	 * With CFI, timer->function can point to a jump table entry in a module,
  	 * which fails the comparison. Disable the warning if CFI and modules are
  	 * both enabled.
  	 */
  	if (!IS_ENABLED(CONFIG_CFI_CLANG) || !IS_ENABLED(CONFIG_MODULES))
  		WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
  
  	/*
  	 * If @delay is 0, queue @dwork->work immediately.  This is for
  	 * both optimization and correctness.  The earliest @timer can
  	 * expire is on the closest next tick and delayed_work users depend
  	 * on that there's no such delay when @delay is 0.
  	 */
  	if (!delay) {
  		kthread_insert_work(worker, work, &worker->work_list);
  		return;
  	}
  
  	/* Be paranoid and try to detect possible races already now. */
  	kthread_insert_work_sanity_check(worker, work);
  
  	list_add(&work->node, &worker->delayed_work_list);
  	work->worker = worker;
  	timer->expires = jiffies + delay;
  	add_timer(timer);
  }
  
  /**
   * kthread_queue_delayed_work - queue the associated kthread work
   *	after a delay.
   * @worker: target kthread_worker
   * @dwork: kthread_delayed_work to queue
   * @delay: number of jiffies to wait before queuing
   *
   * If the work has not been pending it starts a timer that will queue
   * the work after the given @delay. If @delay is zero, it queues the
   * work immediately.
   *
   * Return: %false if the @work has already been pending. It means that
   * either the timer was running or the work was queued. It returns %true
   * otherwise.
   */
  bool kthread_queue_delayed_work(struct kthread_worker *worker,
  				struct kthread_delayed_work *dwork,
  				unsigned long delay)
  {
  	struct kthread_work *work = &dwork->work;
  	unsigned long flags;
  	bool ret = false;
  	raw_spin_lock_irqsave(&worker->lock, flags);

  	if (!queuing_blocked(worker, work)) {
  		__kthread_queue_delayed_work(worker, dwork, delay);
  		ret = true;
  	}
  	raw_spin_unlock_irqrestore(&worker->lock, flags);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
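
/*
 * Illustrative sketch, not part of the original file: arming a delayed work
 * so that my_timeout_fn() runs on the worker roughly one second from now.
 * my_timeout_fn and dwork are hypothetical.
 *
 *	kthread_init_delayed_work(&dwork, my_timeout_fn);
 *	kthread_queue_delayed_work(worker, &dwork, msecs_to_jiffies(1000));
 */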
  struct kthread_flush_work {
  	struct kthread_work	work;
  	struct completion	done;
  };
  
  static void kthread_flush_work_fn(struct kthread_work *work)
  {
  	struct kthread_flush_work *fwork =
  		container_of(work, struct kthread_flush_work, work);
  	complete(&fwork->done);
  }
  /**
   * kthread_flush_work - flush a kthread_work
   * @work: work to flush
   *
   * If @work is queued or executing, wait for it to finish execution.
   */
  void kthread_flush_work(struct kthread_work *work)
  {
  	struct kthread_flush_work fwork = {
  		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
  		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
  	};
  	struct kthread_worker *worker;
  	bool noop = false;
  	worker = work->worker;
  	if (!worker)
  		return;

  	raw_spin_lock_irq(&worker->lock);
  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
  	WARN_ON_ONCE(work->worker != worker);

  	if (!list_empty(&work->node))
  		kthread_insert_work(worker, &fwork.work, work->node.next);
  	else if (worker->current_work == work)
  		kthread_insert_work(worker, &fwork.work,
  				    worker->work_list.next);
  	else
  		noop = true;

  	raw_spin_unlock_irq(&worker->lock);

  	if (!noop)
  		wait_for_completion(&fwork.done);
  }
  EXPORT_SYMBOL_GPL(kthread_flush_work);

  /*
   * This function removes the work from the worker queue. Also it makes sure
   * that it won't get queued later via the delayed work's timer.
   *
   * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
   *
   * Return: %true if @work was pending and successfully canceled,
   *	%false if @work was not pending
   */
  static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
  				  unsigned long *flags)
  {
  	/* Try to cancel the timer if exists. */
  	if (is_dwork) {
  		struct kthread_delayed_work *dwork =
  			container_of(work, struct kthread_delayed_work, work);
  		struct kthread_worker *worker = work->worker;
  
  		/*
  		 * del_timer_sync() must be called to make sure that the timer
  		 * callback is not running. The lock must be temporary released
  		 * to avoid a deadlock with the callback. In the meantime,
  		 * any queuing is blocked by setting the canceling counter.
  		 */
  		work->canceling++;
  		raw_spin_unlock_irqrestore(&worker->lock, *flags);
  		del_timer_sync(&dwork->timer);
  		raw_spin_lock_irqsave(&worker->lock, *flags);
  		work->canceling--;
  	}
  
  	/*
  	 * Try to remove the work from a worker list. It might either
  	 * be from worker->work_list or from worker->delayed_work_list.
  	 */
  	if (!list_empty(&work->node)) {
  		list_del_init(&work->node);
  		return true;
  	}
  
  	return false;
  }
  /**
   * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
   * @worker: kthread worker to use
   * @dwork: kthread delayed work to queue
   * @delay: number of jiffies to wait before queuing
   *
   * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
   * modify @dwork's timer so that it expires after @delay. If @delay is zero,
   * @work is guaranteed to be queued immediately.
   *
   * Return: %true if @dwork was pending and its timer was modified,
   * %false otherwise.
   *
   * A special case is when the work is being canceled in parallel.
   * It might be caused either by the real kthread_cancel_delayed_work_sync()
   * or yet another kthread_mod_delayed_work() call. We let the other command
   * win and return %false here. The caller is supposed to synchronize these
   * operations a reasonable way.
   *
   * This function is safe to call from any context including IRQ handler.
   * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
   * for details.
   */
  bool kthread_mod_delayed_work(struct kthread_worker *worker,
  			      struct kthread_delayed_work *dwork,
  			      unsigned long delay)
  {
  	struct kthread_work *work = &dwork->work;
  	unsigned long flags;
  	int ret = false;
  	raw_spin_lock_irqsave(&worker->lock, flags);
  
  	/* Do not bother with canceling when never queued. */
  	if (!work->worker)
  		goto fast_queue;
  
  	/* Work must not be used with >1 worker, see kthread_queue_work() */
  	WARN_ON_ONCE(work->worker != worker);
  
  	/* Do not fight with another command that is canceling this work. */
  	if (work->canceling)
  		goto out;
  
  	ret = __kthread_cancel_work(work, true, &flags);
  fast_queue:
  	__kthread_queue_delayed_work(worker, dwork, delay);
  out:
  	raw_spin_unlock_irqrestore(&worker->lock, flags);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
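
/*
 * Illustrative sketch, not part of the original file: using
 * kthread_mod_delayed_work() to push back a watchdog-style timeout each
 * time activity is seen, instead of cancel followed by queue. Unlike
 * kthread_queue_delayed_work(), this rearms the timer even when the work
 * was already pending.
 *
 *	kthread_mod_delayed_work(worker, &dwork, msecs_to_jiffies(5000));
 */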
  static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
  {
  	struct kthread_worker *worker = work->worker;
  	unsigned long flags;
  	int ret = false;
  
  	if (!worker)
  		goto out;
  	raw_spin_lock_irqsave(&worker->lock, flags);
  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
  	WARN_ON_ONCE(work->worker != worker);
  
  	ret = __kthread_cancel_work(work, is_dwork, &flags);
  
  	if (worker->current_work != work)
  		goto out_fast;
  
  	/*
  	 * The work is in progress and we need to wait with the lock released.
  	 * In the meantime, block any queuing by setting the canceling counter.
  	 */
  	work->canceling++;
  	raw_spin_unlock_irqrestore(&worker->lock, flags);
  	kthread_flush_work(work);
  	raw_spin_lock_irqsave(&worker->lock, flags);
  	work->canceling--;
  
  out_fast:
  	raw_spin_unlock_irqrestore(&worker->lock, flags);
  out:
  	return ret;
  }
  
  /**
   * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
   * @work: the kthread work to cancel
   *
   * Cancel @work and wait for its execution to finish.  This function
   * can be used even if the work re-queues itself. On return from this
   * function, @work is guaranteed to be not pending or executing on any CPU.
   *
   * kthread_cancel_work_sync(&delayed_work->work) must not be used for
   * delayed works. Use kthread_cancel_delayed_work_sync() instead.
   *
   * The caller must ensure that the worker on which @work was last
   * queued can't be destroyed before this function returns.
   *
   * Return: %true if @work was pending, %false otherwise.
   */
  bool kthread_cancel_work_sync(struct kthread_work *work)
  {
  	return __kthread_cancel_work_sync(work, false);
  }
  EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
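  
  /*
   * Usage sketch (illustrative; my_worker and my_work are hypothetical): even
   * a handler that re-queues itself can be stopped reliably, because the
   * cancel blocks further queuing while it waits.
   *
   *	static void my_work_fn(struct kthread_work *work)
   *	{
   *		// ... one step of processing ...
   *		kthread_queue_work(my_worker, work);	// re-queue itself
   *	}
   *
   *	// Teardown: returns only once my_work_fn() is neither pending
   *	// nor running anywhere.
   *	kthread_cancel_work_sync(&my_work);
   */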
  
  /**
   * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
   *	wait for it to finish.
   * @dwork: the kthread delayed work to cancel
   *
   * This is kthread_cancel_work_sync() for delayed works.
   *
   * Return: %true if @dwork was pending, %false otherwise.
   */
  bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
  {
  	return __kthread_cancel_work_sync(&dwork->work, true);
  }
  EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
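  
  /*
   * Usage sketch (illustrative; poll_dwork is hypothetical): tearing down a
   * periodic poller whose timer may be pending, or whose handler may be
   * running at this very moment.
   *
   *	if (kthread_cancel_delayed_work_sync(&poll_dwork))
   *		pr_debug("poller was still pending\n");
   *	// From here on, poll_dwork is neither pending nor running.
   */
  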
  /**
   * kthread_flush_worker - flush all current works on a kthread_worker
   * @worker: worker to flush
   *
   * Wait until all currently executing or pending works on @worker are
   * finished.
   */
  void kthread_flush_worker(struct kthread_worker *worker)
  {
  	struct kthread_flush_work fwork = {
  		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
  		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
  	};
  	kthread_queue_work(worker, &fwork.work);
  	wait_for_completion(&fwork.done);
  }
  EXPORT_SYMBOL_GPL(kthread_flush_worker);
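  
  /*
   * Usage sketch (illustrative; io_worker and the helpers are hypothetical):
   * drain a worker before releasing resources its works may still touch.
   *
   *	kthread_queue_work(io_worker, &last_work);
   *	kthread_flush_worker(io_worker);   // last_work and all earlier works done
   *	release_shared_buffers();          // now safe
   */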
  
  /**
   * kthread_destroy_worker - destroy a kthread worker
   * @worker: worker to be destroyed
   *
   * Flush and destroy @worker.  The simple flush is enough because the kthread
   * worker API is used only in trivial scenarios; no multi-step state
   * machines are needed.
   */
  void kthread_destroy_worker(struct kthread_worker *worker)
  {
  	struct task_struct *task;
  
  	task = worker->task;
  	if (WARN_ON(!task))
  		return;
  
  	kthread_flush_worker(worker);
  	kthread_stop(task);
  	WARN_ON(!list_empty(&worker->work_list));
  	kfree(worker);
  }
  EXPORT_SYMBOL(kthread_destroy_worker);
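  
  /*
   * Usage sketch (illustrative; the names are hypothetical) of the whole
   * worker lifecycle:
   *
   *	struct kthread_worker *w = kthread_create_worker(0, "my-worker");
   *
   *	if (IS_ERR(w))
   *		return PTR_ERR(w);
   *	kthread_init_work(&my_work, my_work_fn);
   *	kthread_queue_work(w, &my_work);
   *	...
   *	kthread_destroy_worker(w);	// flushes, stops the kthread, frees w
   */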

  /**
   * kthread_use_mm - make the calling kthread operate on an address space
   * @mm: address space to operate on
   */
  void kthread_use_mm(struct mm_struct *mm)
  {
  	struct mm_struct *active_mm;
  	struct task_struct *tsk = current;
  	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
  	WARN_ON_ONCE(tsk->mm);
  	task_lock(tsk);
  	/* Hold off tlb flush IPIs while switching mm's */
  	local_irq_disable();
  	active_mm = tsk->active_mm;
  	if (active_mm != mm) {
  		mmgrab(mm);
  		tsk->active_mm = mm;
  	}
  	tsk->mm = mm;
  	switch_mm_irqs_off(active_mm, mm, tsk);
  	local_irq_enable();
  	task_unlock(tsk);
  #ifdef finish_arch_post_lock_switch
  	finish_arch_post_lock_switch();
  #endif
  
  	if (active_mm != mm)
  		mmdrop(active_mm);

  	to_kthread(tsk)->oldfs = force_uaccess_begin();
  }
  EXPORT_SYMBOL_GPL(kthread_use_mm);

  /**
   * kthread_unuse_mm - reverse the effect of kthread_use_mm()
   * @mm: address space to operate on
   */
  void kthread_unuse_mm(struct mm_struct *mm)
  {
  	struct task_struct *tsk = current;
  	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
  	WARN_ON_ONCE(!tsk->mm);
  	force_uaccess_end(to_kthread(tsk)->oldfs);

  	task_lock(tsk);
  	sync_mm_rss(mm);
  	local_irq_disable();
  	tsk->mm = NULL;
  	/* active_mm is still 'mm' */
  	enter_lazy_tlb(mm, tsk);
  	local_irq_enable();
  	task_unlock(tsk);
  }
  EXPORT_SYMBOL_GPL(kthread_unuse_mm);
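  
  /*
   * Usage sketch (illustrative): a kthread doing I/O on behalf of a user task
   * temporarily adopts that task's address space. @mm is assumed to have been
   * obtained and pinned by the caller, e.g. via get_task_mm().
   *
   *	kthread_use_mm(mm);
   *	if (copy_from_user(buf, user_ptr, len))	// user accesses resolve in @mm
   *		ret = -EFAULT;
   *	kthread_unuse_mm(mm);
   */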

  #ifdef CONFIG_BLK_CGROUP
  /**
   * kthread_associate_blkcg - associate blkcg to current kthread
   * @css: the cgroup info
   *
   * The current thread must be a kthread that runs jobs on behalf of other
   * threads. In some cases the jobs should carry the cgroup info of the
   * originating thread rather than that of the kthread itself. This function
   * stores the originating thread's cgroup info in the current kthread's
   * context for later retrieval.
   */
  void kthread_associate_blkcg(struct cgroup_subsys_state *css)
  {
  	struct kthread *kthread;
  
  	if (!(current->flags & PF_KTHREAD))
  		return;
  	kthread = to_kthread(current);
  	if (!kthread)
  		return;
  
  	if (kthread->blkcg_css) {
  		css_put(kthread->blkcg_css);
  		kthread->blkcg_css = NULL;
  	}
  	if (css) {
  		css_get(css);
  		kthread->blkcg_css = css;
  	}
  }
  EXPORT_SYMBOL(kthread_associate_blkcg);
  
  /**
   * kthread_blkcg - get associated blkcg css of current kthread
   *
   * Current thread must be a kthread.
   */
  struct cgroup_subsys_state *kthread_blkcg(void)
  {
  	struct kthread *kthread;
  
  	if (current->flags & PF_KTHREAD) {
  		kthread = to_kthread(current);
  		if (kthread)
  			return kthread->blkcg_css;
  	}
  	return NULL;
  }
  EXPORT_SYMBOL(kthread_blkcg);
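  
  /*
   * Usage sketch (illustrative; origin_css and submit_io_on_behalf() are
   * hypothetical): a loop-device-style driver charging I/O issued from its
   * kthread to the originating task's blkcg.
   *
   *	kthread_associate_blkcg(origin_css);	// css taken from the original bio/task
   *	submit_io_on_behalf();			// block layer may consult kthread_blkcg()
   *	kthread_associate_blkcg(NULL);		// drop the association
   */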
  #endif