kernel/kthread.c

// SPDX-License-Identifier: GPL-2.0-only
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <uapi/linux/sched/types.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/numa.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;
        int node;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion *done;

        struct list_head list;
};

struct kthread {
        unsigned long flags;
        unsigned int cpu;
        void *data;
        struct completion parked;
        struct completion exited;
#ifdef CONFIG_BLK_CGROUP
        struct cgroup_subsys_state *blkcg_css;
#endif
};

enum KTHREAD_BITS {
        KTHREAD_IS_PER_CPU = 0,
        KTHREAD_SHOULD_STOP,
        KTHREAD_SHOULD_PARK,
};

static inline void set_kthread_struct(void *kthread)
{
        /*
         * We abuse ->set_child_tid to avoid the new member and because it
         * can't be wrongly copied by copy_process(). We also rely on fact
         * that the caller can't exec, so PF_KTHREAD can't be cleared.
         */
        current->set_child_tid = (__force void __user *)kthread;
}

static inline struct kthread *to_kthread(struct task_struct *k)
{
        WARN_ON(!(k->flags & PF_KTHREAD));
        return (__force void *)k->set_child_tid;
}

void free_kthread_struct(struct task_struct *k)
{
        struct kthread *kthread;

        /*
         * Can be NULL if this kthread was created by kernel_thread()
         * or if kmalloc() in kthread() failed.
         */
        kthread = to_kthread(k);
#ifdef CONFIG_BLK_CGROUP
        WARN_ON_ONCE(kthread && kthread->blkcg_css);
#endif
        kfree(kthread);
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
        return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

bool __kthread_should_park(struct task_struct *k)
{
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
}
EXPORT_SYMBOL_GPL(__kthread_should_park);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme().
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
        return __kthread_should_park(current);
}
EXPORT_SYMBOL_GPL(kthread_should_park);

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
        bool frozen = false;

        might_sleep();

        if (unlikely(freezing(current)))
                frozen = __refrigerator(true);

        if (was_frozen)
                *was_frozen = frozen;

        return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}

/**
 * kthread_probe_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *kthread_probe_data(struct task_struct *task)
{
        struct kthread *kthread = to_kthread(task);
        void *data = NULL;

        probe_kernel_read(&data, &kthread->data, sizeof(data));
        return data;
}

static void __kthread_parkme(struct kthread *self)
{
        for (;;) {
                /*
                 * TASK_PARKED is a special state; we must serialize against
                 * possible pending wakeups to avoid store-store collisions on
                 * task->state.
                 *
                 * Such a collision might possibly result in the task state
                 * changing from TASK_PARKED and us failing the
                 * wait_task_inactive() in kthread_park().
                 */
                set_special_state(TASK_PARKED);
                if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
                        break;

                complete(&self->parked);
                schedule();
        }
        __set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
        __kthread_parkme(to_kthread(current));
}
EXPORT_SYMBOL_GPL(kthread_parkme);

static int kthread(void *_create)
{
        /* Copy data: it's on kthread's stack */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct completion *done;
        struct kthread *self;
        int ret;

        self = kzalloc(sizeof(*self), GFP_KERNEL);
        set_kthread_struct(self);

        /* If user was SIGKILLed, I release the structure. */
        done = xchg(&create->done, NULL);
        if (!done) {
                kfree(create);
                do_exit(-EINTR);
        }

        if (!self) {
                create->result = ERR_PTR(-ENOMEM);
                complete(done);
                do_exit(-ENOMEM);
        }

        self->data = data;
        init_completion(&self->exited);
        init_completion(&self->parked);
        current->vfork_done = &self->exited;

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        complete(done);
        schedule();

        ret = -EINTR;
        if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
                cgroup_kthread_ready();
                __kthread_parkme(self);
                ret = threadfn(data);
        }
        do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
        if (tsk == kthreadd_task)
                return tsk->pref_node_fork;
#endif
        return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

#ifdef CONFIG_NUMA
        current->pref_node_fork = create->node;
#endif
        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                /* If user was SIGKILLed, I release the structure. */
                struct completion *done = xchg(&create->done, NULL);

                if (!done) {
                        kfree(create);
                        return;
                }
                create->result = ERR_PTR(pid);
                complete(done);
        }
}

static __printf(4, 0)
struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
                                             void *data, int node,
                                             const char namefmt[],
                                             va_list args)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct task_struct *task;
        struct kthread_create_info *create = kmalloc(sizeof(*create),
                                                     GFP_KERNEL);

        if (!create)
                return ERR_PTR(-ENOMEM);
        create->threadfn = threadfn;
        create->data = data;
        create->node = node;
        create->done = &done;

        spin_lock(&kthread_create_lock);
        list_add_tail(&create->list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        /*
         * Wait for completion in killable state, for I might be chosen by
         * the OOM killer while kthreadd is trying to allocate memory for
         * new kernel thread.
         */
        if (unlikely(wait_for_completion_killable(&done))) {
                /*
                 * If I was SIGKILLed before kthreadd (or new kernel thread)
                 * calls complete(), leave the cleanup of this structure to
                 * that thread.
                 */
                if (xchg(&create->done, NULL))
                        return ERR_PTR(-EINTR);
                /*
                 * kthreadd (or new kernel thread) will call complete()
                 * shortly.
                 */
                wait_for_completion(&done);
        }
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };
                char name[TASK_COMM_LEN];

                /*
                 * task is already visible to other tasks, so updating
                 * COMM must be protected.
                 */
                vsnprintf(name, sizeof(name), namefmt, args);
                set_task_comm(task, name);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(task, cpu_all_mask);
        }
        kfree(create);
        return task;
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: task and thread structures for the thread are allocated on this node
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
 * is affine to all CPUs.
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           void *data, int node,
                                           const char namefmt[],
                                           ...)
{
        struct task_struct *task;
        va_list args;

        va_start(args, namefmt);
        task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
        va_end(args);

        return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
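
/*
 * Editor's note: the block below is an illustrative usage sketch, not part of
 * upstream kthread.c, and is kept out of the build.  It shows the typical
 * life cycle built on the API above; all example_* names are hypothetical.
 */
#if 0
static int example_threadfn(void *data)
{
        /* Run until someone calls kthread_stop() on this thread. */
        while (!kthread_should_stop()) {
                /* ... do one unit of work ... */
                schedule_timeout_interruptible(HZ);
        }
        /* This value is handed back to kthread_stop(). */
        return 0;
}

static int example_start_and_stop(void)
{
        struct task_struct *tsk;

        tsk = kthread_create_on_node(example_threadfn, NULL, NUMA_NO_NODE,
                                     "example-%d", 0);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        wake_up_process(tsk);           /* the thread is created stopped */
        /* ... later ... */
        return kthread_stop(tsk);       /* returns example_threadfn()'s value */
}
#endif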

static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
{
        unsigned long flags;

        if (!wait_task_inactive(p, state)) {
                WARN_ON(1);
                return;
        }

        /* It's safe because the task is inactive. */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
        do_set_cpus_allowed(p, mask);
        p->flags |= PF_NO_SETAFFINITY;
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
        __kthread_bind_mask(p, cpumask_of(cpu), state);
}

void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
        __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound,
 * @namefmt: printf-style name for the thread. Format is restricted
 *           to "name.*%u". Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          void *data, unsigned int cpu,
                                          const char *namefmt)
{
        struct task_struct *p;

        p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
                                   cpu);
        if (IS_ERR(p))
                return p;
        kthread_bind(p, cpu);
        /* CPU hotplug needs to bind once again when unparking the thread. */
        set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        return p;
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return.  If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
        struct kthread *kthread = to_kthread(k);

        /*
         * Newly created kthread was parked when the CPU was offline.
         * The binding was lost and we need to set it again.
         */
        if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                __kthread_bind(k, kthread->cpu, TASK_PARKED);

        clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        /*
         * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
         */
        wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
        struct kthread *kthread = to_kthread(k);

        if (WARN_ON(k->flags & PF_EXITING))
                return -ENOSYS;

        if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
                return -EBUSY;

        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        if (k != current) {
                wake_up_process(k);
                /*
                 * Wait for __kthread_parkme() to complete(), this means we
                 * _will_ have TASK_PARKED and are about to call schedule().
                 */
                wait_for_completion(&kthread->parked);
                /*
                 * Now wait for that schedule() to complete and the task to
                 * get scheduled out.
                 */
                WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
        }

        return 0;
}
EXPORT_SYMBOL_GPL(kthread_park);
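
/*
 * Editor's note: another illustrative sketch, not part of upstream kthread.c
 * and kept out of the build.  It shows a thread function that honours both
 * park and stop requests; all example_* names are hypothetical.
 */
#if 0
static int example_parkable_fn(void *data)
{
        while (!kthread_should_stop()) {
                if (kthread_should_park()) {
                        /* Quiesce here, then sleep in TASK_PARKED until
                         * kthread_unpark() (or kthread_stop()) is called. */
                        kthread_parkme();
                        continue;
                }
                /* ... do work that must not run while parked ... */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}

static void example_pause_and_resume(struct task_struct *tsk)
{
        kthread_park(tsk);      /* returns once the thread is parked */
        /* ... the thread is guaranteed not to be in its work section ... */
        kthread_unpark(tsk);    /* lets it continue */
}
#endif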

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread;
        int ret;

        trace_sched_kthread_stop(k);

        get_task_struct(k);
        kthread = to_kthread(k);
        set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
        kthread_unpark(k);
        wake_up_process(k);
        wait_for_completion(&kthread->exited);
        ret = k->exit_code;
        put_task_struct(k);

        trace_sched_kthread_stop_ret(ret);
        return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_states[N_MEMORY]);

        current->flags |= PF_NOFREEZE;
        cgroup_init_kthreadd();

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __kthread_init_worker(struct kthread_worker *worker,
                           const char *name,
                           struct lock_class_key *key)
{
        memset(worker, 0, sizeof(struct kthread_worker));
        raw_spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        INIT_LIST_HEAD(&worker->delayed_work_list);
}
EXPORT_SYMBOL_GPL(__kthread_init_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function implements the main cycle of kthread worker. It processes
 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
 * is empty.
 *
 * The works are not allowed to hold any locks, or to leave preemption or
 * interrupts disabled, when they finish. A safe point for freezing is
 * provided when one work finishes and before a new one is started.
 *
 * Also the works must not be handled by more than one worker at the same time,
 * see also kthread_queue_work().
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        /*
         * FIXME: Update the check and remove the assignment when all kthread
         * worker users are created using kthread_create_worker*() functions.
         */
        WARN_ON(worker->task && worker->task != current);
        worker->task = current;

        if (worker->flags & KTW_FREEZABLE)
                set_freezable();

repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                raw_spin_lock_irq(&worker->lock);
                worker->task = NULL;
                raw_spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        raw_spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        worker->current_work = work;
        raw_spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        cond_resched();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

static __printf(3, 0) struct kthread_worker *
__kthread_create_worker(int cpu, unsigned int flags,
                        const char namefmt[], va_list args)
{
        struct kthread_worker *worker;
        struct task_struct *task;
        int node = NUMA_NO_NODE;

        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
        if (!worker)
                return ERR_PTR(-ENOMEM);

        kthread_init_worker(worker);

        if (cpu >= 0)
                node = cpu_to_node(cpu);

        task = __kthread_create_on_node(kthread_worker_fn, worker,
                                        node, namefmt, args);
        if (IS_ERR(task))
                goto fail_task;

        if (cpu >= 0)
                kthread_bind(task, cpu);

        worker->flags = flags;
        worker->task = task;
        wake_up_process(task);
        return worker;

fail_task:
        kfree(worker);
        return ERR_CAST(task);
}

/**
 * kthread_create_worker - create a kthread worker
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(-1, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker);

/**
 * kthread_create_worker_on_cpu - create a kthread worker and bind it
 *      to a given CPU and the associated NUMA node.
 * @cpu: CPU number
 * @flags: flags modifying the default behavior of the worker
 * @namefmt: printf-style name for the kthread worker (task).
 *
 * Use a valid CPU number if you want to bind the kthread worker
 * to the given CPU and the associated NUMA node.
 *
 * A good practice is to add the cpu number also into the worker name.
 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
 *
 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
 * when the worker was SIGKILLed.
 */
struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
                             const char namefmt[], ...)
{
        struct kthread_worker *worker;
        va_list args;

        va_start(args, namefmt);
        worker = __kthread_create_worker(cpu, flags, namefmt, args);
        va_end(args);

        return worker;
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);
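
/*
 * Editor's note: illustrative sketch, not part of upstream kthread.c and kept
 * out of the build.  It shows the basic kthread_worker pattern: create a
 * worker, queue a work item, wait for it, then tear the worker down.  All
 * example_* names are hypothetical.
 */
#if 0
static void example_work_fn(struct kthread_work *work)
{
        /* Runs in the worker's kthread context, one work at a time. */
}

static int example_use_worker(void)
{
        struct kthread_worker *worker;
        struct kthread_work work;

        worker = kthread_create_worker(0, "example-worker");
        if (IS_ERR(worker))
                return PTR_ERR(worker);

        kthread_init_work(&work, example_work_fn);
        kthread_queue_work(worker, &work);
        kthread_flush_work(&work);              /* wait until it has run */

        kthread_destroy_worker(worker);         /* flushes and stops the kthread */
        return 0;
}
#endif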

/*
 * Returns true when the work could not be queued at the moment.
 * It happens when it is already pending in a worker list
 * or when it is being cancelled.
 */
static inline bool queuing_blocked(struct kthread_worker *worker,
                                   struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);

        return !list_empty(&work->node) || work->canceling;
}

static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
                                             struct kthread_work *work)
{
        lockdep_assert_held(&worker->lock);
        WARN_ON_ONCE(!list_empty(&work->node));
        /* Do not use a work with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker && work->worker != worker);
}

/* insert @work before @pos in @worker */
static void kthread_insert_work(struct kthread_worker *worker,
                                struct kthread_work *work,
                                struct list_head *pos)
{
        kthread_insert_work_sanity_check(worker, work);

        list_add_tail(&work->node, pos);
        work->worker = worker;
        if (!worker->current_work && likely(worker->task))
                wake_up_process(worker->task);
}

/**
 * kthread_queue_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @task for async execution.  @task
 * must have been created with kthread_worker_create().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 *
 * Reinitialize the work if it needs to be used by another worker.
 * For example, when the worker was stopped and started again.
 */
bool kthread_queue_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        raw_spin_lock_irqsave(&worker->lock, flags);
        if (!queuing_blocked(worker, work)) {
                kthread_insert_work(worker, work, &worker->work_list);
                ret = true;
        }
        raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_work);

/**
 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
 *      delayed work when the timer expires.
 * @t: pointer to the expired timer
 *
 * The format of the function is defined by struct timer_list.
 * It should have been called from irqsafe timer with irq already off.
 */
void kthread_delayed_work_timer_fn(struct timer_list *t)
{
        struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
        struct kthread_work *work = &dwork->work;
        struct kthread_worker *worker = work->worker;
        unsigned long flags;

        /*
         * This might happen when a pending work is reinitialized.
         * It means that it is used in a wrong way.
         */
        if (WARN_ON_ONCE(!worker))
                return;

        raw_spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        /* Move the work from worker->delayed_work_list. */
        WARN_ON_ONCE(list_empty(&work->node));
        list_del_init(&work->node);
        kthread_insert_work(worker, work, &worker->work_list);

        raw_spin_unlock_irqrestore(&worker->lock, flags);
}
EXPORT_SYMBOL(kthread_delayed_work_timer_fn);

static void __kthread_queue_delayed_work(struct kthread_worker *worker,
                                         struct kthread_delayed_work *dwork,
                                         unsigned long delay)
{
        struct timer_list *timer = &dwork->timer;
        struct kthread_work *work = &dwork->work;

        WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);

        /*
         * If @delay is 0, queue @dwork->work immediately.  This is for
         * both optimization and correctness.  The earliest @timer can
         * expire is on the closest next tick and delayed_work users depend
         * on that there's no such delay when @delay is 0.
         */
        if (!delay) {
                kthread_insert_work(worker, work, &worker->work_list);
                return;
        }

        /* Be paranoid and try to detect possible races already now. */
        kthread_insert_work_sanity_check(worker, work);

        list_add(&work->node, &worker->delayed_work_list);
        work->worker = worker;
        timer->expires = jiffies + delay;
        add_timer(timer);
}

/**
 * kthread_queue_delayed_work - queue the associated kthread work
 *      after a delay.
 * @worker: target kthread_worker
 * @dwork: kthread_delayed_work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If the work has not been pending it starts a timer that will queue
 * the work after the given @delay. If @delay is zero, it queues the
 * work immediately.
 *
 * Return: %false if the @work has already been pending. It means that
 * either the timer was running or the work was queued. It returns %true
 * otherwise.
 */
bool kthread_queue_delayed_work(struct kthread_worker *worker,
                                struct kthread_delayed_work *dwork,
                                unsigned long delay)
{
        struct kthread_work *work = &dwork->work;
        unsigned long flags;
        bool ret = false;

        raw_spin_lock_irqsave(&worker->lock, flags);

        if (!queuing_blocked(worker, work)) {
                __kthread_queue_delayed_work(worker, dwork, delay);
                ret = true;
        }

        raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
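
/*
 * Editor's note: illustrative sketch, not part of upstream kthread.c and kept
 * out of the build.  It shows arming, re-arming and cancelling a delayed work;
 * the example_* names are hypothetical and @worker is assumed to come from
 * kthread_create_worker().
 */
#if 0
static void example_timeout_fn(struct kthread_work *work)
{
        /* Runs in the worker's kthread roughly @delay jiffies after queuing. */
}

/* Static so the armed timer never points at a dead stack frame. */
static struct kthread_delayed_work example_dwork;

static void example_delayed(struct kthread_worker *worker)
{
        kthread_init_delayed_work(&example_dwork, example_timeout_fn);

        /* Arm: example_timeout_fn() runs after about one second. */
        kthread_queue_delayed_work(worker, &example_dwork, HZ);

        /* Push the deadline out to ~5 seconds, pending or not. */
        kthread_mod_delayed_work(worker, &example_dwork, 5 * HZ);

        /* Give up: make sure neither the timer nor the work is left behind. */
        kthread_cancel_delayed_work_sync(&example_dwork);
}
#endif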

struct kthread_flush_work {
        struct kthread_work     work;
        struct completion       done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/**
 * kthread_flush_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void kthread_flush_work(struct kthread_work *work)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
        struct kthread_worker *worker;
        bool noop = false;

        worker = work->worker;
        if (!worker)
                return;

        raw_spin_lock_irq(&worker->lock);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        if (!list_empty(&work->node))
                kthread_insert_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
                kthread_insert_work(worker, &fwork.work,
                                    worker->work_list.next);
        else
                noop = true;

        raw_spin_unlock_irq(&worker->lock);

        if (!noop)
                wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
 * This function removes the work from the worker queue. Also it makes sure
 * that it won't get queued later via the delayed work's timer.
 *
 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
 *
 * Return: %true if @work was pending and successfully canceled,
 *      %false if @work was not pending
 */
static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
                                  unsigned long *flags)
{
        /* Try to cancel the timer if it exists. */
        if (is_dwork) {
                struct kthread_delayed_work *dwork =
                        container_of(work, struct kthread_delayed_work, work);
                struct kthread_worker *worker = work->worker;

                /*
                 * del_timer_sync() must be called to make sure that the timer
                 * callback is not running. The lock must be temporarily
                 * released to avoid a deadlock with the callback. In the
                 * meantime, any queuing is blocked by setting the canceling
                 * counter.
                 */
                work->canceling++;
                raw_spin_unlock_irqrestore(&worker->lock, *flags);
                del_timer_sync(&dwork->timer);
                raw_spin_lock_irqsave(&worker->lock, *flags);
                work->canceling--;
        }

        /*
         * Try to remove the work from a worker list. It might either
         * be from worker->work_list or from worker->delayed_work_list.
         */
        if (!list_empty(&work->node)) {
                list_del_init(&work->node);
                return true;
        }

        return false;
}

/**
 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
 * @worker: kthread worker to use
 * @dwork: kthread delayed work to queue
 * @delay: number of jiffies to wait before queuing
 *
 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
 * @work is guaranteed to be queued immediately.
 *
 * Return: %true if @dwork was pending and its timer was modified,
 * %false otherwise.
 *
 * A special case is when the work is being canceled in parallel.
 * It might be caused either by the real kthread_cancel_delayed_work_sync()
 * or yet another kthread_mod_delayed_work() call. We let the other command
 * win and return %false here. The caller is supposed to synchronize these
 * operations a reasonable way.
 *
 * This function is safe to call from any context including IRQ handler.
 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
 * for details.
 */
bool kthread_mod_delayed_work(struct kthread_worker *worker,
                              struct kthread_delayed_work *dwork,
                              unsigned long delay)
{
        struct kthread_work *work = &dwork->work;
        unsigned long flags;
        int ret = false;

        raw_spin_lock_irqsave(&worker->lock, flags);

        /* Do not bother with canceling when never queued. */
        if (!work->worker)
                goto fast_queue;

        /* Work must not be used with >1 worker, see kthread_queue_work() */
        WARN_ON_ONCE(work->worker != worker);

        /* Do not fight with another command that is canceling this work. */
        if (work->canceling)
                goto out;

        ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
        __kthread_queue_delayed_work(worker, dwork, delay);
out:
        raw_spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);

static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
{
        struct kthread_worker *worker = work->worker;
        unsigned long flags;
        int ret = false;

        if (!worker)
                goto out;

        raw_spin_lock_irqsave(&worker->lock, flags);
        /* Work must not be used with >1 worker, see kthread_queue_work(). */
        WARN_ON_ONCE(work->worker != worker);

        ret = __kthread_cancel_work(work, is_dwork, &flags);

        if (worker->current_work != work)
                goto out_fast;

        /*
         * The work is in progress and we need to wait with the lock released.
         * In the meantime, block any queuing by setting the canceling counter.
         */
        work->canceling++;
        raw_spin_unlock_irqrestore(&worker->lock, flags);
        kthread_flush_work(work);
        raw_spin_lock_irqsave(&worker->lock, flags);
        work->canceling--;

out_fast:
        raw_spin_unlock_irqrestore(&worker->lock, flags);
out:
        return ret;
}

/**
 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
 * @work: the kthread work to cancel
 *
 * Cancel @work and wait for its execution to finish.  This function
 * can be used even if the work re-queues itself. On return from this
 * function, @work is guaranteed to be not pending or executing on any CPU.
 *
 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
 *
 * The caller must ensure that the worker on which @work was last
 * queued can't be destroyed before this function returns.
 *
 * Return: %true if @work was pending, %false otherwise.
 */
bool kthread_cancel_work_sync(struct kthread_work *work)
{
        return __kthread_cancel_work_sync(work, false);
}
EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);

/**
 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
 *      wait for it to finish.
 * @dwork: the kthread delayed work to cancel
 *
 * This is kthread_cancel_work_sync() for delayed works.
 *
 * Return: %true if @dwork was pending, %false otherwise.
 */
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
{
        return __kthread_cancel_work_sync(&dwork->work, true);
}
EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);

/**
 * kthread_flush_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void kthread_flush_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        kthread_queue_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(kthread_flush_worker);

/**
 * kthread_destroy_worker - destroy a kthread worker
 * @worker: worker to be destroyed
 *
 * Flush and destroy @worker.  The simple flush is enough because the kthread
 * worker API is used only in trivial scenarios.  There are no multi-step state
 * machines needed.
 */
void kthread_destroy_worker(struct kthread_worker *worker)
{
        struct task_struct *task;

        task = worker->task;
        if (WARN_ON(!task))
                return;

        kthread_flush_worker(worker);
        kthread_stop(task);
        WARN_ON(!list_empty(&worker->work_list));
        kfree(worker);
}
EXPORT_SYMBOL(kthread_destroy_worker);

#ifdef CONFIG_BLK_CGROUP
/**
 * kthread_associate_blkcg - associate blkcg to current kthread
 * @css: the cgroup info
 *
 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info
 * of the original threads instead of that of the current thread. This function
 * stores the original thread's cgroup info in the current kthread context for
 * later retrieval.
 */
void kthread_associate_blkcg(struct cgroup_subsys_state *css)
{
        struct kthread *kthread;

        if (!(current->flags & PF_KTHREAD))
                return;
        kthread = to_kthread(current);
        if (!kthread)
                return;

        if (kthread->blkcg_css) {
                css_put(kthread->blkcg_css);
                kthread->blkcg_css = NULL;
        }
        if (css) {
                css_get(css);
                kthread->blkcg_css = css;
        }
}
EXPORT_SYMBOL(kthread_associate_blkcg);

/**
 * kthread_blkcg - get associated blkcg css of current kthread
 *
 * Current thread must be a kthread.
 */
struct cgroup_subsys_state *kthread_blkcg(void)
{
        struct kthread *kthread;

        if (current->flags & PF_KTHREAD) {
                kthread = to_kthread(current);
                if (kthread)
                        return kthread->blkcg_css;
        }
        return NULL;
}
EXPORT_SYMBOL(kthread_blkcg);
#endif