Blame view
kernel/task_work.c
3.25 KB
e73f8959a
|
1 2 3 |
#include <linux/spinlock.h> #include <linux/task_work.h> #include <linux/tracehook.h> |
/*
 * Sentinel installed as the list head once the task can no longer run
 * callbacks (see task_work_run() when PF_EXITING and the list drained).
 * Only its address is compared; its ->func is never invoked.
 */
static struct callback_head work_exited; /* all we need is ->next == NULL */
892f6668f
|
/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: send the notification if true
 *
 * Queue @work for task_work_run() below and notify the @task if @notify.
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task returns from kernel
 * mode or exits.
 *
 * This is like the signal handler which runs in kernel mode, but it doesn't
 * try to wake up the @task.
 *
 * Note: there is no ordering guarantee on works queued here.
 *
 * RETURNS:
 * 0 if succeeds or -ESRCH.
 */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;

	/*
	 * Lockless LIFO push: re-read the head and retry whenever another
	 * CPU changed ->task_works between the READ_ONCE() and the
	 * cmpxchg().  Seeing &work_exited means the task has passed the
	 * final task_work_run() and will never process new work.
	 */
	do {
		head = READ_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}
892f6668f
|
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	/* Fast path: nothing queued, nothing to cancel. */
	if (likely(!task->task_works))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	/*
	 * Holding ->pi_lock pairs with raw_spin_unlock_wait() in
	 * task_work_run(): the runner waits for this section to finish
	 * before it starts invoking the detached entries.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = lockless_dereference(*pprev))) {
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
892f6668f
|
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to the user-mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add the
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			work = READ_ONCE(task->task_works);
			/*
			 * Atomically detach the whole list; on an exiting
			 * task with an empty list, install the work_exited
			 * sentinel so task_work_add() fails from now on.
			 */
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can't remove
		 * the first entry == work, cmpxchg(task_works) should
		 * fail, but it can play with *work and other entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);

		/* Run the detached entries in list (most-recently-added
		 * first) order; no ordering is guaranteed to callers. */
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}