Blame view
kernel/task_work.c
4.28 KB
b24413180 License cleanup: ... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
e73f8959a task_work_add: ge... |
2 3 4 |
#include <linux/spinlock.h> #include <linux/task_work.h> #include <linux/tracehook.h> |
/*
 * Sentinel installed on ->task_works by task_work_run() once the task is
 * exiting; task_work_add() treats it as "closed for business" (-ESRCH).
 */
static struct callback_head work_exited; /* all we need is ->next == NULL */
/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that
 * it will interrupt the targeted task and run the task_work. @TWA_RESUME
 * work is run only when the task exits the kernel and returns to user mode,
 * or before entering guest mode. Fails if the @task is exiting/exited and thus
 * it can't process this @work. Otherwise @work->func() will be called when the
 * @task goes through one of the aforementioned transitions, or exits.
 *
 * If the targeted task is exiting, then an error is returned and the work item
 * is not queued. It's up to the caller to arrange for an alternative mechanism
 * in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 if succeeds or -ESRCH.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;
	unsigned long flags;

	/*
	 * Lockless LIFO push: re-read the list head and retry until the
	 * cmpxchg() succeeds. Seeing the work_exited sentinel means the
	 * task is past task_work_run()'s final flush, so queueing would
	 * leak @work — refuse with -ESRCH instead.
	 */
	do {
		head = READ_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	/* The work is queued; now poke the task per the caller's choice. */
	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		/*
		 * Only grab the sighand lock if we don't already have some
		 * task_work pending. This pairs with the smp_store_mb()
		 * in get_signal(), see comment there.
		 */
		if (!(READ_ONCE(task->jobctl) & JOBCTL_TASK_WORK) &&
		    lock_task_sighand(task, &flags)) {
			task->jobctl |= JOBCTL_TASK_WORK;
			signal_wake_up(task, 0);
			unlock_task_sighand(task, &flags);
		}
		break;
	default:
		/* Unknown notify mode is a caller bug; work stays queued. */
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	/* Cheap unlocked fast path: nothing queued, nothing to cancel. */
	if (likely(!task->task_works))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 *
	 * pi_lock is held while walking the list; task_work_run() takes
	 * the same lock before running callbacks, which keeps the nodes
	 * we dereference here from being executed/freed under us.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = READ_ONCE(*pprev))) {
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;	/* unlinked the match; @work is ours */
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to the user-mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add the
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			head = NULL;
			work = READ_ONCE(task->task_works);
			if (!work) {
				/*
				 * List drained: if we are exiting, install
				 * the sentinel so task_work_add() fails from
				 * now on; otherwise we are done.
				 */
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can not remove
		 * the first entry == work, cmpxchg(task_works) must fail.
		 * But it can remove another entry from the ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		/* Run the detached batch; list was LIFO, so newest first. */
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}