Blame view

kernel/task_work.c 3.92 KB
b24413180   Greg Kroah-Hartman   License cleanup: ...
1
  // SPDX-License-Identifier: GPL-2.0
e73f8959a   Oleg Nesterov   task_work_add: ge...
2
3
4
  #include <linux/spinlock.h>
  #include <linux/task_work.h>
  #include <linux/tracehook.h>
9da33de62   Oleg Nesterov   task_work: task_w...
5
  static struct callback_head work_exited; /* all we need is ->next == NULL */
892f6668f   Oleg Nesterov   task_work: docume...
6
7
8
9
/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that
 * it will interrupt the targeted task and run the task_work. @TWA_RESUME
 * work is run only when the task exits the kernel and returns to user mode,
 * or before entering guest mode. Fails if the @task is exiting/exited and thus
 * it can't process this @work. Otherwise @work->func() will be called when the
 * @task goes through one of the aforementioned transitions, or exits.
 *
 * If the targeted task is exiting, then an error is returned and the work item
 * is not queued. It's up to the caller to arrange for an alternative mechanism
 * in that case.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 if succeeds or -ESRCH.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

	/*
	 * Lockless LIFO push: read the current head, link @work in front of
	 * it, and cmpxchg() it into place. Retry if another CPU changed the
	 * list in between. Once task_work_run() has installed &work_exited
	 * (the exit sentinel), no further work may be queued — fail with
	 * -ESRCH instead.
	 */
	do {
		head = READ_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	/* The work is queued; now poke the task per the requested mode. */
	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	default:
		/* Unknown notify mode: warn once, but the work stays queued. */
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
892f6668f   Oleg Nesterov   task_work: docume...
57
58
59
60
61
62
63
64
65
66
67
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	/* Fast path: empty list, nothing to cancel. */
	if (likely(!task->task_works))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 *
	 * pi_lock pairs with the empty lock/unlock in task_work_run():
	 * it guarantees the runner waits for an in-flight cancel before
	 * executing the detached list.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = READ_ONCE(*pprev))) {
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* NULL if we walked off the end without a successful unlink. */
	return work;
}
892f6668f   Oleg Nesterov   task_work: docume...
94
95
96
97
98
99
100
101
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to the user-mode or stops, or when
 * it exits. In the latter case task_work_add() can no longer add the
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	/* Repeat until the list stays empty: callbacks may queue more work. */
	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 *
		 * Atomically detach the whole list. On an exiting task with
		 * an empty list, install the &work_exited sentinel so that
		 * task_work_add() fails from now on; otherwise swap in NULL.
		 */
		do {
			head = NULL;
			work = READ_ONCE(task->task_works);
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can not remove
		 * the first entry == work, cmpxchg(task_works) must fail.
		 * But it can remove another entry from the ->next list.
		 *
		 * The empty lock/unlock pair acts as a barrier: any cancel
		 * still holding pi_lock finishes before we run the entries.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		/*
		 * Run the detached entries in list order (LIFO relative to
		 * queueing — see task_work_add()), yielding between callbacks.
		 */
		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}