Blame view

kernel/rcutiny.c 8.41 KB
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
  /*
   * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
   *
   * Copyright IBM Corporation, 2008
   *
   * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
   *
   * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
25
  #include <linux/moduleparam.h>
4ce5b9034   Ingo Molnar   rcu: Do tiny clea...
26
27
  #include <linux/completion.h>
  #include <linux/interrupt.h>
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
28
  #include <linux/notifier.h>
4ce5b9034   Ingo Molnar   rcu: Do tiny clea...
29
30
31
  #include <linux/rcupdate.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
32
  #include <linux/mutex.h>
4ce5b9034   Ingo Molnar   rcu: Do tiny clea...
33
34
35
  #include <linux/sched.h>
  #include <linux/types.h>
  #include <linux/init.h>
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
36
  #include <linux/time.h>
4ce5b9034   Ingo Molnar   rcu: Do tiny clea...
37
  #include <linux/cpu.h>
268bb0ce3   Linus Torvalds   sanitize <linux/p...
38
  #include <linux/prefetch.h>
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
39

24278d148   Paul E. McKenney   rcu: priority boo...
40
41
42
43
  /* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
  static struct task_struct *rcu_kthread_task;
  static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
  static unsigned long have_rcu_kthread_work;
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
44

a57eb940d   Paul E. McKenney   rcu: Add a TINY_P...
45
  /* Forward declarations for rcutiny_plugin.h. */
24278d148   Paul E. McKenney   rcu: priority boo...
46
  struct rcu_ctrlblk;
b554d7de8   Eric Dumazet   rcu: optimize rcu...
47
  static void invoke_rcu_kthread(void);
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
48
  static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
24278d148   Paul E. McKenney   rcu: priority boo...
49
  static int rcu_kthread(void *arg);
a57eb940d   Paul E. McKenney   rcu: Add a TINY_P...
50
51
52
53
54
  static void __call_rcu(struct rcu_head *head,
  		       void (*func)(struct rcu_head *rcu),
  		       struct rcu_ctrlblk *rcp);
  
  #include "rcutiny_plugin.h"
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
  #ifdef CONFIG_NO_HZ
  
  static long rcu_dynticks_nesting = 1;
  
  /*
   * Enter dynticks-idle mode, which is an extended quiescent state
   * if we have fully entered that mode (i.e., if the new value of
   * dynticks_nesting is zero).
   */
  void rcu_enter_nohz(void)
  {
  	if (--rcu_dynticks_nesting == 0)
  		rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
  }
  
  /*
   * Exit dynticks-idle mode, so that we are no longer in an extended
   * quiescent state.
   */
  void rcu_exit_nohz(void)
  {
  	rcu_dynticks_nesting++;
  }
  
  #endif /* #ifdef CONFIG_NO_HZ */
  
  /*
b554d7de8   Eric Dumazet   rcu: optimize rcu...
82
83
   * Helper function for rcu_sched_qs() and rcu_bh_qs().
   * Also irqs are disabled to avoid confusion due to interrupt handlers
4ce5b9034   Ingo Molnar   rcu: Do tiny clea...
84
   * invoking call_rcu().
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
85
86
87
   */
  static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
  {
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
88
89
90
  	if (rcp->rcucblist != NULL &&
  	    rcp->donetail != rcp->curtail) {
  		rcp->donetail = rcp->curtail;
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
91
92
  		return 1;
  	}
4ce5b9034   Ingo Molnar   rcu: Do tiny clea...
93

9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
94
95
96
97
  	return 0;
  }
  
  /*
b554d7de8   Eric Dumazet   rcu: optimize rcu...
98
99
100
101
102
103
104
105
106
107
   * Wake up rcu_kthread() to process callbacks now eligible for invocation
   * or to boost readers.
   */
  static void invoke_rcu_kthread(void)
  {
  	have_rcu_kthread_work = 1;
  	wake_up(&rcu_kthread_wq);
  }
  
  /*
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
108
109
110
111
112
113
   * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
   * are at it, given that any rcu quiescent state is also an rcu_bh
   * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
   */
  void rcu_sched_qs(int cpu)
  {
b554d7de8   Eric Dumazet   rcu: optimize rcu...
114
115
116
  	unsigned long flags;
  
  	local_irq_save(flags);
99652b54d   Paul E. McKenney   rcu: rename rcuti...
117
118
  	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
  	    rcu_qsctr_help(&rcu_bh_ctrlblk))
24278d148   Paul E. McKenney   rcu: priority boo...
119
  		invoke_rcu_kthread();
b554d7de8   Eric Dumazet   rcu: optimize rcu...
120
  	local_irq_restore(flags);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
121
122
123
124
125
126
127
  }
  
  /*
   * Record an rcu_bh quiescent state.
   */
  void rcu_bh_qs(int cpu)
  {
b554d7de8   Eric Dumazet   rcu: optimize rcu...
128
129
130
  	unsigned long flags;
  
  	local_irq_save(flags);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
131
  	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
24278d148   Paul E. McKenney   rcu: priority boo...
132
  		invoke_rcu_kthread();
b554d7de8   Eric Dumazet   rcu: optimize rcu...
133
  	local_irq_restore(flags);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
  }
  
  /*
   * Check to see if the scheduling-clock interrupt came from an extended
   * quiescent state, and, if so, tell RCU about it.
   */
  void rcu_check_callbacks(int cpu, int user)
  {
  	if (user ||
  	    (idle_cpu(cpu) &&
  	     !in_softirq() &&
  	     hardirq_count() <= (1 << HARDIRQ_SHIFT)))
  		rcu_sched_qs(cpu);
  	else if (!in_softirq())
  		rcu_bh_qs(cpu);
a57eb940d   Paul E. McKenney   rcu: Add a TINY_P...
149
  	rcu_preempt_check_callbacks();
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
150
151
152
  }
  
  /*
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
153
154
   * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
   * whose grace period has elapsed.
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
155
   */
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
156
  static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
157
  {
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
158
  	struct rcu_head *next, *list;
4ce5b9034   Ingo Molnar   rcu: Do tiny clea...
159
  	unsigned long flags;
9e571a82f   Paul E. McKenney   rcu: add tracing ...
160
  	RCU_TRACE(int cb_count = 0);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
161
162
163
164
165
166
167
168
169
170
171
172
  
  	/* If no RCU callbacks ready to invoke, just return. */
  	if (&rcp->rcucblist == rcp->donetail)
  		return;
  
  	/* Move the ready-to-invoke callbacks to a local list. */
  	local_irq_save(flags);
  	list = rcp->rcucblist;
  	rcp->rcucblist = *rcp->donetail;
  	*rcp->donetail = NULL;
  	if (rcp->curtail == rcp->donetail)
  		rcp->curtail = &rcp->rcucblist;
a57eb940d   Paul E. McKenney   rcu: Add a TINY_P...
173
  	rcu_preempt_remove_callbacks(rcp);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
174
175
176
177
178
179
180
  	rcp->donetail = &rcp->rcucblist;
  	local_irq_restore(flags);
  
  	/* Invoke the callbacks on the local list. */
  	while (list) {
  		next = list->next;
  		prefetch(next);
551d55a94   Mathieu Desnoyers   tree/tiny rcu: Ad...
181
  		debug_rcu_head_unqueue(list);
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
182
  		local_bh_disable();
9ab1544eb   Lai Jiangshan   rcu: introduce kf...
183
  		__rcu_reclaim(list);
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
184
  		local_bh_enable();
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
185
  		list = next;
9e571a82f   Paul E. McKenney   rcu: add tracing ...
186
  		RCU_TRACE(cb_count++);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
187
  	}
9e571a82f   Paul E. McKenney   rcu: add tracing ...
188
  	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
189
190
191
  }
  
  /*
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
192
193
194
195
196
197
   * This kthread invokes RCU callbacks whose grace periods have
   * elapsed.  It is awakened as needed, and takes the place of the
   * RCU_SOFTIRQ that was used previously for this purpose.
   * This is a kthread, but it is never stopped, at least not until
   * the system goes down.
   */
24278d148   Paul E. McKenney   rcu: priority boo...
198
  static int rcu_kthread(void *arg)
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
199
200
  {
  	unsigned long work;
24278d148   Paul E. McKenney   rcu: priority boo...
201
  	unsigned long morework;
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
202
203
204
  	unsigned long flags;
  
  	for (;;) {
b24efdfdf   Paul E. McKenney   rcu: avoid pointl...
205
206
  		wait_event_interruptible(rcu_kthread_wq,
  					 have_rcu_kthread_work != 0);
24278d148   Paul E. McKenney   rcu: priority boo...
207
  		morework = rcu_boost();
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
208
  		local_irq_save(flags);
24278d148   Paul E. McKenney   rcu: priority boo...
209
210
  		work = have_rcu_kthread_work;
  		have_rcu_kthread_work = morework;
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
211
212
213
214
215
216
  		local_irq_restore(flags);
  		if (work) {
  			rcu_process_callbacks(&rcu_sched_ctrlblk);
  			rcu_process_callbacks(&rcu_bh_ctrlblk);
  			rcu_preempt_process_callbacks();
  		}
24278d148   Paul E. McKenney   rcu: priority boo...
217
  		schedule_timeout_interruptible(1); /* Leave CPU for others. */
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
218
219
220
221
222
223
  	}
  
  	return 0;  /* Not reached, but needed to shut gcc up. */
  }
  
  /*
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
224
225
226
227
228
229
230
231
232
   * Wait for a grace period to elapse.  But it is illegal to invoke
   * synchronize_sched() from within an RCU read-side critical section.
   * Therefore, any legal call to synchronize_sched() is a quiescent
   * state, and so on a UP system, synchronize_sched() need do nothing.
   * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
   * benefits of doing might_sleep() to reduce latency.)
   *
   * Cool, huh?  (Due to Josh Triplett.)
   *
da848c47b   Paul E. McKenney   rcu: shrink rcuti...
233
234
   * But we want to make this a static inline later.  The cond_resched()
   * currently makes this problematic.
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
235
236
237
238
239
240
   */
  void synchronize_sched(void)
  {
  	cond_resched();
  }
  EXPORT_SYMBOL_GPL(synchronize_sched);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
241
242
243
244
245
246
247
248
  /*
   * Helper function for call_rcu() and call_rcu_bh().
   */
  static void __call_rcu(struct rcu_head *head,
  		       void (*func)(struct rcu_head *rcu),
  		       struct rcu_ctrlblk *rcp)
  {
  	unsigned long flags;
551d55a94   Mathieu Desnoyers   tree/tiny rcu: Ad...
249
  	debug_rcu_head_queue(head);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
250
251
  	head->func = func;
  	head->next = NULL;
4ce5b9034   Ingo Molnar   rcu: Do tiny clea...
252

9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
253
254
255
  	local_irq_save(flags);
  	*rcp->curtail = head;
  	rcp->curtail = &head->next;
9e571a82f   Paul E. McKenney   rcu: add tracing ...
256
  	RCU_TRACE(rcp->qlen++);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
257
258
259
260
  	local_irq_restore(flags);
  }
  
  /*
a57eb940d   Paul E. McKenney   rcu: Add a TINY_P...
261
   * Post an RCU callback to be invoked after the end of an RCU-sched grace
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
262
263
264
   * period.  But since we have but one CPU, that would be after any
   * quiescent state.
   */
a57eb940d   Paul E. McKenney   rcu: Add a TINY_P...
265
  void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
266
  {
99652b54d   Paul E. McKenney   rcu: rename rcuti...
267
  	__call_rcu(head, func, &rcu_sched_ctrlblk);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
268
  }
a57eb940d   Paul E. McKenney   rcu: Add a TINY_P...
269
  EXPORT_SYMBOL_GPL(call_rcu_sched);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
270
271
272
273
274
  
  /*
   * Post an RCU bottom-half callback to be invoked after any subsequent
   * quiescent state.
   */
4ce5b9034   Ingo Molnar   rcu: Do tiny clea...
275
  void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
276
277
278
279
  {
  	__call_rcu(head, func, &rcu_bh_ctrlblk);
  }
  EXPORT_SYMBOL_GPL(call_rcu_bh);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
280
281
282
  void rcu_barrier_bh(void)
  {
  	struct rcu_synchronize rcu;
72d5a9f7a   Paul E. McKenney   rcu: remove all r...
283
  	init_rcu_head_on_stack(&rcu.head);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
284
285
286
287
288
  	init_completion(&rcu.completion);
  	/* Will wake me after RCU finished. */
  	call_rcu_bh(&rcu.head, wakeme_after_rcu);
  	/* Wait for it. */
  	wait_for_completion(&rcu.completion);
72d5a9f7a   Paul E. McKenney   rcu: remove all r...
289
  	destroy_rcu_head_on_stack(&rcu.head);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
290
291
292
293
294
295
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  
  void rcu_barrier_sched(void)
  {
  	struct rcu_synchronize rcu;
72d5a9f7a   Paul E. McKenney   rcu: remove all r...
296
  	init_rcu_head_on_stack(&rcu.head);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
297
298
299
300
301
  	init_completion(&rcu.completion);
  	/* Will wake me after RCU finished. */
  	call_rcu_sched(&rcu.head, wakeme_after_rcu);
  	/* Wait for it. */
  	wait_for_completion(&rcu.completion);
72d5a9f7a   Paul E. McKenney   rcu: remove all r...
302
  	destroy_rcu_head_on_stack(&rcu.head);
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
303
304
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_sched);
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
305
306
307
308
  /*
   * Spawn the kthread that invokes RCU callbacks.
   */
  static int __init rcu_spawn_kthreads(void)
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
309
  {
24278d148   Paul E. McKenney   rcu: priority boo...
310
311
312
313
314
  	struct sched_param sp;
  
  	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
  	sp.sched_priority = RCU_BOOST_PRIO;
  	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
315
  	return 0;
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
316
  }
b2c0710c4   Paul E. McKenney   rcu: move TINY_RC...
317
  early_initcall(rcu_spawn_kthreads);