kernel/rcutree.c

  /*
   * Read-Copy Update mechanism for mutual exclusion
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program; if not, write to the Free Software
   * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
   *
   * Copyright IBM Corporation, 2008
   *
   * Authors: Dipankar Sarma <dipankar@in.ibm.com>
   *	    Manfred Spraul <manfred@colorfullife.com>
   *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
   *
   * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
   * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
   *
   * For detailed explanation of Read-Copy Update mechanism see -
   *	Documentation/RCU
   */
  #include <linux/types.h>
  #include <linux/kernel.h>
  #include <linux/init.h>
  #include <linux/spinlock.h>
  #include <linux/smp.h>
  #include <linux/rcupdate.h>
  #include <linux/interrupt.h>
  #include <linux/sched.h>
  #include <linux/nmi.h>
  #include <asm/atomic.h>
  #include <linux/bitops.h>
  #include <linux/module.h>
  #include <linux/completion.h>
  #include <linux/moduleparam.h>
  #include <linux/percpu.h>
  #include <linux/notifier.h>
  #include <linux/cpu.h>
  #include <linux/mutex.h>
  #include <linux/time.h>
  #include <linux/kernel_stat.h>

  #include "rcutree.h"
  /* Data structures. */
  static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];

  #define RCU_STATE_INITIALIZER(name) { \
  	.level = { &name.node[0] }, \
  	.levelcnt = { \
  		NUM_RCU_LVL_0,  /* root of hierarchy. */ \
  		NUM_RCU_LVL_1, \
  		NUM_RCU_LVL_2, \
  		NUM_RCU_LVL_3, \
  		NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
  	}, \
  	.signaled = RCU_GP_IDLE, \
  	.gpnum = -300, \
  	.completed = -300, \
  	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
  	.orphan_cbs_list = NULL, \
  	.orphan_cbs_tail = &name.orphan_cbs_list, \
  	.orphan_qlen = 0, \
  	.fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
  	.n_force_qs = 0, \
  	.n_force_qs_ngp = 0, \
  }
  struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
  DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);

  struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
  DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);

  static int rcu_scheduler_active __read_mostly;

  /*
   * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
   * permit this function to be invoked without holding the root rcu_node
   * structure's ->lock, but of course results can be subject to change.
   */
  static int rcu_gp_in_progress(struct rcu_state *rsp)
  {
  	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
  }
  
  /*
   * Note a quiescent state.  Because we do not need to know
   * how many quiescent states passed, just if there was at least
   * one since the start of the grace period, this just sets a flag.
   */
  void rcu_sched_qs(int cpu)
  {
  	struct rcu_data *rdp;
  	rdp = &per_cpu(rcu_sched_data, cpu);
  	rdp->passed_quiesc_completed = rdp->gpnum - 1;
  	barrier();
  	rdp->passed_quiesc = 1;
  	rcu_preempt_note_context_switch(cpu);
  }
  void rcu_bh_qs(int cpu)
  {
  	struct rcu_data *rdp;
  	rdp = &per_cpu(rcu_bh_data, cpu);
  	rdp->passed_quiesc_completed = rdp->gpnum - 1;
  	barrier();
  	rdp->passed_quiesc = 1;
  }
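
/*
 * For illustration only (the call sites live in other files of this
 * kernel era): the scheduler reports an rcu_sched quiescent state at
 * each context switch, and softirq processing reports an rcu_bh
 * quiescent state, roughly:
 *
 *	rcu_sched_qs(smp_processor_id());	// from schedule()
 *	rcu_bh_qs(smp_processor_id());		// from __do_softirq()
 *
 * The barrier() calls above order the ->passed_quiesc_completed store
 * before the ->passed_quiesc store, so that code on this CPU that sees
 * ->passed_quiesc set also sees the matching completed number.
 */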
  
  #ifdef CONFIG_NO_HZ
  DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
  	.dynticks_nesting = 1,
  	.dynticks = 1,
  };
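
/*
 * The ->dynticks counter is incremented on every transition into or
 * out of dynticks-idle mode, so its low-order bit encodes the state:
 * odd means the CPU is active, even means it is dynticks idle.  The
 * initial value of 1 (with ->dynticks_nesting == 1) therefore marks
 * the CPU as active at boot; the WARN_ON_ONCE() checks in
 * rcu_enter_nohz() and rcu_exit_nohz() below rely on this invariant.
 */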
  #endif /* #ifdef CONFIG_NO_HZ */
  
  static int blimit = 10;		/* Maximum callbacks per softirq. */
  static int qhimark = 10000;	/* If this many pending, ignore blimit. */
  static int qlowmark = 100;	/* Once only this many pending, use blimit. */
  module_param(blimit, int, 0);
  module_param(qhimark, int, 0);
  module_param(qlowmark, int, 0);
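
/*
 * These parameters are registered with mode 0, so they do not appear
 * under /sys/module, but they can still be set on the kernel command
 * line, e.g. (illustrative values only):
 *
 *	rcutree.blimit=20 rcutree.qhimark=20000 rcutree.qlowmark=200
 */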
  static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
  static int rcu_pending(int cpu);
  
  /*
   * Return the number of RCU-sched batches processed thus far for debug & stats.
   */
  long rcu_batches_completed_sched(void)
  {
  	return rcu_sched_state.completed;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
  
  /*
   * Return the number of RCU BH batches processed thus far for debug & stats.
   */
  long rcu_batches_completed_bh(void)
  {
  	return rcu_bh_state.completed;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
  
  /*
   * Force a quiescent state for RCU BH.
   */
  void rcu_bh_force_quiescent_state(void)
  {
  	force_quiescent_state(&rcu_bh_state, 0);
  }
  EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
  
  /*
   * Force a quiescent state for RCU-sched.
   */
  void rcu_sched_force_quiescent_state(void)
  {
  	force_quiescent_state(&rcu_sched_state, 0);
  }
  EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
  
  /*
   * Does the CPU have callbacks ready to be invoked?
   */
  static int
  cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
  {
  	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
  }
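
/*
 * A note on the callback-list layout assumed above and throughout:
 * ->nxtlist is a single linked list of callbacks, and the ->nxttail[]
 * array of tail pointers partitions it into segments.  Callbacks
 * before *->nxttail[RCU_DONE_TAIL] have had their grace period
 * complete and may be invoked; the RCU_WAIT_TAIL and
 * RCU_NEXT_READY_TAIL segments wait on the current and next grace
 * periods; RCU_NEXT_TAIL ends the callbacks not yet assigned to any
 * grace period.  The test above is thus "is the done segment
 * non-empty?".
 */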
  
  /*
 * Does the current CPU require an as-yet-unscheduled grace period?
   */
  static int
  cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
  {
  	return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
  }
  
  /*
   * Return the root node of the specified rcu_state structure.
   */
  static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
  {
  	return &rsp->node[0];
  }
  
  #ifdef CONFIG_SMP
  
  /*
   * If the specified CPU is offline, tell the caller that it is in
   * a quiescent state.  Otherwise, whack it with a reschedule IPI.
   * Grace periods can end up waiting on an offline CPU when that
   * CPU is in the process of coming online -- it will be added to the
   * rcu_node bitmasks before it actually makes it online.  The same thing
 * can happen while a CPU is in the process of going offline.  Because this
   * race is quite rare, we check for it after detecting that the grace
   * period has been delayed rather than checking each and every CPU
   * each and every time we start a new grace period.
   */
  static int rcu_implicit_offline_qs(struct rcu_data *rdp)
  {
  	/*
  	 * If the CPU is offline, it is in a quiescent state.  We can
  	 * trust its state not to change because interrupts are disabled.
  	 */
  	if (cpu_is_offline(rdp->cpu)) {
  		rdp->offline_fqs++;
  		return 1;
  	}
  	/* If preemptable RCU, no point in sending reschedule IPI. */
  	if (rdp->preemptable)
  		return 0;
  	/* The CPU is online, so send it a reschedule IPI. */
  	if (rdp->cpu != smp_processor_id())
  		smp_send_reschedule(rdp->cpu);
  	else
  		set_need_resched();
  	rdp->resched_ipi++;
  	return 0;
  }
  
  #endif /* #ifdef CONFIG_SMP */
  
  #ifdef CONFIG_NO_HZ
  
  /**
   * rcu_enter_nohz - inform RCU that current CPU is entering nohz
   *
   * Enter nohz mode, in other words, -leave- the mode in which RCU
   * read-side critical sections can occur.  (Though RCU read-side
   * critical sections can occur in irq handlers in nohz mode, a possibility
   * handled by rcu_irq_enter() and rcu_irq_exit()).
   */
  void rcu_enter_nohz(void)
  {
  	unsigned long flags;
  	struct rcu_dynticks *rdtp;
  
  	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
  	local_irq_save(flags);
  	rdtp = &__get_cpu_var(rcu_dynticks);
  	rdtp->dynticks++;
  	rdtp->dynticks_nesting--;
  	WARN_ON_ONCE(rdtp->dynticks & 0x1);
  	local_irq_restore(flags);
  }
  
  /*
   * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
   *
   * Exit nohz mode, in other words, -enter- the mode in which RCU
   * read-side critical sections normally occur.
   */
  void rcu_exit_nohz(void)
  {
  	unsigned long flags;
  	struct rcu_dynticks *rdtp;
  
  	local_irq_save(flags);
  	rdtp = &__get_cpu_var(rcu_dynticks);
  	rdtp->dynticks++;
  	rdtp->dynticks_nesting++;
  	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
  	local_irq_restore(flags);
  	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
  }
  
  /**
   * rcu_nmi_enter - inform RCU of entry to NMI context
   *
   * If the CPU was idle with dynamic ticks active, and there is no
   * irq handler running, this updates rdtp->dynticks_nmi to let the
   * RCU grace-period handling know that the CPU is active.
   */
  void rcu_nmi_enter(void)
  {
  	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
  
  	if (rdtp->dynticks & 0x1)
  		return;
  	rdtp->dynticks_nmi++;
  	WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
  	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
  }
  
  /**
   * rcu_nmi_exit - inform RCU of exit from NMI context
   *
   * If the CPU was idle with dynamic ticks active, and there is no
   * irq handler running, this updates rdtp->dynticks_nmi to let the
   * RCU grace-period handling know that the CPU is no longer active.
   */
  void rcu_nmi_exit(void)
  {
  	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
  
  	if (rdtp->dynticks & 0x1)
  		return;
  	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
  	rdtp->dynticks_nmi++;
  	WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
  }
  
  /**
   * rcu_irq_enter - inform RCU of entry to hard irq context
   *
   * If the CPU was idle with dynamic ticks active, this updates the
   * rdtp->dynticks to let the RCU handling know that the CPU is active.
   */
  void rcu_irq_enter(void)
  {
  	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
  
  	if (rdtp->dynticks_nesting++)
  		return;
  	rdtp->dynticks++;
  	WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
  	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
  }
  
  /**
   * rcu_irq_exit - inform RCU of exit from hard irq context
   *
 * If the CPU was idle with dynamic ticks active, update rdtp->dynticks to
 * let the RCU handling know that the CPU is going back to idle with no
 * ticks.
   */
  void rcu_irq_exit(void)
  {
  	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
  
  	if (--rdtp->dynticks_nesting)
  		return;
  	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
  	rdtp->dynticks++;
  	WARN_ON_ONCE(rdtp->dynticks & 0x1);
  
  	/* If the interrupt queued a callback, get out of dyntick mode. */
  	if (__get_cpu_var(rcu_sched_data).nxtlist ||
  	    __get_cpu_var(rcu_bh_data).nxtlist)
  		set_need_resched();
  }
  #ifdef CONFIG_SMP
  
  /*
   * Snapshot the specified CPU's dynticks counter so that we can later
   * credit them with an implicit quiescent state.  Return 1 if this CPU
   * is in dynticks idle mode, which is an extended quiescent state.
   */
  static int dyntick_save_progress_counter(struct rcu_data *rdp)
  {
  	int ret;
  	int snap;
  	int snap_nmi;
  
  	snap = rdp->dynticks->dynticks;
  	snap_nmi = rdp->dynticks->dynticks_nmi;
  	smp_mb();	/* Order sampling of snap with end of grace period. */
  	rdp->dynticks_snap = snap;
  	rdp->dynticks_nmi_snap = snap_nmi;
  	ret = ((snap & 0x1) == 0) && ((snap_nmi & 0x1) == 0);
  	if (ret)
  		rdp->dynticks_fqs++;
  	return ret;
  }
  
  /*
   * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
   * idle state since the last call to dyntick_save_progress_counter()
   * for this same CPU.
   */
  static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
  {
  	long curr;
  	long curr_nmi;
  	long snap;
  	long snap_nmi;
  
  	curr = rdp->dynticks->dynticks;
  	snap = rdp->dynticks_snap;
  	curr_nmi = rdp->dynticks->dynticks_nmi;
  	snap_nmi = rdp->dynticks_nmi_snap;
  	smp_mb(); /* force ordering with cpu entering/leaving dynticks. */
  
  	/*
  	 * If the CPU passed through or entered a dynticks idle phase with
  	 * no active irq/NMI handlers, then we can safely pretend that the CPU
  	 * already acknowledged the request to pass through a quiescent
  	 * state.  Either way, that CPU cannot possibly be in an RCU
  	 * read-side critical section that started before the beginning
  	 * of the current RCU grace period.
  	 */
  	if ((curr != snap || (curr & 0x1) == 0) &&
  	    (curr_nmi != snap_nmi || (curr_nmi & 0x1) == 0)) {
  		rdp->dynticks_fqs++;
  		return 1;
  	}
  
  	/* Go check for the CPU being offline. */
  	return rcu_implicit_offline_qs(rdp);
  }
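
/*
 * Taken together, the two functions above implement a two-phase
 * protocol driven by force_quiescent_state(): the RCU_SAVE_DYNTICK
 * pass snapshots each holdout CPU's counters, and the later
 * RCU_FORCE_QS pass compares the current counters against those
 * snapshots, crediting a quiescent state to any CPU that was in (or
 * passed through) dynticks-idle mode in the interim.
 */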
  
  #endif /* #ifdef CONFIG_SMP */
  
  #else /* #ifdef CONFIG_NO_HZ */
  #ifdef CONFIG_SMP
  static int dyntick_save_progress_counter(struct rcu_data *rdp)
  {
  	return 0;
  }
  
  static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
  {
  	return rcu_implicit_offline_qs(rdp);
  }
  
  #endif /* #ifdef CONFIG_SMP */
  
  #endif /* #else #ifdef CONFIG_NO_HZ */
  
  #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
  
  static void record_gp_stall_check_time(struct rcu_state *rsp)
  {
  	rsp->gp_start = jiffies;
  	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
  }
  
  static void print_other_cpu_stall(struct rcu_state *rsp)
  {
  	int cpu;
  	long delta;
  	unsigned long flags;
  	struct rcu_node *rnp = rcu_get_root(rsp);
  
  	/* Only let one CPU complain about others per time interval. */
  
  	spin_lock_irqsave(&rnp->lock, flags);
  	delta = jiffies - rsp->jiffies_stall;
  	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
  		spin_unlock_irqrestore(&rnp->lock, flags);
  		return;
  	}
  	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
  
  	/*
  	 * Now rat on any tasks that got kicked up to the root rcu_node
  	 * due to CPU offlining.
  	 */
  	rcu_print_task_stall(rnp);
  	spin_unlock_irqrestore(&rnp->lock, flags);
  
  	/* OK, time to rat on our buddy... */
  
  	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
  	rcu_for_each_leaf_node(rsp, rnp) {
  		rcu_print_task_stall(rnp);
  		if (rnp->qsmask == 0)
  			continue;
  		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
  			if (rnp->qsmask & (1UL << cpu))
  				printk(" %d", rnp->grplo + cpu);
  	}
  	printk(" (detected by %d, t=%ld jiffies)
  ",
  	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
  	trigger_all_cpu_backtrace();
  	force_quiescent_state(rsp, 0);  /* Kick them all. */
  }
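
/*
 * The resulting console output looks like the following (CPU numbers
 * and timing are illustrative only):
 *
 *	INFO: RCU detected CPU stalls: 3 5 (detected by 1, t=10008 jiffies)
 */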
  
  static void print_cpu_stall(struct rcu_state *rsp)
  {
  	unsigned long flags;
  	struct rcu_node *rnp = rcu_get_root(rsp);
  
  	printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)
  ",
  			smp_processor_id(), jiffies - rsp->gp_start);
  	trigger_all_cpu_backtrace();
  	spin_lock_irqsave(&rnp->lock, flags);
  	if ((long)(jiffies - rsp->jiffies_stall) >= 0)
  		rsp->jiffies_stall =
  			jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
  	spin_unlock_irqrestore(&rnp->lock, flags);

  	set_need_resched();  /* kick ourselves to get things going. */
  }
  
  static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
  {
  	long delta;
  	struct rcu_node *rnp;
  
  	delta = jiffies - rsp->jiffies_stall;
  	rnp = rdp->mynode;
  	if ((rnp->qsmask & rdp->grpmask) && delta >= 0) {
  
  		/* We haven't checked in, so go dump stack. */
  		print_cpu_stall(rsp);
  	} else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
  
  		/* They had two time units to dump stack, so complain. */
  		print_other_cpu_stall(rsp);
  	}
  }
  
  #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
  
  static void record_gp_stall_check_time(struct rcu_state *rsp)
  {
  }
  
  static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
  {
  }
  
  #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
  
  /*
   * Update CPU-local rcu_data state to record the newly noticed grace period.
   * This is used both when we started the grace period and when we notice
   * that someone else started the grace period.  The caller must hold the
   * ->lock of the leaf rcu_node structure corresponding to the current CPU,
 * and must have irqs disabled.
   */
  static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
  {
  	if (rdp->gpnum != rnp->gpnum) {
  		rdp->qs_pending = 1;
  		rdp->passed_quiesc = 0;
  		rdp->gpnum = rnp->gpnum;
  	}
  }
  static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
  {
  	unsigned long flags;
  	struct rcu_node *rnp;
  
  	local_irq_save(flags);
  	rnp = rdp->mynode;
  	if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
  	    !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
  		local_irq_restore(flags);
  		return;
  	}
  	__note_new_gpnum(rsp, rnp, rdp);
  	spin_unlock_irqrestore(&rnp->lock, flags);
  }
  
  /*
 * Did someone else start a new RCU grace period since we last
   * checked?  Update local state appropriately if so.  Must be called
   * on the CPU corresponding to rdp.
   */
  static int
  check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
  {
  	unsigned long flags;
  	int ret = 0;
  
  	local_irq_save(flags);
  	if (rdp->gpnum != rsp->gpnum) {
  		note_new_gpnum(rsp, rdp);
  		ret = 1;
  	}
  	local_irq_restore(flags);
  	return ret;
  }
  
  /*
   * Advance this CPU's callbacks, but only if the current grace period
   * has ended.  This may be called only from the CPU to whom the rdp
   * belongs.  In addition, the corresponding leaf rcu_node structure's
   * ->lock must be held by the caller, with irqs disabled.
   */
  static void
  __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
  {
  	/* Did another grace period end? */
  	if (rdp->completed != rnp->completed) {
  
  		/* Advance callbacks.  No harm if list empty. */
  		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
  		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
  		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  
  		/* Remember that we saw this grace-period completion. */
  		rdp->completed = rnp->completed;
  	}
  }
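
/*
 * Illustration of the tail-pointer rotation above: callbacks that were
 * waiting on the just-ended grace period (the RCU_WAIT_TAIL segment)
 * become ready to invoke (RCU_DONE_TAIL), and the RCU_NEXT_READY_TAIL
 * segment slides into RCU_WAIT_TAIL.  Only the tail pointers move; the
 * list itself is not touched.
 */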
  
  /*
   * Advance this CPU's callbacks, but only if the current grace period
   * has ended.  This may be called only from the CPU to whom the rdp
   * belongs.
   */
  static void
  rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
  {
  	unsigned long flags;
  	struct rcu_node *rnp;
  
  	local_irq_save(flags);
  	rnp = rdp->mynode;
  	if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
  	    !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
  		local_irq_restore(flags);
  		return;
  	}
  	__rcu_process_gp_end(rsp, rnp, rdp);
  	spin_unlock_irqrestore(&rnp->lock, flags);
  }
  
  /*
   * Do per-CPU grace-period initialization for running CPU.  The caller
   * must hold the lock of the leaf rcu_node structure corresponding to
   * this CPU.
   */
  static void
  rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
  {
  	/* Prior grace period ended, so advance callbacks for current CPU. */
  	__rcu_process_gp_end(rsp, rnp, rdp);
  
  	/*
  	 * Because this CPU just now started the new grace period, we know
  	 * that all of its callbacks will be covered by this upcoming grace
  	 * period, even the ones that were registered arbitrarily recently.
  	 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
  	 *
  	 * Other CPUs cannot be sure exactly when the grace period started.
  	 * Therefore, their recently registered callbacks must pass through
  	 * an additional RCU_NEXT_READY stage, so that they will be handled
  	 * by the next RCU grace period.
  	 */
  	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
  
  	/* Set state so that this CPU will detect the next quiescent state. */
  	__note_new_gpnum(rsp, rnp, rdp);
  }
  
  /*
   * Start a new RCU grace period if warranted, re-initializing the hierarchy
   * in preparation for detecting the next grace period.  The caller must hold
   * the root node's ->lock, which is released before return.  Hard irqs must
   * be disabled.
   */
  static void
  rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
  	__releases(rcu_get_root(rsp)->lock)
  {
  	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
  	struct rcu_node *rnp = rcu_get_root(rsp);

  	if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
  		if (cpu_needs_another_gp(rsp, rdp))
  			rsp->fqs_need_gp = 1;
  		if (rnp->completed == rsp->completed) {
  			spin_unlock_irqrestore(&rnp->lock, flags);
  			return;
  		}
  		spin_unlock(&rnp->lock);	 /* irqs remain disabled. */
  
  		/*
  		 * Propagate new ->completed value to rcu_node structures
  		 * so that other CPUs don't have to wait until the start
  		 * of the next grace period to process their callbacks.
  		 */
  		rcu_for_each_node_breadth_first(rsp, rnp) {
  			spin_lock(&rnp->lock);	 /* irqs already disabled. */
  			rnp->completed = rsp->completed;
  			spin_unlock(&rnp->lock); /* irqs remain disabled. */
  		}
  		local_irq_restore(flags);
  		return;
  	}
  
  	/* Advance to a new grace period and initialize state. */
  	rsp->gpnum++;
  	WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
  	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
  	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
  	record_gp_stall_check_time(rsp);

  	/* Special-case the common single-level case. */
  	if (NUM_RCU_NODES == 1) {
  		rcu_preempt_check_blocked_tasks(rnp);
  		rnp->qsmask = rnp->qsmaskinit;
  		rnp->gpnum = rsp->gpnum;
  		rnp->completed = rsp->completed;
  		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
  		rcu_start_gp_per_cpu(rsp, rnp, rdp);
  		spin_unlock_irqrestore(&rnp->lock, flags);
  		return;
  	}
  
  	spin_unlock(&rnp->lock);  /* leave irqs disabled. */
  
  
  	/* Exclude any concurrent CPU-hotplug operations. */
  	spin_lock(&rsp->onofflock);  /* irqs already disabled. */
  
  	/*
  	 * Set the quiescent-state-needed bits in all the rcu_node
  	 * structures for all currently online CPUs in breadth-first
  	 * order, starting from the root rcu_node structure.  This
  	 * operation relies on the layout of the hierarchy within the
  	 * rsp->node[] array.  Note that other CPUs will access only
  	 * the leaves of the hierarchy, which still indicate that no
  	 * grace period is in progress, at least until the corresponding
  	 * leaf node has been initialized.  In addition, we have excluded
  	 * CPU-hotplug operations.
  	 *
  	 * Note that the grace period cannot complete until we finish
  	 * the initialization process, as there will be at least one
  	 * qsmask bit set in the root node until that time, namely the
  	 * one corresponding to this CPU, due to the fact that we have
  	 * irqs disabled.
  	 */
  	rcu_for_each_node_breadth_first(rsp, rnp) {
  		spin_lock(&rnp->lock);		/* irqs already disabled. */
  		rcu_preempt_check_blocked_tasks(rnp);
  		rnp->qsmask = rnp->qsmaskinit;
  		rnp->gpnum = rsp->gpnum;
  		rnp->completed = rsp->completed;
  		if (rnp == rdp->mynode)
  			rcu_start_gp_per_cpu(rsp, rnp, rdp);
  		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
  	}
  	rnp = rcu_get_root(rsp);
  	spin_lock(&rnp->lock);			/* irqs already disabled. */
  	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
  	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
  	spin_unlock_irqrestore(&rsp->onofflock, flags);
  }
  
  /*
   * Report a full set of quiescent states to the specified rcu_state
   * data structure.  This involves cleaning up after the prior grace
   * period and letting rcu_start_gp() start up the next grace period
   * if one is needed.  Note that the caller must hold rnp->lock, as
   * required by rcu_start_gp(), which will release it.
   */
  static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  	__releases(rcu_get_root(rsp)->lock)
  {
  	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
  	rsp->completed = rsp->gpnum;
  	rsp->signaled = RCU_GP_IDLE;
  	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
  }
  
  /*
   * Similar to rcu_report_qs_rdp(), for which it is a helper function.
   * Allows quiescent states for a group of CPUs to be reported at one go
   * to the specified rcu_node structure, though all the CPUs in the group
   * must be represented by the same rcu_node structure (which need not be
   * a leaf rcu_node structure, though it often will be).  That structure's
   * lock must be held upon entry, and it is released before return.
   */
  static void
  rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  		  struct rcu_node *rnp, unsigned long flags)
  	__releases(rnp->lock)
  {
  	struct rcu_node *rnp_c;
  	/* Walk up the rcu_node hierarchy. */
  	for (;;) {
  		if (!(rnp->qsmask & mask)) {
  
  			/* Our bit has already been cleared, so done. */
  			spin_unlock_irqrestore(&rnp->lock, flags);
  			return;
  		}
  		rnp->qsmask &= ~mask;
  		if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
  
  			/* Other bits still set at this level, so done. */
  			spin_unlock_irqrestore(&rnp->lock, flags);
  			return;
  		}
  		mask = rnp->grpmask;
  		if (rnp->parent == NULL) {
  
  			/* No more levels.  Exit loop holding root lock. */
  
  			break;
  		}
  		spin_unlock_irqrestore(&rnp->lock, flags);
  		rnp_c = rnp;
  		rnp = rnp->parent;
  		spin_lock_irqsave(&rnp->lock, flags);
  		WARN_ON_ONCE(rnp_c->qsmask);
  	}
  
  	/*
  	 * Get here if we are the last CPU to pass through a quiescent
  	 * state for this grace period.  Invoke rcu_report_qs_rsp()
  	 * to clean up and start the next grace period if one is needed.
  	 */
  	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
  }
  
  /*
   * Record a quiescent state for the specified CPU to that CPU's rcu_data
   * structure.  This must be either called from the specified CPU, or
   * called when the specified CPU is known to be offline (and when it is
   * also known that no other CPU is concurrently trying to help the offline
   * CPU).  The lastcomp argument is used to make sure we are still in the
   * grace period of interest.  We don't want to end the current grace period
   * based on quiescent states detected in an earlier grace period!
   */
  static void
  rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
  {
  	unsigned long flags;
  	unsigned long mask;
  	struct rcu_node *rnp;
  
  	rnp = rdp->mynode;
  	spin_lock_irqsave(&rnp->lock, flags);
  	if (lastcomp != rnp->completed) {
  
  		/*
  		 * Someone beat us to it for this grace period, so leave.
  		 * The race with GP start is resolved by the fact that we
  		 * hold the leaf rcu_node lock, so that the per-CPU bits
  		 * cannot yet be initialized -- so we would simply find our
  		 * CPU's bit already cleared in rcu_report_qs_rnp() if this
  		 * race occurred.
  		 */
  		rdp->passed_quiesc = 0;	/* try again later! */
  		spin_unlock_irqrestore(&rnp->lock, flags);
  		return;
  	}
  	mask = rdp->grpmask;
  	if ((rnp->qsmask & mask) == 0) {
  		spin_unlock_irqrestore(&rnp->lock, flags);
  	} else {
  		rdp->qs_pending = 0;
  
  		/*
  		 * This GP can't end until cpu checks in, so all of our
  		 * callbacks can be processed during the next GP.
  		 */
  		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
		rcu_report_qs_rnp(mask, rsp, rnp, flags); /* releases rnp->lock */
  	}
  }
  
  /*
   * Check to see if there is a new grace period of which this CPU
   * is not yet aware, and if so, set up local rcu_data state for it.
   * Otherwise, see if this CPU has just passed through its first
   * quiescent state for this grace period, and record that fact if so.
   */
  static void
  rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
  {
  	/* If there is now a new grace period, record and return. */
  	if (check_for_new_grace_period(rsp, rdp))
  		return;
  
  	/*
  	 * Does this CPU still need to do its part for current grace period?
  	 * If no, return and let the other CPUs do their part as well.
  	 */
  	if (!rdp->qs_pending)
  		return;
  
  	/*
  	 * Was there a quiescent state since the beginning of the grace
  	 * period? If no, then exit and wait for the next call.
  	 */
  	if (!rdp->passed_quiesc)
  		return;
  	/*
  	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
  	 * judge of that).
  	 */
  	rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
  
  /*
   * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the
   * specified flavor of RCU.  The callbacks will be adopted by the next
   * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever
   * comes first.  Because this is invoked from the CPU_DYING notifier,
   * irqs are already disabled.
   */
  static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
  {
  	int i;
  	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
  
  	if (rdp->nxtlist == NULL)
  		return;  /* irqs disabled, so comparison is stable. */
  	spin_lock(&rsp->onofflock);  /* irqs already disabled. */
  	*rsp->orphan_cbs_tail = rdp->nxtlist;
  	rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
  	rdp->nxtlist = NULL;
  	for (i = 0; i < RCU_NEXT_SIZE; i++)
  		rdp->nxttail[i] = &rdp->nxtlist;
  	rsp->orphan_qlen += rdp->qlen;
  	rdp->qlen = 0;
  	spin_unlock(&rsp->onofflock);  /* irqs remain disabled. */
  }
  
  /*
   * Adopt previously orphaned RCU callbacks.
   */
  static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
  {
  	unsigned long flags;
  	struct rcu_data *rdp;
  
  	spin_lock_irqsave(&rsp->onofflock, flags);
  	rdp = rsp->rda[smp_processor_id()];
  	if (rsp->orphan_cbs_list == NULL) {
  		spin_unlock_irqrestore(&rsp->onofflock, flags);
  		return;
  	}
  	*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
  	rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
  	rdp->qlen += rsp->orphan_qlen;
  	rsp->orphan_cbs_list = NULL;
  	rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
  	rsp->orphan_qlen = 0;
  	spin_unlock_irqrestore(&rsp->onofflock, flags);
  }
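
/*
 * In summary, the hotplug path for callbacks is (notifier order):
 *
 *	CPU_DYING:	rcu_send_cbs_to_orphanage() -- the dying CPU
 *			donates its callbacks to rsp->orphan_cbs_list.
 *	CPU_DEAD:	__rcu_offline_cpu() -> rcu_adopt_orphan_cbs()
 *			-- a surviving CPU adopts the orphans.
 *
 * As noted above, _rcu_barrier() may instead adopt the orphans first.
 */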
  
  /*
   * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
   * and move all callbacks from the outgoing CPU to the current one.
   */
  static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
  {
  	unsigned long flags;
  	unsigned long mask;
  	int need_report = 0;
  	struct rcu_data *rdp = rsp->rda[cpu];
  	struct rcu_node *rnp;
  
  	/* Exclude any attempts to start a new grace period. */
  	spin_lock_irqsave(&rsp->onofflock, flags);
  
  	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
  	rnp = rdp->mynode;	/* this is the outgoing CPU's rnp. */
  	mask = rdp->grpmask;	/* rnp->grplo is constant. */
  	do {
  		spin_lock(&rnp->lock);		/* irqs already disabled. */
  		rnp->qsmaskinit &= ~mask;
  		if (rnp->qsmaskinit != 0) {
  			if (rnp != rdp->mynode)
  				spin_unlock(&rnp->lock); /* irqs remain disabled. */
  			break;
  		}
  		if (rnp == rdp->mynode)
  			need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
  		else
  			spin_unlock(&rnp->lock); /* irqs remain disabled. */
  		mask = rnp->grpmask;
  		rnp = rnp->parent;
  	} while (rnp != NULL);

  	/*
  	 * We still hold the leaf rcu_node structure lock here, and
  	 * irqs are still disabled.  The reason for this subterfuge is
	 * that invoking rcu_report_unblock_qs_rnp() with ->onofflock
  	 * held leads to deadlock.
  	 */
  	spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
  	rnp = rdp->mynode;
  	if (need_report & RCU_OFL_TASKS_NORM_GP)
  		rcu_report_unblock_qs_rnp(rnp, flags);
  	else
  		spin_unlock_irqrestore(&rnp->lock, flags);
  	if (need_report & RCU_OFL_TASKS_EXP_GP)
  		rcu_report_exp_rnp(rsp, rnp);

  	rcu_adopt_orphan_cbs(rsp);
  }
  
  /*
   * Remove the specified CPU from the RCU hierarchy and move any pending
   * callbacks that it might have to the current CPU.  This code assumes
   * that at least one CPU in the system will remain running at all times.
   * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
   */
  static void rcu_offline_cpu(int cpu)
  {
  	__rcu_offline_cpu(cpu, &rcu_sched_state);
  	__rcu_offline_cpu(cpu, &rcu_bh_state);
  	rcu_preempt_offline_cpu(cpu);
  }
  
  #else /* #ifdef CONFIG_HOTPLUG_CPU */
  static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
  {
  }
  
  static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
  {
  }
  static void rcu_offline_cpu(int cpu)
  {
  }
  
  #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
  
  /*
   * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
   */
  static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  {
  	unsigned long flags;
  	struct rcu_head *next, *list, **tail;
  	int count;
  
	/* If no callbacks are ready, just return. */
  	if (!cpu_has_callbacks_ready_to_invoke(rdp))
  		return;
  
  	/*
  	 * Extract the list of ready callbacks, disabling to prevent
  	 * races with call_rcu() from interrupt handlers.
  	 */
  	local_irq_save(flags);
  	list = rdp->nxtlist;
  	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
  	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
  	tail = rdp->nxttail[RCU_DONE_TAIL];
  	for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
  		if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
  			rdp->nxttail[count] = &rdp->nxtlist;
  	local_irq_restore(flags);
  
  	/* Invoke callbacks. */
  	count = 0;
  	while (list) {
  		next = list->next;
  		prefetch(next);
  		list->func(list);
  		list = next;
  		if (++count >= rdp->blimit)
  			break;
  	}
  
  	local_irq_save(flags);
  
  	/* Update count, and requeue any remaining callbacks. */
  	rdp->qlen -= count;
  	if (list != NULL) {
  		*tail = rdp->nxtlist;
  		rdp->nxtlist = list;
  		for (count = 0; count < RCU_NEXT_SIZE; count++)
  			if (&rdp->nxtlist == rdp->nxttail[count])
  				rdp->nxttail[count] = tail;
  			else
  				break;
  	}
  
  	/* Reinstate batch limit if we have worked down the excess. */
  	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
  		rdp->blimit = blimit;
  	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
  	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
  		rdp->qlen_last_fqs_check = 0;
  		rdp->n_force_qs_snap = rsp->n_force_qs;
  	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
  		rdp->qlen_last_fqs_check = rdp->qlen;
  	local_irq_restore(flags);
  
  	/* Re-raise the RCU softirq if there are callbacks remaining. */
  	if (cpu_has_callbacks_ready_to_invoke(rdp))
  		raise_softirq(RCU_SOFTIRQ);
  }
  
  /*
   * Check to see if this CPU is in a non-context-switch quiescent state
   * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
   * Also schedule the RCU softirq handler.
   *
   * This function must be called with hardirqs disabled.  It is normally
   * invoked from the scheduling-clock interrupt.  If rcu_pending returns
   * false, there is no point in invoking rcu_check_callbacks().
   */
  void rcu_check_callbacks(int cpu, int user)
  {
  	if (!rcu_pending(cpu))
  		return; /* if nothing for RCU to do. */
  	if (user ||
  	    (idle_cpu(cpu) && rcu_scheduler_active &&
  	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
  
  		/*
  		 * Get here if this CPU took its interrupt from user
  		 * mode or from the idle loop, and if this is not a
  		 * nested interrupt.  In this case, the CPU is in
  		 * a quiescent state, so note it.
  		 *
  		 * No memory barrier is required here because both
  		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
  		 * variables that other CPUs neither access nor modify,
  		 * at least not while the corresponding CPU is online.
  		 */
  		rcu_sched_qs(cpu);
  		rcu_bh_qs(cpu);
  
  	} else if (!in_softirq()) {
  
  		/*
  		 * Get here if this CPU did not take its interrupt from
  		 * softirq, in other words, if it is not interrupting
  		 * a rcu_bh read-side critical section.  This is an _bh
  		 * critical section, so note it.
  		 */
  		rcu_bh_qs(cpu);
  	}
  	rcu_preempt_check_callbacks(cpu);
  	raise_softirq(RCU_SOFTIRQ);
  }
  
  #ifdef CONFIG_SMP
  
  /*
   * Scan the leaf rcu_node structures, processing dyntick state for any that
   * have not yet encountered a quiescent state, using the function specified.
   * The caller must have suppressed start of new grace periods.
   */
  static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
  {
  	unsigned long bit;
  	int cpu;
  	unsigned long flags;
  	unsigned long mask;
  	struct rcu_node *rnp;

  	rcu_for_each_leaf_node(rsp, rnp) {
  		mask = 0;
  		spin_lock_irqsave(&rnp->lock, flags);
  		if (!rcu_gp_in_progress(rsp)) {
  			spin_unlock_irqrestore(&rnp->lock, flags);
  			return;
  		}
  		if (rnp->qsmask == 0) {
  			spin_unlock_irqrestore(&rnp->lock, flags);
  			continue;
  		}
  		cpu = rnp->grplo;
  		bit = 1;
  		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
  			if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
  				mask |= bit;
  		}
  		if (mask != 0) {

  			/* rcu_report_qs_rnp() releases rnp->lock. */
  			rcu_report_qs_rnp(mask, rsp, rnp, flags);
  			continue;
  		}
  		spin_unlock_irqrestore(&rnp->lock, flags);
  	}
  }
  
  /*
   * Force quiescent states on reluctant CPUs, and also detect which
   * CPUs are in dyntick-idle mode.
   */
  static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
  {
  	unsigned long flags;
  	struct rcu_node *rnp = rcu_get_root(rsp);

  	if (!rcu_gp_in_progress(rsp))
  		return;  /* No grace period in progress, nothing to force. */
  	if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
  		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
  		return;	/* Someone else is already on the job. */
  	}
  	if (relaxed &&
ef631b0ca   Paul E. McKenney   rcu: Make hierarc...
1194
  	    (long)(rsp->jiffies_force_qs - jiffies) >= 0)
f96e9232e   Paul E. McKenney   rcu: Adjust force...
1195
  		goto unlock_fqs_ret; /* no emergency and done recently. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1196
  	rsp->n_force_qs++;
07079d535   Paul E. McKenney   rcu: Prohibit sta...
1197
  	spin_lock(&rnp->lock);  /* irqs already disabled */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1198
  	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
560d4bc0d   Paul E. McKenney   rcu: Further clea...
1199
  	if(!rcu_gp_in_progress(rsp)) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1200
  		rsp->n_force_qs_ngp++;
07079d535   Paul E. McKenney   rcu: Prohibit sta...
1201
  		spin_unlock(&rnp->lock);  /* irqs remain disabled */
f96e9232e   Paul E. McKenney   rcu: Adjust force...
1202
  		goto unlock_fqs_ret;  /* no GP in progress, time updated. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1203
  	}
07079d535   Paul E. McKenney   rcu: Prohibit sta...
1204
  	rsp->fqs_active = 1;
f3a8b5c6a   Paul E. McKenney   rcu: Eliminate lo...
1205
  	switch (rsp->signaled) {
83f5b01ff   Paul E. McKenney   rcu: Fix long-gra...
1206
  	case RCU_GP_IDLE:
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1207
  	case RCU_GP_INIT:
83f5b01ff   Paul E. McKenney   rcu: Fix long-gra...
1208
  		break; /* grace period idle or initializing, ignore. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1209
1210
  
  	case RCU_SAVE_DYNTICK:
07079d535   Paul E. McKenney   rcu: Prohibit sta...
1211
  		spin_unlock(&rnp->lock);  /* irqs remain disabled */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1212
1213
1214
1215
  		if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
  			break; /* So gcc recognizes the dead code. */
  
  		/* Record dyntick-idle state. */
45f014c52   Paul E. McKenney   rcu: Remove redun...
1216
  		force_qs_rnp(rsp, dyntick_save_progress_counter);
07079d535   Paul E. McKenney   rcu: Prohibit sta...
1217
  		spin_lock(&rnp->lock);  /* irqs already disabled */
ee47eb9f4   Paul E. McKenney   rcu: Remove leg o...
1218
  		if (rcu_gp_in_progress(rsp))
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1219
  			rsp->signaled = RCU_FORCE_QS;
ee47eb9f4   Paul E. McKenney   rcu: Remove leg o...
1220
  		break;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1221
1222
1223
1224
  
  	case RCU_FORCE_QS:
  
  		/* Check dyntick-idle state, send IPI to laggarts. */
07079d535   Paul E. McKenney   rcu: Prohibit sta...
1225
  		spin_unlock(&rnp->lock);  /* irqs remain disabled */
45f014c52   Paul E. McKenney   rcu: Remove redun...
1226
  		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1227
1228
  
  		/* Leave state in case more forcing is required. */
07079d535   Paul E. McKenney   rcu: Prohibit sta...
1229
  		spin_lock(&rnp->lock);  /* irqs already disabled */
f96e9232e   Paul E. McKenney   rcu: Adjust force...
1230
  		break;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1231
  	}
07079d535   Paul E. McKenney   rcu: Prohibit sta...
1232
  	rsp->fqs_active = 0;
46a1e34ed   Paul E. McKenney   rcu: Make force_q...
1233
1234
1235
1236
1237
1238
  	if (rsp->fqs_need_gp) {
  		spin_unlock(&rsp->fqslock); /* irqs remain disabled */
  		rsp->fqs_need_gp = 0;
  		rcu_start_gp(rsp, flags); /* releases rnp->lock */
  		return;
  	}
07079d535   Paul E. McKenney   rcu: Prohibit sta...
1239
  	spin_unlock(&rnp->lock);  /* irqs remain disabled */
f96e9232e   Paul E. McKenney   rcu: Adjust force...
1240
  unlock_fqs_ret:
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
  	spin_unlock_irqrestore(&rsp->fqslock, flags);
  }
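
  /*
   * Illustrative summary of the function above (added commentary, not
   * code): over repeated invocations during one long-running grace
   * period, rsp->signaled advances
   *
   *	RCU_GP_IDLE/RCU_GP_INIT -> RCU_SAVE_DYNTICK -> RCU_FORCE_QS
   *
   * so the first forcing pass snapshots each holdout CPU's dyntick
   * counter, and later passes compare against those snapshots, sending
   * resched IPIs only to CPUs that still have not passed through a
   * quiescent state.
   */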
  
  #else /* #ifdef CONFIG_SMP */
  
  static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
  {
  	set_need_resched();
  }
  
  #endif /* #else #ifdef CONFIG_SMP */
  
  /*
   * This does the RCU processing work from softirq context for the
   * specified rcu_state and rcu_data structures.  This may be called
   * only from the CPU to whom the rdp belongs.
   */
  static void
  __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  {
  	unsigned long flags;
  	WARN_ON_ONCE(rdp->beenonline == 0);

  	/*
  	 * If an RCU GP has gone long enough, go check for dyntick
  	 * idle CPUs and, if needed, send resched IPIs.
  	 */
  	if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
  		force_quiescent_state(rsp, 1);
  
  	/*
  	 * Advance callbacks in response to end of earlier grace
  	 * period that some other CPU ended.
  	 */
  	rcu_process_gp_end(rsp, rdp);
  
  	/* Update RCU state based on any recent quiescent states. */
  	rcu_check_quiescent_state(rsp, rdp);
  
  	/* Does this CPU require a not-yet-started grace period? */
  	if (cpu_needs_another_gp(rsp, rdp)) {
  		spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags);
  		rcu_start_gp(rsp, flags);  /* releases above lock */
  	}
  
  	/* If there are callbacks ready, invoke them. */
  	rcu_do_batch(rsp, rdp);
  }
  
  /*
   * Do softirq processing for the current CPU.
   */
  static void rcu_process_callbacks(struct softirq_action *unused)
  {
  	/*
  	 * Memory references from any prior RCU read-side critical sections
  	 * executed by the interrupted code must be seen before any RCU
  	 * grace-period manipulations below.
  	 */
  	smp_mb(); /* See above block comment. */
  	__rcu_process_callbacks(&rcu_sched_state,
  				&__get_cpu_var(rcu_sched_data));
  	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
  	rcu_preempt_process_callbacks();
  
  	/*
  	 * Memory references from any later RCU read-side critical sections
  	 * executed by the interrupted code must be seen after any RCU
  	 * grace-period manipulations above.
  	 */
  	smp_mb(); /* See above block comment. */
  }
  
  static void
  __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
  	   struct rcu_state *rsp)
  {
  	unsigned long flags;
  	struct rcu_data *rdp;
  
  	head->func = func;
  	head->next = NULL;
  
  	smp_mb(); /* Ensure RCU update seen before callback registry. */
  
  	/*
  	 * Opportunistically note grace-period endings and beginnings.
  	 * Note that we might see a beginning right after we see an
  	 * end, but never vice versa, since this CPU has to pass through
  	 * a quiescent state betweentimes.
  	 */
  	local_irq_save(flags);
  	rdp = rsp->rda[smp_processor_id()];
  	rcu_process_gp_end(rsp, rdp);
  	check_for_new_grace_period(rsp, rdp);
  
  	/* Add the callback to our list. */
  	*rdp->nxttail[RCU_NEXT_TAIL] = head;
  	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
  
  	/* Start a new grace period if one not already started. */
  	if (!rcu_gp_in_progress(rsp)) {
  		unsigned long nestflag;
  		struct rcu_node *rnp_root = rcu_get_root(rsp);
  
  		spin_lock_irqsave(&rnp_root->lock, nestflag);
  		rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
  	}
  	/*
  	 * Force the grace period if too many callbacks or too long waiting.
  	 * Enforce hysteresis, and don't invoke force_quiescent_state()
  	 * if some other CPU has recently done so.  Also, don't bother
  	 * invoking force_quiescent_state() if the newly enqueued callback
  	 * is the only one waiting for a grace period to complete.
  	 */
  	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
  		rdp->blimit = LONG_MAX;
  		if (rsp->n_force_qs == rdp->n_force_qs_snap &&
  		    *rdp->nxttail[RCU_DONE_TAIL] != head)
  			force_quiescent_state(rsp, 0);
  		rdp->n_force_qs_snap = rsp->n_force_qs;
  		rdp->qlen_last_fqs_check = rdp->qlen;
  	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
  		force_quiescent_state(rsp, 1);
  	local_irq_restore(flags);
  }
  
  /*
   * Queue an RCU-sched callback for invocation after a grace period.
   */
  void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
  {
  	__call_rcu(head, func, &rcu_sched_state);
  }
  EXPORT_SYMBOL_GPL(call_rcu_sched);
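
  /*
   * Illustrative usage sketch (hypothetical, not part of this file):
   * a typical call_rcu_sched() callback frees the structure containing
   * its rcu_head once all pre-existing rcu_read_lock_sched() readers
   * are known to have finished.  Assumes <linux/slab.h> for kfree().
   */
  struct example_obj {
  	int data;
  	struct rcu_head rcu;
  };

  static void __maybe_unused example_obj_reclaim(struct rcu_head *head)
  {
  	struct example_obj *p = container_of(head, struct example_obj, rcu);

  	kfree(p);	/* Safe: no reader can still reference p. */
  }

  /* After unlinking p from all RCU-protected structures, an updater
   * would queue the reclaim with:
   *
   *	call_rcu_sched(&p->rcu, example_obj_reclaim);
   */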
  
  /*
   * Queue an RCU callback for invocation after a quicker grace period.
   */
  void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
  {
  	__call_rcu(head, func, &rcu_bh_state);
  }
  EXPORT_SYMBOL_GPL(call_rcu_bh);
  /**
   * synchronize_sched - wait until an rcu-sched grace period has elapsed.
   *
   * Control will return to the caller some time after a full rcu-sched
   * grace period has elapsed, in other words after all currently executing
   * rcu-sched read-side critical sections have completed.   These read-side
   * critical sections are delimited by rcu_read_lock_sched() and
   * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
   * local_irq_disable(), and so on may be used in place of
   * rcu_read_lock_sched().
   *
   * This means that all preempt_disable code sequences, including NMI and
   * hardware-interrupt handlers, in progress on entry will have completed
   * before this primitive returns.  However, this does not guarantee that
   * softirq handlers will have completed, since in some kernels, these
   * handlers can run in process context, and can block.
   *
   * This primitive provides the guarantees made by the (now removed)
   * synchronize_kernel() API.  In contrast, synchronize_rcu() only
   * guarantees that rcu_read_lock() sections will have completed.
   * In "classic RCU", these two guarantees happen to be one and
   * the same, but can differ in realtime RCU implementations.
   */
  void synchronize_sched(void)
  {
  	struct rcu_synchronize rcu;
  
  	if (rcu_blocking_is_gp())
  		return;
  
  	init_completion(&rcu.completion);
  	/* Will wake me after RCU finished. */
  	call_rcu_sched(&rcu.head, wakeme_after_rcu);
  	/* Wait for it. */
  	wait_for_completion(&rcu.completion);
  }
  EXPORT_SYMBOL_GPL(synchronize_sched);
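
  /*
   * Illustrative sketch (hypothetical): the classic blocking update
   * pattern built on synchronize_sched().  The updater publishes a new
   * version of example_obj (defined in the sketch above), waits for a
   * full grace period, and only then reclaims the old version.
   * Assumes <linux/slab.h> for kfree().
   */
  static struct example_obj *example_global;
  static DEFINE_SPINLOCK(example_update_lock);

  static void __maybe_unused example_update(struct example_obj *newp)
  {
  	struct example_obj *oldp;

  	spin_lock(&example_update_lock);
  	oldp = example_global;
  	rcu_assign_pointer(example_global, newp);	/* publish new version */
  	spin_unlock(&example_update_lock);
  	synchronize_sched();	/* wait for pre-existing readers to finish */
  	kfree(oldp);		/* now safe to reclaim the old version */
  }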
  
  /**
   * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
   *
   * Control will return to the caller some time after a full rcu_bh grace
   * period has elapsed, in other words after all currently executing rcu_bh
   * read-side critical sections have completed.  RCU read-side critical
   * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
   * and may be nested.
   */
  void synchronize_rcu_bh(void)
  {
  	struct rcu_synchronize rcu;
  
  	if (rcu_blocking_is_gp())
  		return;
  
  	init_completion(&rcu.completion);
  	/* Will wake me after RCU finished. */
  	call_rcu_bh(&rcu.head, wakeme_after_rcu);
  	/* Wait for it. */
  	wait_for_completion(&rcu.completion);
  }
  EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
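
  /*
   * Illustrative reader-side counterpart (hypothetical): readers waited
   * on by synchronize_rcu_bh() bracket their accesses with
   * rcu_read_lock_bh()/rcu_read_unlock_bh() and fetch RCU-protected
   * pointers with rcu_dereference().  Readers and updaters must use
   * matching RCU flavors, so example_bh_global here is assumed to be
   * updated via call_rcu_bh()/synchronize_rcu_bh().
   */
  static struct example_obj *example_bh_global;

  static int __maybe_unused example_read_bh(void)
  {
  	struct example_obj *p;
  	int val;

  	rcu_read_lock_bh();
  	p = rcu_dereference(example_bh_global);
  	val = p ? p->data : -1;
  	rcu_read_unlock_bh();
  	return val;
  }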
  /*
   * Check to see if there is any immediate RCU-related work to be done
   * by the current CPU, for the specified type of RCU, returning 1 if so.
   * The checks are in order of increasing expense: checks that can be
   * carried out against CPU-local state are performed first.  However,
   * we must check for CPU stalls first, else we might not get a chance.
   */
  static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  {
  	struct rcu_node *rnp = rdp->mynode;

  	rdp->n_rcu_pending++;
  
  	/* Check for CPU stalls, if enabled. */
  	check_cpu_stall(rsp, rdp);
  
  	/* Is the RCU core waiting for a quiescent state from this CPU? */
  	if (rdp->qs_pending) {
  		rdp->n_rp_qs_pending++;
  		return 1;
  	}

  	/* Does this CPU have callbacks ready to invoke? */
  	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
  		rdp->n_rp_cb_ready++;
  		return 1;
  	}

  	/* Has RCU gone idle with this CPU needing another grace period? */
  	if (cpu_needs_another_gp(rsp, rdp)) {
  		rdp->n_rp_cpu_needs_gp++;
  		return 1;
  	}

  	/* Has another RCU grace period completed?  */
  	if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
  		rdp->n_rp_gp_completed++;
  		return 1;
  	}

  	/* Has a new RCU grace period started? */
  	if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
  		rdp->n_rp_gp_started++;
  		return 1;
  	}

  	/* Has an RCU GP gone long enough to send resched IPIs &c? */
  	if (rcu_gp_in_progress(rsp) &&
  	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
  		rdp->n_rp_need_fqs++;
  		return 1;
  	}

  	/* nothing to do */
  	rdp->n_rp_need_nothing++;
  	return 0;
  }
  
  /*
   * Check to see if there is any immediate RCU-related work to be done
   * by the current CPU, returning 1 if so.  This function is part of the
   * RCU implementation; it is -not- an exported member of the RCU API.
   */
  static int rcu_pending(int cpu)
  {
  	return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
  	       __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
  	       rcu_preempt_pending(cpu);
  }
  
  /*
   * Check to see if any future RCU-related work will need to be done
   * by the current CPU, even if none need be done immediately, returning
   * 1 if so.  This function is part of the RCU implementation; it is -not-
   * an exported member of the RCU API.
   */
  int rcu_needs_cpu(int cpu)
  {
  	/* RCU callbacks either ready or pending? */
  	return per_cpu(rcu_sched_data, cpu).nxtlist ||
  	       per_cpu(rcu_bh_data, cpu).nxtlist ||
  	       rcu_preempt_needs_cpu(cpu);
  }
  /*
   * This function is invoked towards the end of the scheduler's initialization
   * process.  Before this is called, the idle task might contain
   * RCU read-side critical sections (during which time, this idle
   * task is booting the system).  After this function is called, the
   * idle tasks are prohibited from containing RCU read-side critical
   * sections.
   */
  void rcu_scheduler_starting(void)
  {
  	WARN_ON(num_online_cpus() != 1);
  	WARN_ON(nr_context_switches() > 0);
  	rcu_scheduler_active = 1;
  }
  static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
  static atomic_t rcu_barrier_cpu_count;
  static DEFINE_MUTEX(rcu_barrier_mutex);
  static struct completion rcu_barrier_completion;
  
  static void rcu_barrier_callback(struct rcu_head *notused)
  {
  	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
  		complete(&rcu_barrier_completion);
  }
  
  /*
   * Called with preemption disabled, and from cross-cpu IRQ context.
   */
  static void rcu_barrier_func(void *type)
  {
  	int cpu = smp_processor_id();
  	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
  	void (*call_rcu_func)(struct rcu_head *head,
  			      void (*func)(struct rcu_head *head));
  
  	atomic_inc(&rcu_barrier_cpu_count);
  	call_rcu_func = type;
  	call_rcu_func(head, rcu_barrier_callback);
  }
  /*
   * Orchestrate the specified type of RCU barrier, waiting for all
   * RCU callbacks of the specified type to complete.
   */
  static void _rcu_barrier(struct rcu_state *rsp,
  			 void (*call_rcu_func)(struct rcu_head *head,
  					       void (*func)(struct rcu_head *head)))
  {
  	BUG_ON(in_interrupt());
  	/* Take mutex to serialize concurrent rcu_barrier() requests. */
  	mutex_lock(&rcu_barrier_mutex);
  	init_completion(&rcu_barrier_completion);
  	/*
  	 * Initialize rcu_barrier_cpu_count to 1, then invoke
  	 * rcu_barrier_func() on each CPU, so that each CPU also has
  	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
  	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
  	 * might complete its grace period before all of the other CPUs
  	 * did their increment, causing this function to return too
  	 * early.
  	 */
  	atomic_set(&rcu_barrier_cpu_count, 1);
  	preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */
  	rcu_adopt_orphan_cbs(rsp);
  	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
  	preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */
  	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
  		complete(&rcu_barrier_completion);
  	wait_for_completion(&rcu_barrier_completion);
  	mutex_unlock(&rcu_barrier_mutex);
  }
  
  /**
   * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
   */
  void rcu_barrier_bh(void)
  {
  	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  
  /**
   * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
   */
  void rcu_barrier_sched(void)
  {
  	_rcu_barrier(&rcu_sched_state, call_rcu_sched);
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_sched);
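
  /*
   * Illustrative sketch (hypothetical): a module whose callbacks were
   * queued with call_rcu_sched() must invoke rcu_barrier_sched() in its
   * exit path, after it has stopped posting new callbacks, so that no
   * callback can run after the module text is unloaded.
   */
  static void __maybe_unused example_module_teardown(void)
  {
  	/* ...stop all updaters so no new callbacks are queued... */
  	rcu_barrier_sched();	/* wait for every queued callback to run */
  }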

  /*
   * Do boot-time initialization of a CPU's per-CPU RCU data.
   */
  static void __init
  rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
  {
  	unsigned long flags;
  	int i;
  	struct rcu_data *rdp = rsp->rda[cpu];
  	struct rcu_node *rnp = rcu_get_root(rsp);
  
  	/* Set up local state, ensuring consistent view of global state. */
  	spin_lock_irqsave(&rnp->lock, flags);
  	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
  	rdp->nxtlist = NULL;
  	for (i = 0; i < RCU_NEXT_SIZE; i++)
  		rdp->nxttail[i] = &rdp->nxtlist;
  	rdp->qlen = 0;
  #ifdef CONFIG_NO_HZ
  	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
  #endif /* #ifdef CONFIG_NO_HZ */
  	rdp->cpu = cpu;
  	spin_unlock_irqrestore(&rnp->lock, flags);
  }
  
  /*
   * Initialize a CPU's per-CPU RCU data.  Note that only one online or
   * offline event can be happening at a given time.  Note also that we
   * can accept some slop in the rsp->completed access due to the fact
   * that this CPU cannot possibly have any RCU callbacks in flight yet.
   */
  static void __cpuinit
  rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
  {
  	unsigned long flags;
  	unsigned long mask;
  	struct rcu_data *rdp = rsp->rda[cpu];
  	struct rcu_node *rnp = rcu_get_root(rsp);
  
  	/* Set up local state, ensuring consistent view of global state. */
  	spin_lock_irqsave(&rnp->lock, flags);
  	rdp->passed_quiesc = 0;  /* We could be racing with new GP, */
  	rdp->qs_pending = 1;	 /*  so set up to respond to current GP. */
  	rdp->beenonline = 1;	 /* We have now been online. */
  	rdp->preemptable = preemptable;
  	rdp->qlen_last_fqs_check = 0;
  	rdp->n_force_qs_snap = rsp->n_force_qs;
  	rdp->blimit = blimit;
  	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
  
  	/*
  	 * A new grace period might start here.  If so, we won't be part
  	 * of it, but that is OK, as we are currently in a quiescent state.
  	 */
  
  	/* Exclude any attempts to start a new GP on large systems. */
  	spin_lock(&rsp->onofflock);		/* irqs already disabled. */
  
  	/* Add CPU to rcu_node bitmasks. */
  	rnp = rdp->mynode;
  	mask = rdp->grpmask;
  	do {
  		/* Exclude any attempts to start a new GP on small systems. */
  		spin_lock(&rnp->lock);	/* irqs already disabled. */
  		rnp->qsmaskinit |= mask;
  		mask = rnp->grpmask;
  		if (rnp == rdp->mynode) {
  			rdp->gpnum = rnp->completed; /* if GP in progress... */
  			rdp->completed = rnp->completed;
  			rdp->passed_quiesc_completed = rnp->completed - 1;
  		}
  		spin_unlock(&rnp->lock); /* irqs already disabled. */
  		rnp = rnp->parent;
  	} while (rnp != NULL && !(rnp->qsmaskinit & mask));
  	spin_unlock_irqrestore(&rsp->onofflock, flags);
  }

  static void __cpuinit rcu_online_cpu(int cpu)
  {
  	rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
  	rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
  	rcu_preempt_init_percpu_data(cpu);
  }
  
  /*
   * Handle CPU online/offline notification events.
   */
  static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
  				    unsigned long action, void *hcpu)
  {
  	long cpu = (long)hcpu;

  	switch (action) {
  	case CPU_UP_PREPARE:
  	case CPU_UP_PREPARE_FROZEN:
  		rcu_online_cpu(cpu);
  		break;
  	case CPU_DYING:
  	case CPU_DYING_FROZEN:
  		/*
  		 * preempt_disable() in _rcu_barrier() prevents stop_machine(),
  		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
  		 * returns, all online cpus have queued rcu_barrier_func().
  		 * The dying CPU clears its cpu_online_mask bit and
  		 * moves all of its RCU callbacks to ->orphan_cbs_list
  		 * in the context of stop_machine(), so subsequent calls
  		 * to _rcu_barrier() will adopt these callbacks and only
  		 * then queue rcu_barrier_func() on all remaining CPUs.
  		 */
  		rcu_send_cbs_to_orphanage(&rcu_bh_state);
  		rcu_send_cbs_to_orphanage(&rcu_sched_state);
  		rcu_preempt_send_cbs_to_orphanage();
  		break;
  	case CPU_DEAD:
  	case CPU_DEAD_FROZEN:
  	case CPU_UP_CANCELED:
  	case CPU_UP_CANCELED_FROZEN:
  		rcu_offline_cpu(cpu);
  		break;
  	default:
  		break;
  	}
  	return NOTIFY_OK;
  }
  
  /*
   * Compute the per-level fanout, either using the exact fanout specified
   * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
   */
  #ifdef CONFIG_RCU_FANOUT_EXACT
  static void __init rcu_init_levelspread(struct rcu_state *rsp)
  {
  	int i;
  
  	for (i = NUM_RCU_LVLS - 1; i >= 0; i--)
  		rsp->levelspread[i] = CONFIG_RCU_FANOUT;
  }
  #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
  static void __init rcu_init_levelspread(struct rcu_state *rsp)
  {
  	int ccur;
  	int cprv;
  	int i;
  
  	cprv = NR_CPUS;
  	for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
  		ccur = rsp->levelcnt[i];
  		rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
  		cprv = ccur;
  	}
  }
  #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
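
  /*
   * Worked example of the balancing computation above (illustrative,
   * assuming CONFIG_RCU_FANOUT=16, NR_CPUS=64, and thus a two-level
   * tree with levelcnt[] = {1, 4}):
   *
   *	cprv = 64;
   *	i = 1: ccur = 4; levelspread[1] = (64 + 4 - 1) / 4 = 16; cprv = 4;
   *	i = 0: ccur = 1; levelspread[0] = (4 + 1 - 1) / 1 = 4;
   *
   * The root therefore fans out to 4 leaf rcu_node structures, each
   * covering 16 CPUs, instead of the exact-fanout {16, 16} split.
   */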
  
  /*
   * Helper function for rcu_init() that initializes one rcu_state structure.
   */
  static void __init rcu_init_one(struct rcu_state *rsp)
  {
  	int cpustride = 1;
  	int i;
  	int j;
  	struct rcu_node *rnp;
  
  	/* Initialize the level-tracking arrays. */
  
  	for (i = 1; i < NUM_RCU_LVLS; i++)
  		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
  	rcu_init_levelspread(rsp);
  
  	/* Initialize the elements themselves, starting from the leaves. */
  
  	for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
  		cpustride *= rsp->levelspread[i];
  		rnp = rsp->level[i];
  		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
  			spin_lock_init(&rnp->lock);
  			lockdep_set_class(&rnp->lock, &rcu_node_class[i]);
  			rnp->gpnum = 0;
  			rnp->qsmask = 0;
  			rnp->qsmaskinit = 0;
  			rnp->grplo = j * cpustride;
  			rnp->grphi = (j + 1) * cpustride - 1;
  			if (rnp->grphi >= NR_CPUS)
  				rnp->grphi = NR_CPUS - 1;
  			if (i == 0) {
  				rnp->grpnum = 0;
  				rnp->grpmask = 0;
  				rnp->parent = NULL;
  			} else {
  				rnp->grpnum = j % rsp->levelspread[i - 1];
  				rnp->grpmask = 1UL << rnp->grpnum;
  				rnp->parent = rsp->level[i - 1] +
  					      j / rsp->levelspread[i - 1];
  			}
  			rnp->level = i;
  			INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
  			INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
  			INIT_LIST_HEAD(&rnp->blocked_tasks[2]);
  			INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
  		}
  	}
  }
  
  /*
   * Helper macro for __rcu_init() and __rcu_init_preempt().  To be used
   * nowhere else!  Assigns leaf node pointers into each CPU's rcu_data
   * structure.
   */
  #define RCU_INIT_FLAVOR(rsp, rcu_data) \
  do { \
  	int i; \
  	int j; \
  	struct rcu_node *rnp; \
  	\
  	rcu_init_one(rsp); \
  	rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
  	j = 0; \
  	for_each_possible_cpu(i) { \
  		if (i > rnp[j].grphi) \
  			j++; \
  		per_cpu(rcu_data, i).mynode = &rnp[j]; \
  		(rsp)->rda[i] = &per_cpu(rcu_data, i); \
  		rcu_boot_init_percpu_data(i, rsp); \
  	} \
  } while (0)

  void __init rcu_init(void)
  {
  	int i;

  	rcu_bootup_announce();
  #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
  	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.
  ");
  #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
  #if NUM_RCU_LVL_4 != 0
  	printk(KERN_INFO "Experimental four-level hierarchy is enabled.
  ");
  #endif /* #if NUM_RCU_LVL_4 != 0 */
  	RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
  	RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
  	__rcu_init_preempt();
  	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
  
  	/*
  	 * We don't need protection against CPU-hotplug here because
  	 * this is called early in boot, before either interrupts
  	 * or the scheduler are operational.
  	 */
  	cpu_notifier(rcu_cpu_notify, 0);
  	for_each_online_cpu(i)
  		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)i);
  }

  #include "rcutree_plugin.h"