kernel/rcu/tree.c
  /*
   * Read-Copy Update mechanism for mutual exclusion
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program; if not, you can access it online at
   * http://www.gnu.org/licenses/gpl-2.0.html.
   *
   * Copyright IBM Corporation, 2008
   *
   * Authors: Dipankar Sarma <dipankar@in.ibm.com>
   *	    Manfred Spraul <manfred@colorfullife.com>
   *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
   *
   * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
   * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
   *
   * For detailed explanation of Read-Copy Update mechanism see -
   *	Documentation/RCU
   */
  #include <linux/types.h>
  #include <linux/kernel.h>
  #include <linux/init.h>
  #include <linux/spinlock.h>
  #include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */
  /*
   * In order to export the rcu_state name to the tracing tools, it
   * needs to be added in the __tracepoint_string section.
   * This requires defining a separate variable tp_<sname>_varname
   * that points to the string being used, and this will allow
   * the tracing userspace tools to be able to decipher the string
   * address to the matching string.
   */
#ifdef CONFIG_TRACING
# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
  static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
  # define RCU_STATE_NAME(sname) sname##_varname
  #else
  # define DEFINE_RCU_TPS(sname)
  # define RCU_STATE_NAME(sname) __stringify(sname)
  #endif
  
  #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
  DEFINE_RCU_TPS(sname) \
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
	.level = { &sname##_state.node[0] }, \
	.rda = &sname##_data, \
	.call = cr, \
	.gp_state = RCU_GP_IDLE, \
	.gpnum = 0UL - 300UL, \
	.completed = 0UL - 300UL, \
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
	.name = RCU_STATE_NAME(sname), \
	.abbr = sabbr, \
	.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
	.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
}

  RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
  RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
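/*
 * Illustrative note (not part of the original file): each
 * RCU_STATE_INITIALIZER() invocation above expands to a per-CPU
 * "struct rcu_data" (e.g. rcu_sched_data) plus a global "struct rcu_state"
 * (e.g. rcu_sched_state) whose ->rda points at that per-CPU data and whose
 * ->call is the flavor's callback-registration function (call_rcu_sched
 * or call_rcu_bh).
 */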

static struct rcu_state *const rcu_state_p;
LIST_HEAD(rcu_struct_flavors);

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier().  When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods.  This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
  int rcu_scheduler_active __read_mostly;
  EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  /*
   * The rcu_scheduler_fully_active variable transitions from zero to one
   * during the early_initcall() processing, which is after the scheduler
   * is capable of creating new tasks.  So RCU processing (for example,
   * creating tasks for RCU priority boosting) must be delayed until after
   * rcu_scheduler_fully_active transitions from zero to one.  We also
   * currently delay invocation of any RCU callbacks until after this point.
   *
   * It might later prove better for people registering RCU callbacks during
   * early boot to take responsibility for these callbacks, but one step at
   * a time.
   */
  static int rcu_scheduler_fully_active __read_mostly;
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
static void rcu_report_exp_rdp(struct rcu_state *rsp,
			       struct rcu_data *rdp, bool wake);
static void sync_sched_exp_online_cleanup(int cpu);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0644);

/* Delay in jiffies for grace-period initialization delays, debug only. */

  static int gp_preinit_delay;
  module_param(gp_preinit_delay, int, 0444);
  static int gp_init_delay;
  module_param(gp_init_delay, int, 0444);
  static int gp_cleanup_delay;
  module_param(gp_cleanup_delay, int, 0444);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay.  The longer the delay, the more the grace periods between
   * each delay.  The reason for this normalization is that it means that,
   * for non-zero delays, the overall slowdown of grace periods is constant
   * regardless of the duration of the delay.  This arrangement balances
   * the need for long delays to increase some race probabilities with the
   * need for fast grace periods to increase other race probabilities.
   */
  #define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */

/*
   * Track the rcutorture test sequence number and the update version
   * number within a given test.  The rcutorture_testseq is incremented
   * on every rcutorture module load and unload, so has an odd value
   * when a test is running.  The rcutorture_vernum is set to zero
   * when rcutorture starts and is incremented on each rcutorture update.
   * These variables enable correlating rcutorture output with the
   * RCU tracing information.
   */
  unsigned long rcutorture_testseq;
  unsigned long rcutorture_vernum;
  
  /*
   * Compute the mask of online CPUs for the specified rcu_node structure.
   * This will not be stable unless the rcu_node structure's ->lock is
   * held, but the bit corresponding to the current CPU will be stable
   * in most contexts.
   */
  unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
  {
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
  }
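/*
 * Worked example (informal, not part of the original file): ->gpnum is
 * incremented when a grace period starts and ->completed catches up when
 * that grace period ends, so completed == gpnum means RCU is idle while
 * completed == gpnum - 1 means a grace period is in progress.
 */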
  
  /*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
 */
void rcu_sched_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!");
  	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
  		return;
  	trace_rcu_grace_period(TPS("rcu_sched"),
  			       __this_cpu_read(rcu_sched_data.gpnum),
  			       TPS("cpuqs"));
  	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
  	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
  		return;
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
	rcu_report_exp_rdp(&rcu_sched_state,
			   this_cpu_ptr(&rcu_sched_data), true);
}

void rcu_bh_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_bh"),
				       __this_cpu_read(rcu_bh_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
	}
}

  /*
   * Steal a bit from the bottom of ->dynticks for idle entry/exit
   * control.  Initially this is for TLB flushing.
   */
  #define RCU_DYNTICK_CTRL_MASK 0x1
  #define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
  #ifndef rcu_eqs_special_exit
  #define rcu_eqs_special_exit() do { } while (0)
  #endif
  
  static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
  	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
  	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
  };
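/*
 * Informal worked example (not part of the original file): ->dynticks
 * starts at RCU_DYNTICK_CTRL_CTR (0x2), so the CPU begins life outside of
 * an extended quiescent state.  Each EQS entry or exit adds
 * RCU_DYNTICK_CTRL_CTR: idle entry gives 0x4 (CTR bit clear, in EQS) and
 * the matching exit gives 0x6 (CTR bit set, not in EQS).  The low
 * RCU_DYNTICK_CTRL_MASK bit is reserved for "special action pending"
 * (such as a deferred TLB flush) and is cleared on EQS exit.
 */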
/*
 * There are a few places, currently just in the tracing infrastructure,
 * that use rcu_irq_enter() to make sure RCU is watching. But there is
 * a small location where that will not even work. In those cases
   * rcu_irq_enter_disabled() needs to be checked to make sure rcu_irq_enter()
   * can be called.
   */
  static DEFINE_PER_CPU(bool, disable_rcu_irq_enter);
  
  bool rcu_irq_enter_disabled(void)
  {
  	return this_cpu_read(disable_rcu_irq_enter);
  }
  
  /*
   * Record entry into an extended quiescent state.  This is only to be
   * called when not already in an extended quiescent state.
   */
  static void rcu_dynticks_eqs_enter(void)
  {
  	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
  	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
  	/* Better be in an extended quiescent state! */
  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  		     (seq & RCU_DYNTICK_CTRL_CTR));
  	/* Better not have special action (TLB flush) pending! */
  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  		     (seq & RCU_DYNTICK_CTRL_MASK));
  }
  
  /*
   * Record exit from an extended quiescent state.  This is only to be
   * called from an extended quiescent state.
   */
  static void rcu_dynticks_eqs_exit(void)
  {
  	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
  	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  		     !(seq & RCU_DYNTICK_CTRL_CTR));
  	if (seq & RCU_DYNTICK_CTRL_MASK) {
  		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
  		smp_mb__after_atomic(); /* _exit after clearing mask. */
  		/* Prefer duplicate flushes to losing a flush. */
  		rcu_eqs_special_exit();
  	}
  }
  
  /*
   * Reset the current CPU's ->dynticks counter to indicate that the
   * newly onlined CPU is no longer in an extended quiescent state.
   * This will either leave the counter unchanged, or increment it
   * to the next non-quiescent value.
   *
   * The non-atomic test/increment sequence works because the upper bits
   * of the ->dynticks counter are manipulated only by the corresponding CPU,
   * or when the corresponding CPU is offline.
   */
  static void rcu_dynticks_eqs_online(void)
  {
  	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
		return;
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
  }
  
  /*
   * Is the current CPU in an extended quiescent state?
   *
   * No ordering, as we are sampling CPU-local information.
   */
  bool rcu_dynticks_curr_cpu_in_eqs(void)
  {
  	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
   * Snapshot the ->dynticks counter with full ordering so as to allow
   * stable comparison of this counter with past and future snapshots.
   */
int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	int snap = atomic_add_return(0, &rdtp->dynticks);
	return snap & ~RCU_DYNTICK_CTRL_MASK;
  }
  
  /*
   * Return true if the snapshot returned from rcu_dynticks_snap()
   * indicates that RCU is in an extended quiescent state.
   */
  static bool rcu_dynticks_in_eqs(int snap)
  {
	return !(snap & RCU_DYNTICK_CTRL_CTR);
  }
  
  /*
   * Return true if the CPU corresponding to the specified rcu_dynticks
   * structure has spent some time in an extended quiescent state since
   * rcu_dynticks_snap() returned the specified snapshot.
   */
  static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
  {
  	return snap != rcu_dynticks_snap(rdtp);
  }
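/*
 * Illustrative note (not part of the original file): these two helpers are
 * used in pairs by the grace-period machinery: rcu_dynticks_snap() records
 * a snapshot for a holdout CPU, and a later rcu_dynticks_in_eqs_since()
 * reporting any change means that CPU passed through an extended quiescent
 * state in the meantime.
 */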
  
  /*
   * Do a double-increment of the ->dynticks counter to emulate a
   * momentary idle-CPU quiescent state.
   */
  static void rcu_dynticks_momentary_idle(void)
  {
  	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
					&rdtp->dynticks);

	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
  }
  /*
   * Set the special (bottom) bit of the specified CPU so that it
   * will take special action (such as flushing its TLB) on the
   * next exit from an extended quiescent state.  Returns true if
   * the bit was successfully set, or false if the CPU was not in
   * an extended quiescent state.
   */
  bool rcu_eqs_special_set(int cpu)
  {
  	int old;
  	int new;
  	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
  
  	do {
  		old = atomic_read(&rdtp->dynticks);
  		if (old & RCU_DYNTICK_CTRL_CTR)
  			return false;
  		new = old | RCU_DYNTICK_CTRL_MASK;
  	} while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
  	return true;
  }
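/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * wants a remote CPU to flush its TLB lazily might try
 *
 *	if (!rcu_eqs_special_set(cpu))
 *		smp_call_function_single(cpu, flush_fn, NULL, 1);
 *
 * deferring the flush to that CPU's next EQS exit (see rcu_eqs_special_exit())
 * when the CPU is idle, and falling back to an IPI when it is not.  Here
 * flush_fn is a hypothetical architecture-provided callback.
 */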

  /*
   * Let the RCU core know that this CPU has gone through the scheduler,
   * which is a quiescent state.  This is called when the need for a
   * quiescent state is urgent, so we burn an atomic operation and full
   * memory barriers to let the RCU core know about it, regardless of what
   * this CPU might (or might not) do in the near future.
   *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_momentary_dyntick_idle(void)
{
	raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
	rcu_dynticks_momentary_idle();
  }
/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_sched_qs();
	rcu_preempt_note_context_switch(preempt);
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
		rcu_momentary_dyntick_idle();
	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
	if (!preempt)
		rcu_note_voluntary_context_switch_lite(current);
out:
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs (but only for those
 * RCU flavors in desperate need of a quiescent state, which will normally
 * be none of them).  Either way, do a lightweight quiescent state for
 * all RCU flavors.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but are kept just in case this is called from within
 * this file.
 */
void rcu_all_qs(void)
{
  	unsigned long flags;
  	if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
  		return;
  	preempt_disable();
  	/* Load rcu_urgent_qs before other flags. */
  	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
  		preempt_enable();
  		return;
  	}
  	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
	}
	if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
		rcu_sched_qs();
	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
	preempt_enable();
  }
  EXPORT_SYMBOL_GPL(rcu_all_qs);
  #define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
  static long blimit = DEFAULT_RCU_BLIMIT;
  #define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
  static long qhimark = DEFAULT_RCU_QHIMARK;
  #define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
  static long qlowmark = DEFAULT_RCU_QLOMARK;

  module_param(blimit, long, 0444);
  module_param(qhimark, long, 0444);
  module_param(qlowmark, long, 0444);
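/*
 * Illustrative note (not part of the original file): because
 * MODULE_PARAM_PREFIX is "rcutree.", these knobs appear on the kernel
 * command line with that prefix, for example "rcutree.blimit=20" or
 * "rcutree.qhimark=5000" (arbitrary example values).
 */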

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

  /*
   * How long the grace period must be before we start recruiting
   * quiescent-state help from rcu_note_context_switch().
   */
  static ulong jiffies_till_sched_qs = HZ / 20;
  module_param(jiffies_till_sched_qs, ulong, 0644);
static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
				  struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);

/*
 * Return the number of RCU batches started thus far for debug & stats.
 */
  unsigned long rcu_batches_started(void)
  {
  	return rcu_state_p->gpnum;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_started);
  
  /*
   * Return the number of RCU-sched batches started thus far for debug & stats.
 */
  unsigned long rcu_batches_started_sched(void)
  {
  	return rcu_sched_state.gpnum;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
  
  /*
   * Return the number of RCU BH batches started thus far for debug & stats.
   */
  unsigned long rcu_batches_started_bh(void)
  {
  	return rcu_bh_state.gpnum;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
  
  /*
   * Return the number of RCU batches completed thus far for debug & stats.
   */
  unsigned long rcu_batches_completed(void)
  {
  	return rcu_state_p->completed;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_completed);
  
  /*
   * Return the number of RCU-sched batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_bh(void)
  {
  	return rcu_bh_state.completed;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
  
  /*
   * Return the number of RCU expedited batches completed thus far for
   * debug & stats.  Odd numbers mean that a batch is in progress, even
   * numbers mean idle.  The value returned will thus be roughly double
   * the cumulative batches since boot.
   */
  unsigned long rcu_exp_batches_completed(void)
  {
  	return rcu_state_p->expedited_sequence;
  }
  EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
  
  /*
   * Return the number of RCU-sched expedited batches completed thus far
   * for debug & stats.  Similar to rcu_exp_batches_completed().
   */
  unsigned long rcu_exp_batches_completed_sched(void)
  {
  	return rcu_sched_state.expedited_sequence;
  }
  EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
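/*
 * Worked example (informal, not part of the original file): going by the
 * comment above, an expedited_sequence value of 6 means roughly three
 * expedited batches have completed and none is in flight, while 7 means a
 * fourth batch is currently in progress.
 */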
  
  /*
   * Force a quiescent state.
   */
  void rcu_force_quiescent_state(void)
  {
	force_quiescent_state(rcu_state_p);
  }
  EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
  
  /*
   * Force a quiescent state for RCU BH.
   */
  void rcu_bh_force_quiescent_state(void)
  {
	force_quiescent_state(&rcu_bh_state);
  }
  EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
  
  /*
   * Force a quiescent state for RCU-sched.
   */
  void rcu_sched_force_quiescent_state(void)
  {
  	force_quiescent_state(&rcu_sched_state);
  }
  EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
  
  /*
   * Show the state of the grace-period kthreads.
   */
  void show_rcu_gp_kthreads(void)
  {
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp) {
  		pr_info("%s: wait state: %d ->state: %#lx
  ",
  			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
  		/* sched_show_task(rsp->gp_kthread); */
  	}
  }
  EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
  
  /*
   * Record the number of times rcutorture tests have been initiated and
   * terminated.  This information allows the debugfs tracing stats to be
   * correlated to the rcutorture messages, even when the rcutorture module
   * is being repeatedly loaded and unloaded.  In other words, we cannot
   * store this state in rcutorture itself.
   */
  void rcutorture_record_test_transition(void)
  {
  	rcutorture_testseq++;
  	rcutorture_vernum = 0;
  }
  EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
  
  /*
   * Send along grace-period-related data for rcutorture diagnostics.
   */
  void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
  			    unsigned long *gpnum, unsigned long *completed)
  {
  	struct rcu_state *rsp = NULL;
  
  	switch (test_type) {
  	case RCU_FLAVOR:
		rsp = rcu_state_p;
  		break;
  	case RCU_BH_FLAVOR:
  		rsp = &rcu_bh_state;
  		break;
  	case RCU_SCHED_FLAVOR:
  		rsp = &rcu_sched_state;
  		break;
  	default:
  		break;
  	}
	if (rsp == NULL)
		return;
  	*flags = READ_ONCE(rsp->gp_flags);
  	*gpnum = READ_ONCE(rsp->gpnum);
  	*completed = READ_ONCE(rsp->completed);
  }
  EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
  
  /*
   * Record the number of writer passes through the current rcutorture test.
   * This is also used to correlate debugfs tracing stats with the rcutorture
   * messages.
   */
  void rcutorture_record_progress(unsigned long vernum)
  {
  	rcutorture_vernum++;
  }
  EXPORT_SYMBOL_GPL(rcutorture_record_progress);
  
  /*
   * Return the root node of the specified rcu_state structure.
   */
  static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
  {
  	return &rsp->node[0];
  }
  
  /*
   * Is there any need for future grace periods?
   * Interrupts must be disabled.  If the caller does not hold the root
 * rcu_node structure's ->lock, the results are advisory only.
   */
  static int rcu_future_needs_gp(struct rcu_state *rsp)
  {
  	struct rcu_node *rnp = rcu_get_root(rsp);
	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
	int *fp = &rnp->need_future_gp[idx];
	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_future_needs_gp() invoked with irqs enabled!!!");
	return READ_ONCE(*fp);
  }
  
  /*
   * Does the current CPU require a not-yet-started grace period?
   * The caller must have disabled interrupts to prevent races with
   * normal callback registry.
   */
static bool
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	RCU_LOCKDEP_WARN(!irqs_disabled(), "cpu_needs_another_gp() invoked with irqs enabled!!!");
	if (rcu_gp_in_progress(rsp))
		return false;  /* No, a grace period is already in progress. */
	if (rcu_future_needs_gp(rsp))
		return true;  /* Yes, a no-CBs CPU needs one. */
	if (!rcu_segcblist_is_enabled(&rdp->cblist))
		return false;  /* No, this is a no-CBs (or offline) CPU. */
	if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
		return true;  /* Yes, CPU has newly registered callbacks. */
	if (rcu_segcblist_future_gp_needed(&rdp->cblist,
					   READ_ONCE(rsp->completed)))
		return true;  /* Yes, CBs for future grace period. */
	return false; /* No grace period needed. */
  }
  
  /*
 * rcu_eqs_enter_common - current CPU is entering an extended quiescent state
 *
 * Enter idle, doing appropriate accounting.  The caller must have
 * disabled interrupts.
 */
static void rcu_eqs_enter_common(bool user)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_enter_common() invoked with irqs enabled!!!");
	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
	    !user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on entry: not idle task"), rdtp->dynticks_nesting, 0);
		rcu_ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		do_nocb_deferred_wakeup(rdp);
	}
	rcu_prepare_for_idle();
	__this_cpu_inc(disable_rcu_irq_enter);
	rdtp->dynticks_nesting = 0; /* Breaks tracing momentarily. */
	rcu_dynticks_eqs_enter(); /* After this, tracing works again. */
	__this_cpu_dec(disable_rcu_irq_enter);
	rcu_dynticks_task_enter();

	/*
	 * It is illegal to enter an extended quiescent state while
	 * in an RCU read-side critical section.
	 */
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal idle entry in RCU read-side critical section.");
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
			 "Illegal idle entry in RCU-bh read-side critical section.");
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
			 "Illegal idle entry in RCU-sched read-side critical section.");
  }

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_enter(bool user)
{
	struct rcu_dynticks *rdtp;
	rdtp = this_cpu_ptr(&rcu_dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
	if ((rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
		rcu_eqs_enter_common(user);
	else
		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
  }
  
  /**
   * rcu_idle_enter - inform RCU that current CPU is entering idle
   *
   * Enter idle mode, in other words, -leave- the mode in which RCU
   * read-side critical sections can occur.  (Though RCU read-side
   * critical sections can occur in irq handlers in idle, a possibility
   * handled by irq_enter() and irq_exit().)
   *
   * We crowbar the ->dynticks_nesting field to zero to allow for
   * the possibility of usermode upcalls having messed up our count
   * of interrupt nesting level during the prior busy period.
   */
  void rcu_idle_enter(void)
  {
	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_idle_enter() invoked with irqs enabled!!!");
	rcu_eqs_enter(false);
}

#ifdef CONFIG_NO_HZ_FULL
/**
   * rcu_user_enter - inform RCU that we are resuming userspace.
   *
   * Enter RCU idle mode right before resuming userspace.  No use of RCU
   * is permitted between this call and rcu_user_exit(). This way the
   * CPU doesn't need to maintain the tick for RCU maintenance purposes
   * when the CPU runs in userspace.
   */
  void rcu_user_enter(void)
  {
	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_user_enter() invoked with irqs enabled!!!");
	rcu_eqs_enter(true);
  }
  #endif /* CONFIG_NO_HZ_FULL */
  
  /**
   * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
   *
   * Exit from an interrupt handler, which might possibly result in entering
   * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.  The caller must have disabled interrupts.
 *
   * This code assumes that the idle loop never does anything that might
   * result in unbalanced calls to irq_enter() and irq_exit().  If your
   * architecture violates this assumption, RCU will give you what you
   * deserve, good and hard.  But very infrequently and irreproducibly.
   *
   * Use things like work queues to work around this limitation.
   *
   * You have been warned.
 */
void rcu_irq_exit(void)
{
	struct rcu_dynticks *rdtp;
	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
	rdtp = this_cpu_ptr(&rcu_dynticks);

	/* Page faults can happen in NMI handlers, so check... */
	if (rdtp->dynticks_nmi_nesting)
		return;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  		     rdtp->dynticks_nesting < 1);
  	if (rdtp->dynticks_nesting <= 1) {
  		rcu_eqs_enter_common(true);
  	} else {
  		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nesting, rdtp->dynticks_nesting - 1);
  		rdtp->dynticks_nesting--;
  	}
  }
  
  /*
   * Wrapper for rcu_irq_exit() where interrupts are enabled.
   */
  void rcu_irq_exit_irqson(void)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	rcu_irq_exit();
  	local_irq_restore(flags);
  }
  
  /*
 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
   *
   * If the new value of the ->dynticks_nesting counter was previously zero,
   * we really have exited idle, and must do the appropriate accounting.
   * The caller must have disabled interrupts.
   */
static void rcu_eqs_exit_common(long long oldval, int user)
{
	RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)

	rcu_dynticks_task_exit();
	rcu_dynticks_eqs_exit();
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
	    !user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
				  oldval, rdtp->dynticks_nesting);
		rcu_ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
  	}
  }
/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_exit(bool user)
{
	struct rcu_dynticks *rdtp;
	long long oldval;
	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_eqs_exit() invoked with irqs enabled!!!");
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval & DYNTICK_TASK_NEST_MASK) {
		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	} else {
		__this_cpu_inc(disable_rcu_irq_enter);
		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
		rcu_eqs_exit_common(oldval, user);
		__this_cpu_dec(disable_rcu_irq_enter);
	}
  }
  
  /**
   * rcu_idle_exit - inform RCU that current CPU is leaving idle
   *
   * Exit idle mode, in other words, -enter- the mode in which RCU
   * read-side critical sections can occur.
   *
   * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
   * allow for the possibility of usermode upcalls messing up our count
   * of interrupt nesting level during the busy period that is just
   * now starting.
   */
  void rcu_idle_exit(void)
  {
c5d900bf6   Frederic Weisbecker   rcu: Allow rcu_us...
957
958
959
  	unsigned long flags;
  
  	local_irq_save(flags);
cb349ca95   Paul E. McKenney   rcu: Apply micro-...
960
  	rcu_eqs_exit(false);
c5d900bf6   Frederic Weisbecker   rcu: Allow rcu_us...
961
  	local_irq_restore(flags);
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
962
  }
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
963

d1ec4c34c   Paul E. McKenney   rcu: Drop RCU_USE...
964
  #ifdef CONFIG_NO_HZ_FULL
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
965
  /**
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
966
967
968
969
970
971
972
   * rcu_user_exit - inform RCU that we are exiting userspace.
   *
   * Exit RCU idle mode while entering the kernel because it can
   * run an RCU read-side critical section anytime.
   */
  void rcu_user_exit(void)
  {
91d1aa43d   Frederic Weisbecker   context_tracking:...
973
  	rcu_eqs_exit(1);
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
974
  }
d1ec4c34c   Paul E. McKenney   rcu: Drop RCU_USE...
975
  #endif /* CONFIG_NO_HZ_FULL */
19dd1591f   Frederic Weisbecker   rcu: New rcu_user...
976
977
  
  /**
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
978
979
980
981
   * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
   *
   * Enter an interrupt handler, which might possibly result in exiting
   * idle mode, in other words, entering the mode in which read-side critical
7c9906ca5   Paul E. McKenney   rcu: Don't redund...
982
   * sections can occur.  The caller must have disabled interrupts.
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
   *
   * Note that the Linux kernel is fully capable of entering an interrupt
   * handler that it never exits, for example when doing upcalls to
   * user mode!  This code assumes that the idle loop never does upcalls to
   * user mode.  If your architecture does do upcalls from the idle loop (or
   * does anything else that results in unbalanced calls to the irq_enter()
   * and irq_exit() functions), RCU will give you what you deserve, good
   * and hard.  But very infrequently and irreproducibly.
   *
   * Use things like work queues to work around this limitation.
   *
   * You have been warned.
   */
  void rcu_irq_enter(void)
  {
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
998
999
  	struct rcu_dynticks *rdtp;
  	long long oldval;
7c9906ca5   Paul E. McKenney   rcu: Don't redund...
1000
  	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
1001
  	rdtp = this_cpu_ptr(&rcu_dynticks);
28585a832   Paul E. McKenney   rcu: Allow for pa...
1002
1003
  
  	/* Page faults can happen in NMI handlers, so check... */
f39b536ce   Paul E. McKenney   rcu: Remove extra...
1004
  	if (rdtp->dynticks_nmi_nesting)
28585a832   Paul E. McKenney   rcu: Allow for pa...
1005
  		return;
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
1006
1007
  	oldval = rdtp->dynticks_nesting;
  	rdtp->dynticks_nesting++;
1ce46ee59   Paul E. McKenney   rcu: Conditionall...
1008
1009
  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  		     rdtp->dynticks_nesting == 0);
b6fc60201   Frederic Weisbecker   rcu: Don't check ...
1010
  	if (oldval)
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1011
  		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
b6fc60201   Frederic Weisbecker   rcu: Don't check ...
1012
  	else
28ced795c   Christoph Lameter   rcu: Remove rcu_d...
1013
  		rcu_eqs_exit_common(oldval, true);
7c9906ca5   Paul E. McKenney   rcu: Don't redund...
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
  }
  
  /*
   * Wrapper for rcu_irq_enter() where interrupts are enabled.
   */
  void rcu_irq_enter_irqson(void)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	rcu_irq_enter();
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1025
  	local_irq_restore(flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1026
1027
1028
1029
1030
  }
  
  /**
   * rcu_nmi_enter - inform RCU of entry to NMI context
   *
734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
1031
1032
1033
1034
1035
   * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
   * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
   * that the CPU is active.  This implementation permits nested NMIs, as
   * long as the nesting level does not overflow an int.  (You will probably
   * run out of stack space first.)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1036
1037
1038
   */
  void rcu_nmi_enter(void)
  {
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
1039
  	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
1040
  	int incby = 2;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1041

734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
  	/* Complain about underflow. */
  	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
  
  	/*
  	 * If idle from RCU viewpoint, atomically increment ->dynticks
  	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
  	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
  	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
  	 * to be in the outermost NMI handler that interrupted an RCU-idle
  	 * period (observation due to Andy Lutomirski).
  	 */
02a5c550b   Paul E. McKenney   rcu: Abstract ext...
1053
  	if (rcu_dynticks_curr_cpu_in_eqs()) {
2625d469b   Paul E. McKenney   rcu: Abstract dyn...
1054
  		rcu_dynticks_eqs_exit();
734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
1055
1056
1057
1058
  		incby = 1;
  	}
  	rdtp->dynticks_nmi_nesting += incby;
  	barrier();
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1059
1060
1061
1062
1063
  }
  
  /**
   * rcu_nmi_exit - inform RCU of exit from NMI context
   *
734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
1064
1065
1066
1067
   * If we are returning from the outermost NMI handler that interrupted an
   * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
   * to let the RCU grace-period handling know that the CPU is back to
   * being RCU-idle.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1068
1069
1070
   */
  void rcu_nmi_exit(void)
  {
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
1071
  	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1072

734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
1073
1074
1075
1076
1077
1078
  	/*
  	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
  	 * (We are exiting an NMI handler, so RCU better be paying attention
  	 * to us!)
  	 */
  	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
02a5c550b   Paul E. McKenney   rcu: Abstract ext...
1079
  	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
1080
1081
1082
1083
1084
1085
1086
  
  	/*
  	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
  	 * leave it in non-RCU-idle state.
  	 */
  	if (rdtp->dynticks_nmi_nesting != 1) {
  		rdtp->dynticks_nmi_nesting -= 2;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1087
  		return;
734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
1088
1089
1090
1091
  	}
  
  	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
  	rdtp->dynticks_nmi_nesting = 0;
2625d469b   Paul E. McKenney   rcu: Abstract dyn...
1092
  	rcu_dynticks_eqs_enter();
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1093
1094
1095
  }
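  
  /*
   * Illustrative sketch (not part of tree.c): a minimal user-space model of
   * the ->dynticks_nmi_nesting bookkeeping used by rcu_nmi_enter() and
   * rcu_nmi_exit() above.  The model_* names are invented.  Incrementing by
   * one only when the NMI interrupted an RCU-idle period makes a nesting
   * value of exactly one identify the outermost such NMI on the way out.
   */
  static int model_nmi_nesting;		/* models rdtp->dynticks_nmi_nesting */
  static int model_rcu_watching;	/* models !rcu_dynticks_curr_cpu_in_eqs() */
  
  static void model_nmi_enter(void)
  {
  	int incby = 2;
  
  	if (!model_rcu_watching) {	/* NMI interrupted an RCU-idle period. */
  		model_rcu_watching = 1;	/* models rcu_dynticks_eqs_exit() */
  		incby = 1;		/* nesting == 1 marks the outermost such NMI */
  	}
  	model_nmi_nesting += incby;
  }
  
  static void model_nmi_exit(void)
  {
  	if (model_nmi_nesting != 1) {	/* Not the outermost idle-interrupting NMI. */
  		model_nmi_nesting -= 2;
  		return;
  	}
  	model_nmi_nesting = 0;		/* Outermost NMI: restore RCU-idleness. */
  	model_rcu_watching = 0;		/* models rcu_dynticks_eqs_enter() */
  }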
  
  /**
5c173eb8b   Paul E. McKenney   rcu: Consistent r...
1096
   * rcu_is_watching - see if RCU thinks that the current CPU is not idle
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1097
   *
791875d16   Paul E. McKenney   rcu: Eliminate th...
1098
1099
1100
   * Return true if RCU is watching the running CPU, which means that this
   * CPU can safely enter RCU read-side critical sections.  In other words,
   * if the current CPU is not in its idle loop or is in an interrupt or NMI
34240697d   Paul E. McKenney   rcu: Disable pree...
1101
   * handler, return true.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1102
   */
9418fb208   Steven Rostedt   rcu: Do not trace...
1103
  bool notrace rcu_is_watching(void)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1104
  {
f534ed1fd   Pranith Kumar   rcu: Use bool typ...
1105
  	bool ret;
34240697d   Paul E. McKenney   rcu: Disable pree...
1106

46f00d18f   Alexei Starovoitov   rcu: Make rcu_is_...
1107
  	preempt_disable_notrace();
791875d16   Paul E. McKenney   rcu: Eliminate th...
1108
  	ret = !rcu_dynticks_curr_cpu_in_eqs();
46f00d18f   Alexei Starovoitov   rcu: Make rcu_is_...
1109
  	preempt_enable_notrace();
34240697d   Paul E. McKenney   rcu: Disable pree...
1110
  	return ret;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1111
  }
5c173eb8b   Paul E. McKenney   rcu: Consistent r...
1112
  EXPORT_SYMBOL_GPL(rcu_is_watching);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1113

bcbfdd01d   Paul E. McKenney   rcu: Make non-pre...
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
  /*
   * If a holdout task is actually running, request an urgent quiescent
   * state from its CPU.  This is unsynchronized, so migrations can cause
   * the request to go to the wrong CPU.  Which is OK, all that will happen
   * is that the CPU's next context switch will be a bit slower and next
   * time around this task will generate another request.
   */
  void rcu_request_urgent_qs_task(struct task_struct *t)
  {
  	int cpu;
  
  	barrier();
  	cpu = task_cpu(t);
  	if (!task_curr(t))
  		return; /* This task is not running on that CPU. */
  	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
  }
62fde6edf   Paul E. McKenney   rcu: Make __call_...
1131
  #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
c0d6d01bf   Paul E. McKenney   rcu: Check for il...
1132
1133
1134
1135
1136
1137
1138
  
  /*
   * Is the current CPU online?  Disable preemption to avoid false positives
   * that could otherwise happen due to the current CPU number being sampled,
   * this task being preempted, its old CPU being taken offline, resuming
   * on some other CPU, then determining that its old CPU is now offline.
   * It is OK to use RCU on an offline processor during initial boot, hence
2036d94a7   Paul E. McKenney   rcu: Rework detec...
1139
1140
1141
1142
1143
1144
   * the check for rcu_scheduler_fully_active.  Note also that it is OK
   * for a CPU coming online to use RCU for one jiffy prior to marking itself
   * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
   * offline to continue to use RCU for one jiffy after marking itself
   * offline in the cpu_online_mask.  This leniency is necessary given the
   * non-atomic nature of the online and offline processing, for example,
4df837425   Thomas Gleixner   rcu: Convert rcut...
1145
1146
   * the fact that a CPU enters the scheduler after completing the teardown
   * of the CPU.
2036d94a7   Paul E. McKenney   rcu: Rework detec...
1147
   *
4df837425   Thomas Gleixner   rcu: Convert rcut...
1148
1149
   * This is also why RCU internally marks CPUs online during the
   * preparation phase and offline after the CPU has been taken down.
c0d6d01bf   Paul E. McKenney   rcu: Check for il...
1150
1151
1152
1153
1154
1155
   *
   * Disable checking if in an NMI handler because we cannot safely report
   * errors from NMI handlers anyway.
   */
  bool rcu_lockdep_current_cpu_online(void)
  {
2036d94a7   Paul E. McKenney   rcu: Rework detec...
1156
1157
  	struct rcu_data *rdp;
  	struct rcu_node *rnp;
c0d6d01bf   Paul E. McKenney   rcu: Check for il...
1158
1159
1160
  	bool ret;
  
  	if (in_nmi())
f6f7ee9af   Fengguang Wu   rcu: Fix coccinel...
1161
  		return true;
c0d6d01bf   Paul E. McKenney   rcu: Check for il...
1162
  	preempt_disable();
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
1163
  	rdp = this_cpu_ptr(&rcu_sched_data);
2036d94a7   Paul E. McKenney   rcu: Rework detec...
1164
  	rnp = rdp->mynode;
0aa04b055   Paul E. McKenney   rcu: Process offl...
1165
  	ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
c0d6d01bf   Paul E. McKenney   rcu: Check for il...
1166
1167
1168
1169
1170
  	      !rcu_scheduler_fully_active;
  	preempt_enable();
  	return ret;
  }
  EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
62fde6edf   Paul E. McKenney   rcu: Make __call_...
1171
  #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
1172

64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1173
  /**
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
1174
   * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1175
   *
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
1176
1177
1178
   * If the current CPU is idle or running at a first-level (not nested)
   * interrupt from idle, return true.  The caller must have at least
   * disabled preemption.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1179
   */
62e3cb143   Josh Triplett   rcu: Make rcu_is_...
1180
  static int rcu_is_cpu_rrupt_from_idle(void)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1181
  {
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
1182
  	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1183
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1184
  /*
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1185
1186
   * Snapshot the specified CPU's dynticks counter so that we can later
   * credit them with an implicit quiescent state.  Return 1 if this CPU
1eba8f843   Paul E. McKenney   rcu: Clean up cod...
1187
   * is in dynticks idle mode, which is an extended quiescent state.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1188
   */
fe5ac724d   Paul E. McKenney   rcu: Remove nohz_...
1189
  static int dyntick_save_progress_counter(struct rcu_data *rdp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1190
  {
8b2f63ab0   Paul E. McKenney   rcu: Abstract the...
1191
  	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
02a5c550b   Paul E. McKenney   rcu: Abstract ext...
1192
  	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
7941dbdeb   Andreea-Cristina Bernat   rcu: Add event tr...
1193
  		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1194
  		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
e3663b102   Paul E. McKenney   rcu: Handle gpnum...
1195
  				 rdp->mynode->gpnum))
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1196
  			WRITE_ONCE(rdp->gpwrap, true);
23a9bacd3   Paul E. McKenney   rcu: Set rdp->gpw...
1197
  		return 1;
7941dbdeb   Andreea-Cristina Bernat   rcu: Add event tr...
1198
  	}
23a9bacd3   Paul E. McKenney   rcu: Set rdp->gpw...
1199
  	return 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1200
1201
1202
1203
1204
1205
  }
  
  /*
   * Return true if the specified CPU has passed through a quiescent
   * state by virtue of being in or having passed through a dynticks
   * idle state since the last call to dyntick_save_progress_counter()
a82dcc760   Paul E. McKenney   rcu: Make offline...
1206
   * for this same CPU, or by virtue of having been offline.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1207
   */
fe5ac724d   Paul E. McKenney   rcu: Remove nohz_...
1208
  static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1209
  {
3a19b46a5   Paul E. McKenney   rcu: Check cond_r...
1210
  	unsigned long jtsq;
0f9be8cab   Paul E. McKenney   rcu: Eliminate fl...
1211
  	bool *rnhqp;
9226b10d7   Paul E. McKenney   rcu: Place guard ...
1212
  	bool *ruqp;
3a19b46a5   Paul E. McKenney   rcu: Check cond_r...
1213
1214
  	unsigned long rjtsc;
  	struct rcu_node *rnp;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1215
1216
1217
1218
1219
1220
1221
1222
1223
  
  	/*
  	 * If the CPU passed through or entered a dynticks idle phase with
  	 * no active irq/NMI handlers, then we can safely pretend that the CPU
  	 * already acknowledged the request to pass through a quiescent
  	 * state.  Either way, that CPU cannot possibly be in an RCU
  	 * read-side critical section that started before the beginning
  	 * of the current RCU grace period.
  	 */
02a5c550b   Paul E. McKenney   rcu: Abstract ext...
1224
  	if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1225
  		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1226
1227
1228
  		rdp->dynticks_fqs++;
  		return 1;
  	}
3a19b46a5   Paul E. McKenney   rcu: Check cond_r...
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
  	/* Compute and saturate jiffies_till_sched_qs. */
  	jtsq = jiffies_till_sched_qs;
  	rjtsc = rcu_jiffies_till_stall_check();
  	if (jtsq > rjtsc / 2) {
  		WRITE_ONCE(jiffies_till_sched_qs, rjtsc);
  		jtsq = rjtsc / 2;
  	} else if (jtsq < 1) {
  		WRITE_ONCE(jiffies_till_sched_qs, 1);
  		jtsq = 1;
  	}
a82dcc760   Paul E. McKenney   rcu: Make offline...
1239
  	/*
3a19b46a5   Paul E. McKenney   rcu: Check cond_r...
1240
1241
1242
1243
  	 * Has this CPU encountered a cond_resched_rcu_qs() since the
  	 * beginning of the grace period?  For this to be the case,
  	 * the CPU has to have noticed the current grace period.  This
  	 * might not be the case for nohz_full CPUs looping in the kernel.
a82dcc760   Paul E. McKenney   rcu: Make offline...
1244
  	 */
3a19b46a5   Paul E. McKenney   rcu: Check cond_r...
1245
  	rnp = rdp->mynode;
9226b10d7   Paul E. McKenney   rcu: Place guard ...
1246
  	ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
3a19b46a5   Paul E. McKenney   rcu: Check cond_r...
1247
  	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
9577df9a3   Paul E. McKenney   rcu: Pull rcu_qs_...
1248
  	    READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
3a19b46a5   Paul E. McKenney   rcu: Check cond_r...
1249
1250
1251
  	    READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
  		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
  		return 1;
9226b10d7   Paul E. McKenney   rcu: Place guard ...
1252
1253
1254
  	} else {
  		/* Load rcu_qs_ctr before store to rcu_urgent_qs. */
  		smp_store_release(ruqp, true);
3a19b46a5   Paul E. McKenney   rcu: Check cond_r...
1255
  	}
38d30b336   Paul E. McKenney   rcu: Adjust FQS o...
1256
1257
  	/* Check for the CPU being offline. */
  	if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1258
  		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
a82dcc760   Paul E. McKenney   rcu: Make offline...
1259
1260
1261
  		rdp->offline_fqs++;
  		return 1;
  	}
65d798f0f   Paul E. McKenney   rcu: Kick adaptiv...
1262
1263
  
  	/*
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
1264
1265
1266
1267
1268
1269
  	 * A CPU running for an extended time within the kernel can
  	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
  	 * even context-switching back and forth between a pair of
  	 * in-kernel CPU-bound tasks cannot advance grace periods.
  	 * So if the grace period is old enough, make the CPU pay attention.
  	 * Note that the unsynchronized assignments to the per-CPU
0f9be8cab   Paul E. McKenney   rcu: Eliminate fl...
1270
  	 * rcu_need_heavy_qs variable are safe.  Yes, setting of
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
  	 * bits can be lost, but they will be set again on the next
  	 * force-quiescent-state pass.  So lost bit sets do not result
  	 * in incorrect behavior, merely in a grace period lasting
  	 * a few jiffies longer than it might otherwise.  Because
  	 * there are at most four threads involved, and because the
  	 * updates are only once every few jiffies, the probability of
  	 * lossage (and thus of slight grace-period extension) is
  	 * quite low.
  	 *
  	 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
  	 * is set too high, we override with half of the RCU CPU stall
  	 * warning delay.
6193c76ab   Paul E. McKenney   rcu: Kick CPU hal...
1283
  	 */
0f9be8cab   Paul E. McKenney   rcu: Eliminate fl...
1284
1285
1286
1287
1288
  	rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
  	if (!READ_ONCE(*rnhqp) &&
  	    (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
  	     time_after(jiffies, rdp->rsp->jiffies_resched))) {
  		WRITE_ONCE(*rnhqp, true);
9226b10d7   Paul E. McKenney   rcu: Place guard ...
1289
1290
  		/* Store rcu_need_heavy_qs before rcu_urgent_qs. */
  		smp_store_release(ruqp, true);
4914950aa   Paul E. McKenney   rcu: Stop treatin...
1291
  		rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
6193c76ab   Paul E. McKenney   rcu: Kick CPU hal...
1292
  	}
28053bc72   Paul E. McKenney   rcu: Add long-ter...
1293
1294
1295
1296
1297
1298
  	/*
  	 * If more than halfway to RCU CPU stall-warning time, do
  	 * a resched_cpu() to try to loosen things up a bit.
  	 */
  	if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2)
  		resched_cpu(rdp->cpu);
4914950aa   Paul E. McKenney   rcu: Stop treatin...
1299

a82dcc760   Paul E. McKenney   rcu: Make offline...
1300
  	return 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1301
  }
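  
  /*
   * Illustrative sketch (not part of tree.c): the snapshot-and-compare
   * scheme used by dyntick_save_progress_counter() and
   * rcu_implicit_dynticks_qs() above, reduced to a simplified model in
   * which an even counter value means "extended quiescent state".  The
   * model_* names are invented.
   */
  static unsigned int model_dynticks;	/* even => CPU idle from RCU's viewpoint */
  
  static unsigned int model_snap(void)
  {
  	return model_dynticks;		/* first force-QS pass: take a snapshot */
  }
  
  static int model_qs_since(unsigned int snap)
  {
  	/*
  	 * Later force-QS pass: the CPU was idle at snapshot time, or its
  	 * counter has moved since (so it passed through idle).  Either way
  	 * it cannot still be in a read-side critical section that started
  	 * before the current grace period.
  	 */
  	return !(snap & 1) || model_dynticks != snap;
  }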
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1302
1303
  static void record_gp_stall_check_time(struct rcu_state *rsp)
  {
cb1e78cfa   Paul E. McKenney   rcu: Remove ACCES...
1304
  	unsigned long j = jiffies;
6193c76ab   Paul E. McKenney   rcu: Kick CPU hal...
1305
  	unsigned long j1;
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1306
1307
1308
  
  	rsp->gp_start = j;
  	smp_wmb(); /* Record start time before stall time. */
6193c76ab   Paul E. McKenney   rcu: Kick CPU hal...
1309
  	j1 = rcu_jiffies_till_stall_check();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1310
  	WRITE_ONCE(rsp->jiffies_stall, j + j1);
6193c76ab   Paul E. McKenney   rcu: Kick CPU hal...
1311
  	rsp->jiffies_resched = j + j1 / 2;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1312
  	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1313
  }
b637a328b   Paul E. McKenney   rcu: Print remote...
1314
  /*
6b50e119c   Paul E. McKenney   rcutorture: Print...
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
   * Convert a ->gp_state value to a character string.
   */
  static const char *gp_state_getname(short gs)
  {
  	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
  		return "???";
  	return gp_state_names[gs];
  }
  
  /*
fb81a44b8   Paul E. McKenney   rcu: Add GP-kthre...
1325
1326
1327
1328
1329
1330
1331
1332
   * Complain about starvation of grace-period kthread.
   */
  static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
  {
  	unsigned long gpa;
  	unsigned long j;
  
  	j = jiffies;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1333
  	gpa = READ_ONCE(rsp->gp_activity);
b1adb3e27   Paul E. McKenney   rcutorture: Dump...
1334
  	if (j - gpa > 2 * HZ) {
96036c430   Paul E. McKenney   rcu: Add last-CPU...
1335
1336
  		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
81e701e43   Paul E. McKenney   rcu: Add more deb...
1337
  		       rsp->name, j - gpa,
319362c90   Paul E. McKenney   rcu: Provide more...
1338
  		       rsp->gpnum, rsp->completed,
6b50e119c   Paul E. McKenney   rcutorture: Print...
1339
1340
  		       rsp->gp_flags,
  		       gp_state_getname(rsp->gp_state), rsp->gp_state,
96036c430   Paul E. McKenney   rcu: Add last-CPU...
1341
1342
  		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
  		       rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1);
86057b80a   Paul E. McKenney   rcu: Awaken grace...
1343
  		if (rsp->gp_kthread) {
b1adb3e27   Paul E. McKenney   rcutorture: Dump...
1344
  			sched_show_task(rsp->gp_kthread);
86057b80a   Paul E. McKenney   rcu: Awaken grace...
1345
1346
  			wake_up_process(rsp->gp_kthread);
  		}
b1adb3e27   Paul E. McKenney   rcutorture: Dump...
1347
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1348
  }
b637a328b   Paul E. McKenney   rcu: Print remote...
1349
  /*
7aa92230c   Paul E. McKenney   rcu: Once again u...
1350
1351
1352
1353
   * Dump stacks of all tasks running on stalled CPUs.  First try using
   * NMIs, but fall back to manual remote stack tracing on architectures
   * that don't support NMI-based stack dumps.  The NMI-triggered stack
   * traces are more accurate because they are printed by the target CPU.
b637a328b   Paul E. McKenney   rcu: Print remote...
1354
1355
1356
1357
1358
1359
1360
1361
   */
  static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
  {
  	int cpu;
  	unsigned long flags;
  	struct rcu_node *rnp;
  
  	rcu_for_each_leaf_node(rsp, rnp) {
6cf100812   Paul E. McKenney   rcu: Add transiti...
1362
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
7aa92230c   Paul E. McKenney   rcu: Once again u...
1363
1364
1365
  		for_each_leaf_node_possible_cpu(rnp, cpu)
  			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
  				if (!trigger_single_cpu_backtrace(cpu))
bc75e9998   Mark Rutland   rcu: Correctly ha...
1366
  					dump_cpu_task(cpu);
67c583a7d   Boqun Feng   RCU: Privatize rc...
1367
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
b637a328b   Paul E. McKenney   rcu: Print remote...
1368
1369
  	}
  }
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
  /*
   * If too much time has passed in the current grace period, and if
   * so configured, go kick the relevant kthreads.
   */
  static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
  {
  	unsigned long j;
  
  	if (!rcu_kick_kthreads)
  		return;
  	j = READ_ONCE(rsp->jiffies_kick_kthreads);
aa3e0bf1a   Paul E. McKenney   rcu: Don't kick u...
1381
1382
  	if (time_after(jiffies, j) && rsp->gp_kthread &&
  	    (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1383
1384
  		WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
5dffed1e5   Paul E. McKenney   rcu: Dump ftrace ...
1385
  		rcu_ftrace_dump(DUMP_ALL);
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1386
1387
1388
1389
  		wake_up_process(rsp->gp_kthread);
  		WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
  	}
  }
088e9d253   Daniel Bristot de Oliveira   rcu: sysctl: Pani...
1390
1391
1392
1393
1394
1395
  static inline void panic_on_rcu_stall(void)
  {
  	if (sysctl_panic_on_rcu_stall)
  		panic("RCU Stall\n");
  }
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1396
  static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1397
1398
1399
1400
  {
  	int cpu;
  	long delta;
  	unsigned long flags;
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1401
1402
  	unsigned long gpa;
  	unsigned long j;
285fe2948   Paul E. McKenney   rcu: Fix detectio...
1403
  	int ndetected = 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1404
  	struct rcu_node *rnp = rcu_get_root(rsp);
53bb857c3   Paul E. McKenney   rcu: Dump number ...
1405
  	long totqlen = 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1406

8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1407
1408
1409
1410
  	/* Kick and suppress, if so configured. */
  	rcu_stall_kick_kthreads(rsp);
  	if (rcu_cpu_stall_suppress)
  		return;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1411
  	/* Only let one CPU complain about others per time interval. */
6cf100812   Paul E. McKenney   rcu: Add transiti...
1412
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1413
  	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
fc2219d49   Paul E. McKenney   rcu: Clean up cod...
1414
  	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
1415
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1416
1417
  		return;
  	}
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1418
1419
  	WRITE_ONCE(rsp->jiffies_stall,
  		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
67c583a7d   Boqun Feng   RCU: Privatize rc...
1420
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1421

8cdd32a91   Paul E. McKenney   rcu: refer RCU CP...
1422
1423
1424
1425
1426
  	/*
  	 * OK, time to rat on our buddy...
  	 * See Documentation/RCU/stallwarn.txt for info on how to debug
  	 * RCU CPU stall warnings.
  	 */
d7f3e2073   Paul E. McKenney   rcu: Convert rcut...
1427
  	pr_err("INFO: %s detected stalls on CPUs/tasks:",
4300aa642   Paul E. McKenney   rcu: improve RCU ...
1428
  	       rsp->name);
a858af287   Paul E. McKenney   rcu: Print schedu...
1429
  	print_cpu_stall_info_begin();
a0b6c9a78   Paul E. McKenney   rcu: Clean up cod...
1430
  	rcu_for_each_leaf_node(rsp, rnp) {
6cf100812   Paul E. McKenney   rcu: Add transiti...
1431
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
9bc8b5586   Paul E. McKenney   rcu: Suppress NMI...
1432
  		ndetected += rcu_print_task_stall(rnp);
c8020a67e   Paul E. McKenney   rcu: Protect rcu_...
1433
  		if (rnp->qsmask != 0) {
bc75e9998   Mark Rutland   rcu: Correctly ha...
1434
1435
1436
  			for_each_leaf_node_possible_cpu(rnp, cpu)
  				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
  					print_cpu_stall_info(rsp, cpu);
c8020a67e   Paul E. McKenney   rcu: Protect rcu_...
1437
1438
1439
  					ndetected++;
  				}
  		}
67c583a7d   Boqun Feng   RCU: Privatize rc...
1440
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1441
  	}
a858af287   Paul E. McKenney   rcu: Print schedu...
1442

a858af287   Paul E. McKenney   rcu: Print schedu...
1443
  	print_cpu_stall_info_end();
53bb857c3   Paul E. McKenney   rcu: Dump number ...
1444
  	for_each_possible_cpu(cpu)
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
1445
1446
  		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
  							    cpu)->cblist);
83ebe63ea   Paul E. McKenney   rcu: Print negati...
1447
1448
  	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
eee058826   Paul E. McKenney   rcu: Add grace-pe...
1449
  	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
83ebe63ea   Paul E. McKenney   rcu: Print negati...
1450
  	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1451
  	if (ndetected) {
b637a328b   Paul E. McKenney   rcu: Print remote...
1452
  		rcu_dump_cpu_stacks(rsp);
c4402b27f   Byungchul Park   rcu: Only dump st...
1453
1454
1455
  
  		/* Complain about tasks blocking the grace period. */
  		rcu_print_detail_task_stall(rsp);
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1456
  	} else {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1457
1458
  		if (READ_ONCE(rsp->gpnum) != gpnum ||
  		    READ_ONCE(rsp->completed) == gpnum) {
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1459
1460
1461
1462
  			pr_err("INFO: Stall ended before state dump start\n");
  		} else {
  			j = jiffies;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1463
  			gpa = READ_ONCE(rsp->gp_activity);
237a0f219   Paul E. McKenney   rcu: Detect stall...
1464
1465
  			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1466
  			       rsp->name, j - gpa, j, gpa,
237a0f219   Paul E. McKenney   rcu: Detect stall...
1467
1468
  			       jiffies_till_next_fqs,
  			       rcu_get_root(rsp)->qsmask);
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1469
1470
1471
1472
  			/* In this case, the current CPU might be at fault. */
  			sched_show_task(current);
  		}
  	}
c1dc0b9c0   Ingo Molnar   debug lockups: Im...
1473

fb81a44b8   Paul E. McKenney   rcu: Add GP-kthre...
1474
  	rcu_check_gp_kthread_starvation(rsp);
088e9d253   Daniel Bristot de Oliveira   rcu: sysctl: Pani...
1475
  	panic_on_rcu_stall();
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
1476
  	force_quiescent_state(rsp);  /* Kick them all. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1477
1478
1479
1480
  }
  
  static void print_cpu_stall(struct rcu_state *rsp)
  {
53bb857c3   Paul E. McKenney   rcu: Dump number ...
1481
  	int cpu;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1482
1483
  	unsigned long flags;
  	struct rcu_node *rnp = rcu_get_root(rsp);
53bb857c3   Paul E. McKenney   rcu: Dump number ...
1484
  	long totqlen = 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1485

8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1486
1487
1488
1489
  	/* Kick and suppress, if so configured. */
  	rcu_stall_kick_kthreads(rsp);
  	if (rcu_cpu_stall_suppress)
  		return;
8cdd32a91   Paul E. McKenney   rcu: refer RCU CP...
1490
1491
1492
1493
1494
  	/*
  	 * OK, time to rat on ourselves...
  	 * See Documentation/RCU/stallwarn.txt for info on how to debug
  	 * RCU CPU stall warnings.
  	 */
d7f3e2073   Paul E. McKenney   rcu: Convert rcut...
1495
  	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
a858af287   Paul E. McKenney   rcu: Print schedu...
1496
1497
1498
  	print_cpu_stall_info_begin();
  	print_cpu_stall_info(rsp, smp_processor_id());
  	print_cpu_stall_info_end();
53bb857c3   Paul E. McKenney   rcu: Dump number ...
1499
  	for_each_possible_cpu(cpu)
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
1500
1501
  		totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
  							    cpu)->cblist);
83ebe63ea   Paul E. McKenney   rcu: Print negati...
1502
1503
1504
1505
  	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
  		jiffies - rsp->gp_start,
  		(long)rsp->gpnum, (long)rsp->completed, totqlen);
fb81a44b8   Paul E. McKenney   rcu: Add GP-kthre...
1506
1507
  
  	rcu_check_gp_kthread_starvation(rsp);
bc1dce514   Paul E. McKenney   rcu: Don't use NM...
1508
  	rcu_dump_cpu_stacks(rsp);
c1dc0b9c0   Ingo Molnar   debug lockups: Im...
1509

6cf100812   Paul E. McKenney   rcu: Add transiti...
1510
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1511
1512
1513
  	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
  		WRITE_ONCE(rsp->jiffies_stall,
  			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
67c583a7d   Boqun Feng   RCU: Privatize rc...
1514
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
c1dc0b9c0   Ingo Molnar   debug lockups: Im...
1515

088e9d253   Daniel Bristot de Oliveira   rcu: sysctl: Pani...
1516
  	panic_on_rcu_stall();
b021fe3e2   Peter Zijlstra   sched, rcu: Make ...
1517
1518
1519
1520
1521
1522
1523
1524
  	/*
  	 * Attempt to revive the RCU machinery by forcing a context switch.
  	 *
  	 * A context switch would normally allow the RCU state machine to make
  	 * progress and it could be we're stuck in kernel space without context
  	 * switches for an entirely unreasonable amount of time.
  	 */
  	resched_cpu(smp_processor_id());
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1525
1526
1527
1528
  }
  
  static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
  {
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1529
1530
1531
  	unsigned long completed;
  	unsigned long gpnum;
  	unsigned long gps;
bad6e1393   Paul E. McKenney   rcu: get rid of s...
1532
1533
  	unsigned long j;
  	unsigned long js;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1534
  	struct rcu_node *rnp;
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1535
1536
  	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
  	    !rcu_gp_in_progress(rsp))
c68de2097   Paul E. McKenney   rcu: disable CPU ...
1537
  		return;
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1538
  	rcu_stall_kick_kthreads(rsp);
cb1e78cfa   Paul E. McKenney   rcu: Remove ACCES...
1539
  	j = jiffies;
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
  
  	/*
  	 * Lots of memory barriers to reject false positives.
  	 *
  	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
  	 * then rsp->gp_start, and finally rsp->completed.  These values
  	 * are updated in the opposite order with memory barriers (or
  	 * equivalent) during grace-period initialization and cleanup.
   * Now, a false positive can occur if we get a new value of
   * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
  	 * the memory barriers, the only way that this can happen is if one
  	 * grace period ends and another starts between these two fetches.
  	 * Detect this by comparing rsp->completed with the previous fetch
  	 * from rsp->gpnum.
  	 *
  	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
  	 * and rsp->gp_start suffice to forestall false positives.
  	 */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1558
  	gpnum = READ_ONCE(rsp->gpnum);
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1559
  	smp_rmb(); /* Pick up ->gpnum first... */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1560
  	js = READ_ONCE(rsp->jiffies_stall);
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1561
  	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1562
  	gps = READ_ONCE(rsp->gp_start);
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1563
  	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1564
  	completed = READ_ONCE(rsp->completed);
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1565
1566
1567
1568
  	if (ULONG_CMP_GE(completed, gpnum) ||
  	    ULONG_CMP_LT(j, js) ||
  	    ULONG_CMP_GE(gps, js))
  		return; /* No stall or GP completed since entering function. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1569
  	rnp = rdp->mynode;
c96ea7cfd   Paul E. McKenney   rcu: Avoid spurio...
1570
  	if (rcu_gp_in_progress(rsp) &&
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1571
  	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1572
1573
1574
  
  		/* We haven't checked in, so go dump stack. */
  		print_cpu_stall(rsp);
bad6e1393   Paul E. McKenney   rcu: get rid of s...
1575
1576
  	} else if (rcu_gp_in_progress(rsp) &&
  		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1577

bad6e1393   Paul E. McKenney   rcu: get rid of s...
1578
  		/* They had a few time units to dump stack, so complain. */
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1579
  		print_other_cpu_stall(rsp, gpnum);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1580
1581
  	}
  }
53d84e004   Paul E. McKenney   rcu: permit suppr...
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
  /**
   * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
   *
   * Set the stall-warning timeout way off into the future, thus preventing
   * any RCU CPU stall-warning messages from appearing in the current set of
   * RCU grace periods.
   *
   * The caller must disable hard irqs.
   */
  void rcu_cpu_stall_reset(void)
  {
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
1593
1594
1595
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp)
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1596
  		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
53d84e004   Paul E. McKenney   rcu: permit suppr...
1597
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1598
  /*
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
   * Determine the value that ->completed will have at the end of the
   * next subsequent grace period.  This is used to tag callbacks so that
   * a CPU can invoke callbacks in a timely fashion even if that CPU has
   * been dyntick-idle for an extended period with callbacks under the
   * influence of RCU_FAST_NO_HZ.
   *
   * The caller must hold rnp->lock with interrupts disabled.
   */
  static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
  				       struct rcu_node *rnp)
  {
c0b334c5b   Paul E. McKenney   rcu: Add lockdep_...
1610
  	lockdep_assert_held(&rnp->lock);
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
  	/*
  	 * If RCU is idle, we just wait for the next grace period.
  	 * But we can only be sure that RCU is idle if we are looking
  	 * at the root rcu_node structure -- otherwise, a new grace
  	 * period might have started, but just not yet gotten around
  	 * to initializing the current non-root rcu_node structure.
  	 */
  	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
  		return rnp->completed + 1;
  
  	/*
  	 * Otherwise, wait for a possible partial grace period and
  	 * then the subsequent full grace period.
  	 */
  	return rnp->completed + 2;
  }
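  
  /*
   * Illustrative sketch (not part of tree.c): the "+1 versus +2" rule
   * implemented by rcu_cbs_completed() above, with invented model_* names.
   * Only the root rcu_node structure can prove that no grace period is in
   * flight, so only there may callbacks be tagged for the very next grace
   * period.
   */
  static unsigned long model_cbs_completed(unsigned long gpnum,
  					 unsigned long completed,
  					 int is_root)
  {
  	if (is_root && gpnum == completed)
  		return completed + 1;	/* RCU idle: the next GP suffices. */
  	return completed + 2;		/* Allow for a partial GP plus a full GP. */
  }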
  
  /*
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1629
1630
1631
1632
   * Trace-event helper function for rcu_start_future_gp() and
   * rcu_nocb_wait_gp().
   */
  static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
e66c33d57   Steven Rostedt (Red Hat)   rcu: Add const an...
1633
  				unsigned long c, const char *s)
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1634
1635
1636
1637
1638
1639
1640
1641
1642
  {
  	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
  				      rnp->completed, c, rnp->level,
  				      rnp->grplo, rnp->grphi, s);
  }
  
  /*
   * Start some future grace period, as needed to handle newly arrived
   * callbacks.  The required future grace periods are recorded in each
48a7639ce   Paul E. McKenney   rcu: Make callers...
1643
1644
   * rcu_node structure's ->need_future_gp field.  Returns true if there
   * is reason to awaken the grace-period kthread.
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1645
1646
1647
   *
   * The caller must hold the specified rcu_node structure's ->lock.
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1648
1649
1650
  static bool __maybe_unused
  rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
  		    unsigned long *c_out)
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1651
1652
  {
  	unsigned long c;
48a7639ce   Paul E. McKenney   rcu: Make callers...
1653
  	bool ret = false;
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1654
  	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
c0b334c5b   Paul E. McKenney   rcu: Add lockdep_...
1655
  	lockdep_assert_held(&rnp->lock);
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1656
1657
1658
1659
1660
  	/*
  	 * Pick up grace-period number for new callbacks.  If this
  	 * grace period is already marked as needed, return to the caller.
  	 */
  	c = rcu_cbs_completed(rdp->rsp, rnp);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1661
  	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1662
  	if (rnp->need_future_gp[c & 0x1]) {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1663
  		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
48a7639ce   Paul E. McKenney   rcu: Make callers...
1664
  		goto out;
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1665
1666
1667
1668
1669
1670
1671
  	}
  
  	/*
  	 * If either this rcu_node structure or the root rcu_node structure
  	 * believe that a grace period is in progress, then we must wait
  	 * for the one following, which is in "c".  Because our request
  	 * will be noticed at the end of the current grace period, we don't
48bd8e9b8   Pranith Kumar   rcu: Check both r...
1672
1673
1674
1675
1676
1677
1678
  	 * need to explicitly start one.  We only do the lockless check
  	 * of rnp_root's fields if the current rcu_node structure thinks
  	 * there is no grace period in flight, and because we hold rnp->lock,
  	 * the only possible change is when rnp_root's two fields are
  	 * equal, in which case rnp_root->gpnum might be concurrently
  	 * incremented.  But that is OK, as it will just result in our
  	 * doing some extra useless work.
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1679
1680
  	 */
  	if (rnp->gpnum != rnp->completed ||
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1681
  	    READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1682
  		rnp->need_future_gp[c & 0x1]++;
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1683
  		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
48a7639ce   Paul E. McKenney   rcu: Make callers...
1684
  		goto out;
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1685
1686
1687
1688
1689
1690
1691
  	}
  
  	/*
  	 * There might be no grace period in progress.  If we don't already
  	 * hold it, acquire the root rcu_node structure's lock in order to
  	 * start one (if needed).
  	 */
2a67e741b   Peter Zijlstra   rcu: Create trans...
1692
1693
  	if (rnp != rnp_root)
  		raw_spin_lock_rcu_node(rnp_root);
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1694
1695
1696
1697
  
  	/*
  	 * Get a new grace-period number.  If there really is no grace
  	 * period in progress, it will be smaller than the one we obtained
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
1698
  	 * earlier.  Adjust callbacks as needed.
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1699
1700
  	 */
  	c = rcu_cbs_completed(rdp->rsp, rnp_root);
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
1701
1702
  	if (!rcu_is_nocb_cpu(rdp->cpu))
  		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1703
1704
1705
1706
1707
1708
  
  	/*
  	 * If the need for the required grace period is already
  	 * recorded, trace and leave.
  	 */
  	if (rnp_root->need_future_gp[c & 0x1]) {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1709
  		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1710
1711
1712
1713
1714
1715
1716
1717
  		goto unlock_out;
  	}
  
  	/* Record the need for the future grace period. */
  	rnp_root->need_future_gp[c & 0x1]++;
  
  	/* If a grace period is not already in progress, start one. */
  	if (rnp_root->gpnum != rnp_root->completed) {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1718
  		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1719
  	} else {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1720
  		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
48a7639ce   Paul E. McKenney   rcu: Make callers...
1721
  		ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1722
1723
1724
  	}
  unlock_out:
  	if (rnp != rnp_root)
67c583a7d   Boqun Feng   RCU: Privatize rc...
1725
  		raw_spin_unlock_rcu_node(rnp_root);
48a7639ce   Paul E. McKenney   rcu: Make callers...
1726
1727
1728
1729
  out:
  	if (c_out != NULL)
  		*c_out = c;
  	return ret;
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1730
1731
1732
1733
  }
  
  /*
   * Clean up any old requests for the just-ended grace period.  Also return
d1e4f01d0   Paul E. McKenney   rcu: Remove obsol...
1734
   * whether any additional grace periods have been requested.
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1735
1736
1737
1738
1739
1740
   */
  static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
  {
  	int c = rnp->completed;
  	int needmore;
  	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1741
1742
  	rnp->need_future_gp[c & 0x1] = 0;
  	needmore = rnp->need_future_gp[(c + 1) & 0x1];
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1743
1744
  	trace_rcu_future_gp(rnp, rdp, c,
  			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1745
1746
1747
1748
  	return needmore;
  }
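  
  /*
   * Illustrative sketch (not part of tree.c): why the ->need_future_gp[]
   * array used above needs only two elements indexed by the low bit of the
   * grace-period number: at any given time, new requests can target only
   * the next grace period or the one after it.  The model_* names are
   * invented.
   */
  static int model_need_future_gp[2];
  
  static void model_request_future_gp(unsigned long c)
  {
  	model_need_future_gp[c & 0x1]++;		/* record a request for GP "c" */
  }
  
  static int model_future_gp_cleanup(unsigned long c)
  {
  	model_need_future_gp[c & 0x1] = 0;		/* GP "c" just ended */
  	return model_need_future_gp[(c + 1) & 0x1];	/* any more requested? */
  }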
  
  /*
48a7639ce   Paul E. McKenney   rcu: Make callers...
1749
1750
1751
1752
1753
1754
1755
1756
1757
   * Awaken the grace-period kthread for the specified flavor of RCU.
   * Don't do a self-awaken, and don't bother awakening when there is
   * nothing for the grace-period kthread to do (as in several CPUs
   * raced to awaken, and we lost), and finally don't try to awaken
   * a kthread that has not yet been created.
   */
  static void rcu_gp_kthread_wake(struct rcu_state *rsp)
  {
  	if (current == rsp->gp_kthread ||
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1758
  	    !READ_ONCE(rsp->gp_flags) ||
48a7639ce   Paul E. McKenney   rcu: Make callers...
1759
1760
  	    !rsp->gp_kthread)
  		return;
abedf8e24   Paul Gortmaker   rcu: Use simple w...
1761
  	swake_up(&rsp->gp_wq);
48a7639ce   Paul E. McKenney   rcu: Make callers...
1762
1763
1764
  }
  
  /*
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1765
1766
1767
1768
1769
1770
   * If there is room, assign a ->completed number to any callbacks on
   * this CPU that have not already been assigned.  Also accelerate any
   * callbacks that were previously assigned a ->completed number that has
   * since proven to be too conservative, which can happen if callbacks get
   * assigned a ->completed number while RCU is idle, but with reference to
   * a non-root rcu_node structure.  This function is idempotent, so it does
48a7639ce   Paul E. McKenney   rcu: Make callers...
1771
1772
   * not hurt to call it repeatedly.  Returns a flag saying that we should
   * awaken the RCU grace-period kthread.
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1773
1774
1775
   *
   * The caller must hold rnp->lock with interrupts disabled.
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1776
  static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1777
1778
  			       struct rcu_data *rdp)
  {
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
1779
  	bool ret = false;
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1780

c0b334c5b   Paul E. McKenney   rcu: Add lockdep_...
1781
  	lockdep_assert_held(&rnp->lock);
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
1782
1783
  	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
  	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
48a7639ce   Paul E. McKenney   rcu: Make callers...
1784
  		return false;
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1785
1786
  
  	/*
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
1787
1788
1789
1790
1791
1792
1793
1794
  	 * Callbacks are often registered with incomplete grace-period
  	 * information.  Something about the fact that getting exact
  	 * information requires acquiring a global lock...  RCU therefore
  	 * makes a conservative estimate of the grace period number at which
  	 * a given callback will become ready to invoke.	The following
  	 * code checks this estimate and improves it when possible, thus
  	 * accelerating callback invocation to an earlier grace-period
  	 * number.
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1795
  	 */
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
1796
1797
  	if (rcu_segcblist_accelerate(&rdp->cblist, rcu_cbs_completed(rsp, rnp)))
  		ret = rcu_start_future_gp(rnp, rdp, NULL);
6d4b418c7   Paul E. McKenney   rcu: Trace callba...
1798
1799
  
  	/* Trace depending on how much we were able to accelerate. */
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
1800
  	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1801
  		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
6d4b418c7   Paul E. McKenney   rcu: Trace callba...
1802
  	else
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1803
  		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
48a7639ce   Paul E. McKenney   rcu: Make callers...
1804
  	return ret;
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1805
1806
1807
1808
1809
1810
1811
1812
  }
  
  /*
   * Move any callbacks whose grace period has completed to the
   * RCU_DONE_TAIL sublist, then compact the remaining sublists and
   * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
   * sublist.  This function is idempotent, so it does not hurt to
   * invoke it repeatedly.  As long as it is not invoked -too- often...
48a7639ce   Paul E. McKenney   rcu: Make callers...
1813
   * Returns true if the RCU grace-period kthread needs to be awakened.
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1814
1815
1816
   *
   * The caller must hold rnp->lock with interrupts disabled.
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1817
  static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1818
1819
  			    struct rcu_data *rdp)
  {
c0b334c5b   Paul E. McKenney   rcu: Add lockdep_...
1820
  	lockdep_assert_held(&rnp->lock);
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
1821
1822
  	/* If no pending (not yet ready to invoke) callbacks, nothing to do. */
  	if (!rcu_segcblist_pend_cbs(&rdp->cblist))
48a7639ce   Paul E. McKenney   rcu: Make callers...
1823
  		return false;
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1824
1825
1826
1827
1828
  
  	/*
  	 * Find all callbacks whose ->completed numbers indicate that they
  	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
  	 */
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
1829
  	rcu_segcblist_advance(&rdp->cblist, rnp->completed);
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1830
1831
  
  	/* Classify any remaining callbacks. */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1832
  	return rcu_accelerate_cbs(rsp, rnp, rdp);
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1833
1834
1835
  }
  
  /*
ba9fbe955   Paul E. McKenney   rcu: Merge __rcu_...
1836
1837
1838
   * Update CPU-local rcu_data state to record the beginnings and ends of
   * grace periods.  The caller must hold the ->lock of the leaf rcu_node
   * structure corresponding to the current CPU, and must have irqs disabled.
48a7639ce   Paul E. McKenney   rcu: Make callers...
1839
   * Returns true if the grace-period kthread needs to be awakened.
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1840
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1841
1842
  static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
  			      struct rcu_data *rdp)
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1843
  {
48a7639ce   Paul E. McKenney   rcu: Make callers...
1844
  	bool ret;
3563a438f   Paul E. McKenney   rcu: Avoid redund...
1845
  	bool need_gp;
48a7639ce   Paul E. McKenney   rcu: Make callers...
1846

c0b334c5b   Paul E. McKenney   rcu: Add lockdep_...
1847
  	lockdep_assert_held(&rnp->lock);
ba9fbe955   Paul E. McKenney   rcu: Merge __rcu_...
1848
  	/* Handle the ends of any preceding grace periods first. */
e3663b102   Paul E. McKenney   rcu: Handle gpnum...
1849
  	if (rdp->completed == rnp->completed &&
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1850
  	    !unlikely(READ_ONCE(rdp->gpwrap))) {
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1851

ba9fbe955   Paul E. McKenney   rcu: Merge __rcu_...
1852
  		/* No grace period end, so just accelerate recent callbacks. */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1853
  		ret = rcu_accelerate_cbs(rsp, rnp, rdp);
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1854

dc35c8934   Paul E. McKenney   rcu: Tag callback...
1855
1856
1857
  	} else {
  
  		/* Advance callbacks. */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1858
  		ret = rcu_advance_cbs(rsp, rnp, rdp);
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1859
1860
1861
  
  		/* Remember that we saw this grace-period completion. */
  		rdp->completed = rnp->completed;
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1862
  		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1863
  	}
398ebe600   Paul E. McKenney   rcu: Make __note_...
1864

7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1865
  	if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
  		/*
  		 * If the current grace period is waiting for this CPU,
  		 * set up to detect a quiescent state, otherwise don't
  		 * go looking for one.
  		 */
  		rdp->gpnum = rnp->gpnum;
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1872
  		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
3563a438f   Paul E. McKenney   rcu: Avoid redund...
1873
1874
  		need_gp = !!(rnp->qsmask & rdp->grpmask);
  		rdp->cpu_no_qs.b.norm = need_gp;
9577df9a3   Paul E. McKenney   rcu: Pull rcu_qs_...
1875
  		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
3563a438f   Paul E. McKenney   rcu: Avoid redund...
1876
  		rdp->core_needs_qs = need_gp;
6eaef633d   Paul E. McKenney   rcu: Move code to...
1877
  		zero_cpu_stall_ticks(rdp);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1878
  		WRITE_ONCE(rdp->gpwrap, false);
6eaef633d   Paul E. McKenney   rcu: Move code to...
1879
  	}
48a7639ce   Paul E. McKenney   rcu: Make callers...
1880
  	return ret;
6eaef633d   Paul E. McKenney   rcu: Move code to...
1881
  }
d34ea3221   Paul E. McKenney   rcu: Rename note_...
1882
  static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
6eaef633d   Paul E. McKenney   rcu: Move code to...
1883
1884
  {
  	unsigned long flags;
48a7639ce   Paul E. McKenney   rcu: Make callers...
1885
  	bool needwake;
  	struct rcu_node *rnp;
  
  	local_irq_save(flags);
  	rnp = rdp->mynode;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1890
1891
1892
  	if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
  	     rdp->completed == READ_ONCE(rnp->completed) &&
  	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
2a67e741b   Peter Zijlstra   rcu: Create trans...
1893
  	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
6eaef633d   Paul E. McKenney   rcu: Move code to...
1894
1895
1896
  		local_irq_restore(flags);
  		return;
  	}
48a7639ce   Paul E. McKenney   rcu: Make callers...
1897
  	needwake = __note_gp_changes(rsp, rnp, rdp);
67c583a7d   Boqun Feng   RCU: Privatize rc...
1898
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
48a7639ce   Paul E. McKenney   rcu: Make callers...
1899
1900
  	if (needwake)
  		rcu_gp_kthread_wake(rsp);
6eaef633d   Paul E. McKenney   rcu: Move code to...
1901
  }
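  
  /*
   * Delay the current grace period by "delay" jiffies, but only on grace
   * periods whose number is a multiple of rcu_num_nodes *
   * PER_RCU_NODE_PERIOD * delay, so that only a small fraction of grace
   * periods are artificially slowed for diagnostic purposes.
   */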
  static void rcu_gp_slow(struct rcu_state *rsp, int delay)
  {
  	if (delay > 0 &&
  	    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
  		schedule_timeout_uninterruptible(delay);
  }
6eaef633d   Paul E. McKenney   rcu: Move code to...
1908
  /*
45fed3e7c   Paul E. McKenney   rcu: Make rcu_gp_...
1909
   * Initialize a new grace period.  Return false if no grace period required.
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1910
   */
45fed3e7c   Paul E. McKenney   rcu: Make rcu_gp_...
1911
  static bool rcu_gp_init(struct rcu_state *rsp)
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1912
  {
0aa04b055   Paul E. McKenney   rcu: Process offl...
1913
  	unsigned long oldmask;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1914
  	struct rcu_data *rdp;
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1915
  	struct rcu_node *rnp = rcu_get_root(rsp);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1916

7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1917
  	WRITE_ONCE(rsp->gp_activity, jiffies);
2a67e741b   Peter Zijlstra   rcu: Create trans...
1918
  	raw_spin_lock_irq_rcu_node(rnp);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1919
  	if (!READ_ONCE(rsp->gp_flags)) {
f7be82093   Paul E. McKenney   rcu: Improve grac...
1920
  		/* Spurious wakeup, tell caller to go back to sleep.  */
67c583a7d   Boqun Feng   RCU: Privatize rc...
1921
  		raw_spin_unlock_irq_rcu_node(rnp);
45fed3e7c   Paul E. McKenney   rcu: Make rcu_gp_...
1922
  		return false;
f7be82093   Paul E. McKenney   rcu: Improve grac...
1923
  	}
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1924
  	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1925

  	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
  		/*
  		 * Grace period already in progress, don't start another.
  		 * Not supposed to be able to happen.
  		 */
67c583a7d   Boqun Feng   RCU: Privatize rc...
1931
  		raw_spin_unlock_irq_rcu_node(rnp);
45fed3e7c   Paul E. McKenney   rcu: Make rcu_gp_...
1932
  		return false;
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1933
  	}
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1934
  	/* Advance to a new grace period and initialize state. */
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1935
  	record_gp_stall_check_time(rsp);
765a3f4fe   Paul E. McKenney   rcu: Provide grac...
1936
1937
  	/* Record GP times before starting GP, hence smp_store_release(). */
  	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1938
  	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
67c583a7d   Boqun Feng   RCU: Privatize rc...
1939
  	raw_spin_unlock_irq_rcu_node(rnp);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1940

7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1941
  	/*
  	 * Apply per-leaf buffered online and offline operations to the
  	 * rcu_node tree.  Note that this new grace period need not wait
  	 * for subsequent online CPUs, and that quiescent-state forcing
  	 * will handle subsequent offline CPUs.
  	 */
  	rcu_for_each_leaf_node(rsp, rnp) {
0f41c0dda   Paul E. McKenney   rcu: Provide diag...
1948
  		rcu_gp_slow(rsp, gp_preinit_delay);
2a67e741b   Peter Zijlstra   rcu: Create trans...
1949
  		raw_spin_lock_irq_rcu_node(rnp);
0aa04b055   Paul E. McKenney   rcu: Process offl...
1950
1951
1952
  		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
  		    !rnp->wait_blkd_tasks) {
  			/* Nothing to do on this leaf rcu_node structure. */
67c583a7d   Boqun Feng   RCU: Privatize rc...
1953
  			raw_spin_unlock_irq_rcu_node(rnp);
  			continue;
  		}
  
  		/* Record old state, apply changes to ->qsmaskinit field. */
  		oldmask = rnp->qsmaskinit;
  		rnp->qsmaskinit = rnp->qsmaskinitnext;
  
  		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
  		if (!oldmask != !rnp->qsmaskinit) {
  			if (!oldmask) /* First online CPU for this rcu_node. */
  				rcu_init_new_rnp(rnp);
  			else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
  				rnp->wait_blkd_tasks = true;
  			else /* Last offline CPU and can propagate. */
  				rcu_cleanup_dead_rnp(rnp);
  		}
  
  		/*
  		 * If all waited-on tasks from prior grace period are
  		 * done, and if all this rcu_node structure's CPUs are
  		 * still offline, propagate up the rcu_node tree and
  		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
  		 * rcu_node structure's CPUs has since come back online,
  		 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
  		 * checks for this, so just call it unconditionally).
  		 */
  		if (rnp->wait_blkd_tasks &&
  		    (!rcu_preempt_has_tasks(rnp) ||
  		     rnp->qsmaskinit)) {
  			rnp->wait_blkd_tasks = false;
  			rcu_cleanup_dead_rnp(rnp);
  		}
67c583a7d   Boqun Feng   RCU: Privatize rc...
1986
  		raw_spin_unlock_irq_rcu_node(rnp);
0aa04b055   Paul E. McKenney   rcu: Process offl...
1987
  	}
  
  	/*
  	 * Set the quiescent-state-needed bits in all the rcu_node
  	 * structures for all currently online CPUs in breadth-first order,
  	 * starting from the root rcu_node structure, relying on the layout
  	 * of the tree within the rsp->node[] array.  Note that other CPUs
  	 * will access only the leaves of the hierarchy, thus seeing that no
  	 * grace period is in progress, at least until the corresponding
590d1757b   Paul E. McKenney   rcu: Fix outdated...
1996
  	 * leaf node has been initialized.
  	 *
  	 * The grace period cannot complete until the initialization
  	 * process finishes, because this kthread handles both.
  	 */
  	rcu_for_each_node_breadth_first(rsp, rnp) {
0f41c0dda   Paul E. McKenney   rcu: Provide diag...
2002
  		rcu_gp_slow(rsp, gp_init_delay);
2a67e741b   Peter Zijlstra   rcu: Create trans...
2003
  		raw_spin_lock_irq_rcu_node(rnp);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2004
  		rdp = this_cpu_ptr(rsp->rda);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2005
2006
  		rcu_preempt_check_blocked_tasks(rnp);
  		rnp->qsmask = rnp->qsmaskinit;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2007
  		WRITE_ONCE(rnp->gpnum, rsp->gpnum);
3f47da0f3   Lai Jiangshan   rcu_tree: Avoid t...
2008
  		if (WARN_ON_ONCE(rnp->completed != rsp->completed))
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2009
  			WRITE_ONCE(rnp->completed, rsp->completed);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2010
  		if (rnp == rdp->mynode)
48a7639ce   Paul E. McKenney   rcu: Make callers...
2011
  			(void)__note_gp_changes(rsp, rnp, rdp);
  		rcu_preempt_boost_start_gp(rnp);
  		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
  					    rnp->level, rnp->grplo,
  					    rnp->grphi, rnp->qsmask);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2016
  		raw_spin_unlock_irq_rcu_node(rnp);
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
2017
  		cond_resched_rcu_qs();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2018
  		WRITE_ONCE(rsp->gp_activity, jiffies);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2019
  	}
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2020

45fed3e7c   Paul E. McKenney   rcu: Make rcu_gp_...
2021
  	return true;
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2022
  }
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2023

7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2024
  /*
d5374226c   Luis R. Rodriguez   rcu: Use idle ver...
2025
2026
   * Helper function for swait_event_idle() wakeup at force-quiescent-state
   * time.
   */
  static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
  {
  	struct rcu_node *rnp = rcu_get_root(rsp);
  
  	/* Someone like call_rcu() requested a force-quiescent-state scan. */
  	*gfp = READ_ONCE(rsp->gp_flags);
  	if (*gfp & RCU_GP_FLAG_FQS)
  		return true;
  
  	/* The current grace period has completed. */
  	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
  		return true;
  
  	return false;
  }
  
  /*
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2045
2046
   * Do one round of quiescent-state forcing.
   */
77f81fe08   Petr Mladek   rcu: Finish foldi...
2047
  static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2048
  {
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2049
  	struct rcu_node *rnp = rcu_get_root(rsp);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2050
  	WRITE_ONCE(rsp->gp_activity, jiffies);
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2051
  	rsp->n_force_qs++;
77f81fe08   Petr Mladek   rcu: Finish foldi...
2052
  	if (first_time) {
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2053
  		/* Collect dyntick-idle snapshots. */
fe5ac724d   Paul E. McKenney   rcu: Remove nohz_...
2054
  		force_qs_rnp(rsp, dyntick_save_progress_counter);
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2055
2056
  	} else {
  		/* Handle dyntick-idle and offline CPUs. */
fe5ac724d   Paul E. McKenney   rcu: Remove nohz_...
2057
  		force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2058
2059
  	}
  	/* Clear flag to prevent immediate re-entry. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2060
  	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2a67e741b   Peter Zijlstra   rcu: Create trans...
2061
  		raw_spin_lock_irq_rcu_node(rnp);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2062
2063
  		WRITE_ONCE(rsp->gp_flags,
  			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2064
  		raw_spin_unlock_irq_rcu_node(rnp);
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2065
  	}
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2066
2067
2068
  }
  
  /*
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2069
2070
   * Clean up after the old grace period.
   */
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2071
  static void rcu_gp_cleanup(struct rcu_state *rsp)
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2072
2073
  {
  	unsigned long gp_duration;
48a7639ce   Paul E. McKenney   rcu: Make callers...
2074
  	bool needgp = false;
dae6e64d2   Paul E. McKenney   rcu: Introduce pr...
2075
  	int nocb = 0;
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2076
2077
  	struct rcu_data *rdp;
  	struct rcu_node *rnp = rcu_get_root(rsp);
abedf8e24   Paul Gortmaker   rcu: Use simple w...
2078
  	struct swait_queue_head *sq;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2079

7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2080
  	WRITE_ONCE(rsp->gp_activity, jiffies);
2a67e741b   Peter Zijlstra   rcu: Create trans...
2081
  	raw_spin_lock_irq_rcu_node(rnp);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2082
2083
2084
  	gp_duration = jiffies - rsp->gp_start;
  	if (gp_duration > rsp->gp_max)
  		rsp->gp_max = gp_duration;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2085

  	/*
  	 * We know the grace period is complete, but to everyone else
  	 * it appears to still be ongoing.  But it is also the case
  	 * that to everyone else it looks like there is nothing that
  	 * they can do to advance the grace period.  It is therefore
  	 * safe for us to drop the lock in order to mark the grace
  	 * period as completed in all of the rcu_node structures.
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2093
  	 */
67c583a7d   Boqun Feng   RCU: Privatize rc...
2094
  	raw_spin_unlock_irq_rcu_node(rnp);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2095

  	/*
  	 * Propagate new ->completed value to rcu_node structures so
  	 * that other CPUs don't have to wait until the start of the next
  	 * grace period to process their callbacks.  This also avoids
  	 * some nasty RCU grace-period initialization races by forcing
  	 * the end of the current grace period to be completely recorded in
  	 * all of the rcu_node structures before the beginning of the next
  	 * grace period is recorded in any of the rcu_node structures.
  	 */
  	rcu_for_each_node_breadth_first(rsp, rnp) {
2a67e741b   Peter Zijlstra   rcu: Create trans...
2106
  		raw_spin_lock_irq_rcu_node(rnp);
5c60d25fa   Paul E. McKenney   rcu: Add diagnost...
2107
2108
  		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
  		WARN_ON_ONCE(rnp->qsmask);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2109
  		WRITE_ONCE(rnp->completed, rsp->gpnum);
b11cc5760   Paul E. McKenney   rcu: Accelerate R...
2110
2111
  		rdp = this_cpu_ptr(rsp->rda);
  		if (rnp == rdp->mynode)
48a7639ce   Paul E. McKenney   rcu: Make callers...
2112
  			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
78e4bc34e   Paul E. McKenney   rcu: Fix and comm...
2113
  		/* smp_mb() provided by prior unlock-lock pair. */
0446be489   Paul E. McKenney   rcu: Abstract rcu...
2114
  		nocb += rcu_future_gp_cleanup(rsp, rnp);
065bb78c5   Daniel Wagner   rcu: Do not call ...
2115
  		sq = rcu_nocb_gp_get(rnp);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2116
  		raw_spin_unlock_irq_rcu_node(rnp);
065bb78c5   Daniel Wagner   rcu: Do not call ...
2117
  		rcu_nocb_gp_cleanup(sq);
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
2118
  		cond_resched_rcu_qs();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2119
  		WRITE_ONCE(rsp->gp_activity, jiffies);
0f41c0dda   Paul E. McKenney   rcu: Provide diag...
2120
  		rcu_gp_slow(rsp, gp_cleanup_delay);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2121
  	}
5d4b86594   Paul E. McKenney   rcu: Fix day-zero...
2122
  	rnp = rcu_get_root(rsp);
2a67e741b   Peter Zijlstra   rcu: Create trans...
2123
  	raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
dae6e64d2   Paul E. McKenney   rcu: Introduce pr...
2124
  	rcu_nocb_gp_set(rnp, nocb);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2125

765a3f4fe   Paul E. McKenney   rcu: Provide grac...
2126
  	/* Declare grace period done. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2127
  	WRITE_ONCE(rsp->completed, rsp->gpnum);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2128
  	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
77f81fe08   Petr Mladek   rcu: Finish foldi...
2129
  	rsp->gp_state = RCU_GP_IDLE;
5d4b86594   Paul E. McKenney   rcu: Fix day-zero...
2130
  	rdp = this_cpu_ptr(rsp->rda);
48a7639ce   Paul E. McKenney   rcu: Make callers...
2131
2132
2133
  	/* Advance CBs to reduce false positives below. */
  	needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
  	if (needgp || cpu_needs_another_gp(rsp, rdp)) {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2134
  		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
bb311eccb   Paul E. McKenney   rcu: Add tracing ...
2135
  		trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2136
  				       READ_ONCE(rsp->gpnum),
bb311eccb   Paul E. McKenney   rcu: Add tracing ...
2137
2138
  				       TPS("newreq"));
  	}
67c583a7d   Boqun Feng   RCU: Privatize rc...
2139
  	raw_spin_unlock_irq_rcu_node(rnp);
  }
  
  /*
   * Body of kthread that handles grace periods.
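   *
   * Each pass through the outer loop waits for a grace period to be
   * requested, initializes the new grace period, repeatedly forces
   * quiescent states until the grace period completes, and then cleans
   * up after it.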
   */
  static int __noreturn rcu_gp_kthread(void *arg)
  {
77f81fe08   Petr Mladek   rcu: Finish foldi...
2147
  	bool first_gp_fqs;
88d6df612   Paul E. McKenney   rcu: Prevent spur...
2148
  	int gf;
d40011f60   Paul E. McKenney   rcu: Control grac...
2149
  	unsigned long j;
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2150
  	int ret;
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2151
2152
  	struct rcu_state *rsp = arg;
  	struct rcu_node *rnp = rcu_get_root(rsp);
5871968d5   Paul E. McKenney   rcu: Tighten up a...
2153
  	rcu_bind_gp_kthread();
  	for (;;) {
  
  		/* Handle grace-period start. */
  		for (;;) {
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2158
  			trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2159
  					       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2160
  					       TPS("reqwait"));
afea227fd   Paul E. McKenney   rcutorture: Expor...
2161
  			rsp->gp_state = RCU_GP_WAIT_GPS;
d5374226c   Luis R. Rodriguez   rcu: Use idle ver...
2162
2163
  			swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
  						     RCU_GP_FLAG_INIT);
319362c90   Paul E. McKenney   rcu: Provide more...
2164
  			rsp->gp_state = RCU_GP_DONE_GPS;
78e4bc34e   Paul E. McKenney   rcu: Fix and comm...
2165
  			/* Locking provides needed memory barrier. */
f7be82093   Paul E. McKenney   rcu: Improve grac...
2166
  			if (rcu_gp_init(rsp))
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2167
  				break;
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
2168
  			cond_resched_rcu_qs();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2169
  			WRITE_ONCE(rsp->gp_activity, jiffies);
73a860cd5   Paul E. McKenney   rcu: Replace flus...
2170
  			WARN_ON(signal_pending(current));
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2171
  			trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2172
  					       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2173
  					       TPS("reqwaitsig"));
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2174
  		}
cabc49c1f   Paul E. McKenney   rcu: Move RCU gra...
2175

4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2176
  		/* Handle quiescent-state forcing. */
77f81fe08   Petr Mladek   rcu: Finish foldi...
2177
  		first_gp_fqs = true;
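  		/*
  		 * Limit the wait before the first forcing scan to at most
  		 * one second, and write the clamped value back so that
  		 * later grace periods also see the limit.
  		 */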
  		j = jiffies_till_first_fqs;
  		if (j > HZ) {
  			j = HZ;
  			jiffies_till_first_fqs = HZ;
  		}
88d6df612   Paul E. McKenney   rcu: Prevent spur...
2183
  		ret = 0;
cabc49c1f   Paul E. McKenney   rcu: Move RCU gra...
2184
  		for (;;) {
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
2185
  			if (!ret) {
88d6df612   Paul E. McKenney   rcu: Prevent spur...
2186
  				rsp->jiffies_force_qs = jiffies + j;
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
2187
2188
2189
  				WRITE_ONCE(rsp->jiffies_kick_kthreads,
  					   jiffies + 3 * j);
  			}
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2190
  			trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2191
  					       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2192
  					       TPS("fqswait"));
afea227fd   Paul E. McKenney   rcutorture: Expor...
2193
  			rsp->gp_state = RCU_GP_WAIT_FQS;
d5374226c   Luis R. Rodriguez   rcu: Use idle ver...
2194
  			ret = swait_event_idle_timeout(rsp->gp_wq,
b9a425cfc   Paul E. McKenney   rcu: Pull out wai...
2195
  					rcu_gp_fqs_check_wake(rsp, &gf), j);
32bb1c799   Paul E. McKenney   rcu: Rename RCU_G...
2196
  			rsp->gp_state = RCU_GP_DOING_FQS;
78e4bc34e   Paul E. McKenney   rcu: Fix and comm...
2197
  			/* Locking provides needed memory barriers. */
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2198
  			/* If grace period done, leave loop. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2199
  			if (!READ_ONCE(rnp->qsmask) &&
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2200
  			    !rcu_preempt_blocked_readers_cgp(rnp))
cabc49c1f   Paul E. McKenney   rcu: Move RCU gra...
2201
  				break;
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2202
  			/* If time for quiescent-state forcing, do it. */
88d6df612   Paul E. McKenney   rcu: Prevent spur...
2203
2204
  			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
  			    (gf & RCU_GP_FLAG_FQS)) {
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2205
  				trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2206
  						       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2207
  						       TPS("fqsstart"));
77f81fe08   Petr Mladek   rcu: Finish foldi...
2208
2209
  				rcu_gp_fqs(rsp, first_gp_fqs);
  				first_gp_fqs = false;
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2210
  				trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2211
  						       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2212
  						       TPS("fqsend"));
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
2213
  				cond_resched_rcu_qs();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2214
  				WRITE_ONCE(rsp->gp_activity, jiffies);
  				ret = 0; /* Force full wait till next FQS. */
  				j = jiffies_till_next_fqs;
  				if (j > HZ) {
  					j = HZ;
  					jiffies_till_next_fqs = HZ;
  				} else if (j < 1) {
  					j = 1;
  					jiffies_till_next_fqs = 1;
  				}
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2224
2225
  			} else {
  				/* Deal with stray signal. */
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
2226
  				cond_resched_rcu_qs();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2227
  				WRITE_ONCE(rsp->gp_activity, jiffies);
73a860cd5   Paul E. McKenney   rcu: Replace flus...
2228
  				WARN_ON(signal_pending(current));
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2229
  				trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2230
  						       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2231
  						       TPS("fqswaitsig"));
  				ret = 1; /* Keep old FQS timing. */
  				j = jiffies;
  				if (time_after(jiffies, rsp->jiffies_force_qs))
  					j = 1;
  				else
  					j = rsp->jiffies_force_qs - j;
d40011f60   Paul E. McKenney   rcu: Control grac...
2238
  			}
cabc49c1f   Paul E. McKenney   rcu: Move RCU gra...
2239
  		}
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2240
2241
  
  		/* Handle grace-period end. */
319362c90   Paul E. McKenney   rcu: Provide more...
2242
  		rsp->gp_state = RCU_GP_CLEANUP;
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2243
  		rcu_gp_cleanup(rsp);
319362c90   Paul E. McKenney   rcu: Provide more...
2244
  		rsp->gp_state = RCU_GP_CLEANED;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2245
  	}
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2246
2247
2248
  }
  
  /*
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2249
2250
   * Start a new RCU grace period if warranted, re-initializing the hierarchy
   * in preparation for detecting the next grace period.  The caller must hold
b8462084a   Paul E. McKenney   rcu: Push lock re...
2251
   * the root node's ->lock and hard irqs must be disabled.
   *
   * Note that it is legal for a dying CPU (which is marked as offline) to
   * invoke this function.  This can happen when the dying CPU reports its
   * quiescent state.
48a7639ce   Paul E. McKenney   rcu: Make callers...
2256
2257
   *
   * Returns true if the grace-period kthread must be awakened.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2258
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2259
  static bool
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
2260
2261
  rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
  		      struct rcu_data *rdp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2262
  {
c0b334c5b   Paul E. McKenney   rcu: Add lockdep_...
2263
  	lockdep_assert_held(&rnp->lock);
b8462084a   Paul E. McKenney   rcu: Push lock re...
2264
  	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
afe24b122   Paul E. McKenney   rcu: Move propaga...
2265
  		/*
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2266
  		 * Either we have not yet spawned the grace-period
62da19212   Paul E. McKenney   rcu: Accelerate c...
2267
2268
  		 * task, this CPU does not need another grace period,
  		 * or a grace period is already in progress.
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2269
  		 * Either way, don't start a new grace period.
afe24b122   Paul E. McKenney   rcu: Move propaga...
2270
  		 */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2271
  		return false;
afe24b122   Paul E. McKenney   rcu: Move propaga...
2272
  	}
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2273
2274
  	WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
  	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
bb311eccb   Paul E. McKenney   rcu: Add tracing ...
2275
  			       TPS("newreq"));
62da19212   Paul E. McKenney   rcu: Accelerate c...
2276

016a8d5be   Steven Rostedt   rcu: Don't call w...
2277
2278
  	/*
  	 * We can't do wakeups while holding the rnp->lock, as that
1eafd31c6   Paul E. McKenney   rcu: Avoid redund...
2279
  	 * could cause possible deadlocks with the rq->lock. Defer
48a7639ce   Paul E. McKenney   rcu: Make callers...
2280
  	 * the wakeup to our caller.
016a8d5be   Steven Rostedt   rcu: Don't call w...
2281
  	 */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2282
  	return true;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2283
2284
2285
  }
  
  /*
   * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
   * callbacks.  Note that rcu_start_gp_advanced() cannot do this because it
   * is invoked indirectly from rcu_advance_cbs(), which would result in
   * endless recursion -- or would do so if it wasn't for the self-deadlock
   * that is encountered beforehand.
48a7639ce   Paul E. McKenney   rcu: Make callers...
2291
2292
   *
   * Returns true if the grace-period kthread needs to be awakened.
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
2293
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2294
  static bool rcu_start_gp(struct rcu_state *rsp)
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
2295
2296
2297
  {
  	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
  	struct rcu_node *rnp = rcu_get_root(rsp);
48a7639ce   Paul E. McKenney   rcu: Make callers...
2298
  	bool ret = false;
  
  	/*
  	 * If there is no grace period in progress right now, any
  	 * callbacks we have up to this point will be satisfied by the
  	 * next grace period.  Also, advancing the callbacks reduces the
  	 * probability of false positives from cpu_needs_another_gp()
  	 * resulting in pointless grace periods.  So, advance callbacks
  	 * then start the grace period!
  	 */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2308
2309
2310
  	ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
  	ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
  	return ret;
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
2311
2312
2313
  }
  
  /*
   * Report a full set of quiescent states to the specified rcu_state data
   * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
   * kthread if another grace period is required.  Whether we wake
   * the grace-period kthread or it awakens itself for the next round
   * of quiescent-state forcing, that kthread will clean up after the
   * just-completed grace period.  Note that the caller must hold rnp->lock,
   * which is released before return.
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
2321
   */
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2322
  static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
fc2219d49   Paul E. McKenney   rcu: Clean up cod...
2323
  	__releases(rcu_get_root(rsp)->lock)
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
2324
  {
c0b334c5b   Paul E. McKenney   rcu: Add lockdep_...
2325
  	lockdep_assert_held(&rcu_get_root(rsp)->lock);
fc2219d49   Paul E. McKenney   rcu: Clean up cod...
2326
  	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
cd73ca21c   Paul E. McKenney   rcu: Force wakeup...
2327
  	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2328
  	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
94d447767   Jisheng Zhang   rcu: Use rcu_gp_k...
2329
  	rcu_gp_kthread_wake(rsp);
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
2330
2331
2332
  }
  
  /*
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2333
2334
2335
   * Similar to rcu_report_qs_rdp(), for which it is a helper function.
   * Allows quiescent states for a group of CPUs to be reported at one go
   * to the specified rcu_node structure, though all the CPUs in the group
   * must be represented by the same rcu_node structure (which need not be a
   * leaf rcu_node structure, though it often will be).  The gps parameter
   * is the grace-period snapshot, which means that the quiescent states
   * are valid only if rnp->gpnum is equal to gps.  That structure's lock
   * must be held upon entry, and it is released before return.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2341
2342
   */
  static void
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2343
  rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
654e95334   Paul E. McKenney   rcu: Associate qu...
2344
  		  struct rcu_node *rnp, unsigned long gps, unsigned long flags)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2345
2346
  	__releases(rnp->lock)
  {
654e95334   Paul E. McKenney   rcu: Associate qu...
2347
  	unsigned long oldmask = 0;
28ecd5802   Paul E. McKenney   rcu: Add WARN_ON_...
2348
  	struct rcu_node *rnp_c;
c0b334c5b   Paul E. McKenney   rcu: Add lockdep_...
2349
  	lockdep_assert_held(&rnp->lock);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2350
2351
  	/* Walk up the rcu_node hierarchy. */
  	for (;;) {
654e95334   Paul E. McKenney   rcu: Associate qu...
2352
  		if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2353

  			/*
  			 * Our bit has already been cleared, or the
  			 * relevant grace period is already over, so done.
  			 */
67c583a7d   Boqun Feng   RCU: Privatize rc...
2358
  			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2359
2360
  			return;
  		}
654e95334   Paul E. McKenney   rcu: Associate qu...
2361
  		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2dee9404f   Paul E. McKenney   rcu: Add assertio...
2362
2363
  		WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1 &&
  			     rcu_preempt_blocked_readers_cgp(rnp));
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2364
  		rnp->qsmask &= ~mask;
d4c08f2ac   Paul E. McKenney   rcu: Add grace-pe...
2365
2366
2367
2368
  		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
  						 mask, rnp->qsmask, rnp->level,
  						 rnp->grplo, rnp->grphi,
  						 !!rnp->gp_tasks);
27f4d2805   Paul E. McKenney   rcu: priority boo...
2369
  		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2370
2371
  
  			/* Other bits still set at this level, so done. */
67c583a7d   Boqun Feng   RCU: Privatize rc...
2372
  			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  			return;
  		}
  		mask = rnp->grpmask;
  		if (rnp->parent == NULL) {
  
  			/* No more levels.  Exit loop holding root lock. */
  
  			break;
  		}
67c583a7d   Boqun Feng   RCU: Privatize rc...
2382
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
28ecd5802   Paul E. McKenney   rcu: Add WARN_ON_...
2383
  		rnp_c = rnp;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2384
  		rnp = rnp->parent;
2a67e741b   Peter Zijlstra   rcu: Create trans...
2385
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
654e95334   Paul E. McKenney   rcu: Associate qu...
2386
  		oldmask = rnp_c->qsmask;
  	}
  
  	/*
  	 * Get here if we are the last CPU to pass through a quiescent
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2391
  	 * state for this grace period.  Invoke rcu_report_qs_rsp()
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
2392
  	 * to clean up and start the next grace period if one is needed.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2393
  	 */
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2394
  	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2395
2396
2397
  }
  
  /*
   * Record a quiescent state for all tasks that were previously queued
   * on the specified rcu_node structure and that were blocking the current
   * RCU grace period.  The caller must hold the specified rnp->lock with
   * irqs disabled, and this lock is released upon return, but irqs remain
   * disabled.
   */
0aa04b055   Paul E. McKenney   rcu: Process offl...
2404
  static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
cc99a310c   Paul E. McKenney   rcu: Move rcu_rep...
2405
2406
2407
  				      struct rcu_node *rnp, unsigned long flags)
  	__releases(rnp->lock)
  {
654e95334   Paul E. McKenney   rcu: Associate qu...
2408
  	unsigned long gps;
cc99a310c   Paul E. McKenney   rcu: Move rcu_rep...
2409
2410
  	unsigned long mask;
  	struct rcu_node *rnp_p;
c0b334c5b   Paul E. McKenney   rcu: Add lockdep_...
2411
  	lockdep_assert_held(&rnp->lock);
a77da14ce   Paul E. McKenney   rcu: Yet another ...
2412
2413
  	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
  	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
2414
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  		return;  /* Still need more quiescent states! */
  	}
  
  	rnp_p = rnp->parent;
  	if (rnp_p == NULL) {
  		/*
a77da14ce   Paul E. McKenney   rcu: Yet another ...
2421
2422
  		 * Only one rcu_node structure in the tree, so don't
  		 * try to report up to its nonexistent parent!
  		 */
  		rcu_report_qs_rsp(rsp, flags);
  		return;
  	}
654e95334   Paul E. McKenney   rcu: Associate qu...
2427
2428
  	/* Report up the rest of the hierarchy, tracking current ->gpnum. */
  	gps = rnp->gpnum;
cc99a310c   Paul E. McKenney   rcu: Move rcu_rep...
2429
  	mask = rnp->grpmask;
67c583a7d   Boqun Feng   RCU: Privatize rc...
2430
  	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2a67e741b   Peter Zijlstra   rcu: Create trans...
2431
  	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
654e95334   Paul E. McKenney   rcu: Associate qu...
2432
  	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
cc99a310c   Paul E. McKenney   rcu: Move rcu_rep...
2433
2434
2435
  }
  
  /*
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2436
   * Record a quiescent state for the specified CPU to that CPU's rcu_data
4b455dc3e   Paul E. McKenney   rcu: Catch up rcu...
2437
   * structure.  This must be called from the specified CPU.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2438
2439
   */
  static void
d7d6a11e8   Paul E. McKenney   rcu: Simplify qui...
2440
  rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2441
2442
2443
  {
  	unsigned long flags;
  	unsigned long mask;
48a7639ce   Paul E. McKenney   rcu: Make callers...
2444
  	bool needwake;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2445
2446
2447
  	struct rcu_node *rnp;
  
  	rnp = rdp->mynode;
2a67e741b   Peter Zijlstra   rcu: Create trans...
2448
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
3a19b46a5   Paul E. McKenney   rcu: Check cond_r...
2449
2450
  	if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
  	    rnp->completed == rnp->gpnum || rdp->gpwrap) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2451
2452
  
  		/*
  		 * The grace period in which this quiescent state was
  		 * recorded has ended, so don't report it upwards.
  		 * We will instead need a new quiescent state that lies
  		 * within the current grace period.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2457
  		 */
5b74c4589   Paul E. McKenney   rcu: Make ->cpu_n...
2458
  		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
9577df9a3   Paul E. McKenney   rcu: Pull rcu_qs_...
2459
  		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2460
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  		return;
  	}
  	mask = rdp->grpmask;
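  	/*
  	 * If our bit in ->qsmask is already clear, this CPU's quiescent
  	 * state is not (or is no longer) needed, so just release the lock.
  	 */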
  	if ((rnp->qsmask & mask) == 0) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
2465
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2466
  	} else {
bb53e416e   Paul E. McKenney   rcu: Assign false...
2467
  		rdp->core_needs_qs = false;
  
  		/*
  		 * This GP can't end until cpu checks in, so all of our
  		 * callbacks can be processed during the next GP.
  		 */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2473
  		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2474

654e95334   Paul E. McKenney   rcu: Associate qu...
2475
2476
  		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
  		/* ^^^ Released rnp->lock */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2477
2478
  		if (needwake)
  			rcu_gp_kthread_wake(rsp);
  	}
  }
  
  /*
   * Check to see if there is a new grace period of which this CPU
   * is not yet aware, and if so, set up local rcu_data state for it.
   * Otherwise, see if this CPU has just passed through its first
   * quiescent state for this grace period, and record that fact if so.
   */
  static void
  rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
  {
05eb552bf   Paul E. McKenney   rcu: Move redunda...
2491
2492
  	/* Check for grace-period ends and beginnings. */
  	note_gp_changes(rsp, rdp);
  
  	/*
  	 * Does this CPU still need to do its part for the current grace
  	 * period?  If not, return and let the other CPUs do their part.
  	 */
97c668b8e   Paul E. McKenney   rcu: Rename qs_pe...
2498
  	if (!rdp->core_needs_qs)
  		return;
  
  	/*
  	 * Was there a quiescent state since the beginning of the grace
  	 * period? If no, then exit and wait for the next call.
  	 */
3a19b46a5   Paul E. McKenney   rcu: Check cond_r...
2505
  	if (rdp->cpu_no_qs.b.norm)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2506
  		return;
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2507
2508
2509
2510
  	/*
  	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
  	 * judge of that).
  	 */
d7d6a11e8   Paul E. McKenney   rcu: Simplify qui...
2511
  	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2512
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2513
  /*
   * Trace the fact that this CPU is going offline.
   */
  static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  {
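  	/*
  	 * These locals are used only for tracing; the RCU_TRACE() wrapper
  	 * makes them vanish when RCU tracing is not configured.
  	 */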
  	RCU_TRACE(unsigned long mask;)
  	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
  	RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2521

ea46351ce   Paul E. McKenney   rcu: Eliminate HO...
2522
2523
  	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
  		return;
88a4976d0   Paul E. McKenney   rcu: Semicolon in...
2524
  	RCU_TRACE(mask = rdp->grpmask;)
e56014000   Paul E. McKenney   rcu: Simplify off...
2525
2526
  	trace_rcu_grace_period(rsp->name,
  			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2527
  			       TPS("cpuofl"));
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2528
2529
2530
  }
  
  /*
   * All CPUs for the specified rcu_node structure have gone offline,
   * and all tasks that were preempted within an RCU read-side critical
   * section while running on one of those CPUs have since exited their RCU
   * read-side critical section.  Some other CPU is reporting this fact with
   * the specified rcu_node structure's ->lock held and interrupts disabled.
   * This function therefore goes up the tree of rcu_node structures,
   * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
   * the leaf rcu_node structure's ->qsmaskinit field has already been
   * updated.
   *
   * This function does check that the specified rcu_node structure has
   * all CPUs offline and no blocked tasks, so it is OK to invoke it
   * prematurely.  That said, invoking it after the fact will cost you
   * a needless lock acquisition.  So once it has done its work, don't
   * invoke it again.
   */
  static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
  {
  	long mask;
  	struct rcu_node *rnp = rnp_leaf;
c0b334c5b   Paul E. McKenney   rcu: Add lockdep_...
2551
  	lockdep_assert_held(&rnp->lock);
ea46351ce   Paul E. McKenney   rcu: Eliminate HO...
2552
2553
  	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
  	    rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
  		return;
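  	/*
  	 * Walk up the tree, clearing this rcu_node structure's bit in each
  	 * ancestor's ->qsmaskinit, and stop at the first level whose
  	 * ->qsmaskinit is still nonzero after the clearing.
  	 */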
  	for (;;) {
  		mask = rnp->grpmask;
  		rnp = rnp->parent;
  		if (!rnp)
  			break;
2a67e741b   Peter Zijlstra   rcu: Create trans...
2560
  		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
8af3a5e78   Paul E. McKenney   rcu: Abstract rcu...
2561
  		rnp->qsmaskinit &= ~mask;
0aa04b055   Paul E. McKenney   rcu: Process offl...
2562
  		rnp->qsmask &= ~mask;
8af3a5e78   Paul E. McKenney   rcu: Abstract rcu...
2563
  		if (rnp->qsmaskinit) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
2564
2565
  			raw_spin_unlock_rcu_node(rnp);
  			/* irqs remain disabled. */
8af3a5e78   Paul E. McKenney   rcu: Abstract rcu...
2566
2567
  			return;
  		}
67c583a7d   Boqun Feng   RCU: Privatize rc...
2568
  		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
  	}
  }
  
  /*
e56014000   Paul E. McKenney   rcu: Simplify off...
2573
   * The CPU has been completely removed, and some other CPU is reporting
a58163d8c   Paul E. McKenney   rcu: Migrate call...
2574
2575
2576
   * this fact from process context.  Do the remainder of the cleanup.
   * There can only be one CPU hotplug operation at a time, so no need for
   * explicit locking.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2577
   */
e56014000   Paul E. McKenney   rcu: Simplify off...
2578
  static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2579
  {
e56014000   Paul E. McKenney   rcu: Simplify off...
2580
  	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2581
  	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
e56014000   Paul E. McKenney   rcu: Simplify off...
2582

ea46351ce   Paul E. McKenney   rcu: Eliminate HO...
2583
2584
  	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
  		return;
2036d94a7   Paul E. McKenney   rcu: Rework detec...
2585
  	/* Adjust any no-longer-needed kthreads. */
5d01bbd11   Thomas Gleixner   rcu: Yield simpler
2586
  	rcu_boost_kthread_setaffinity(rnp, -1);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2587
  }
  /*
   * Invoke any RCU callbacks that have made it to the end of their grace
   * period.  Throttle as specified by rdp->blimit.
   */
37c72e56f   Paul E. McKenney   rcu: Prevent RCU ...
2592
  static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2593
2594
  {
  	unsigned long flags;
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2595
2596
2597
  	struct rcu_head *rhp;
  	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
  	long bl, count;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2598

dc35c8934   Paul E. McKenney   rcu: Tag callback...
2599
  	/* If no callbacks are ready, just return. */
  	if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
  		trace_rcu_batch_start(rsp->name,
  				      rcu_segcblist_n_lazy_cbs(&rdp->cblist),
  				      rcu_segcblist_n_cbs(&rdp->cblist), 0);
  		trace_rcu_batch_end(rsp->name, 0,
  				    !rcu_segcblist_empty(&rdp->cblist),
4968c300e   Paul E. McKenney   rcu: Augment rcu_...
2606
2607
  				    need_resched(), is_idle_task(current),
  				    rcu_is_callbacks_kthread());
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2608
  		return;
29c00b4a1   Paul E. McKenney   rcu: Add event-tr...
2609
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2610
2611
2612
  
  	/*
  	 * Extract the list of ready callbacks, disabling to prevent
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2613
2614
  	 * races with call_rcu() from interrupt handlers.  Leave the
  	 * callback counts, as rcu_barrier() needs to be conservative.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2615
2616
  	 */
  	local_irq_save(flags);
8146c4e2e   Paul E. McKenney   rcu: Check for ca...
2617
  	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
29c00b4a1   Paul E. McKenney   rcu: Add event-tr...
2618
  	bl = rdp->blimit;
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2619
2620
2621
  	trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
  			      rcu_segcblist_n_cbs(&rdp->cblist), bl);
  	rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2622
2623
2624
  	local_irq_restore(flags);
  
  	/* Invoke callbacks. */
  	rhp = rcu_cblist_dequeue(&rcl);
  	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
  		debug_rcu_head_unqueue(rhp);
  		if (__rcu_reclaim(rsp->name, rhp))
  			rcu_cblist_dequeued_lazy(&rcl);
  		/*
  		 * Stop only if limit reached and CPU has something to do.
  		 * Note: The rcl structure counts down from zero.
  		 */
4b27f20b4   Paul E. McKenney   rcu: Open-code th...
2634
  		if (-rcl.len >= bl &&
dff1672d9   Paul E. McKenney   rcu: Keep invokin...
2635
2636
  		    (need_resched() ||
  		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
  			break;
  	}
  
  	local_irq_save(flags);
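  	/* rcl.len ran below zero, once per callback just invoked. */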
  	count = -rcl.len;
8ef0f37ef   Paul E. McKenney   rcu: Open-code th...
2642
2643
  	trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
  			    is_idle_task(current), rcu_is_callbacks_kthread());
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2644

15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2645
2646
  	/* Update counts and requeue any remaining callbacks. */
  	rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2647
  	smp_mb(); /* List handling before counting for rcu_barrier(). */
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2648
  	rdp->n_cbs_invoked += count;
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2649
  	rcu_segcblist_insert_count(&rdp->cblist, &rcl);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2650
2651
  
  	/* Reinstate batch limit if we have worked down the excess. */
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2652
2653
  	count = rcu_segcblist_n_cbs(&rdp->cblist);
  	if (rdp->blimit == LONG_MAX && count <= qlowmark)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2654
  		rdp->blimit = blimit;
37c72e56f   Paul E. McKenney   rcu: Prevent RCU ...
2655
  	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2656
  	if (count == 0 && rdp->qlen_last_fqs_check != 0) {
37c72e56f   Paul E. McKenney   rcu: Prevent RCU ...
2657
2658
  		rdp->qlen_last_fqs_check = 0;
  		rdp->n_force_qs_snap = rsp->n_force_qs;
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2659
2660
2661
  	} else if (count < rdp->qlen_last_fqs_check - qhimark)
  		rdp->qlen_last_fqs_check = count;
  	WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));
37c72e56f   Paul E. McKenney   rcu: Prevent RCU ...
2662

64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2663
  	local_irq_restore(flags);
e0f23060a   Paul E. McKenney   rcu: Update comme...
2664
  	/* Re-invoke RCU core processing if there are callbacks remaining. */
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2665
  	if (rcu_segcblist_ready_cbs(&rdp->cblist))
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2666
  		invoke_rcu_core();
  }
  
  /*
   * Check to see if this CPU is in a non-context-switch quiescent state
   * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
e0f23060a   Paul E. McKenney   rcu: Update comme...
2672
   * Also schedule RCU core processing.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2673
   *
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
2674
   * This function must be called from hardirq context.  It is normally
5403d367a   Paul E. McKenney   rcu: Remove obsol...
2675
   * invoked from the scheduling-clock interrupt.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2676
   */
c3377c2da   Paul E. McKenney   rcu: Remove "cpu"...
2677
  void rcu_check_callbacks(int user)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2678
  {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2679
  	trace_rcu_utilization(TPS("Start scheduler-tick"));
a858af287   Paul E. McKenney   rcu: Print schedu...
2680
  	increment_cpu_stall_ticks();
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
2681
  	if (user || rcu_is_cpu_rrupt_from_idle()) {
  
  		/*
  		 * Get here if this CPU took its interrupt from user
  		 * mode or from the idle loop, and if this is not a
  		 * nested interrupt.  In this case, the CPU is in
d6714c22b   Paul E. McKenney   rcu: Renamings to...
2687
  		 * a quiescent state, so note it.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2688
2689
  		 *
  		 * No memory barrier is required here because both
d6714c22b   Paul E. McKenney   rcu: Renamings to...
2690
2691
2692
  		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
  		 * variables that other CPUs neither access nor modify,
  		 * at least not while the corresponding CPU is online.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2693
  		 */
284a8c93a   Paul E. McKenney   rcu: Per-CPU oper...
2694
2695
  		rcu_sched_qs();
  		rcu_bh_qs();
  
  	} else if (!in_softirq()) {
  
  		/*
  		 * Get here if this CPU did not take its interrupt from
  		 * softirq, in other words, if it is not interrupting
  		 * an rcu_bh read-side critical section.  This is an _bh
d6714c22b   Paul E. McKenney   rcu: Renamings to...
2703
  		 * critical section, so note it.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2704
  		 */
284a8c93a   Paul E. McKenney   rcu: Per-CPU oper...
2705
  		rcu_bh_qs();
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2706
  	}
86aea0e6e   Paul E. McKenney   rcu: Remove "cpu"...
2707
  	rcu_preempt_check_callbacks();
077506972   Paul E. McKenney   rcu: Make need_re...
  	/* The load-acquire pairs with the store-release setting to true. */
  	if (smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
  		/* Idle and userspace execution already are quiescent states. */
  		if (!rcu_is_cpu_rrupt_from_idle() && !user) {
  			set_tsk_need_resched(current);
  			set_preempt_need_resched();
  		}
  		__this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
  	}
e3950ecd5   Paul E. McKenney   rcu: Remove "cpu"...
2717
  	if (rcu_pending())
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2718
  		invoke_rcu_core();
8315f4229   Paul E. McKenney   rcu: Add call_rcu...
2719
2720
  	if (user)
  		rcu_note_voluntary_context_switch(current);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2721
  	trace_rcu_utilization(TPS("End scheduler-tick"));
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2722
  }
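
  /*
   * Illustrative sketch (not part of tree.c): rcu_check_callbacks() is
   * driven from the scheduling-clock interrupt; in the kernel that role
   * is played by update_process_times().  A caller looks roughly like
   * the hypothetical function below.
   */
  static void example_scheduler_tick_path(int user_tick)
  {
  	/* ... per-CPU time accounting and timer processing elided ... */
  	rcu_check_callbacks(user_tick);	/* note quiescent states, maybe raise RCU_SOFTIRQ */
  	/* ... scheduler_tick() and friends elided ... */
  }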
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2723
2724
2725
  /*
   * Scan the leaf rcu_node structures, processing dyntick state for any that
   * have not yet encountered a quiescent state, using the function specified.
27f4d2805   Paul E. McKenney   rcu: priority boo...
2726
2727
   * Also initiate boosting for any threads blocked on the root rcu_node.
   *
ee47eb9f4   Paul E. McKenney   rcu: Remove leg o...
2728
   * The caller must have suppressed start of new grace periods.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2729
   */
fe5ac724d   Paul E. McKenney   rcu: Remove nohz_...
2730
  static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2731
  {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2732
2733
2734
  	int cpu;
  	unsigned long flags;
  	unsigned long mask;
a0b6c9a78   Paul E. McKenney   rcu: Clean up cod...
2735
  	struct rcu_node *rnp;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2736

a0b6c9a78   Paul E. McKenney   rcu: Clean up cod...
2737
  	rcu_for_each_leaf_node(rsp, rnp) {
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
2738
  		cond_resched_rcu_qs();
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2739
  		mask = 0;
2a67e741b   Peter Zijlstra   rcu: Create trans...
2740
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
a0b6c9a78   Paul E. McKenney   rcu: Clean up cod...
2741
  		if (rnp->qsmask == 0) {
a77da14ce   Paul E. McKenney   rcu: Yet another ...
  			if (rcu_state_p == &rcu_sched_state ||
  			    rsp != rcu_state_p ||
  			    rcu_preempt_blocked_readers_cgp(rnp)) {
  				/*
  				 * No point in scanning bits because they
  				 * are all zero.  But we might need to
  				 * priority-boost blocked readers.
  				 */
  				rcu_initiate_boost(rnp, flags);
  				/* rcu_initiate_boost() releases rnp->lock */
  				continue;
  			}
  			if (rnp->parent &&
  			    (rnp->parent->qsmask & rnp->grpmask)) {
  				/*
  				 * Race between grace-period
  				 * initialization and task exiting RCU
  				 * read-side critical section: Report.
  				 */
  				rcu_report_unblock_qs_rnp(rsp, rnp, flags);
  				/* rcu_report_unblock_qs_rnp() releases rnp->lock */
  				continue;
  			}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2765
  		}
bc75e9998   Mark Rutland   rcu: Correctly ha...
2766
2767
  		for_each_leaf_node_possible_cpu(rnp, cpu) {
  			unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
0edd1b178   Paul E. McKenney   nohz_full: Add fu...
2768
  			if ((rnp->qsmask & bit) != 0) {
fe5ac724d   Paul E. McKenney   rcu: Remove nohz_...
2769
  				if (f(per_cpu_ptr(rsp->rda, cpu)))
0edd1b178   Paul E. McKenney   nohz_full: Add fu...
2770
2771
  					mask |= bit;
  			}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2772
  		}
45f014c52   Paul E. McKenney   rcu: Remove redun...
2773
  		if (mask != 0) {
654e95334   Paul E. McKenney   rcu: Associate qu...
2774
2775
  			/* Idle/offline CPUs, report (releases rnp->lock). */
  			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
0aa04b055   Paul E. McKenney   rcu: Process offl...
2776
2777
  		} else {
  			/* Nothing to do here, so just drop the lock. */
67c583a7d   Boqun Feng   RCU: Privatize rc...
2778
  			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2779
  		}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2780
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2781
2782
2783
2784
2785
2786
  }
  
  /*
   * Force quiescent states on reluctant CPUs, and also detect which
   * CPUs are in dyntick-idle mode.
   */
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2787
  static void force_quiescent_state(struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2788
2789
  {
  	unsigned long flags;
394f2769a   Paul E. McKenney   rcu: Prevent forc...
2790
2791
2792
2793
2794
  	bool ret;
  	struct rcu_node *rnp;
  	struct rcu_node *rnp_old = NULL;
  
  	/* Funnel through hierarchy to reduce memory contention. */
d860d4032   Shan Wei   rcu: Use __this_c...
2795
  	rnp = __this_cpu_read(rsp->rda->mynode);
394f2769a   Paul E. McKenney   rcu: Prevent forc...
2796
  	for (; rnp != NULL; rnp = rnp->parent) {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2797
  		ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
394f2769a   Paul E. McKenney   rcu: Prevent forc...
2798
2799
2800
2801
  		      !raw_spin_trylock(&rnp->fqslock);
  		if (rnp_old != NULL)
  			raw_spin_unlock(&rnp_old->fqslock);
  		if (ret) {
a792563bd   Paul E. McKenney   rcu: Eliminate re...
2802
  			rsp->n_force_qs_lh++;
394f2769a   Paul E. McKenney   rcu: Prevent forc...
2803
2804
2805
2806
2807
  			return;
  		}
  		rnp_old = rnp;
  	}
  	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2808

394f2769a   Paul E. McKenney   rcu: Prevent forc...
2809
  	/* Reached the root of the rcu_node tree, acquire lock. */
2a67e741b   Peter Zijlstra   rcu: Create trans...
2810
  	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
394f2769a   Paul E. McKenney   rcu: Prevent forc...
2811
  	raw_spin_unlock(&rnp_old->fqslock);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2812
  	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
a792563bd   Paul E. McKenney   rcu: Eliminate re...
2813
  		rsp->n_force_qs_lh++;
67c583a7d   Boqun Feng   RCU: Privatize rc...
2814
  		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2815
  		return;  /* Someone beat us to it. */
46a1e34ed   Paul E. McKenney   rcu: Make force_q...
2816
  	}
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2817
  	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2818
  	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
94d447767   Jisheng Zhang   rcu: Use rcu_gp_k...
2819
  	rcu_gp_kthread_wake(rsp);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2820
  }
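
  /*
   * Illustrative userspace sketch (not part of tree.c) of the funnel-
   * locking pattern used above: each caller try-locks its way from a
   * leaf toward the root and abandons the attempt as soon as a trylock
   * fails or the request flag is already set, because either condition
   * means some other caller is already funnelling the same request
   * upward.  All names and the two-leaf tree are made up; compile with
   * -pthread and drive request_fqs() from a test harness of your choosing.
   */
  #include <pthread.h>
  #include <stdbool.h>

  struct tnode {
  	pthread_mutex_t fqslock;
  	struct tnode *parent;
  };

  static struct tnode root = { PTHREAD_MUTEX_INITIALIZER, NULL };
  static struct tnode leaf[2] = {
  	{ PTHREAD_MUTEX_INITIALIZER, &root },
  	{ PTHREAD_MUTEX_INITIALIZER, &root },
  };
  static bool fqs_requested;	/* stands in for RCU_GP_FLAG_FQS */

  static void request_fqs(int cpu)
  {
  	struct tnode *tnp, *tnp_old = NULL;
  	bool beaten;

  	for (tnp = &leaf[cpu & 1]; tnp; tnp = tnp->parent) {
  		beaten = __atomic_load_n(&fqs_requested, __ATOMIC_ACQUIRE) ||
  			 pthread_mutex_trylock(&tnp->fqslock) != 0;
  		if (tnp_old)
  			pthread_mutex_unlock(&tnp_old->fqslock);
  		if (beaten)
  			return;		/* someone else will set the flag */
  		tnp_old = tnp;
  	}
  	/* Reached the root with its lock held: record the request. */
  	__atomic_store_n(&fqs_requested, true, __ATOMIC_RELEASE);
  	pthread_mutex_unlock(&tnp_old->fqslock);	/* tnp_old == &root here */
  }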
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2821
  /*
e0f23060a   Paul E. McKenney   rcu: Update comme...
2822
2823
2824
   * This does the RCU core processing work for the specified rcu_state
   * and rcu_data structures.  This may be called only from the CPU to
   * whom the rdp belongs.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2825
2826
   */
  static void
1bca8cf1a   Paul E. McKenney   rcu: Remove unnee...
2827
  __rcu_process_callbacks(struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2828
2829
  {
  	unsigned long flags;
48a7639ce   Paul E. McKenney   rcu: Make callers...
2830
  	bool needwake;
fa07a58f7   Christoph Lameter   rcu: Replace __th...
2831
  	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2832

50dc7def4   Nicholas Mc Guire   rcu: Use bool val...
2833
  	WARN_ON_ONCE(!rdp->beenonline);
2e5975580   Paul E. McKenney   rcu: Simplify RCU...
2834

64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2835
2836
2837
2838
  	/* Update RCU state based on any recent quiescent states. */
  	rcu_check_quiescent_state(rsp, rdp);
  
  	/* Does this CPU require a not-yet-started grace period? */
dc35c8934   Paul E. McKenney   rcu: Tag callback...
2839
  	local_irq_save(flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2840
  	if (cpu_needs_another_gp(rsp, rdp)) {
6cf100812   Paul E. McKenney   rcu: Add transiti...
2841
  		raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2842
  		needwake = rcu_start_gp(rsp);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2843
  		raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
48a7639ce   Paul E. McKenney   rcu: Make callers...
2844
2845
  		if (needwake)
  			rcu_gp_kthread_wake(rsp);
dc35c8934   Paul E. McKenney   rcu: Tag callback...
2846
2847
  	} else {
  		local_irq_restore(flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2848
2849
2850
  	}
  
  	/* If there are callbacks ready, invoke them. */
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2851
  	if (rcu_segcblist_ready_cbs(&rdp->cblist))
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2852
  		invoke_rcu_callbacks(rsp, rdp);
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2853
2854
2855
  
  	/* Do any needed deferred wakeups of rcuo kthreads. */
  	do_nocb_deferred_wakeup(rdp);
09223371d   Shaohua Li   rcu: Use softirq ...
2856
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2857
  /*
e0f23060a   Paul E. McKenney   rcu: Update comme...
2858
   * Do RCU core processing for the current CPU.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2859
   */
0766f788e   Emese Revfy   latent_entropy: M...
2860
  static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2861
  {
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
2862
  	struct rcu_state *rsp;
bfa00b4c4   Paul E. McKenney   rcu: Prevent offl...
2863
2864
  	if (cpu_is_offline(smp_processor_id()))
  		return;
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2865
  	trace_rcu_utilization(TPS("Start RCU core"));
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
2866
2867
  	for_each_rcu_flavor(rsp)
  		__rcu_process_callbacks(rsp);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2868
  	trace_rcu_utilization(TPS("End RCU core"));
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2869
  }
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
2870
  /*
e0f23060a   Paul E. McKenney   rcu: Update comme...
2871
2872
2873
   * Schedule RCU callback invocation.  If the specified type of RCU
   * does not support RCU priority boosting, just do a direct call,
   * otherwise wake up the per-CPU kernel kthread.  Note that because we
924df8a01   Paul E. McKenney   rcu: Fix invoke_r...
2874
   * are running on the current CPU with softirqs disabled, the
e0f23060a   Paul E. McKenney   rcu: Update comme...
2875
   * rcu_cpu_kthread_task cannot disappear out from under us.
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
2876
   */
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2877
  static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
2878
  {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2879
  	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
b0d304172   Paul E. McKenney   rcu: Prevent RCU ...
2880
  		return;
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2881
2882
  	if (likely(!rsp->boost)) {
  		rcu_do_batch(rsp, rdp);
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
2883
2884
  		return;
  	}
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2885
  	invoke_rcu_callbacks_kthread();
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
2886
  }
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2887
  static void invoke_rcu_core(void)
09223371d   Shaohua Li   rcu: Use softirq ...
2888
  {
b0f740360   Paul E. McKenney   rcu: Avoid invoki...
2889
2890
  	if (cpu_online(smp_processor_id()))
  		raise_softirq(RCU_SOFTIRQ);
09223371d   Shaohua Li   rcu: Use softirq ...
2891
  }
29154c57e   Paul E. McKenney   rcu: Split RCU co...
2892
2893
2894
2895
2896
  /*
   * Handle any core-RCU processing required by a call_rcu() invocation.
   */
  static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
  			    struct rcu_head *head, unsigned long flags)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2897
  {
48a7639ce   Paul E. McKenney   rcu: Make callers...
2898
  	bool needwake;
62fde6edf   Paul E. McKenney   rcu: Make __call_...
2899
2900
2901
2902
  	/*
  	 * If called from an extended quiescent state, invoke the RCU
  	 * core in order to force a re-evaluation of RCU's idleness.
  	 */
9910affa8   Yao Dongdong   rcu: Remove redun...
2903
  	if (!rcu_is_watching())
62fde6edf   Paul E. McKenney   rcu: Make __call_...
2904
  		invoke_rcu_core();
a16b7a693   Paul E. McKenney   rcu: Prevent __ca...
2905
  	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
29154c57e   Paul E. McKenney   rcu: Split RCU co...
2906
  	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2655d57ef   Paul E. McKenney   rcu: prevent call...
2907
  		return;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2908

37c72e56f   Paul E. McKenney   rcu: Prevent RCU ...
2909
2910
2911
2912
2913
2914
2915
  	/*
  	 * Force the grace period if too many callbacks or too long waiting.
  	 * Enforce hysteresis, and don't invoke force_quiescent_state()
  	 * if some other CPU has recently done so.  Also, don't bother
  	 * invoking force_quiescent_state() if the newly enqueued callback
  	 * is the only one waiting for a grace period to complete.
  	 */
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2916
2917
  	if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
  		     rdp->qlen_last_fqs_check + qhimark)) {
b52573d27   Paul E. McKenney   rcu: reduce __cal...
2918
2919
  
  		/* Are we ignoring a completed grace period? */
470716fc0   Paul E. McKenney   rcu: Switch calle...
2920
  		note_gp_changes(rsp, rdp);
b52573d27   Paul E. McKenney   rcu: reduce __cal...
2921
2922
2923
  
  		/* Start a new grace period if one not already started. */
  		if (!rcu_gp_in_progress(rsp)) {
b52573d27   Paul E. McKenney   rcu: reduce __cal...
2924
  			struct rcu_node *rnp_root = rcu_get_root(rsp);
2a67e741b   Peter Zijlstra   rcu: Create trans...
2925
  			raw_spin_lock_rcu_node(rnp_root);
48a7639ce   Paul E. McKenney   rcu: Make callers...
2926
  			needwake = rcu_start_gp(rsp);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2927
  			raw_spin_unlock_rcu_node(rnp_root);
48a7639ce   Paul E. McKenney   rcu: Make callers...
2928
2929
  			if (needwake)
  				rcu_gp_kthread_wake(rsp);
b52573d27   Paul E. McKenney   rcu: reduce __cal...
2930
2931
2932
2933
  		} else {
  			/* Give the grace period a kick. */
  			rdp->blimit = LONG_MAX;
  			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2934
  			    rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2935
  				force_quiescent_state(rsp);
b52573d27   Paul E. McKenney   rcu: reduce __cal...
2936
  			rdp->n_force_qs_snap = rsp->n_force_qs;
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2937
  			rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
b52573d27   Paul E. McKenney   rcu: reduce __cal...
2938
  		}
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2939
  	}
29154c57e   Paul E. McKenney   rcu: Split RCU co...
2940
  }
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2941
  /*
ae1501845   Paul E. McKenney   rcu: Make call_rc...
2942
2943
2944
2945
2946
2947
2948
   * RCU callback function to leak a callback.
   */
  static void rcu_leak_callback(struct rcu_head *rhp)
  {
  }
  
  /*
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2949
2950
2951
2952
2953
   * Helper function for call_rcu() and friends.  The cpu argument will
   * normally be -1, indicating "currently running CPU".  It may specify
   * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
   * is expected to specify a CPU.
   */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2954
  static void
b6a4ae766   Boqun Feng   rcu: Use rcu_call...
2955
  __call_rcu(struct rcu_head *head, rcu_callback_t func,
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2956
  	   struct rcu_state *rsp, int cpu, bool lazy)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2957
2958
2959
  {
  	unsigned long flags;
  	struct rcu_data *rdp;
b8f2ed538   Paul E. McKenney   rcu: Tighten up _...
2960
2961
  	/* Misaligned rcu_head! */
  	WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
ae1501845   Paul E. McKenney   rcu: Make call_rc...
2962
  	if (debug_rcu_head_queue(head)) {
fa3c66476   Paul E. McKenney   rcu: Improve __ca...
  		/*
  		 * Probable double call_rcu(), so leak the callback.
  		 * Use rcu:rcu_callback trace event to find the previous
  		 * time callback was passed to __call_rcu().
  		 */
  		WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n",
  			  head, head->func);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2971
  		WRITE_ONCE(head->func, rcu_leak_callback);
ae1501845   Paul E. McKenney   rcu: Make call_rc...
2972
2973
  		return;
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2974
2975
  	head->func = func;
  	head->next = NULL;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2976
  	local_irq_save(flags);
394f99a90   Lai Jiangshan   rcu: simplify the...
2977
  	rdp = this_cpu_ptr(rsp->rda);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2978
2979
  
  	/* Add the callback to our list. */
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2980
  	if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2981
2982
2983
2984
  		int offline;
  
  		if (cpu != -1)
  			rdp = per_cpu_ptr(rsp->rda, cpu);
143da9c2f   Paul E. McKenney   rcu: Prevent earl...
  		if (likely(rdp->mynode)) {
  			/* Post-boot, so this should be for a no-CBs CPU. */
  			offline = !__call_rcu_nocb(rdp, head, lazy, flags);
  			WARN_ON_ONCE(offline);
  			/* Offline CPU, _call_rcu() illegal, leak callback.  */
  			local_irq_restore(flags);
  			return;
  		}
  		/*
  		 * Very early boot, before rcu_init().  Initialize if needed
  		 * and then drop through to queue the callback.
  		 */
  		BUG_ON(cpu != -1);
34404ca8f   Paul E. McKenney   rcu: Move early-b...
2998
  		WARN_ON_ONCE(!rcu_is_watching());
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
2999
3000
  		if (rcu_segcblist_empty(&rdp->cblist))
  			rcu_segcblist_init(&rdp->cblist);
0d8ee37e2   Paul E. McKenney   rcu: Disallow cal...
3001
  	}
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
3002
3003
  	rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
  	if (!lazy)
c57afe80d   Paul E. McKenney   rcu: Make RCU_FAS...
3004
  		rcu_idle_count_callbacks_posted();
2655d57ef   Paul E. McKenney   rcu: prevent call...
3005

d4c08f2ac   Paul E. McKenney   rcu: Add grace-pe...
3006
3007
  	if (__is_kfree_rcu_offset((unsigned long)func))
  		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
3008
3009
  					 rcu_segcblist_n_lazy_cbs(&rdp->cblist),
  					 rcu_segcblist_n_cbs(&rdp->cblist));
d4c08f2ac   Paul E. McKenney   rcu: Add grace-pe...
3010
  	else
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
3011
3012
3013
  		trace_rcu_callback(rsp->name, head,
  				   rcu_segcblist_n_lazy_cbs(&rdp->cblist),
  				   rcu_segcblist_n_cbs(&rdp->cblist));
d4c08f2ac   Paul E. McKenney   rcu: Add grace-pe...
3014

29154c57e   Paul E. McKenney   rcu: Split RCU co...
3015
3016
  	/* Go handle any RCU core processing required. */
  	__call_rcu_core(rsp, rdp, head, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3017
3018
  	local_irq_restore(flags);
  }
a68a2bb28   Paul E. McKenney   rcu: Move docbook...
  /**
   * call_rcu_sched() - Queue an RCU callback for invocation after sched grace period.
   * @head: structure to be used for queueing the RCU updates.
   * @func: actual callback function to be invoked after the grace period
   *
   * The callback function will be invoked some time after a full grace
   * period elapses, in other words after all currently executing RCU
   * read-side critical sections have completed. call_rcu_sched() assumes
   * that the read-side critical sections end on enabling of preemption
   * or on voluntary preemption.
27fdb35fe   Paul E. McKenney   doc: Fix various ...
3029
3030
3031
3032
   * RCU read-side critical sections are delimited by:
   *
   * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
   * - anything that disables preemption.
a68a2bb28   Paul E. McKenney   rcu: Move docbook...
3033
3034
3035
3036
3037
   *
   *  These may be nested.
   *
   * See the description of call_rcu() for more detailed information on
   * memory ordering guarantees.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3038
   */
b6a4ae766   Boqun Feng   rcu: Use rcu_call...
3039
  void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3040
  {
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3041
  	__call_rcu(head, func, &rcu_sched_state, -1, 0);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3042
  }
d6714c22b   Paul E. McKenney   rcu: Renamings to...
3043
  EXPORT_SYMBOL_GPL(call_rcu_sched);
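
  /*
   * Illustrative usage sketch (not part of tree.c): a hypothetical user
   * unlinks an element whose readers run with preemption disabled, then
   * lets call_rcu_sched() defer the kfree() until they have all finished.
   * The structure and functions below are made up for the example.
   */
  struct foo {
  	struct list_head list;
  	int data;
  	struct rcu_head rcu;
  };

  static void foo_reclaim(struct rcu_head *rhp)
  {
  	kfree(container_of(rhp, struct foo, rcu));
  }

  static void foo_remove(struct foo *fp)	/* caller holds the update-side lock */
  {
  	list_del_rcu(&fp->list);		/* new readers can no longer find fp */
  	call_rcu_sched(&fp->rcu, foo_reclaim);	/* free once all sched readers are done */
  }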
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3044

a68a2bb28   Paul E. McKenney   rcu: Move docbook...
  /**
   * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
   * @head: structure to be used for queueing the RCU updates.
   * @func: actual callback function to be invoked after the grace period
   *
   * The callback function will be invoked some time after a full grace
   * period elapses, in other words after all currently executing RCU
   * read-side critical sections have completed. call_rcu_bh() assumes
   * that the read-side critical sections end on completion of a softirq
   * handler. This means that read-side critical sections in process
   * context must not be interrupted by softirqs. This interface is to be
   * used when most of the read-side critical sections are in softirq context.
27fdb35fe   Paul E. McKenney   doc: Fix various ...
3057
3058
3059
3060
3061
3062
   * RCU read-side critical sections are delimited by:
   *
   * - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context, OR
   * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
   *
   * These may be nested.
a68a2bb28   Paul E. McKenney   rcu: Move docbook...
3063
3064
3065
   *
   * See the description of call_rcu() for more detailed information on
   * memory ordering guarantees.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3066
   */
b6a4ae766   Boqun Feng   rcu: Use rcu_call...
3067
  void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3068
  {
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3069
  	__call_rcu(head, func, &rcu_bh_state, -1, 0);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3070
3071
  }
  EXPORT_SYMBOL_GPL(call_rcu_bh);
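
  /*
   * Illustrative usage sketch (not part of tree.c): an updater whose
   * readers run under rcu_read_lock_bh(), for example from softirq
   * handlers.  All identifiers here are hypothetical.
   */
  struct bar {
  	int data;
  	struct rcu_head rcu;
  };

  static struct bar __rcu *global_bar;
  static DEFINE_SPINLOCK(bar_lock);

  static int bar_read(void)			/* reader: may run in softirq */
  {
  	struct bar *p;
  	int val = -1;

  	rcu_read_lock_bh();
  	p = rcu_dereference_bh(global_bar);
  	if (p)
  		val = p->data;
  	rcu_read_unlock_bh();
  	return val;
  }

  static void bar_free_cb(struct rcu_head *rhp)
  {
  	kfree(container_of(rhp, struct bar, rcu));
  }

  static void bar_replace(struct bar *newp)	/* updater */
  {
  	struct bar *oldp;

  	spin_lock(&bar_lock);
  	oldp = rcu_dereference_protected(global_bar,
  					 lockdep_is_held(&bar_lock));
  	rcu_assign_pointer(global_bar, newp);
  	spin_unlock(&bar_lock);
  	if (oldp)
  		call_rcu_bh(&oldp->rcu, bar_free_cb);	/* wait for _bh readers, then free */
  }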
6d8133919   Paul E. McKenney   rcu: Document why...
3072
  /*
495aa969d   Andreea-Cristina Bernat   rcu: Consolidate ...
3073
3074
3075
3076
3077
3078
3079
   * Queue an RCU callback for lazy invocation after a grace period.
   * This will likely be later named something like "call_rcu_lazy()",
   * but this change will require some way of tagging the lazy RCU
   * callbacks in the list of pending callbacks. Until then, this
   * function may only be called from __kfree_rcu().
   */
  void kfree_call_rcu(struct rcu_head *head,
b6a4ae766   Boqun Feng   rcu: Use rcu_call...
3080
  		    rcu_callback_t func)
495aa969d   Andreea-Cristina Bernat   rcu: Consolidate ...
3081
  {
e534165bb   Uma Sharma   rcu: Variable nam...
3082
  	__call_rcu(head, func, rcu_state_p, -1, 1);
495aa969d   Andreea-Cristina Bernat   rcu: Consolidate ...
3083
3084
3085
3086
  }
  EXPORT_SYMBOL_GPL(kfree_call_rcu);
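
  /*
   * Illustrative usage sketch (not part of tree.c): kfree_call_rcu() is
   * normally reached via the kfree_rcu() wrapper, which needs only the
   * name of the rcu_head field inside the structure being freed.  The
   * structure and function below are hypothetical.
   */
  struct baz {
  	struct list_head list;
  	struct rcu_head rcu;
  };

  static void baz_del(struct baz *p)	/* caller holds the update-side lock */
  {
  	list_del_rcu(&p->list);
  	kfree_rcu(p, rcu);	/* lazily queues the free via kfree_call_rcu() */
  }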
  
  /*
6d8133919   Paul E. McKenney   rcu: Document why...
3087
3088
3089
3090
3091
3092
3093
   * Because a context switch is a grace period for RCU-sched and RCU-bh,
   * any blocking grace-period wait automatically implies a grace period
   * if there is only one CPU online at any point in time during execution
   * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
   * occasionally incorrectly indicate that there are multiple CPUs online
   * when there was in fact only one the whole time, as this just adds
   * some overhead: RCU still operates correctly.
6d8133919   Paul E. McKenney   rcu: Document why...
3094
3095
3096
   */
  static inline int rcu_blocking_is_gp(void)
  {
95f0c1de3   Paul E. McKenney   rcu: Disable pree...
3097
  	int ret;
6d8133919   Paul E. McKenney   rcu: Document why...
3098
  	might_sleep();  /* Check for RCU read-side critical section. */
95f0c1de3   Paul E. McKenney   rcu: Disable pree...
3099
3100
3101
3102
  	preempt_disable();
  	ret = num_online_cpus() <= 1;
  	preempt_enable();
  	return ret;
6d8133919   Paul E. McKenney   rcu: Document why...
3103
  }
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3104
  /**
   * synchronize_sched - wait until an rcu-sched grace period has elapsed.
   *
   * Control will return to the caller some time after a full rcu-sched
   * grace period has elapsed, in other words after all currently executing
   * rcu-sched read-side critical sections have completed.   These read-side
   * critical sections are delimited by rcu_read_lock_sched() and
   * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
   * local_irq_disable(), and so on may be used in place of
   * rcu_read_lock_sched().
   *
   * This means that all preempt_disable code sequences, including NMI and
f0a0e6f28   Paul E. McKenney   rcu: Clarify memo...
3116
   * non-threaded hardware-interrupt handlers, in progress on entry will
   * have completed before this primitive returns.  However, this does not
   * guarantee that softirq handlers will have completed, since in some
   * kernels, these handlers can run in process context, and can block.
   *
   * Note that this guarantee implies further memory-ordering guarantees.
   * On systems with more than one CPU, when synchronize_sched() returns,
   * each CPU is guaranteed to have executed a full memory barrier since the
   * end of its last RCU-sched read-side critical section whose beginning
   * preceded the call to synchronize_sched().  In addition, each CPU having
   * an RCU read-side critical section that extends beyond the return from
   * synchronize_sched() is guaranteed to have executed a full memory barrier
   * after the beginning of synchronize_sched() and before the beginning of
   * that RCU read-side critical section.  Note that these guarantees include
   * CPUs that are offline, idle, or executing in user mode, as well as CPUs
   * that are executing in the kernel.
   *
   * Furthermore, if CPU A invoked synchronize_sched(), which returned
   * to its caller on CPU B, then both CPU A and CPU B are guaranteed
   * to have executed a full memory barrier during the execution of
   * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
   * again only if the system has more than one CPU).
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3138
3139
3140
   */
  void synchronize_sched(void)
  {
f78f5b90c   Paul E. McKenney   rcu: Rename rcu_l...
3141
3142
3143
3144
  	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
  			 lock_is_held(&rcu_lock_map) ||
  			 lock_is_held(&rcu_sched_lock_map),
  			 "Illegal synchronize_sched() in RCU-sched read-side critical section");
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3145
3146
  	if (rcu_blocking_is_gp())
  		return;
5afff48bd   Paul E. McKenney   rcu: Update from ...
3147
  	if (rcu_gp_is_expedited())
3705b88db   Antti P Miettinen   rcu: Add a module...
3148
3149
3150
  		synchronize_sched_expedited();
  	else
  		wait_rcu_gp(call_rcu_sched);
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
  }
  EXPORT_SYMBOL_GPL(synchronize_sched);
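
  /*
   * Illustrative usage sketch (not part of tree.c): the classic
   * remove / wait / reclaim sequence, where readers rely only on
   * preemption being disabled (or on rcu_read_lock_sched()).  It reuses
   * the hypothetical struct foo from the call_rcu_sched() sketch above.
   */
  static void foo_del_and_free(struct foo *fp)	/* caller holds the update-side lock */
  {
  	list_del_rcu(&fp->list);	/* new readers can no longer find fp */
  	synchronize_sched();		/* wait for pre-existing readers to finish */
  	kfree(fp);			/* no reader can still hold a reference */
  }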
  
  /**
   * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
   *
   * Control will return to the caller some time after a full rcu_bh grace
   * period has elapsed, in other words after all currently executing rcu_bh
   * read-side critical sections have completed.  RCU read-side critical
   * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
   * and may be nested.
f0a0e6f28   Paul E. McKenney   rcu: Clarify memo...
3162
3163
3164
   *
   * See the description of synchronize_sched() for more detailed information
   * on memory ordering guarantees.
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3165
3166
3167
   */
  void synchronize_rcu_bh(void)
  {
f78f5b90c   Paul E. McKenney   rcu: Rename rcu_l...
3168
3169
3170
3171
  	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
  			 lock_is_held(&rcu_lock_map) ||
  			 lock_is_held(&rcu_sched_lock_map),
  			 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3172
3173
  	if (rcu_blocking_is_gp())
  		return;
5afff48bd   Paul E. McKenney   rcu: Update from ...
3174
  	if (rcu_gp_is_expedited())
3705b88db   Antti P Miettinen   rcu: Add a module...
3175
3176
3177
  		synchronize_rcu_bh_expedited();
  	else
  		wait_rcu_gp(call_rcu_bh);
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3178
3179
  }
  EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
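
  /*
   * Illustrative usage sketch (not part of tree.c): the synchronous
   * counterpart of the call_rcu_bh() example above, suitable for slow
   * paths such as teardown where blocking is acceptable.  It reuses the
   * hypothetical global_bar and bar_lock from that sketch.
   */
  static void bar_teardown(void)
  {
  	struct bar *oldp;

  	spin_lock(&bar_lock);
  	oldp = rcu_dereference_protected(global_bar,
  					 lockdep_is_held(&bar_lock));
  	rcu_assign_pointer(global_bar, NULL);
  	spin_unlock(&bar_lock);
  	synchronize_rcu_bh();	/* all rcu_read_lock_bh() readers are done with oldp */
  	kfree(oldp);
  }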
765a3f4fe   Paul E. McKenney   rcu: Provide grac...
  /**
   * get_state_synchronize_rcu - Snapshot current RCU state
   *
   * Returns a cookie that is used by a later call to cond_synchronize_rcu()
   * to determine whether or not a full grace period has elapsed in the
   * meantime.
   */
  unsigned long get_state_synchronize_rcu(void)
  {
  	/*
  	 * Any prior manipulation of RCU-protected data must happen
  	 * before the load from ->gpnum.
  	 */
  	smp_mb();  /* ^^^ */
  
  	/*
  	 * Make sure this load happens before the purportedly
  	 * time-consuming work between get_state_synchronize_rcu()
  	 * and cond_synchronize_rcu().
  	 */
e534165bb   Uma Sharma   rcu: Variable nam...
3200
  	return smp_load_acquire(&rcu_state_p->gpnum);
765a3f4fe   Paul E. McKenney   rcu: Provide grac...
  }
  EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
  
  /**
   * cond_synchronize_rcu - Conditionally wait for an RCU grace period
   *
   * @oldstate: return value from earlier call to get_state_synchronize_rcu()
   *
   * If a full RCU grace period has elapsed since the earlier call to
   * get_state_synchronize_rcu(), just return.  Otherwise, invoke
   * synchronize_rcu() to wait for a full grace period.
   *
   * Yes, this function does not take counter wrap into account.  But
   * counter wrap is harmless.  If the counter wraps, we have waited for
   * more than 2 billion grace periods (and way more on a 64-bit system!),
   * so waiting for one additional grace period should be just fine.
   */
  void cond_synchronize_rcu(unsigned long oldstate)
  {
  	unsigned long newstate;
  
  	/*
  	 * Ensure that this load happens before any RCU-destructive
  	 * actions the caller might carry out after we return.
  	 */
e534165bb   Uma Sharma   rcu: Variable nam...
3226
  	newstate = smp_load_acquire(&rcu_state_p->completed);
765a3f4fe   Paul E. McKenney   rcu: Provide grac...
3227
3228
3229
3230
  	if (ULONG_CMP_GE(oldstate, newstate))
  		synchronize_rcu();
  }
  EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
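
  /*
   * Illustrative usage sketch (not part of tree.c): snapshot the grace-
   * period state, do other lengthy work, then wait only if no grace
   * period elapsed in the meantime.  The structure and helper names are
   * hypothetical.
   */
  static void foo_slow_update(struct foo_table *t)
  {
  	unsigned long gp_snap;

  	gp_snap = get_state_synchronize_rcu();	/* snapshot before the slow part */
  	foo_rebuild_shadow_copy(t);		/* hypothetical time-consuming work */
  	cond_synchronize_rcu(gp_snap);		/* usually a no-op by now */
  	foo_free_old_entries(t);		/* hypothetical reclamation */
  }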
24560056d   Paul E. McKenney   rcu: Add RCU-sche...
  /**
   * get_state_synchronize_sched - Snapshot current RCU-sched state
   *
   * Returns a cookie that is used by a later call to cond_synchronize_sched()
   * to determine whether or not a full grace period has elapsed in the
   * meantime.
   */
  unsigned long get_state_synchronize_sched(void)
  {
  	/*
  	 * Any prior manipulation of RCU-protected data must happen
  	 * before the load from ->gpnum.
  	 */
  	smp_mb();  /* ^^^ */
  
  	/*
  	 * Make sure this load happens before the purportedly
  	 * time-consuming work between get_state_synchronize_sched()
  	 * and cond_synchronize_sched().
  	 */
  	return smp_load_acquire(&rcu_sched_state.gpnum);
  }
  EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
  
  /**
   * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
   *
   * @oldstate: return value from earlier call to get_state_synchronize_sched()
   *
   * If a full RCU-sched grace period has elapsed since the earlier call to
   * get_state_synchronize_sched(), just return.  Otherwise, invoke
   * synchronize_sched() to wait for a full grace period.
   *
   * Yes, this function does not take counter wrap into account.  But
   * counter wrap is harmless.  If the counter wraps, we have waited for
   * more than 2 billion grace periods (and way more on a 64-bit system!),
   * so waiting for one additional grace period should be just fine.
   */
  void cond_synchronize_sched(unsigned long oldstate)
  {
  	unsigned long newstate;
  
  	/*
  	 * Ensure that this load happens before any RCU-destructive
  	 * actions the caller might carry out after we return.
  	 */
  	newstate = smp_load_acquire(&rcu_sched_state.completed);
  	if (ULONG_CMP_GE(oldstate, newstate))
  		synchronize_sched();
  }
  EXPORT_SYMBOL_GPL(cond_synchronize_sched);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
  /*
   * Check to see if there is any immediate RCU-related work to be done
   * by the current CPU, for the specified type of RCU, returning 1 if so.
   * The checks are in order of increasing expense: checks that can be
   * carried out against CPU-local state are performed first.  However,
   * we must check for CPU stalls first, else we might not get a chance.
   */
  static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  {
2f51f9884   Paul E. McKenney   rcu: Eliminate __...
3291
  	struct rcu_node *rnp = rdp->mynode;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3292
3293
3294
3295
  	rdp->n_rcu_pending++;
  
  	/* Check for CPU stalls, if enabled. */
  	check_cpu_stall(rsp, rdp);
a096932f0   Paul E. McKenney   rcu: Don't activa...
3296
3297
3298
  	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
  	if (rcu_nohz_full_cpu(rsp))
  		return 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3299
  	/* Is the RCU core waiting for a quiescent state from this CPU? */
5c51dd734   Paul E. McKenney   rcu: Prevent earl...
3300
  	if (rcu_scheduler_fully_active &&
5b74c4589   Paul E. McKenney   rcu: Make ->cpu_n...
3301
  	    rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
9577df9a3   Paul E. McKenney   rcu: Pull rcu_qs_...
3302
  	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_dynticks.rcu_qs_ctr)) {
97c668b8e   Paul E. McKenney   rcu: Rename qs_pe...
3303
  		rdp->n_rp_core_needs_qs++;
3a19b46a5   Paul E. McKenney   rcu: Check cond_r...
3304
  	} else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) {
d21670aca   Paul E. McKenney   rcu: reduce the n...
3305
  		rdp->n_rp_report_qs++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3306
  		return 1;
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3307
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3308
3309
  
  	/* Does this CPU have callbacks ready to invoke? */
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
3310
  	if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3311
  		rdp->n_rp_cb_ready++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3312
  		return 1;
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3313
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3314
3315
  
  	/* Has RCU gone idle with this CPU needing another grace period? */
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3316
3317
  	if (cpu_needs_another_gp(rsp, rdp)) {
  		rdp->n_rp_cpu_needs_gp++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3318
  		return 1;
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3319
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3320
3321
  
  	/* Has another RCU grace period completed?  */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
3322
  	if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3323
  		rdp->n_rp_gp_completed++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3324
  		return 1;
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3325
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3326
3327
  
  	/* Has a new RCU grace period started? */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
3328
3329
  	if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
  	    unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3330
  		rdp->n_rp_gp_started++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3331
  		return 1;
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3332
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3333

96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
3334
3335
3336
3337
3338
  	/* Does this CPU need a deferred NOCB wakeup? */
  	if (rcu_nocb_need_deferred_wakeup(rdp)) {
  		rdp->n_rp_nocb_defer_wakeup++;
  		return 1;
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3339
  	/* nothing to do */
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3340
  	rdp->n_rp_need_nothing++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3341
3342
3343
3344
3345
3346
3347
3348
  	return 0;
  }
  
  /*
   * Check to see if there is any immediate RCU-related work to be done
   * by the current CPU, returning 1 if so.  This function is part of the
   * RCU implementation; it is -not- an exported member of the RCU API.
   */
e3950ecd5   Paul E. McKenney   rcu: Remove "cpu"...
3349
  static int rcu_pending(void)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3350
  {
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
3351
3352
3353
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp)
e3950ecd5   Paul E. McKenney   rcu: Remove "cpu"...
3354
  		if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
3355
3356
  			return 1;
  	return 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3357
3358
3359
  }
  
  /*
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
3360
3361
3362
   * Return true if the specified CPU has any callback.  If all_lazy is
   * non-NULL, store an indication of whether all callbacks are lazy.
   * (If there are no callbacks, all of them are deemed to be lazy.)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3363
   */
82072c4fc   Nicholas Mc Guire   rcu: Change funct...
3364
  static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3365
  {
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
3366
3367
3368
  	bool al = true;
  	bool hc = false;
  	struct rcu_data *rdp;
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
3369
  	struct rcu_state *rsp;
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
3370
  	for_each_rcu_flavor(rsp) {
aa6da5140   Paul E. McKenney   rcu: Remove "cpu"...
3371
  		rdp = this_cpu_ptr(rsp->rda);
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
3372
  		if (rcu_segcblist_empty(&rdp->cblist))
69c8d28c9   Paul E. McKenney   rcu: Micro-optimi...
3373
3374
  			continue;
  		hc = true;
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
3375
  		if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
3376
  			al = false;
69c8d28c9   Paul E. McKenney   rcu: Micro-optimi...
3377
3378
  			break;
  		}
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
3379
3380
3381
3382
  	}
  	if (all_lazy)
  		*all_lazy = al;
  	return hc;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3383
  }
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3384
  /*
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3385
3386
3387
   * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
   * the compiler is expected to optimize this away.
   */
e66c33d57   Steven Rostedt (Red Hat)   rcu: Add const an...
3388
  static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3389
3390
3391
3392
3393
3394
3395
  			       int cpu, unsigned long done)
  {
  	trace_rcu_barrier(rsp->name, s, cpu,
  			  atomic_read(&rsp->barrier_cpu_count), done);
  }
  
  /*
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3396
3397
3398
   * RCU callback function for _rcu_barrier().  If we are last, wake
   * up the task executing _rcu_barrier().
   */
24ebbca8e   Paul E. McKenney   rcu: Move rcu_bar...
3399
  static void rcu_barrier_callback(struct rcu_head *rhp)
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3400
  {
24ebbca8e   Paul E. McKenney   rcu: Move rcu_bar...
3401
3402
  	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
  	struct rcu_state *rsp = rdp->rsp;
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3403
  	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3404
3405
  		_rcu_barrier_trace(rsp, TPS("LastCB"), -1,
  				   rsp->barrier_sequence);
7db74df88   Paul E. McKenney   rcu: Move rcu_bar...
3406
  		complete(&rsp->barrier_completion);
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3407
  	} else {
d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3408
  		_rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3409
  	}
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3410
3411
3412
3413
3414
3415
3416
  }
  
  /*
   * Called with preemption disabled, and from cross-cpu IRQ context.
   */
  static void rcu_barrier_func(void *type)
  {
037b64ed0   Paul E. McKenney   rcu: Place pointe...
3417
  	struct rcu_state *rsp = type;
fa07a58f7   Christoph Lameter   rcu: Replace __th...
3418
  	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3419

d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3420
  	_rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
f92c734f0   Paul E. McKenney   rcu: Prevent rcu_...
3421
3422
3423
3424
3425
3426
  	rdp->barrier_head.func = rcu_barrier_callback;
  	debug_rcu_head_queue(&rdp->barrier_head);
  	if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
  		atomic_inc(&rsp->barrier_cpu_count);
  	} else {
  		debug_rcu_head_unqueue(&rdp->barrier_head);
d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3427
3428
  		_rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
  				   rsp->barrier_sequence);
f92c734f0   Paul E. McKenney   rcu: Prevent rcu_...
3429
  	}
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3430
  }
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3431
3432
3433
3434
  /*
   * Orchestrate the specified type of RCU barrier, waiting for all
   * RCU callbacks of the specified type to complete.
   */
037b64ed0   Paul E. McKenney   rcu: Place pointe...
3435
  static void _rcu_barrier(struct rcu_state *rsp)
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3436
  {
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3437
  	int cpu;
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3438
  	struct rcu_data *rdp;
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3439
  	unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3440

d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3441
  	_rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3442

e74f4c456   Paul E. McKenney   rcu: Make hot-unp...
3443
  	/* Take mutex to serialize concurrent rcu_barrier() requests. */
7be7f0be9   Paul E. McKenney   rcu: Move rcu_bar...
3444
  	mutex_lock(&rsp->barrier_mutex);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3445

4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3446
3447
  	/* Did someone else do our work for us? */
  	if (rcu_seq_done(&rsp->barrier_sequence, s)) {
d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3448
3449
  		_rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
  				   rsp->barrier_sequence);
cf3a9c484   Paul E. McKenney   rcu: Increase rcu...
3450
3451
3452
3453
  		smp_mb(); /* caller's subsequent code after above check. */
  		mutex_unlock(&rsp->barrier_mutex);
  		return;
  	}
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3454
3455
  	/* Mark the start of the barrier operation. */
  	rcu_seq_start(&rsp->barrier_sequence);
d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3456
  	_rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3457

d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3458
  	/*
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3459
3460
  	 * Initialize the count to one rather than to zero in order to
  	 * avoid a too-soon return to zero in case of a short grace period
1331e7a1b   Paul E. McKenney   rcu: Remove _rcu_...
3461
3462
  	 * (or preemption of this task).  Exclude CPU-hotplug operations
  	 * to ensure that no offline CPU has callbacks queued.
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3463
  	 */
7db74df88   Paul E. McKenney   rcu: Move rcu_bar...
3464
  	init_completion(&rsp->barrier_completion);
24ebbca8e   Paul E. McKenney   rcu: Move rcu_bar...
3465
  	atomic_set(&rsp->barrier_cpu_count, 1);
1331e7a1b   Paul E. McKenney   rcu: Remove _rcu_...
3466
  	get_online_cpus();
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3467
3468
  
  	/*
1331e7a1b   Paul E. McKenney   rcu: Remove _rcu_...
3469
3470
3471
  	 * Force each CPU with callbacks to register a new callback.
  	 * When that callback is invoked, we will know that all of the
  	 * corresponding CPU's preceding callbacks have been invoked.
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3472
  	 */
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3473
  	for_each_possible_cpu(cpu) {
d1e43fa5f   Frederic Weisbecker   nohz: Ensure full...
3474
  		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3475
  			continue;
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3476
  		rdp = per_cpu_ptr(rsp->rda, cpu);
d1e43fa5f   Frederic Weisbecker   nohz: Ensure full...
3477
  		if (rcu_is_nocb_cpu(cpu)) {
d7e299339   Paul E. McKenney   rcu: Make rcu_bar...
3478
  			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3479
  				_rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3480
  						   rsp->barrier_sequence);
d7e299339   Paul E. McKenney   rcu: Make rcu_bar...
3481
  			} else {
d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3482
  				_rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3483
  						   rsp->barrier_sequence);
41050a009   Paul E. McKenney   rcu: Fix rcu_barr...
3484
  				smp_mb__before_atomic();
d7e299339   Paul E. McKenney   rcu: Make rcu_bar...
3485
3486
3487
3488
  				atomic_inc(&rsp->barrier_cpu_count);
  				__call_rcu(&rdp->barrier_head,
  					   rcu_barrier_callback, rsp, cpu, 0);
  			}
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
3489
  		} else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3490
  			_rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3491
  					   rsp->barrier_sequence);
037b64ed0   Paul E. McKenney   rcu: Place pointe...
3492
  			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3493
  		} else {
d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3494
  			_rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3495
  					   rsp->barrier_sequence);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3496
3497
  		}
  	}
1331e7a1b   Paul E. McKenney   rcu: Remove _rcu_...
3498
  	put_online_cpus();
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3499
3500
3501
3502
3503
  
  	/*
  	 * Now that we have an rcu_barrier_callback() callback on each
  	 * CPU, and thus each counted, remove the initial count.
  	 */
24ebbca8e   Paul E. McKenney   rcu: Move rcu_bar...
3504
  	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
7db74df88   Paul E. McKenney   rcu: Move rcu_bar...
3505
  		complete(&rsp->barrier_completion);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3506
3507
  
  	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
7db74df88   Paul E. McKenney   rcu: Move rcu_bar...
3508
  	wait_for_completion(&rsp->barrier_completion);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3509

4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3510
  	/* Mark the end of the barrier operation. */
d8db2e86d   Paul E. McKenney   rcu: Add TPS() pr...
3511
  	_rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3512
  	rcu_seq_end(&rsp->barrier_sequence);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3513
  	/* Other rcu_barrier() invocations can now safely proceed. */
7be7f0be9   Paul E. McKenney   rcu: Move rcu_bar...
3514
  	mutex_unlock(&rsp->barrier_mutex);
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3515
  }
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3516
3517
3518
3519
3520
3521
  
  /**
   * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
   */
  void rcu_barrier_bh(void)
  {
037b64ed0   Paul E. McKenney   rcu: Place pointe...
3522
  	_rcu_barrier(&rcu_bh_state);
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3523
3524
3525
3526
3527
3528
3529
3530
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  
  /**
   * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
   */
  void rcu_barrier_sched(void)
  {
037b64ed0   Paul E. McKenney   rcu: Place pointe...
3531
  	_rcu_barrier(&rcu_sched_state);
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3532
3533
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_sched);
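
  /*
   * Illustrative usage sketch (not part of tree.c): a module that posts
   * callbacks with call_rcu_sched() must wait for all of them to be
   * invoked before its exit handler returns, otherwise a callback could
   * run after the module text has been unloaded.  Names are hypothetical.
   */
  static void __exit foo_module_exit(void)
  {
  	foo_unregister_all();	/* hypothetical: stop posting new callbacks */
  	rcu_barrier_sched();	/* wait for all already-posted callbacks */
  	/* now it is safe for the module (and its callback code) to go away */
  }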
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3534
  /*
0aa04b055   Paul E. McKenney   rcu: Process offl...
   * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
   * first CPU in a given leaf rcu_node structure coming online.  The caller
   * must hold the corresponding leaf rcu_node ->lock with interrupts
   * disabled.
   */
  static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
  {
  	long mask;
  	struct rcu_node *rnp = rnp_leaf;
c0b334c5b   Paul E. McKenney   rcu: Add lockdep_...
3544
  	lockdep_assert_held(&rnp->lock);
0aa04b055   Paul E. McKenney   rcu: Process offl...
3545
3546
3547
3548
3549
  	for (;;) {
  		mask = rnp->grpmask;
  		rnp = rnp->parent;
  		if (rnp == NULL)
  			return;
6cf100812   Paul E. McKenney   rcu: Add transiti...
3550
  		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
0aa04b055   Paul E. McKenney   rcu: Process offl...
3551
  		rnp->qsmaskinit |= mask;
67c583a7d   Boqun Feng   RCU: Privatize rc...
3552
  		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
0aa04b055   Paul E. McKenney   rcu: Process offl...
3553
3554
3555
3556
  	}
  }
  
  /*
27569620c   Paul E. McKenney   rcu: Split hierar...
3557
   * Do boot-time initialization of a CPU's per-CPU RCU data.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3558
   */
27569620c   Paul E. McKenney   rcu: Split hierar...
3559
3560
  static void __init
  rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3561
3562
  {
  	unsigned long flags;
394f99a90   Lai Jiangshan   rcu: simplify the...
3563
  	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
27569620c   Paul E. McKenney   rcu: Split hierar...
3564
3565
3566
  	struct rcu_node *rnp = rcu_get_root(rsp);
  
  	/* Set up local state, ensuring consistent view of global state. */
6cf100812   Paul E. McKenney   rcu: Add transiti...
3567
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
bc75e9998   Mark Rutland   rcu: Correctly ha...
3568
  	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
27569620c   Paul E. McKenney   rcu: Split hierar...
3569
  	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
29e37d814   Paul E. McKenney   rcu: Allow nestin...
3570
  	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
02a5c550b   Paul E. McKenney   rcu: Abstract ext...
3571
  	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
27569620c   Paul E. McKenney   rcu: Split hierar...
3572
  	rdp->cpu = cpu;
d4c08f2ac   Paul E. McKenney   rcu: Add grace-pe...
3573
  	rdp->rsp = rsp;
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3574
  	rcu_boot_init_nocb_percpu_data(rdp);
67c583a7d   Boqun Feng   RCU: Privatize rc...
3575
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
27569620c   Paul E. McKenney   rcu: Split hierar...
3576
3577
3578
3579
3580
3581
3582
  }
  
  /*
   * Initialize a CPU's per-CPU RCU data.  Note that only one online or
   * offline event can be happening at a given time.  Note also that we
   * can accept some slop in the rsp->completed access due to the fact
   * that this CPU cannot possibly have any RCU callbacks in flight yet.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3583
   */
49fb4c629   Paul Gortmaker   rcu: delete __cpu...
3584
  static void
9b67122ae   Iulia Manda   rcu: Remove unuse...
3585
  rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3586
3587
  {
  	unsigned long flags;
394f99a90   Lai Jiangshan   rcu: simplify the...
3588
  	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3589
3590
3591
  	struct rcu_node *rnp = rcu_get_root(rsp);
  
  	/* Set up local state, ensuring consistent view of global state. */
6cf100812   Paul E. McKenney   rcu: Add transiti...
3592
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
37c72e56f   Paul E. McKenney   rcu: Prevent RCU ...
3593
3594
  	rdp->qlen_last_fqs_check = 0;
  	rdp->n_force_qs_snap = rsp->n_force_qs;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3595
  	rdp->blimit = blimit;
15fecf89e   Paul E. McKenney   srcu: Abstract mu...
3596
3597
3598
  	if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
  	    !init_nocb_callback_list(rdp))
  		rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
29e37d814   Paul E. McKenney   rcu: Allow nestin...
3599
  	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
2625d469b   Paul E. McKenney   rcu: Abstract dyn...
3600
  	rcu_dynticks_eqs_online();
67c583a7d   Boqun Feng   RCU: Privatize rc...
3601
  	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3602

0aa04b055   Paul E. McKenney   rcu: Process offl...
3603
3604
3605
3606
3607
  	/*
  	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
  	 * propagation up the rcu_node tree will happen at the beginning
  	 * of the next grace period.
  	 */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3608
  	rnp = rdp->mynode;
2a67e741b   Peter Zijlstra   rcu: Create trans...
3609
  	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
b9585e940   Paul E. McKenney   rcu: Consolidate ...
3610
  	rdp->beenonline = true;	 /* We have now been online. */
0aa04b055   Paul E. McKenney   rcu: Process offl...
3611
3612
  	rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
  	rdp->completed = rnp->completed;
5b74c4589   Paul E. McKenney   rcu: Make ->cpu_n...
3613
  	rdp->cpu_no_qs.b.norm = true;
9577df9a3   Paul E. McKenney   rcu: Pull rcu_qs_...
3614
  	rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
97c668b8e   Paul E. McKenney   rcu: Rename qs_pe...
3615
  	rdp->core_needs_qs = false;
0aa04b055   Paul E. McKenney   rcu: Process offl...
3616
  	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
67c583a7d   Boqun Feng   RCU: Privatize rc...
3617
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3618
  }
deb34f364   Paul E. McKenney   rcu: Improve comm...
3619
3620
3621
3622
  /*
   * Invoked early in the CPU-online process, when pretty much all
   * services are available.  The incoming CPU is not present.
   */
4df837425   Thomas Gleixner   rcu: Convert rcut...
3623
  int rcutree_prepare_cpu(unsigned int cpu)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3624
  {
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
3625
3626
3627
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp)
9b67122ae   Iulia Manda   rcu: Remove unuse...
3628
  		rcu_init_percpu_data(cpu, rsp);
4df837425   Thomas Gleixner   rcu: Convert rcut...
3629
3630
3631
3632
3633
3634
  
  	rcu_prepare_kthreads(cpu);
  	rcu_spawn_all_nocb_kthreads(cpu);
  
  	return 0;
  }
deb34f364   Paul E. McKenney   rcu: Improve comm...
3635
3636
3637
  /*
   * Update RCU priority boot kthread affinity for CPU-hotplug changes.
   */
4df837425   Thomas Gleixner   rcu: Convert rcut...
3638
3639
3640
3641
3642
3643
  static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
  {
  	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
  
  	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
  }
deb34f364   Paul E. McKenney   rcu: Improve comm...
3644
3645
3646
3647
  /*
   * Near the end of the CPU-online process.  Pretty much all services
   * enabled, and the CPU is now very much alive.
   */
4df837425   Thomas Gleixner   rcu: Convert rcut...
3648
3649
3650
3651
  int rcutree_online_cpu(unsigned int cpu)
  {
  	sync_sched_exp_online_cleanup(cpu);
  	rcutree_affinity_setting(cpu, -1);
da915ad5c   Paul E. McKenney   srcu: Parallelize...
3652
3653
  	if (IS_ENABLED(CONFIG_TREE_SRCU))
  		srcu_online_cpu(cpu);
4df837425   Thomas Gleixner   rcu: Convert rcut...
3654
3655
  	return 0;
  }
deb34f364   Paul E. McKenney   rcu: Improve comm...
3656
3657
3658
3659
  /*
   * Near the beginning of the CPU-offline process.  The CPU is still very much alive
   * with pretty much all services enabled.
   */
4df837425   Thomas Gleixner   rcu: Convert rcut...
3660
3661
3662
  int rcutree_offline_cpu(unsigned int cpu)
  {
  	rcutree_affinity_setting(cpu, cpu);
da915ad5c   Paul E. McKenney   srcu: Parallelize...
3663
3664
  	if (IS_ENABLED(CONFIG_TREE_SRCU))
  		srcu_offline_cpu(cpu);
4df837425   Thomas Gleixner   rcu: Convert rcut...
3665
3666
  	return 0;
  }
deb34f364   Paul E. McKenney   rcu: Improve comm...
3667
3668
3669
  /*
   * Near the end of the offline process.  We do only tracing here.
   */
4df837425   Thomas Gleixner   rcu: Convert rcut...
3670
3671
3672
3673
3674
3675
3676
3677
  int rcutree_dying_cpu(unsigned int cpu)
  {
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp)
  		rcu_cleanup_dying_cpu(rsp);
  	return 0;
  }
deb34f364   Paul E. McKenney   rcu: Improve comm...
3678
3679
3680
  /*
   * The outgoing CPU is gone and we are running elsewhere.
   */
4df837425   Thomas Gleixner   rcu: Convert rcut...
3681
3682
3683
3684
3685
3686
3687
3688
3689
  int rcutree_dead_cpu(unsigned int cpu)
  {
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp) {
  		rcu_cleanup_dead_cpu(cpu, rsp);
  		do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
  	}
  	return 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3690
  }
7ec99de36   Paul E. McKenney   rcu: Provide exac...
3691
3692
3693
3694
3695
3696
  /*
   * Mark the specified CPU as being online so that subsequent grace periods
   * (both expedited and normal) will wait on it.  Note that this means that
   * incoming CPUs are not allowed to use RCU read-side critical sections
   * until this function is called.  Failing to observe this restriction
   * will result in lockdep splats.
deb34f364   Paul E. McKenney   rcu: Improve comm...
3697
3698
3699
3700
   *
   * Note that this function is special in that it is invoked directly
   * from the incoming CPU rather than from the cpuhp_step mechanism.
   * This is because this function must be invoked at a precise location.
7ec99de36   Paul E. McKenney   rcu: Provide exac...
3701
3702
3703
3704
3705
   */
  void rcu_cpu_starting(unsigned int cpu)
  {
  	unsigned long flags;
  	unsigned long mask;
313517fc4   Paul E. McKenney   rcu: Make expedit...
3706
3707
  	int nbits;
  	unsigned long oldmask;
7ec99de36   Paul E. McKenney   rcu: Provide exac...
3708
3709
3710
3711
3712
  	struct rcu_data *rdp;
  	struct rcu_node *rnp;
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp) {
fdbb9b315   Paul E. McKenney   rcu: Make rcu_cpu...
3713
  		rdp = per_cpu_ptr(rsp->rda, cpu);
7ec99de36   Paul E. McKenney   rcu: Provide exac...
3714
3715
3716
3717
  		rnp = rdp->mynode;
  		mask = rdp->grpmask;
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
  		rnp->qsmaskinitnext |= mask;
313517fc4   Paul E. McKenney   rcu: Make expedit...
3718
  		oldmask = rnp->expmaskinitnext;
7ec99de36   Paul E. McKenney   rcu: Provide exac...
3719
  		rnp->expmaskinitnext |= mask;
313517fc4   Paul E. McKenney   rcu: Make expedit...
3720
3721
3722
3723
  		oldmask ^= rnp->expmaskinitnext;
  		nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
  		/* Allow lockless access for expedited grace periods. */
  		smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
7ec99de36   Paul E. McKenney   rcu: Provide exac...
3724
3725
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  	}
313517fc4   Paul E. McKenney   rcu: Make expedit...
3726
  	smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
7ec99de36   Paul E. McKenney   rcu: Provide exac...
3727
  }
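
  /*
   * Rough bringup ordering (a sketch; the arch entry-point names are not
   * from this file and vary by architecture): the incoming CPU invokes
   * rcu_cpu_starting() very early, typically from notify_cpu_starting(),
   * before its first RCU read-side critical section, and only much later
   * runs the rcutree_online_cpu() cpuhp callback shown above.
   */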
27d50c7ee   Thomas Gleixner   rcu: Make CPU_DYI...
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
  #ifdef CONFIG_HOTPLUG_CPU
  /*
   * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
   * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
   * bit masks.
   */
  static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
  {
  	unsigned long flags;
  	unsigned long mask;
  	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
27d50c7ee   Thomas Gleixner   rcu: Make CPU_DYI...
3740
3741
3742
3743
  	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
  	mask = rdp->grpmask;
  	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
  	rnp->qsmaskinitnext &= ~mask;
710d60cbf   Linus Torvalds   Merge branch 'smp...
3744
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
27d50c7ee   Thomas Gleixner   rcu: Make CPU_DYI...
3745
  }
deb34f364   Paul E. McKenney   rcu: Improve comm...
3746
3747
3748
3749
3750
3751
3752
3753
  /*
   * The outgoing CPU has no further need of RCU, so remove it from
   * the list of CPUs that RCU must track.
   *
   * Note that this function is special in that it is invoked directly
   * from the outgoing CPU rather than from the cpuhp_step mechanism.
   * This is because this function must be invoked at a precise location.
   */
27d50c7ee   Thomas Gleixner   rcu: Make CPU_DYI...
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
  void rcu_report_dead(unsigned int cpu)
  {
  	struct rcu_state *rsp;
  
  	/* QS for any half-done expedited RCU-sched GP. */
  	preempt_disable();
  	rcu_report_exp_rdp(&rcu_sched_state,
  			   this_cpu_ptr(rcu_sched_state.rda), true);
  	preempt_enable();
  	for_each_rcu_flavor(rsp)
  		rcu_cleanup_dying_idle_cpu(cpu, rsp);
  }
a58163d8c   Paul E. McKenney   rcu: Migrate call...
3766

f2dbe4a56   Paul E. McKenney   rcu: Localize rcu...
3767
  /* Migrate the dead CPU's callbacks to the current CPU. */
a58163d8c   Paul E. McKenney   rcu: Migrate call...
3768
3769
3770
  static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
  {
  	unsigned long flags;
b1a2d79fe   Paul E. McKenney   rcu: Make NOCB CP...
3771
  	struct rcu_data *my_rdp;
a58163d8c   Paul E. McKenney   rcu: Migrate call...
3772
  	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
9fa46fb8c   Paul E. McKenney   rcu: Advance outg...
3773
  	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
a58163d8c   Paul E. McKenney   rcu: Migrate call...
3774

95335c035   Paul E. McKenney   rcu: Check for NO...
3775
3776
  	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
  		return;  /* No callbacks to migrate. */
b1a2d79fe   Paul E. McKenney   rcu: Make NOCB CP...
3777
3778
3779
3780
3781
3782
  	local_irq_save(flags);
  	my_rdp = this_cpu_ptr(rsp->rda);
  	if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
  		local_irq_restore(flags);
  		return;
  	}
9fa46fb8c   Paul E. McKenney   rcu: Advance outg...
3783
3784
  	raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
  	rcu_advance_cbs(rsp, rnp_root, rdp); /* Leverage recent GPs. */
21cc24838   Paul E. McKenney   rcu: Advance call...
3785
  	rcu_advance_cbs(rsp, rnp_root, my_rdp); /* Assign GP to pending CBs. */
f2dbe4a56   Paul E. McKenney   rcu: Localize rcu...
3786
  	rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
09efeeee1   Paul E. McKenney   rcu: Move callbac...
3787
3788
  	WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
  		     !rcu_segcblist_n_cbs(&my_rdp->cblist));
537b85c87   Paul E. McKenney   rcu: Eliminate rc...
3789
  	raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
a58163d8c   Paul E. McKenney   rcu: Migrate call...
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808
3809
  	WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
  		  !rcu_segcblist_empty(&rdp->cblist),
  		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p
  ",
  		  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
  		  rcu_segcblist_first_cb(&rdp->cblist));
  }
  
  /*
   * The outgoing CPU has just passed through the dying-idle state,
   * and we are being invoked from the CPU that was IPIed to continue the
   * offline operation.  We need to migrate the outgoing CPU's callbacks.
   */
  void rcutree_migrate_callbacks(int cpu)
  {
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp)
  		rcu_migrate_callbacks(cpu, rsp);
  }
27d50c7ee   Thomas Gleixner   rcu: Make CPU_DYI...
3810
  #endif
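
  /*
   * Summary of the hotplug entry points above (restating their comments,
   * not adding semantics): rcutree_prepare_cpu() and rcu_cpu_starting()
   * run as a CPU comes up; rcutree_online_cpu() runs near the end of
   * onlining and rcutree_offline_cpu() near the start of offlining, both
   * updating boost-kthread affinity; on the way down, rcutree_dying_cpu()
   * does tracing, rcu_report_dead() runs on the outgoing CPU itself, and
   * rcutree_dead_cpu() plus rcutree_migrate_callbacks() run later on a
   * surviving CPU.
   */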
deb34f364   Paul E. McKenney   rcu: Improve comm...
3811
3812
3813
3814
  /*
   * On non-huge systems, use expedited RCU grace periods to make suspend
   * and hibernation run faster.
   */
d1d74d14e   Borislav Petkov   rcu: Expedite gra...
3815
3816
3817
3818
3819
3820
3821
  static int rcu_pm_notify(struct notifier_block *self,
  			 unsigned long action, void *hcpu)
  {
  	switch (action) {
  	case PM_HIBERNATION_PREPARE:
  	case PM_SUSPEND_PREPARE:
  		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
5afff48bd   Paul E. McKenney   rcu: Update from ...
3822
  			rcu_expedite_gp();
d1d74d14e   Borislav Petkov   rcu: Expedite gra...
3823
3824
3825
  		break;
  	case PM_POST_HIBERNATION:
  	case PM_POST_SUSPEND:
5afff48bd   Paul E. McKenney   rcu: Update from ...
3826
3827
  		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
  			rcu_unexpedite_gp();
d1d74d14e   Borislav Petkov   rcu: Expedite gra...
3828
3829
3830
3831
3832
3833
  		break;
  	default:
  		break;
  	}
  	return NOTIFY_OK;
  }
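
  /*
   * This notifier is registered from rcu_init() below via
   * pm_notifier(rcu_pm_notify, 0), so on systems with at most 256
   * possible CPUs the whole suspend/hibernation sequence runs with
   * expedited grace periods.
   */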
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3834
  /*
9386c0b75   Paul E. McKenney   rcu: Rationalize ...
3835
   * Spawn the kthreads that handle each RCU flavor's grace periods.
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3836
3837
3838
3839
   */
  static int __init rcu_spawn_gp_kthread(void)
  {
  	unsigned long flags;
a94844b22   Paul E. McKenney   rcu: Optionally r...
3840
  	int kthread_prio_in = kthread_prio;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3841
3842
  	struct rcu_node *rnp;
  	struct rcu_state *rsp;
a94844b22   Paul E. McKenney   rcu: Optionally r...
3843
  	struct sched_param sp;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3844
  	struct task_struct *t;
a94844b22   Paul E. McKenney   rcu: Optionally r...
3845
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
  	/* Force priority into range. */
  	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
  		kthread_prio = 1;
  	else if (kthread_prio < 0)
  		kthread_prio = 0;
  	else if (kthread_prio > 99)
  		kthread_prio = 99;
  	if (kthread_prio != kthread_prio_in)
  		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d
  ",
  			 kthread_prio, kthread_prio_in);
9386c0b75   Paul E. McKenney   rcu: Rationalize ...
3856
  	rcu_scheduler_fully_active = 1;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3857
  	for_each_rcu_flavor(rsp) {
a94844b22   Paul E. McKenney   rcu: Optionally r...
3858
  		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3859
3860
  		BUG_ON(IS_ERR(t));
  		rnp = rcu_get_root(rsp);
6cf100812   Paul E. McKenney   rcu: Add transiti...
3861
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3862
  		rsp->gp_kthread = t;
a94844b22   Paul E. McKenney   rcu: Optionally r...
3863
3864
3865
3866
  		if (kthread_prio) {
  			sp.sched_priority = kthread_prio;
  			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
  		}
67c583a7d   Boqun Feng   RCU: Privatize rc...
3867
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
e11f13355   Peter Zijlstra   rcu: Move wakeup ...
3868
  		wake_up_process(t);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3869
  	}
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
3870
  	rcu_spawn_nocb_kthreads();
9386c0b75   Paul E. McKenney   rcu: Rationalize ...
3871
  	rcu_spawn_boost_kthreads();
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3872
3873
3874
3875
3876
  	return 0;
  }
  early_initcall(rcu_spawn_gp_kthread);
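
  /*
   * Example boot-line usage (an assumed command line, consistent with the
   * clamping above): "rcutree.kthread_prio=2" runs each flavor's
   * grace-period kthread as SCHED_FIFO priority 2; values above 99, below
   * 0, or below 1 with RCU_BOOST are forced back into range with a
   * console message.
   */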
  
  /*
52d7e48b8   Paul E. McKenney   rcu: Narrow early...
3877
3878
3879
3880
3881
3882
   * This function is invoked towards the end of the scheduler's
   * initialization process.  Before this is called, the idle task might
   * contain synchronous grace-period primitives (during which time, this idle
   * task is booting the system, and such primitives are no-ops).  After this
   * function is called, any synchronous grace-period primitives are run as
   * expedited, with the requesting task driving the grace period forward.
900b1028e   Paul E. McKenney   srcu: Allow SRCU ...
3883
   * A later core_initcall() rcu_set_runtime_mode() will switch to full
52d7e48b8   Paul E. McKenney   rcu: Narrow early...
3884
   * runtime RCU functionality.
bbad93798   Paul E. McKenney   rcu: slim down rc...
3885
3886
3887
3888
3889
   */
  void rcu_scheduler_starting(void)
  {
  	WARN_ON(num_online_cpus() != 1);
  	WARN_ON(nr_context_switches() > 0);
52d7e48b8   Paul E. McKenney   rcu: Narrow early...
3890
3891
3892
  	rcu_test_sync_prims();
  	rcu_scheduler_active = RCU_SCHEDULER_INIT;
  	rcu_test_sync_prims();
bbad93798   Paul E. McKenney   rcu: slim down rc...
3893
3894
3895
  }
  
  /*
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3896
3897
   * Helper function for rcu_init() that initializes one rcu_state structure.
   */
a87f203e2   Paul E. McKenney   rcu: Eliminate un...
3898
  static void __init rcu_init_one(struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3899
  {
cb0071023   Alexander Gordeev   rcu: Limit count ...
3900
3901
  	static const char * const buf[] = RCU_NODE_NAME_INIT;
  	static const char * const fqs[] = RCU_FQS_NAME_INIT;
3dc5dbe9a   Paul E. McKenney   rcu: Move lock_cl...
3902
3903
  	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
  	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
199977bff   Alexander Gordeev   rcu: Remove unnec...
3904

199977bff   Alexander Gordeev   rcu: Remove unnec...
3905
  	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3906
3907
3908
3909
  	int cpustride = 1;
  	int i;
  	int j;
  	struct rcu_node *rnp;
05b84aec4   Alexander Gordeev   rcu: Limit rcu_ca...
3910
  	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
b6407e863   Paul E. McKenney   rcu: Give differe...
3911

3eaaaf6cd   Paul E. McKenney   rcu: Shut up spur...
3912
3913
3914
  	/* Silence gcc 4.8 false positive about array index out of range. */
  	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
  		panic("rcu_init_one: rcu_num_lvls out of range");
4930521ae   Paul E. McKenney   rcu: Silence comp...
3915

64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3916
  	/* Initialize the level-tracking arrays. */
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3917
  	for (i = 1; i < rcu_num_lvls; i++)
41f5c6317   Paul E. McKenney   rcu: Remove redun...
3918
3919
  		rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
  	rcu_init_levelspread(levelspread, num_rcu_lvl);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3920
3921
  
  	/* Initialize the elements themselves, starting from the leaves. */
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3922
  	for (i = rcu_num_lvls - 1; i >= 0; i--) {
199977bff   Alexander Gordeev   rcu: Remove unnec...
3923
  		cpustride *= levelspread[i];
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3924
  		rnp = rsp->level[i];
41f5c6317   Paul E. McKenney   rcu: Remove redun...
3925
  		for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
3926
3927
  			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
  			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
b6407e863   Paul E. McKenney   rcu: Give differe...
3928
  						   &rcu_node_class[i], buf[i]);
394f2769a   Paul E. McKenney   rcu: Prevent forc...
3929
3930
3931
  			raw_spin_lock_init(&rnp->fqslock);
  			lockdep_set_class_and_name(&rnp->fqslock,
  						   &rcu_fqs_class[i], fqs[i]);
25d30cf42   Paul E. McKenney   rcu: Adjust for u...
3932
3933
  			rnp->gpnum = rsp->gpnum;
  			rnp->completed = rsp->completed;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3934
3935
3936
3937
  			rnp->qsmask = 0;
  			rnp->qsmaskinit = 0;
  			rnp->grplo = j * cpustride;
  			rnp->grphi = (j + 1) * cpustride - 1;
595f3900f   Himangi Saraogi   rcu: Replace NR_C...
3938
3939
  			if (rnp->grphi >= nr_cpu_ids)
  				rnp->grphi = nr_cpu_ids - 1;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3940
3941
3942
3943
3944
  			if (i == 0) {
  				rnp->grpnum = 0;
  				rnp->grpmask = 0;
  				rnp->parent = NULL;
  			} else {
199977bff   Alexander Gordeev   rcu: Remove unnec...
3945
  				rnp->grpnum = j % levelspread[i - 1];
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3946
3947
  				rnp->grpmask = 1UL << rnp->grpnum;
  				rnp->parent = rsp->level[i - 1] +
199977bff   Alexander Gordeev   rcu: Remove unnec...
3948
  					      j / levelspread[i - 1];
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3949
3950
  			}
  			rnp->level = i;
12f5f524c   Paul E. McKenney   rcu: merge TREE_P...
3951
  			INIT_LIST_HEAD(&rnp->blkd_tasks);
dae6e64d2   Paul E. McKenney   rcu: Introduce pr...
3952
  			rcu_init_one_nocb(rnp);
f6a12f34a   Paul E. McKenney   rcu: Enforce expe...
3953
3954
  			init_waitqueue_head(&rnp->exp_wq[0]);
  			init_waitqueue_head(&rnp->exp_wq[1]);
3b5f668e7   Paul E. McKenney   rcu: Overlap wake...
3955
3956
  			init_waitqueue_head(&rnp->exp_wq[2]);
  			init_waitqueue_head(&rnp->exp_wq[3]);
f6a12f34a   Paul E. McKenney   rcu: Enforce expe...
3957
  			spin_lock_init(&rnp->exp_lock);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3958
3959
  		}
  	}
0c34029ab   Lai Jiangshan   rcu: move some co...
3960

abedf8e24   Paul Gortmaker   rcu: Use simple w...
3961
3962
  	init_swait_queue_head(&rsp->gp_wq);
  	init_swait_queue_head(&rsp->expedited_wq);
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3963
  	rnp = rsp->level[rcu_num_lvls - 1];
0c34029ab   Lai Jiangshan   rcu: move some co...
3964
  	for_each_possible_cpu(i) {
4a90a0681   Paul E. McKenney   rcu: permit disco...
3965
  		while (i > rnp->grphi)
0c34029ab   Lai Jiangshan   rcu: move some co...
3966
  			rnp++;
394f99a90   Lai Jiangshan   rcu: simplify the...
3967
  		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
0c34029ab   Lai Jiangshan   rcu: move some co...
3968
3969
  		rcu_boot_init_percpu_data(i, rsp);
  	}
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
3970
  	list_add(&rsp->flavors, &rcu_struct_flavors);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3971
  }
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3972
3973
  /*
   * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4102adab9   Paul E. McKenney   rcu: Move RCU-rel...
3974
   * replace the definitions in tree.h because those are needed to size
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3975
3976
3977
3978
   * the ->node array in the rcu_state structure.
   */
  static void __init rcu_init_geometry(void)
  {
026ad2835   Paul E. McKenney   rcu: Drive quiesc...
3979
  	ulong d;
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3980
  	int i;
05b84aec4   Alexander Gordeev   rcu: Limit rcu_ca...
3981
  	int rcu_capacity[RCU_NUM_LVLS];
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3982

026ad2835   Paul E. McKenney   rcu: Drive quiesc...
3983
3984
3985
3986
3987
3988
3989
3990
3991
3992
3993
3994
  	/*
  	 * Initialize any unspecified boot parameters.
  	 * The default values of jiffies_till_first_fqs and
  	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
  	 * value (which is a function of HZ), plus one for each
  	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
  	 */
  	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
  	if (jiffies_till_first_fqs == ULONG_MAX)
  		jiffies_till_first_fqs = d;
  	if (jiffies_till_next_fqs == ULONG_MAX)
  		jiffies_till_next_fqs = d;
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3995
  	/* If the compile-time values are accurate, just leave. */
47d631af5   Paul E. McKenney   rcu: Make RCU abl...
3996
  	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
b17c7035f   Paul E. McKenney   rcu: Shrink RCU b...
3997
  	    nr_cpu_ids == NR_CPUS)
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3998
  		return;
9b130ad5b   Alexey Dobriyan   treewide: make "n...
3999
4000
  	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u
  ",
394790981   Paul E. McKenney   rcu: Let the worl...
4001
  		rcu_fanout_leaf, nr_cpu_ids);
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4002
4003
  
  	/*
ee968ac61   Paul E. McKenney   rcu: Eliminate pa...
4004
4005
4006
4007
  	 * The boot-time rcu_fanout_leaf parameter must be at least two
  	 * and cannot exceed the number of bits in the rcu_node masks.
  	 * Complain and fall back to the compile-time values if this
  	 * limit is exceeded.
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4008
  	 */
ee968ac61   Paul E. McKenney   rcu: Eliminate pa...
4009
  	if (rcu_fanout_leaf < 2 ||
75cf15a4c   Alexander Gordeev   rcu: Panic if RCU...
4010
  	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
13bd64947   Paul E. McKenney   rcu: Reset rcu_fa...
4011
  		rcu_fanout_leaf = RCU_FANOUT_LEAF;
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4012
4013
4014
  		WARN_ON(1);
  		return;
  	}
75cf15a4c   Alexander Gordeev   rcu: Panic if RCU...
4015
  	/*
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4016
  	 * Compute the number of CPUs that can be handled by an rcu_node tree
9618138b0   Alexander Gordeev   rcu: Simplify rcu...
4017
  	 * with the given number of levels.
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4018
  	 */
9618138b0   Alexander Gordeev   rcu: Simplify rcu...
4019
  	rcu_capacity[0] = rcu_fanout_leaf;
05b84aec4   Alexander Gordeev   rcu: Limit rcu_ca...
4020
  	for (i = 1; i < RCU_NUM_LVLS; i++)
05c5df31a   Paul E. McKenney   rcu: Make RCU abl...
4021
  		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4022
4023
  
  	/*
75cf15a4c   Alexander Gordeev   rcu: Panic if RCU...
4024
  	 * The tree must be able to accommodate the configured number of CPUs.
ee968ac61   Paul E. McKenney   rcu: Eliminate pa...
4025
  	 * If this limit is exceeded, fall back to the compile-time values.
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4026
  	 */
ee968ac61   Paul E. McKenney   rcu: Eliminate pa...
4027
4028
4029
4030
4031
  	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
  		rcu_fanout_leaf = RCU_FANOUT_LEAF;
  		WARN_ON(1);
  		return;
  	}
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4032

679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4033
  	/* Calculate the number of levels in the tree. */
9618138b0   Alexander Gordeev   rcu: Simplify rcu...
4034
  	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4035
  	}
9618138b0   Alexander Gordeev   rcu: Simplify rcu...
4036
  	rcu_num_lvls = i + 1;
679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4037

f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4038
  	/* Calculate the number of rcu_nodes at each level of the tree. */
679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4039
  	for (i = 0; i < rcu_num_lvls; i++) {
9618138b0   Alexander Gordeev   rcu: Simplify rcu...
4040
  		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4041
4042
  		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
  	}
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4043
4044
4045
  
  	/* Calculate the total number of rcu_node structures. */
  	rcu_num_nodes = 0;
679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4046
  	for (i = 0; i < rcu_num_lvls; i++)
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4047
  		rcu_num_nodes += num_rcu_lvl[i];
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4048
  }
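
  /*
   * Worked example (illustrative, assuming the common 64-bit defaults
   * RCU_FANOUT=64 and RCU_FANOUT_LEAF=16): rcu_capacity[] works out to
   * { 16, 1024, 65536, 4194304 }.  A boot with nr_cpu_ids=4096 (smaller
   * than NR_CPUS, so the recomputation above actually runs) then selects
   * rcu_num_lvls=3, num_rcu_lvl[] = { 1, 4, 256 }, and
   * rcu_num_nodes = 261.
   */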
a3dc2948c   Paul E. McKenney   rcu: Enable diagn...
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
4059
4060
4061
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
  /*
   * Dump out the structure of the rcu_node combining tree associated
   * with the rcu_state structure referenced by rsp.
   */
  static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
  {
  	int level = 0;
  	struct rcu_node *rnp;
  
  	pr_info("rcu_node tree layout dump
  ");
  	pr_info(" ");
  	rcu_for_each_node_breadth_first(rsp, rnp) {
  		if (rnp->level != level) {
  			pr_cont("
  ");
  			pr_info(" ");
  			level = rnp->level;
  		}
  		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
  	}
  	pr_cont("
  ");
  }
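
  /*
   * Sample output (hypothetical two-level geometry covering CPUs 0-127
   * with 16 CPUs per leaf), as produced by the pr_info()/pr_cont() calls
   * above:
   *
   *	rcu_node tree layout dump
   *	 0:127 ^0
   *	 0:15 ^0  16:31 ^1  32:47 ^2  ... 112:127 ^7
   */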
9f680ab41   Paul E. McKenney   rcu: Eliminate un...
4073
  void __init rcu_init(void)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
4074
  {
017c42613   Paul E. McKenney   rcu: Fix sparse w...
4075
  	int cpu;
9f680ab41   Paul E. McKenney   rcu: Eliminate un...
4076

476276781   Paul E. McKenney   rcu: Move early b...
4077
  	rcu_early_boot_tests();
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
4078
  	rcu_bootup_announce();
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4079
  	rcu_init_geometry();
a87f203e2   Paul E. McKenney   rcu: Eliminate un...
4080
4081
  	rcu_init_one(&rcu_bh_state);
  	rcu_init_one(&rcu_sched_state);
a3dc2948c   Paul E. McKenney   rcu: Enable diagn...
4082
4083
  	if (dump_tree)
  		rcu_dump_rcu_node_tree(&rcu_sched_state);
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
4084
  	__rcu_init_preempt();
b5b393601   Jiang Fang   rcu: Fix spacing ...
4085
  	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
9f680ab41   Paul E. McKenney   rcu: Eliminate un...
4086
4087
4088
4089
4090
4091
  
  	/*
  	 * We don't need protection against CPU-hotplug here because
  	 * this is called early in boot, before either interrupts
  	 * or the scheduler are operational.
  	 */
d1d74d14e   Borislav Petkov   rcu: Expedite gra...
4092
  	pm_notifier(rcu_pm_notify, 0);
7ec99de36   Paul E. McKenney   rcu: Provide exac...
4093
  	for_each_online_cpu(cpu) {
4df837425   Thomas Gleixner   rcu: Convert rcut...
4094
  		rcutree_prepare_cpu(cpu);
7ec99de36   Paul E. McKenney   rcu: Provide exac...
4095
  		rcu_cpu_starting(cpu);
da915ad5c   Paul E. McKenney   srcu: Parallelize...
4096
4097
  		if (IS_ENABLED(CONFIG_TREE_SRCU))
  			srcu_online_cpu(cpu);
7ec99de36   Paul E. McKenney   rcu: Provide exac...
4098
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
4099
  }
3549c2bc2   Paul E. McKenney   rcu: Move expedit...
4100
  #include "tree_exp.h"
4102adab9   Paul E. McKenney   rcu: Move RCU-rel...
4101
  #include "tree_plugin.h"