Blame view

kernel/rcu/tree.c 131 KB
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
  /*
   * Read-Copy Update mechanism for mutual exclusion
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
87de1cfdc   Paul E. McKenney   rcu: Stop trackin...
15
16
   * along with this program; if not, you can access it online at
   * http://www.gnu.org/licenses/gpl-2.0.html.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
17
18
19
20
21
22
23
24
25
26
27
   *
   * Copyright IBM Corporation, 2008
   *
   * Authors: Dipankar Sarma <dipankar@in.ibm.com>
   *	    Manfred Spraul <manfred@colorfullife.com>
   *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
   *
   * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
   * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
   *
   * For detailed explanation of Read-Copy Update mechanism see -
a71fca58b   Paul E. McKenney   rcu: Fix whitespa...
28
   *	Documentation/RCU
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
29
30
31
32
33
34
35
36
37
   */
  #include <linux/types.h>
  #include <linux/kernel.h>
  #include <linux/init.h>
  #include <linux/spinlock.h>
  #include <linux/smp.h>
  #include <linux/rcupdate.h>
  #include <linux/interrupt.h>
  #include <linux/sched.h>
c1dc0b9c0   Ingo Molnar   debug lockups: Im...
38
  #include <linux/nmi.h>
8826f3b03   Paul E. McKenney   rcu: Avoid acquir...
39
  #include <linux/atomic.h>
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
40
  #include <linux/bitops.h>
9984de1a5   Paul Gortmaker   kernel: Map most ...
41
  #include <linux/export.h>
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
42
43
44
45
46
47
48
  #include <linux/completion.h>
  #include <linux/moduleparam.h>
  #include <linux/percpu.h>
  #include <linux/notifier.h>
  #include <linux/cpu.h>
  #include <linux/mutex.h>
  #include <linux/time.h>
bbad93798   Paul E. McKenney   rcu: slim down rc...
49
  #include <linux/kernel_stat.h>
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
50
51
  #include <linux/wait.h>
  #include <linux/kthread.h>
268bb0ce3   Linus Torvalds   sanitize <linux/p...
52
  #include <linux/prefetch.h>
3d3b7db0a   Paul E. McKenney   rcu: Move synchro...
53
54
  #include <linux/delay.h>
  #include <linux/stop_machine.h>
661a85dc0   Paul E. McKenney   rcu: Add random P...
55
  #include <linux/random.h>
af658dca2   Steven Rostedt (Red Hat)   tracing: Rename f...
56
  #include <linux/trace_events.h>
d1d74d14e   Borislav Petkov   rcu: Expedite gra...
57
  #include <linux/suspend.h>
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
58

4102adab9   Paul E. McKenney   rcu: Move RCU-rel...
59
  #include "tree.h"
29c00b4a1   Paul E. McKenney   rcu: Add event-tr...
60
  #include "rcu.h"
9f77da9f4   Paul E. McKenney   rcu: Move private...
61

4102adab9   Paul E. McKenney   rcu: Move RCU-rel...
62
63
64
65
  #ifdef MODULE_PARAM_PREFIX
  #undef MODULE_PARAM_PREFIX
  #endif
  #define MODULE_PARAM_PREFIX "rcutree."
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
66
  /* Data structures. */
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
67
68
69
70
71
72
73
74
  /*
   * In order to export the rcu_state name to the tracing tools, it
   * needs to be added in the __tracepoint_string section.
   * This requires defining a separate variable tp_<sname>_varname
   * that points to the string being used, and this will allow
   * the tracing userspace tools to be able to decipher the string
   * address to the matching string.
   */
a8a29b3b7   Ard Biesheuvel   rcu: Define trace...
75
76
  #ifdef CONFIG_TRACING
  # define DEFINE_RCU_TPS(sname) \
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
77
  static char sname##_varname[] = #sname; \
a8a29b3b7   Ard Biesheuvel   rcu: Define trace...
78
79
80
81
82
83
84
85
86
  static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
  # define RCU_STATE_NAME(sname) sname##_varname
  #else
  # define DEFINE_RCU_TPS(sname)
  # define RCU_STATE_NAME(sname) __stringify(sname)
  #endif
  
  #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
  DEFINE_RCU_TPS(sname) \
c92fb0579   Nicolas Iooss   rcu: Make rcu_*_d...
87
  static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
a41bfeb2f   Steven Rostedt (Red Hat)   rcu: Simplify RCU...
88
  struct rcu_state sname##_state = { \
6c90cc7bf   Paul E. McKenney   rcu: Prevent exce...
89
  	.level = { &sname##_state.node[0] }, \
2723249a3   Paul E. McKenney   rcu: Wire ->rda p...
90
  	.rda = &sname##_data, \
037b64ed0   Paul E. McKenney   rcu: Place pointe...
91
  	.call = cr, \
77f81fe08   Petr Mladek   rcu: Finish foldi...
92
  	.gp_state = RCU_GP_IDLE, \
42c3533ee   Paul E. McKenney   rcu: Fix tracing ...
93
94
  	.gpnum = 0UL - 300UL, \
  	.completed = 0UL - 300UL, \
7b2e6011f   Paul E. McKenney   rcu: Rename ->ono...
95
  	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
6c90cc7bf   Paul E. McKenney   rcu: Prevent exce...
96
97
  	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
  	.orphan_donetail = &sname##_state.orphan_donelist, \
7be7f0be9   Paul E. McKenney   rcu: Move rcu_bar...
98
  	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
a8a29b3b7   Ard Biesheuvel   rcu: Define trace...
99
  	.name = RCU_STATE_NAME(sname), \
a48898585   Paul E. McKenney   rcu: Distinguish ...
100
  	.abbr = sabbr, \
f6a12f34a   Paul E. McKenney   rcu: Enforce expe...
101
  	.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
3b5f668e7   Paul E. McKenney   rcu: Overlap wake...
102
  	.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
2723249a3   Paul E. McKenney   rcu: Wire ->rda p...
103
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
104

a41bfeb2f   Steven Rostedt (Red Hat)   rcu: Simplify RCU...
105
106
  RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
  RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
b1f77b058   Ingo Molnar   kmemtrace, rcu: f...
107

b28a7c016   Paul E. McKenney   rcu: Tell the com...
108
  static struct rcu_state *const rcu_state_p;
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
109
  LIST_HEAD(rcu_struct_flavors);
27f4d2805   Paul E. McKenney   rcu: priority boo...
110

a3dc2948c   Paul E. McKenney   rcu: Enable diagn...
111
112
113
  /* Dump rcu_node combining tree at boot to verify correct setup. */
  static bool dump_tree;
  module_param(dump_tree, bool, 0444);
7fa270010   Paul E. McKenney   rcu: Convert CONF...
114
115
116
  /* Control rcu_node-tree auto-balancing at boot time. */
  static bool rcu_fanout_exact;
  module_param(rcu_fanout_exact, bool, 0444);
47d631af5   Paul E. McKenney   rcu: Make RCU abl...
117
118
  /* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
  static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
7e5c2dfb4   Paul E. McKenney   rcu: Make rcutree...
119
  module_param(rcu_fanout_leaf, int, 0444);
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
120
  int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
cb0071023   Alexander Gordeev   rcu: Limit count ...
121
122
  /* Number of rcu_nodes at specified level. */
  static int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
123
  int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
088e9d253   Daniel Bristot de Oliveira   rcu: sysctl: Pani...
124
125
  /* panic() on RCU Stall sysctl. */
  int sysctl_panic_on_rcu_stall __read_mostly;
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
126

b0d304172   Paul E. McKenney   rcu: Prevent RCU ...
127
  /*
90687fc3c   Paul E. McKenney   rcu: Narrow early...
128
129
130
131
   * The rcu_scheduler_active variable is initialized to the value
   * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
   * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
   * RCU can assume that there is but one task, allowing RCU to (for example)
0d95092cc   Paul E. McKenney   rcu: Fix outdated...
132
   * optimize synchronize_rcu() to a simple barrier().  When this variable
90687fc3c   Paul E. McKenney   rcu: Narrow early...
133
134
135
136
137
   * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
   * to detect real grace periods.  This variable is also used to suppress
   * boot-time false positives from lockdep-RCU error checking.  Finally, it
   * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
   * is fully initialized, including all of its kthreads having been spawned.
b0d304172   Paul E. McKenney   rcu: Prevent RCU ...
138
   */
bbad93798   Paul E. McKenney   rcu: slim down rc...
139
140
  int rcu_scheduler_active __read_mostly;
  EXPORT_SYMBOL_GPL(rcu_scheduler_active);
b0d304172   Paul E. McKenney   rcu: Prevent RCU ...
141
142
143
144
145
146
147
148
149
150
151
152
153
  /*
   * The rcu_scheduler_fully_active variable transitions from zero to one
   * during the early_initcall() processing, which is after the scheduler
   * is capable of creating new tasks.  So RCU processing (for example,
   * creating tasks for RCU priority boosting) must be delayed until after
   * rcu_scheduler_fully_active transitions from zero to one.  We also
   * currently delay invocation of any RCU callbacks until after this point.
   *
   * It might later prove better for people registering RCU callbacks during
   * early boot to take responsibility for these callbacks, but one step at
   * a time.
   */
  static int rcu_scheduler_fully_active __read_mostly;
0aa04b055   Paul E. McKenney   rcu: Process offl...
154
155
  static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
  static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
5d01bbd11   Thomas Gleixner   rcu: Yield simpler
156
  static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
a46e0899e   Paul E. McKenney   rcu: use softirq ...
157
158
  static void invoke_rcu_core(void);
  static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
6587a23b6   Paul E. McKenney   rcu: Switch synch...
159
160
  static void rcu_report_exp_rdp(struct rcu_state *rsp,
  			       struct rcu_data *rdp, bool wake);
3549c2bc2   Paul E. McKenney   rcu: Move expedit...
161
  static void sync_sched_exp_online_cleanup(int cpu);
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
162

a94844b22   Paul E. McKenney   rcu: Optionally r...
163
  /* rcuc/rcub kthread realtime priority */
26730f55c   Paul E. McKenney   rcu: Make RCU abl...
164
  #ifdef CONFIG_RCU_KTHREAD_PRIO
a94844b22   Paul E. McKenney   rcu: Optionally r...
165
  static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
26730f55c   Paul E. McKenney   rcu: Make RCU abl...
166
167
168
  #else /* #ifdef CONFIG_RCU_KTHREAD_PRIO */
  static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
  #endif /* #else #ifdef CONFIG_RCU_KTHREAD_PRIO */
a94844b22   Paul E. McKenney   rcu: Optionally r...
169
  module_param(kthread_prio, int, 0644);
8d7dc9283   Paul E. McKenney   rcu: Control grac...
170
  /* Delay in jiffies for grace-period initialization delays, debug only. */
0f41c0dda   Paul E. McKenney   rcu: Provide diag...
171
172
173
174
175
176
177
  
  #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT
  static int gp_preinit_delay = CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY;
  module_param(gp_preinit_delay, int, 0644);
  #else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */
  static const int gp_preinit_delay;
  #endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */
8d7dc9283   Paul E. McKenney   rcu: Control grac...
178
179
  #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
  static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
37745d281   Paul E. McKenney   rcu: Provide diag...
180
  module_param(gp_init_delay, int, 0644);
8d7dc9283   Paul E. McKenney   rcu: Control grac...
181
182
183
  #else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
  static const int gp_init_delay;
  #endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
eab128e83   Paul E. McKenney   rcu: Modulate gra...
184

0f41c0dda   Paul E. McKenney   rcu: Provide diag...
185
186
187
188
189
190
  #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP
  static int gp_cleanup_delay = CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY;
  module_param(gp_cleanup_delay, int, 0644);
  #else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */
  static const int gp_cleanup_delay;
  #endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */
eab128e83   Paul E. McKenney   rcu: Modulate gra...
191
192
193
194
195
196
197
198
199
200
  /*
   * Number of grace periods between delays, normalized by the duration of
   * the delay.  The longer the the delay, the more the grace periods between
   * each delay.  The reason for this normalization is that it means that,
   * for non-zero delays, the overall slowdown of grace periods is constant
   * regardless of the duration of the delay.  This arrangement balances
   * the need for long delays to increase some race probabilities with the
   * need for fast grace periods to increase other race probabilities.
   */
  #define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
37745d281   Paul E. McKenney   rcu: Provide diag...
201

a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
202
  /*
4a2986568   Paul E. McKenney   rcu: make rcutort...
203
204
205
206
207
208
209
210
211
212
213
214
   * Track the rcutorture test sequence number and the update version
   * number within a given test.  The rcutorture_testseq is incremented
   * on every rcutorture module load and unload, so has an odd value
   * when a test is running.  The rcutorture_vernum is set to zero
   * when rcutorture starts and is incremented on each rcutorture update.
   * These variables enable correlating rcutorture output with the
   * RCU tracing information.
   */
  unsigned long rcutorture_testseq;
  unsigned long rcutorture_vernum;
  
  /*
0aa04b055   Paul E. McKenney   rcu: Process offl...
215
216
217
218
219
220
221
   * Compute the mask of online CPUs for the specified rcu_node structure.
   * This will not be stable unless the rcu_node structure's ->lock is
   * held, but the bit corresponding to the current CPU will be stable
   * in most contexts.
   */
  unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
  {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
222
  	return READ_ONCE(rnp->qsmaskinitnext);
0aa04b055   Paul E. McKenney   rcu: Process offl...
223
224
225
  }
  
  /*
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
226
   * Return true if an RCU grace period is in progress.  The READ_ONCE()s
fc2219d49   Paul E. McKenney   rcu: Clean up cod...
227
228
229
230
231
   * permit this function to be invoked without holding the root rcu_node
   * structure's ->lock, but of course results can be subject to change.
   */
  static int rcu_gp_in_progress(struct rcu_state *rsp)
  {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
232
  	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
fc2219d49   Paul E. McKenney   rcu: Clean up cod...
233
234
235
  }
  
  /*
d6714c22b   Paul E. McKenney   rcu: Renamings to...
236
   * Note a quiescent state.  Because we do not need to know
b1f77b058   Ingo Molnar   kmemtrace, rcu: f...
237
   * how many quiescent states passed, just if there was at least
d6714c22b   Paul E. McKenney   rcu: Renamings to...
238
   * one since the start of the grace period, this just sets a flag.
e4cc1f22b   Paul E. McKenney   rcu: Simplify qui...
239
   * The caller must have disabled preemption.
b1f77b058   Ingo Molnar   kmemtrace, rcu: f...
240
   */
284a8c93a   Paul E. McKenney   rcu: Per-CPU oper...
241
  void rcu_sched_qs(void)
b1f77b058   Ingo Molnar   kmemtrace, rcu: f...
242
  {
fecbf6f01   Paul E. McKenney   rcu: Simplify rcu...
243
244
245
246
247
248
249
250
  	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
  		return;
  	trace_rcu_grace_period(TPS("rcu_sched"),
  			       __this_cpu_read(rcu_sched_data.gpnum),
  			       TPS("cpuqs"));
  	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
  	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
  		return;
46a5d164d   Paul E. McKenney   rcu: Stop disabli...
251
252
253
  	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
  	rcu_report_exp_rdp(&rcu_sched_state,
  			   this_cpu_ptr(&rcu_sched_data), true);
b1f77b058   Ingo Molnar   kmemtrace, rcu: f...
254
  }
284a8c93a   Paul E. McKenney   rcu: Per-CPU oper...
255
  void rcu_bh_qs(void)
b1f77b058   Ingo Molnar   kmemtrace, rcu: f...
256
  {
5b74c4589   Paul E. McKenney   rcu: Make ->cpu_n...
257
  	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
284a8c93a   Paul E. McKenney   rcu: Per-CPU oper...
258
259
260
  		trace_rcu_grace_period(TPS("rcu_bh"),
  				       __this_cpu_read(rcu_bh_data.gpnum),
  				       TPS("cpuqs"));
5b74c4589   Paul E. McKenney   rcu: Make ->cpu_n...
261
  		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
284a8c93a   Paul E. McKenney   rcu: Per-CPU oper...
262
  	}
b1f77b058   Ingo Molnar   kmemtrace, rcu: f...
263
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
264

4a81e8328   Paul E. McKenney   rcu: Reduce overh...
265
266
267
268
269
270
271
272
273
274
  static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
  
  static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
  	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
  	.dynticks = ATOMIC_INIT(1),
  #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
  	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
  	.dynticks_idle = ATOMIC_INIT(1),
  #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
  };
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
275
276
  DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
  EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
277
278
279
280
281
282
283
284
285
286
  /*
   * Let the RCU core know that this CPU has gone through the scheduler,
   * which is a quiescent state.  This is called when the need for a
   * quiescent state is urgent, so we burn an atomic operation and full
   * memory barriers to let the RCU core know about it, regardless of what
   * this CPU might (or might not) do in the near future.
   *
   * We inform the RCU core by emulating a zero-duration dyntick-idle
   * period, which we in turn do by incrementing the ->dynticks counter
   * by two.
46a5d164d   Paul E. McKenney   rcu: Stop disabli...
287
288
   *
   * The caller must have disabled interrupts.
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
289
290
291
   */
  static void rcu_momentary_dyntick_idle(void)
  {
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
292
293
294
295
  	struct rcu_data *rdp;
  	struct rcu_dynticks *rdtp;
  	int resched_mask;
  	struct rcu_state *rsp;
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
296
297
298
299
300
301
302
303
304
305
306
307
308
  	/*
  	 * Yes, we can lose flag-setting operations.  This is OK, because
  	 * the flag will be set again after some delay.
  	 */
  	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
  	raw_cpu_write(rcu_sched_qs_mask, 0);
  
  	/* Find the flavor that needs a quiescent state. */
  	for_each_rcu_flavor(rsp) {
  		rdp = raw_cpu_ptr(rsp->rda);
  		if (!(resched_mask & rsp->flavor_mask))
  			continue;
  		smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
309
310
  		if (READ_ONCE(rdp->mynode->completed) !=
  		    READ_ONCE(rdp->cond_resched_completed))
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
311
312
313
314
315
316
317
318
319
320
321
322
323
324
  			continue;
  
  		/*
  		 * Pretend to be momentarily idle for the quiescent state.
  		 * This allows the grace-period kthread to record the
  		 * quiescent state, with no need for this CPU to do anything
  		 * further.
  		 */
  		rdtp = this_cpu_ptr(&rcu_dynticks);
  		smp_mb__before_atomic(); /* Earlier stuff before QS. */
  		atomic_add(2, &rdtp->dynticks);  /* QS. */
  		smp_mb__after_atomic(); /* Later stuff after QS. */
  		break;
  	}
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
325
  }
25502a6c1   Paul E. McKenney   rcu: refactor RCU...
326
327
328
  /*
   * Note a context switch.  This is a quiescent state for RCU-sched,
   * and requires special handling for preemptible RCU.
46a5d164d   Paul E. McKenney   rcu: Stop disabli...
329
   * The caller must have disabled interrupts.
25502a6c1   Paul E. McKenney   rcu: refactor RCU...
330
   */
38200cf24   Paul E. McKenney   rcu: Remove "cpu"...
331
  void rcu_note_context_switch(void)
25502a6c1   Paul E. McKenney   rcu: refactor RCU...
332
  {
bb73c52ba   Boqun Feng   rcu: Don't disabl...
333
  	barrier(); /* Avoid RCU read-side critical sections leaking down. */
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
334
  	trace_rcu_utilization(TPS("Start context switch"));
284a8c93a   Paul E. McKenney   rcu: Per-CPU oper...
335
  	rcu_sched_qs();
38200cf24   Paul E. McKenney   rcu: Remove "cpu"...
336
  	rcu_preempt_note_context_switch();
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
337
338
  	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
  		rcu_momentary_dyntick_idle();
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
339
  	trace_rcu_utilization(TPS("End context switch"));
bb73c52ba   Boqun Feng   rcu: Don't disabl...
340
  	barrier(); /* Avoid RCU read-side critical sections leaking up. */
25502a6c1   Paul E. McKenney   rcu: refactor RCU...
341
  }
29ce83100   Gleb Natapov   rcu: provide rcu_...
342
  EXPORT_SYMBOL_GPL(rcu_note_context_switch);
25502a6c1   Paul E. McKenney   rcu: refactor RCU...
343

5cd37193c   Paul E. McKenney   rcu: Make cond_re...
344
  /*
1925d1967   Paul E. McKenney   rcu: Fix a couple...
345
   * Register a quiescent state for all RCU flavors.  If there is an
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
346
347
   * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
   * dyntick-idle quiescent state visible to other CPUs (but only for those
1925d1967   Paul E. McKenney   rcu: Fix a couple...
348
   * RCU flavors in desperate need of a quiescent state, which will normally
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
349
350
   * be none of them).  Either way, do a lightweight quiescent state for
   * all RCU flavors.
bb73c52ba   Boqun Feng   rcu: Don't disabl...
351
352
353
354
355
   *
   * The barrier() calls are redundant in the common case when this is
   * called externally, but just in case this is called from within this
   * file.
   *
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
356
357
358
   */
  void rcu_all_qs(void)
  {
46a5d164d   Paul E. McKenney   rcu: Stop disabli...
359
  	unsigned long flags;
bb73c52ba   Boqun Feng   rcu: Don't disabl...
360
  	barrier(); /* Avoid RCU read-side critical sections leaking down. */
46a5d164d   Paul E. McKenney   rcu: Stop disabli...
361
362
  	if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
  		local_irq_save(flags);
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
363
  		rcu_momentary_dyntick_idle();
46a5d164d   Paul E. McKenney   rcu: Stop disabli...
364
365
  		local_irq_restore(flags);
  	}
a1e122484   Paul E. McKenney   rcu: Make cond_re...
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
  	if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))) {
  		/*
  		 * Yes, we just checked a per-CPU variable with preemption
  		 * enabled, so we might be migrated to some other CPU at
  		 * this point.  That is OK because in that case, the
  		 * migration will supply the needed quiescent state.
  		 * We might end up needlessly disabling preemption and
  		 * invoking rcu_sched_qs() on the destination CPU, but
  		 * the probability and cost are both quite low, so this
  		 * should not be a problem in practice.
  		 */
  		preempt_disable();
  		rcu_sched_qs();
  		preempt_enable();
  	}
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
381
  	this_cpu_inc(rcu_qs_ctr);
bb73c52ba   Boqun Feng   rcu: Don't disabl...
382
  	barrier(); /* Avoid RCU read-side critical sections leaking up. */
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
383
384
  }
  EXPORT_SYMBOL_GPL(rcu_all_qs);
878d7439d   Eric Dumazet   rcu: Fix batch-li...
385
386
387
  static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
  static long qhimark = 10000;	/* If this many pending, ignore blimit. */
  static long qlowmark = 100;	/* Once only this many pending, use blimit. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
388

878d7439d   Eric Dumazet   rcu: Fix batch-li...
389
390
391
  module_param(blimit, long, 0444);
  module_param(qhimark, long, 0444);
  module_param(qlowmark, long, 0444);
3d76c0829   Paul E. McKenney   rcu: Clean up cod...
392

026ad2835   Paul E. McKenney   rcu: Drive quiesc...
393
394
  static ulong jiffies_till_first_fqs = ULONG_MAX;
  static ulong jiffies_till_next_fqs = ULONG_MAX;
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
395
  static bool rcu_kick_kthreads;
d40011f60   Paul E. McKenney   rcu: Control grac...
396
397
398
  
  module_param(jiffies_till_first_fqs, ulong, 0644);
  module_param(jiffies_till_next_fqs, ulong, 0644);
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
399
  module_param(rcu_kick_kthreads, bool, 0644);
d40011f60   Paul E. McKenney   rcu: Control grac...
400

4a81e8328   Paul E. McKenney   rcu: Reduce overh...
401
402
403
404
405
406
  /*
   * How long the grace period must be before we start recruiting
   * quiescent-state help from rcu_note_context_switch().
   */
  static ulong jiffies_till_sched_qs = HZ / 20;
  module_param(jiffies_till_sched_qs, ulong, 0644);
48a7639ce   Paul E. McKenney   rcu: Make callers...
407
  static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
408
  				  struct rcu_data *rdp);
217af2a2f   Paul E. McKenney   nohz_full: Add fu...
409
410
411
412
  static void force_qs_rnp(struct rcu_state *rsp,
  			 int (*f)(struct rcu_data *rsp, bool *isidle,
  				  unsigned long *maxj),
  			 bool *isidle, unsigned long *maxj);
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
413
  static void force_quiescent_state(struct rcu_state *rsp);
e3950ecd5   Paul E. McKenney   rcu: Remove "cpu"...
414
  static int rcu_pending(void);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
415
416
  
  /*
917963d0b   Paul E. McKenney   rcutorture: Check...
417
   * Return the number of RCU batches started thus far for debug & stats.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
418
   */
917963d0b   Paul E. McKenney   rcutorture: Check...
419
420
421
422
423
424
425
426
  unsigned long rcu_batches_started(void)
  {
  	return rcu_state_p->gpnum;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_started);
  
  /*
   * Return the number of RCU-sched batches started thus far for debug & stats.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
427
   */
917963d0b   Paul E. McKenney   rcutorture: Check...
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
  unsigned long rcu_batches_started_sched(void)
  {
  	return rcu_sched_state.gpnum;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
  
  /*
   * Return the number of RCU BH batches started thus far for debug & stats.
   */
  unsigned long rcu_batches_started_bh(void)
  {
  	return rcu_bh_state.gpnum;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
  
  /*
   * Return the number of RCU batches completed thus far for debug & stats.
   */
  unsigned long rcu_batches_completed(void)
  {
  	return rcu_state_p->completed;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_completed);
  
  /*
   * Return the number of RCU-sched batches completed thus far for debug & stats.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
454
   */
9733e4f0a   Paul E. McKenney   rcu: Make _batche...
455
  unsigned long rcu_batches_completed_sched(void)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
456
  {
d6714c22b   Paul E. McKenney   rcu: Renamings to...
457
  	return rcu_sched_state.completed;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
458
  }
d6714c22b   Paul E. McKenney   rcu: Renamings to...
459
  EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
460
461
  
  /*
917963d0b   Paul E. McKenney   rcutorture: Check...
462
   * Return the number of RCU BH batches completed thus far for debug & stats.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
463
   */
9733e4f0a   Paul E. McKenney   rcu: Make _batche...
464
  unsigned long rcu_batches_completed_bh(void)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
465
466
467
468
469
470
  {
  	return rcu_bh_state.completed;
  }
  EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
  
  /*
291783b8a   Paul E. McKenney   rcutorture: Exped...
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
   * Return the number of RCU expedited batches completed thus far for
   * debug & stats.  Odd numbers mean that a batch is in progress, even
   * numbers mean idle.  The value returned will thus be roughly double
   * the cumulative batches since boot.
   */
  unsigned long rcu_exp_batches_completed(void)
  {
  	return rcu_state_p->expedited_sequence;
  }
  EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
  
  /*
   * Return the number of RCU-sched expedited batches completed thus far
   * for debug & stats.  Similar to rcu_exp_batches_completed().
   */
  unsigned long rcu_exp_batches_completed_sched(void)
  {
  	return rcu_sched_state.expedited_sequence;
  }
  EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
  
  /*
a381d757d   Andreea-Cristina Bernat   rcu: Merge rcu_sc...
493
494
495
496
   * Force a quiescent state.
   */
  void rcu_force_quiescent_state(void)
  {
e534165bb   Uma Sharma   rcu: Variable nam...
497
  	force_quiescent_state(rcu_state_p);
a381d757d   Andreea-Cristina Bernat   rcu: Merge rcu_sc...
498
499
500
501
  }
  EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
  
  /*
bf66f18e7   Paul E. McKenney   rcu: Add force_qu...
502
503
504
505
   * Force a quiescent state for RCU BH.
   */
  void rcu_bh_force_quiescent_state(void)
  {
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
506
  	force_quiescent_state(&rcu_bh_state);
bf66f18e7   Paul E. McKenney   rcu: Add force_qu...
507
508
509
510
  }
  EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
  
  /*
e7580f338   Paul E. McKenney   rcu: Get rcu_sche...
511
512
513
514
515
516
517
518
519
   * Force a quiescent state for RCU-sched.
   */
  void rcu_sched_force_quiescent_state(void)
  {
  	force_quiescent_state(&rcu_sched_state);
  }
  EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
  
  /*
afea227fd   Paul E. McKenney   rcutorture: Expor...
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
   * Show the state of the grace-period kthreads.
   */
  void show_rcu_gp_kthreads(void)
  {
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp) {
  		pr_info("%s: wait state: %d ->state: %#lx
  ",
  			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
  		/* sched_show_task(rsp->gp_kthread); */
  	}
  }
  EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
  
  /*
4a2986568   Paul E. McKenney   rcu: make rcutort...
536
537
538
539
540
541
542
543
544
545
546
547
548
549
   * Record the number of times rcutorture tests have been initiated and
   * terminated.  This information allows the debugfs tracing stats to be
   * correlated to the rcutorture messages, even when the rcutorture module
   * is being repeatedly loaded and unloaded.  In other words, we cannot
   * store this state in rcutorture itself.
   */
  void rcutorture_record_test_transition(void)
  {
  	rcutorture_testseq++;
  	rcutorture_vernum = 0;
  }
  EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
  
  /*
ad0dc7f94   Paul E. McKenney   rcutorture: Add f...
550
551
552
553
554
555
556
557
558
   * Send along grace-period-related data for rcutorture diagnostics.
   */
  void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
  			    unsigned long *gpnum, unsigned long *completed)
  {
  	struct rcu_state *rsp = NULL;
  
  	switch (test_type) {
  	case RCU_FLAVOR:
e534165bb   Uma Sharma   rcu: Variable nam...
559
  		rsp = rcu_state_p;
ad0dc7f94   Paul E. McKenney   rcutorture: Add f...
560
561
562
563
564
565
566
567
568
569
570
  		break;
  	case RCU_BH_FLAVOR:
  		rsp = &rcu_bh_state;
  		break;
  	case RCU_SCHED_FLAVOR:
  		rsp = &rcu_sched_state;
  		break;
  	default:
  		break;
  	}
  	if (rsp != NULL) {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
571
572
573
  		*flags = READ_ONCE(rsp->gp_flags);
  		*gpnum = READ_ONCE(rsp->gpnum);
  		*completed = READ_ONCE(rsp->completed);
ad0dc7f94   Paul E. McKenney   rcutorture: Add f...
574
575
576
577
578
579
580
581
582
  		return;
  	}
  	*flags = 0;
  	*gpnum = 0;
  	*completed = 0;
  }
  EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
  
  /*
4a2986568   Paul E. McKenney   rcu: make rcutort...
583
584
585
586
587
588
589
590
591
592
593
   * Record the number of writer passes through the current rcutorture test.
   * This is also used to correlate debugfs tracing stats with the rcutorture
   * messages.
   */
  void rcutorture_record_progress(unsigned long vernum)
  {
  	rcutorture_vernum++;
  }
  EXPORT_SYMBOL_GPL(rcutorture_record_progress);
  
  /*
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
594
595
596
597
598
   * Does the CPU have callbacks ready to be invoked?
   */
  static int
  cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
  {
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
599
600
  	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
  	       rdp->nxttail[RCU_DONE_TAIL] != NULL;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
601
602
603
  }
  
  /*
365187fbc   Paul E. McKenney   rcu: Update cpu_n...
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
   * Return the root node of the specified rcu_state structure.
   */
  static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
  {
  	return &rsp->node[0];
  }
  
  /*
   * Is there any need for future grace periods?
   * Interrupts must be disabled.  If the caller does not hold the root
   * rnp_node structure's ->lock, the results are advisory only.
   */
  static int rcu_future_needs_gp(struct rcu_state *rsp)
  {
  	struct rcu_node *rnp = rcu_get_root(rsp);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
619
  	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
365187fbc   Paul E. McKenney   rcu: Update cpu_n...
620
  	int *fp = &rnp->need_future_gp[idx];
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
621
  	return READ_ONCE(*fp);
365187fbc   Paul E. McKenney   rcu: Update cpu_n...
622
623
624
  }
  
  /*
dc35c8934   Paul E. McKenney   rcu: Tag callback...
625
626
627
   * Does the current CPU require a not-yet-started grace period?
   * The caller must have disabled interrupts to prevent races with
   * normal callback registry.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
628
   */
d117c8aa1   Paul E. McKenney   rcu: Make cpu_nee...
629
  static bool
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
630
631
  cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
  {
dc35c8934   Paul E. McKenney   rcu: Tag callback...
632
  	int i;
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
633

dc35c8934   Paul E. McKenney   rcu: Tag callback...
634
  	if (rcu_gp_in_progress(rsp))
d117c8aa1   Paul E. McKenney   rcu: Make cpu_nee...
635
  		return false;  /* No, a grace period is already in progress. */
365187fbc   Paul E. McKenney   rcu: Update cpu_n...
636
  	if (rcu_future_needs_gp(rsp))
d117c8aa1   Paul E. McKenney   rcu: Make cpu_nee...
637
  		return true;  /* Yes, a no-CBs CPU needs one. */
dc35c8934   Paul E. McKenney   rcu: Tag callback...
638
  	if (!rdp->nxttail[RCU_NEXT_TAIL])
d117c8aa1   Paul E. McKenney   rcu: Make cpu_nee...
639
  		return false;  /* No, this is a no-CBs (or offline) CPU. */
dc35c8934   Paul E. McKenney   rcu: Tag callback...
640
  	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
d117c8aa1   Paul E. McKenney   rcu: Make cpu_nee...
641
  		return true;  /* Yes, CPU has newly registered callbacks. */
dc35c8934   Paul E. McKenney   rcu: Tag callback...
642
643
  	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
  		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
644
  		    ULONG_CMP_LT(READ_ONCE(rsp->completed),
dc35c8934   Paul E. McKenney   rcu: Tag callback...
645
  				 rdp->nxtcompleted[i]))
d117c8aa1   Paul E. McKenney   rcu: Make cpu_nee...
646
647
  			return true;  /* Yes, CBs for future grace period. */
  	return false; /* No grace period needed. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
648
649
650
  }
  
  /*
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
651
   * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
652
653
654
655
656
   *
   * If the new value of the ->dynticks_nesting counter now is zero,
   * we really have entered idle, and must do the appropriate accounting.
   * The caller must have disabled interrupts.
   */
28ced795c   Christoph Lameter   rcu: Remove rcu_d...
657
  static void rcu_eqs_enter_common(long long oldval, bool user)
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
658
  {
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
659
660
  	struct rcu_state *rsp;
  	struct rcu_data *rdp;
28ced795c   Christoph Lameter   rcu: Remove rcu_d...
661
  	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
662

f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
663
  	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
1ce46ee59   Paul E. McKenney   rcu: Conditionall...
664
665
  	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  	    !user && !is_idle_task(current)) {
289828e62   Paul E. McKenney   rcu: Silence unus...
666
667
  		struct task_struct *idle __maybe_unused =
  			idle_task(smp_processor_id());
0989cb467   Paul E. McKenney   rcu: Add more inf...
668

f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
669
  		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
274529ba9   Paul E. McKenney   rcu: Consolidate ...
670
  		rcu_ftrace_dump(DUMP_ORIG);
0989cb467   Paul E. McKenney   rcu: Add more inf...
671
672
673
  		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
  			  current->pid, current->comm,
  			  idle->pid, idle->comm); /* must be idle task! */
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
674
  	}
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
675
676
677
678
  	for_each_rcu_flavor(rsp) {
  		rdp = this_cpu_ptr(rsp->rda);
  		do_nocb_deferred_wakeup(rdp);
  	}
198bbf812   Paul E. McKenney   rcu: Remove "cpu"...
679
  	rcu_prepare_for_idle();
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
680
  	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
4e857c58e   Peter Zijlstra   arch: Mass conver...
681
  	smp_mb__before_atomic();  /* See above. */
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
682
  	atomic_inc(&rdtp->dynticks);
4e857c58e   Peter Zijlstra   arch: Mass conver...
683
  	smp_mb__after_atomic();  /* Force ordering with next sojourn. */
1ce46ee59   Paul E. McKenney   rcu: Conditionall...
684
685
  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  		     atomic_read(&rdtp->dynticks) & 0x1);
176f8f7a5   Paul E. McKenney   rcu: Make TASKS_R...
686
  	rcu_dynticks_task_enter();
c44e2cdda   Paul E. McKenney   rcu: Check for id...
687
688
  
  	/*
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
689
  	 * It is illegal to enter an extended quiescent state while
c44e2cdda   Paul E. McKenney   rcu: Check for id...
690
691
  	 * in an RCU read-side critical section.
  	 */
f78f5b90c   Paul E. McKenney   rcu: Rename rcu_l...
692
693
694
695
696
697
  	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
  			 "Illegal idle entry in RCU read-side critical section.");
  	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),
  			 "Illegal idle entry in RCU-bh read-side critical section.");
  	RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),
  			 "Illegal idle entry in RCU-sched read-side critical section.");
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
698
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
699

adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
700
701
702
  /*
   * Enter an RCU extended quiescent state, which can be either the
   * idle loop or adaptive-tickless usermode execution.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
703
   */
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
704
  static void rcu_eqs_enter(bool user)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
705
  {
4145fa7fb   Paul E. McKenney   rcu: Deconfuse dy...
706
  	long long oldval;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
707
  	struct rcu_dynticks *rdtp;
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
708
  	rdtp = this_cpu_ptr(&rcu_dynticks);
4145fa7fb   Paul E. McKenney   rcu: Deconfuse dy...
709
  	oldval = rdtp->dynticks_nesting;
1ce46ee59   Paul E. McKenney   rcu: Conditionall...
710
711
  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  		     (oldval & DYNTICK_TASK_NEST_MASK) == 0);
3a5924052   Paul E. McKenney   rcu: Allow task-l...
712
  	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
29e37d814   Paul E. McKenney   rcu: Allow nestin...
713
  		rdtp->dynticks_nesting = 0;
28ced795c   Christoph Lameter   rcu: Remove rcu_d...
714
  		rcu_eqs_enter_common(oldval, user);
3a5924052   Paul E. McKenney   rcu: Allow task-l...
715
  	} else {
29e37d814   Paul E. McKenney   rcu: Allow nestin...
716
  		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
3a5924052   Paul E. McKenney   rcu: Allow task-l...
717
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
718
  }
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
  
  /**
   * rcu_idle_enter - inform RCU that current CPU is entering idle
   *
   * Enter idle mode, in other words, -leave- the mode in which RCU
   * read-side critical sections can occur.  (Though RCU read-side
   * critical sections can occur in irq handlers in idle, a possibility
   * handled by irq_enter() and irq_exit().)
   *
   * We crowbar the ->dynticks_nesting field to zero to allow for
   * the possibility of usermode upcalls having messed up our count
   * of interrupt nesting level during the prior busy period.
   */
  void rcu_idle_enter(void)
  {
c5d900bf6   Frederic Weisbecker   rcu: Allow rcu_us...
734
735
736
  	unsigned long flags;
  
  	local_irq_save(flags);
cb349ca95   Paul E. McKenney   rcu: Apply micro-...
737
  	rcu_eqs_enter(false);
28ced795c   Christoph Lameter   rcu: Remove rcu_d...
738
  	rcu_sysidle_enter(0);
c5d900bf6   Frederic Weisbecker   rcu: Allow rcu_us...
739
  	local_irq_restore(flags);
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
740
  }
8a2ecf474   Paul E. McKenney   rcu: Add RCU_NONI...
741
  EXPORT_SYMBOL_GPL(rcu_idle_enter);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
742

d1ec4c34c   Paul E. McKenney   rcu: Drop RCU_USE...
743
  #ifdef CONFIG_NO_HZ_FULL
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
744
  /**
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
745
746
747
748
749
750
751
752
753
   * rcu_user_enter - inform RCU that we are resuming userspace.
   *
   * Enter RCU idle mode right before resuming userspace.  No use of RCU
   * is permitted between this call and rcu_user_exit(). This way the
   * CPU doesn't need to maintain the tick for RCU maintenance purposes
   * when the CPU runs in userspace.
   */
  void rcu_user_enter(void)
  {
91d1aa43d   Frederic Weisbecker   context_tracking:...
754
  	rcu_eqs_enter(1);
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
755
  }
d1ec4c34c   Paul E. McKenney   rcu: Drop RCU_USE...
756
  #endif /* CONFIG_NO_HZ_FULL */
19dd1591f   Frederic Weisbecker   rcu: New rcu_user...
757
758
  
  /**
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
759
760
761
762
   * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
   *
   * Exit from an interrupt handler, which might possibly result in entering
   * idle mode, in other words, leaving the mode in which read-side critical
7c9906ca5   Paul E. McKenney   rcu: Don't redund...
763
   * sections can occur.  The caller must have disabled interrupts.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
764
   *
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
765
766
767
768
769
770
771
772
   * This code assumes that the idle loop never does anything that might
   * result in unbalanced calls to irq_enter() and irq_exit().  If your
   * architecture violates this assumption, RCU will give you what you
   * deserve, good and hard.  But very infrequently and irreproducibly.
   *
   * Use things like work queues to work around this limitation.
   *
   * You have been warned.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
773
   */
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
774
  void rcu_irq_exit(void)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
775
  {
4145fa7fb   Paul E. McKenney   rcu: Deconfuse dy...
776
  	long long oldval;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
777
  	struct rcu_dynticks *rdtp;
7c9906ca5   Paul E. McKenney   rcu: Don't redund...
778
  	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_exit() invoked with irqs enabled!!!");
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
779
  	rdtp = this_cpu_ptr(&rcu_dynticks);
4145fa7fb   Paul E. McKenney   rcu: Deconfuse dy...
780
  	oldval = rdtp->dynticks_nesting;
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
781
  	rdtp->dynticks_nesting--;
1ce46ee59   Paul E. McKenney   rcu: Conditionall...
782
783
  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  		     rdtp->dynticks_nesting < 0);
b6fc60201   Frederic Weisbecker   rcu: Don't check ...
784
  	if (rdtp->dynticks_nesting)
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
785
  		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
b6fc60201   Frederic Weisbecker   rcu: Don't check ...
786
  	else
28ced795c   Christoph Lameter   rcu: Remove rcu_d...
787
788
  		rcu_eqs_enter_common(oldval, true);
  	rcu_sysidle_enter(1);
7c9906ca5   Paul E. McKenney   rcu: Don't redund...
789
790
791
792
793
794
795
796
797
798
799
  }
  
  /*
   * Wrapper for rcu_irq_exit() where interrupts are enabled.
   */
  void rcu_irq_exit_irqson(void)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	rcu_irq_exit();
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
800
801
802
803
  	local_irq_restore(flags);
  }
  
  /*
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
804
   * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
805
806
807
808
809
   *
   * If the new value of the ->dynticks_nesting counter was previously zero,
   * we really have exited idle, and must do the appropriate accounting.
   * The caller must have disabled interrupts.
   */
28ced795c   Christoph Lameter   rcu: Remove rcu_d...
810
  static void rcu_eqs_exit_common(long long oldval, int user)
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
811
  {
28ced795c   Christoph Lameter   rcu: Remove rcu_d...
812
  	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
176f8f7a5   Paul E. McKenney   rcu: Make TASKS_R...
813
  	rcu_dynticks_task_exit();
4e857c58e   Peter Zijlstra   arch: Mass conver...
814
  	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
23b5c8fa0   Paul E. McKenney   rcu: Decrease mem...
815
816
  	atomic_inc(&rdtp->dynticks);
  	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
4e857c58e   Peter Zijlstra   arch: Mass conver...
817
  	smp_mb__after_atomic();  /* See above. */
1ce46ee59   Paul E. McKenney   rcu: Conditionall...
818
819
  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  		     !(atomic_read(&rdtp->dynticks) & 0x1));
8fa7845df   Paul E. McKenney   rcu: Remove "cpu"...
820
  	rcu_cleanup_after_idle();
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
821
  	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
1ce46ee59   Paul E. McKenney   rcu: Conditionall...
822
823
  	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  	    !user && !is_idle_task(current)) {
289828e62   Paul E. McKenney   rcu: Silence unus...
824
825
  		struct task_struct *idle __maybe_unused =
  			idle_task(smp_processor_id());
0989cb467   Paul E. McKenney   rcu: Add more inf...
826

f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
827
  		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
4145fa7fb   Paul E. McKenney   rcu: Deconfuse dy...
828
  				  oldval, rdtp->dynticks_nesting);
274529ba9   Paul E. McKenney   rcu: Consolidate ...
829
  		rcu_ftrace_dump(DUMP_ORIG);
0989cb467   Paul E. McKenney   rcu: Add more inf...
830
831
832
  		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
  			  current->pid, current->comm,
  			  idle->pid, idle->comm); /* must be idle task! */
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
833
834
  	}
  }
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
835
836
837
  /*
   * Exit an RCU extended quiescent state, which can be either the
   * idle loop or adaptive-tickless usermode execution.
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
838
   */
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
839
  static void rcu_eqs_exit(bool user)
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
840
  {
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
841
842
  	struct rcu_dynticks *rdtp;
  	long long oldval;
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
843
  	rdtp = this_cpu_ptr(&rcu_dynticks);
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
844
  	oldval = rdtp->dynticks_nesting;
1ce46ee59   Paul E. McKenney   rcu: Conditionall...
845
  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
3a5924052   Paul E. McKenney   rcu: Allow task-l...
846
  	if (oldval & DYNTICK_TASK_NEST_MASK) {
29e37d814   Paul E. McKenney   rcu: Allow nestin...
847
  		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
3a5924052   Paul E. McKenney   rcu: Allow task-l...
848
  	} else {
29e37d814   Paul E. McKenney   rcu: Allow nestin...
849
  		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
28ced795c   Christoph Lameter   rcu: Remove rcu_d...
850
  		rcu_eqs_exit_common(oldval, user);
3a5924052   Paul E. McKenney   rcu: Allow task-l...
851
  	}
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
852
  }
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
853
854
855
856
857
858
859
860
861
862
863
864
865
866
  
  /**
   * rcu_idle_exit - inform RCU that current CPU is leaving idle
   *
   * Exit idle mode, in other words, -enter- the mode in which RCU
   * read-side critical sections can occur.
   *
   * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
   * allow for the possibility of usermode upcalls messing up our count
   * of interrupt nesting level during the busy period that is just
   * now starting.
   */
  void rcu_idle_exit(void)
  {
c5d900bf6   Frederic Weisbecker   rcu: Allow rcu_us...
867
868
869
  	unsigned long flags;
  
  	local_irq_save(flags);
cb349ca95   Paul E. McKenney   rcu: Apply micro-...
870
  	rcu_eqs_exit(false);
28ced795c   Christoph Lameter   rcu: Remove rcu_d...
871
  	rcu_sysidle_exit(0);
c5d900bf6   Frederic Weisbecker   rcu: Allow rcu_us...
872
  	local_irq_restore(flags);
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
873
  }
8a2ecf474   Paul E. McKenney   rcu: Add RCU_NONI...
874
  EXPORT_SYMBOL_GPL(rcu_idle_exit);
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
875

d1ec4c34c   Paul E. McKenney   rcu: Drop RCU_USE...
876
  #ifdef CONFIG_NO_HZ_FULL
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
877
  /**
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
878
879
880
881
882
883
884
   * rcu_user_exit - inform RCU that we are exiting userspace.
   *
   * Exit RCU idle mode while entering the kernel because it can
   * run a RCU read side critical section anytime.
   */
  void rcu_user_exit(void)
  {
91d1aa43d   Frederic Weisbecker   context_tracking:...
885
  	rcu_eqs_exit(1);
adf5091e6   Frederic Weisbecker   rcu: New rcu_user...
886
  }
d1ec4c34c   Paul E. McKenney   rcu: Drop RCU_USE...
887
  #endif /* CONFIG_NO_HZ_FULL */
19dd1591f   Frederic Weisbecker   rcu: New rcu_user...
888
889
  
  /**
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
890
891
892
893
   * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
   *
   * Enter an interrupt handler, which might possibly result in exiting
   * idle mode, in other words, entering the mode in which read-side critical
7c9906ca5   Paul E. McKenney   rcu: Don't redund...
894
   * sections can occur.  The caller must have disabled interrupts.
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
   *
   * Note that the Linux kernel is fully capable of entering an interrupt
   * handler that it never exits, for example when doing upcalls to
   * user mode!  This code assumes that the idle loop never does upcalls to
   * user mode.  If your architecture does do upcalls from the idle loop (or
   * does anything else that results in unbalanced calls to the irq_enter()
   * and irq_exit() functions), RCU will give you what you deserve, good
   * and hard.  But very infrequently and irreproducibly.
   *
   * Use things like work queues to work around this limitation.
   *
   * You have been warned.
   */
  void rcu_irq_enter(void)
  {
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
910
911
  	struct rcu_dynticks *rdtp;
  	long long oldval;
7c9906ca5   Paul E. McKenney   rcu: Don't redund...
912
  	RCU_LOCKDEP_WARN(!irqs_disabled(), "rcu_irq_enter() invoked with irqs enabled!!!");
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
913
  	rdtp = this_cpu_ptr(&rcu_dynticks);
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
914
915
  	oldval = rdtp->dynticks_nesting;
  	rdtp->dynticks_nesting++;
1ce46ee59   Paul E. McKenney   rcu: Conditionall...
916
917
  	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
  		     rdtp->dynticks_nesting == 0);
b6fc60201   Frederic Weisbecker   rcu: Don't check ...
918
  	if (oldval)
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
919
  		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
b6fc60201   Frederic Weisbecker   rcu: Don't check ...
920
  	else
28ced795c   Christoph Lameter   rcu: Remove rcu_d...
921
922
  		rcu_eqs_exit_common(oldval, true);
  	rcu_sysidle_exit(1);
7c9906ca5   Paul E. McKenney   rcu: Don't redund...
923
924
925
926
927
928
929
930
931
932
933
  }
  
  /*
   * Wrapper for rcu_irq_enter() where interrupts are enabled.
   */
  void rcu_irq_enter_irqson(void)
  {
  	unsigned long flags;
  
  	local_irq_save(flags);
  	rcu_irq_enter();
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
934
  	local_irq_restore(flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
935
936
937
938
939
  }
  
  /**
   * rcu_nmi_enter - inform RCU of entry to NMI context
   *
734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
940
941
942
943
944
   * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
   * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
   * that the CPU is active.  This implementation permits nested NMIs, as
   * long as the nesting level does not overflow an int.  (You will probably
   * run out of stack space first.)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
945
946
947
   */
  void rcu_nmi_enter(void)
  {
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
948
  	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
949
  	int incby = 2;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
950

734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
  	/* Complain about underflow. */
  	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
  
  	/*
  	 * If idle from RCU viewpoint, atomically increment ->dynticks
  	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
  	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
  	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
  	 * to be in the outermost NMI handler that interrupted an RCU-idle
  	 * period (observation due to Andy Lutomirski).
  	 */
  	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
  		smp_mb__before_atomic();  /* Force delay from prior write. */
  		atomic_inc(&rdtp->dynticks);
  		/* atomic_inc() before later RCU read-side crit sects */
  		smp_mb__after_atomic();  /* See above. */
  		WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
  		incby = 1;
  	}
  	rdtp->dynticks_nmi_nesting += incby;
  	barrier();
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
972
973
974
975
976
  }
  
  /**
   * rcu_nmi_exit - inform RCU of exit from NMI context
   *
734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
977
978
979
980
   * If we are returning from the outermost NMI handler that interrupted an
   * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
   * to let the RCU grace-period handling know that the CPU is back to
   * being RCU-idle.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
981
982
983
   */
  void rcu_nmi_exit(void)
  {
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
984
  	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
985

734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
986
987
988
989
990
991
992
993
994
995
996
997
998
999
  	/*
  	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
  	 * (We are exiting an NMI handler, so RCU better be paying attention
  	 * to us!)
  	 */
  	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
  	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
  
  	/*
  	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
  	 * leave it in non-RCU-idle state.
  	 */
  	if (rdtp->dynticks_nmi_nesting != 1) {
  		rdtp->dynticks_nmi_nesting -= 2;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1000
  		return;
734d16801   Paul E. McKenney   rcu: Make rcu_nmi...
1001
1002
1003
1004
  	}
  
  	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
  	rdtp->dynticks_nmi_nesting = 0;
23b5c8fa0   Paul E. McKenney   rcu: Decrease mem...
1005
  	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
4e857c58e   Peter Zijlstra   arch: Mass conver...
1006
  	smp_mb__before_atomic();  /* See above. */
23b5c8fa0   Paul E. McKenney   rcu: Decrease mem...
1007
  	atomic_inc(&rdtp->dynticks);
4e857c58e   Peter Zijlstra   arch: Mass conver...
1008
  	smp_mb__after_atomic();  /* Force delay to next write. */
23b5c8fa0   Paul E. McKenney   rcu: Decrease mem...
1009
  	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1010
1011
1012
  }
  
  /**
5c173eb8b   Paul E. McKenney   rcu: Consistent r...
1013
1014
1015
1016
1017
1018
1019
   * __rcu_is_watching - are RCU read-side critical sections safe?
   *
   * Return true if RCU is watching the running CPU, which means that
   * this CPU can safely enter RCU read-side critical sections.  Unlike
   * rcu_is_watching(), the caller of __rcu_is_watching() must have at
   * least disabled preemption.
   */
9418fb208   Steven Rostedt   rcu: Do not trace...
1020
  bool notrace __rcu_is_watching(void)
5c173eb8b   Paul E. McKenney   rcu: Consistent r...
1021
1022
1023
1024
1025
1026
  {
  	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
  }
  
  /**
   * rcu_is_watching - see if RCU thinks that the current CPU is idle
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1027
   *
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
1028
   * If the current CPU is in its idle loop and is neither in an interrupt
34240697d   Paul E. McKenney   rcu: Disable pree...
1029
   * or NMI handler, return true.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1030
   */
9418fb208   Steven Rostedt   rcu: Do not trace...
1031
  bool notrace rcu_is_watching(void)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1032
  {
f534ed1fd   Pranith Kumar   rcu: Use bool typ...
1033
  	bool ret;
34240697d   Paul E. McKenney   rcu: Disable pree...
1034

46f00d18f   Alexei Starovoitov   rcu: Make rcu_is_...
1035
  	preempt_disable_notrace();
5c173eb8b   Paul E. McKenney   rcu: Consistent r...
1036
  	ret = __rcu_is_watching();
46f00d18f   Alexei Starovoitov   rcu: Make rcu_is_...
1037
  	preempt_enable_notrace();
34240697d   Paul E. McKenney   rcu: Disable pree...
1038
  	return ret;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1039
  }
5c173eb8b   Paul E. McKenney   rcu: Consistent r...
1040
  EXPORT_SYMBOL_GPL(rcu_is_watching);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1041

62fde6edf   Paul E. McKenney   rcu: Make __call_...
1042
  #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
c0d6d01bf   Paul E. McKenney   rcu: Check for il...
1043
1044
1045
1046
1047
1048
1049
  
  /*
   * Is the current CPU online?  Disable preemption to avoid false positives
   * that could otherwise happen due to the current CPU number being sampled,
   * this task being preempted, its old CPU being taken offline, resuming
   * on some other CPU, then determining that its old CPU is now offline.
   * It is OK to use RCU on an offline processor during initial boot, hence
2036d94a7   Paul E. McKenney   rcu: Rework detec...
1050
1051
1052
1053
1054
1055
   * the check for rcu_scheduler_fully_active.  Note also that it is OK
   * for a CPU coming online to use RCU for one jiffy prior to marking itself
   * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
   * offline to continue to use RCU for one jiffy after marking itself
   * offline in the cpu_online_mask.  This leniency is necessary given the
   * non-atomic nature of the online and offline processing, for example,
4df837425   Thomas Gleixner   rcu: Convert rcut...
1056
1057
   * the fact that a CPU enters the scheduler after completing the teardown
   * of the CPU.
2036d94a7   Paul E. McKenney   rcu: Rework detec...
1058
   *
4df837425   Thomas Gleixner   rcu: Convert rcut...
1059
1060
   * This is also why RCU internally marks CPUs online during in the
   * preparation phase and offline after the CPU has been taken down.
c0d6d01bf   Paul E. McKenney   rcu: Check for il...
1061
1062
1063
1064
1065
1066
   *
   * Disable checking if in an NMI handler because we cannot safely report
   * errors from NMI handlers anyway.
   */
  bool rcu_lockdep_current_cpu_online(void)
  {
2036d94a7   Paul E. McKenney   rcu: Rework detec...
1067
1068
  	struct rcu_data *rdp;
  	struct rcu_node *rnp;
c0d6d01bf   Paul E. McKenney   rcu: Check for il...
1069
1070
1071
  	bool ret;
  
  	if (in_nmi())
f6f7ee9af   Fengguang Wu   rcu: Fix coccinel...
1072
  		return true;
c0d6d01bf   Paul E. McKenney   rcu: Check for il...
1073
  	preempt_disable();
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
1074
  	rdp = this_cpu_ptr(&rcu_sched_data);
2036d94a7   Paul E. McKenney   rcu: Rework detec...
1075
  	rnp = rdp->mynode;
0aa04b055   Paul E. McKenney   rcu: Process offl...
1076
  	ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
c0d6d01bf   Paul E. McKenney   rcu: Check for il...
1077
1078
1079
1080
1081
  	      !rcu_scheduler_fully_active;
  	preempt_enable();
  	return ret;
  }
  EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
62fde6edf   Paul E. McKenney   rcu: Make __call_...
1082
  #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
1083

64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1084
  /**
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
1085
   * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1086
   *
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
1087
1088
1089
   * If the current CPU is idle or running at a first-level (not nested)
   * interrupt from idle, return true.  The caller must have at least
   * disabled preemption.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1090
   */
62e3cb143   Josh Triplett   rcu: Make rcu_is_...
1091
  static int rcu_is_cpu_rrupt_from_idle(void)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1092
  {
c9d4b0af9   Christoph Lameter   rcu: Replace __ge...
1093
  	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1094
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1095
  /*
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1096
1097
   * Snapshot the specified CPU's dynticks counter so that we can later
   * credit them with an implicit quiescent state.  Return 1 if this CPU
1eba8f843   Paul E. McKenney   rcu: Clean up cod...
1098
   * is in dynticks idle mode, which is an extended quiescent state.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1099
   */
217af2a2f   Paul E. McKenney   nohz_full: Add fu...
1100
1101
  static int dyntick_save_progress_counter(struct rcu_data *rdp,
  					 bool *isidle, unsigned long *maxj)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1102
  {
23b5c8fa0   Paul E. McKenney   rcu: Decrease mem...
1103
  	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
0edd1b178   Paul E. McKenney   nohz_full: Add fu...
1104
  	rcu_sysidle_check_cpu(rdp, isidle, maxj);
7941dbdeb   Andreea-Cristina Bernat   rcu: Add event tr...
1105
1106
  	if ((rdp->dynticks_snap & 0x1) == 0) {
  		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1107
  		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
e3663b102   Paul E. McKenney   rcu: Handle gpnum...
1108
  				 rdp->mynode->gpnum))
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1109
  			WRITE_ONCE(rdp->gpwrap, true);
23a9bacd3   Paul E. McKenney   rcu: Set rdp->gpw...
1110
  		return 1;
7941dbdeb   Andreea-Cristina Bernat   rcu: Add event tr...
1111
  	}
23a9bacd3   Paul E. McKenney   rcu: Set rdp->gpw...
1112
  	return 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1113
1114
1115
1116
1117
1118
  }
  
  /*
   * Return true if the specified CPU has passed through a quiescent
   * state by virtue of being in or having passed through an dynticks
   * idle state since the last call to dyntick_save_progress_counter()
a82dcc760   Paul E. McKenney   rcu: Make offline...
1119
   * for this same CPU, or by virtue of having been offline.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1120
   */
217af2a2f   Paul E. McKenney   nohz_full: Add fu...
1121
1122
  static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
  				    bool *isidle, unsigned long *maxj)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1123
  {
7eb4f4553   Paul E. McKenney   rcu: Make rcu_imp...
1124
  	unsigned int curr;
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
1125
  	int *rcrmp;
7eb4f4553   Paul E. McKenney   rcu: Make rcu_imp...
1126
  	unsigned int snap;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1127

7eb4f4553   Paul E. McKenney   rcu: Make rcu_imp...
1128
1129
  	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
  	snap = (unsigned int)rdp->dynticks_snap;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1130
1131
1132
1133
1134
1135
1136
1137
1138
  
  	/*
  	 * If the CPU passed through or entered a dynticks idle phase with
  	 * no active irq/NMI handlers, then we can safely pretend that the CPU
  	 * already acknowledged the request to pass through a quiescent
  	 * state.  Either way, that CPU cannot possibly be in an RCU
  	 * read-side critical section that started before the beginning
  	 * of the current RCU grace period.
  	 */
7eb4f4553   Paul E. McKenney   rcu: Make rcu_imp...
1139
  	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1140
  		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1141
1142
1143
  		rdp->dynticks_fqs++;
  		return 1;
  	}
a82dcc760   Paul E. McKenney   rcu: Make offline...
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
  	/*
  	 * Check for the CPU being offline, but only if the grace period
  	 * is old enough.  We don't need to worry about the CPU changing
  	 * state: If we see it offline even once, it has been through a
  	 * quiescent state.
  	 *
  	 * The reason for insisting that the grace period be at least
  	 * one jiffy old is that CPUs that are not quite online and that
  	 * have just gone offline can still execute RCU read-side critical
  	 * sections.
  	 */
  	if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
  		return 0;  /* Grace period is not old enough. */
  	barrier();
  	if (cpu_is_offline(rdp->cpu)) {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1159
  		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
a82dcc760   Paul E. McKenney   rcu: Make offline...
1160
1161
1162
  		rdp->offline_fqs++;
  		return 1;
  	}
65d798f0f   Paul E. McKenney   rcu: Kick adaptiv...
1163
1164
  
  	/*
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
  	 * A CPU running for an extended time within the kernel can
  	 * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
  	 * even context-switching back and forth between a pair of
  	 * in-kernel CPU-bound tasks cannot advance grace periods.
  	 * So if the grace period is old enough, make the CPU pay attention.
  	 * Note that the unsynchronized assignments to the per-CPU
  	 * rcu_sched_qs_mask variable are safe.  Yes, setting of
  	 * bits can be lost, but they will be set again on the next
  	 * force-quiescent-state pass.  So lost bit sets do not result
  	 * in incorrect behavior, merely in a grace period lasting
  	 * a few jiffies longer than it might otherwise.  Because
  	 * there are at most four threads involved, and because the
  	 * updates are only once every few jiffies, the probability of
  	 * lossage (and thus of slight grace-period extension) is
  	 * quite low.
  	 *
  	 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
  	 * is set too high, we override with half of the RCU CPU stall
  	 * warning delay.
6193c76ab   Paul E. McKenney   rcu: Kick CPU hal...
1184
  	 */
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
1185
1186
1187
  	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
  	if (ULONG_CMP_GE(jiffies,
  			 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
cb1e78cfa   Paul E. McKenney   rcu: Remove ACCES...
1188
  	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1189
1190
1191
  		if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
  			WRITE_ONCE(rdp->cond_resched_completed,
  				   READ_ONCE(rdp->mynode->completed));
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
1192
  			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1193
1194
  			WRITE_ONCE(*rcrmp,
  				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
1195
  		}
4914950aa   Paul E. McKenney   rcu: Stop treatin...
1196
  		rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
6193c76ab   Paul E. McKenney   rcu: Kick CPU hal...
1197
  	}
4914950aa   Paul E. McKenney   rcu: Stop treatin...
1198
1199
1200
1201
1202
  	/* And if it has been a really long time, kick the CPU as well. */
  	if (ULONG_CMP_GE(jiffies,
  			 rdp->rsp->gp_start + 2 * jiffies_till_sched_qs) ||
  	    ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs))
  		resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
a82dcc760   Paul E. McKenney   rcu: Make offline...
1203
  	return 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1204
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1205
1206
  static void record_gp_stall_check_time(struct rcu_state *rsp)
  {
cb1e78cfa   Paul E. McKenney   rcu: Remove ACCES...
1207
  	unsigned long j = jiffies;
6193c76ab   Paul E. McKenney   rcu: Kick CPU hal...
1208
  	unsigned long j1;
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1209
1210
1211
  
  	rsp->gp_start = j;
  	smp_wmb(); /* Record start time before stall time. */
6193c76ab   Paul E. McKenney   rcu: Kick CPU hal...
1212
  	j1 = rcu_jiffies_till_stall_check();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1213
  	WRITE_ONCE(rsp->jiffies_stall, j + j1);
6193c76ab   Paul E. McKenney   rcu: Kick CPU hal...
1214
  	rsp->jiffies_resched = j + j1 / 2;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1215
  	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1216
  }
b637a328b   Paul E. McKenney   rcu: Print remote...
1217
  /*
6b50e119c   Paul E. McKenney   rcutorture: Print...
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
   * Convert a ->gp_state value to a character string.
   */
  static const char *gp_state_getname(short gs)
  {
  	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
  		return "???";
  	return gp_state_names[gs];
  }
  
  /*
fb81a44b8   Paul E. McKenney   rcu: Add GP-kthre...
1228
1229
1230
1231
1232
1233
1234
1235
   * Complain about starvation of grace-period kthread.
   */
  static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
  {
  	unsigned long gpa;
  	unsigned long j;
  
  	j = jiffies;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1236
  	gpa = READ_ONCE(rsp->gp_activity);
b1adb3e27   Paul E. McKenney   rcutorture: Dump...
1237
  	if (j - gpa > 2 * HZ) {
6b50e119c   Paul E. McKenney   rcutorture: Print...
1238
1239
  		pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx
  ",
81e701e43   Paul E. McKenney   rcu: Add more deb...
1240
  		       rsp->name, j - gpa,
319362c90   Paul E. McKenney   rcu: Provide more...
1241
  		       rsp->gpnum, rsp->completed,
6b50e119c   Paul E. McKenney   rcutorture: Print...
1242
1243
  		       rsp->gp_flags,
  		       gp_state_getname(rsp->gp_state), rsp->gp_state,
a0e3a3aa2   Paul E. McKenney   rcutorture: Flag ...
1244
  		       rsp->gp_kthread ? rsp->gp_kthread->state : ~0);
86057b80a   Paul E. McKenney   rcu: Awaken grace...
1245
  		if (rsp->gp_kthread) {
b1adb3e27   Paul E. McKenney   rcutorture: Dump...
1246
  			sched_show_task(rsp->gp_kthread);
86057b80a   Paul E. McKenney   rcu: Awaken grace...
1247
1248
  			wake_up_process(rsp->gp_kthread);
  		}
b1adb3e27   Paul E. McKenney   rcutorture: Dump...
1249
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1250
  }
b637a328b   Paul E. McKenney   rcu: Print remote...
1251
  /*
bc1dce514   Paul E. McKenney   rcu: Don't use NM...
1252
   * Dump stacks of all tasks running on stalled CPUs.
b637a328b   Paul E. McKenney   rcu: Print remote...
1253
1254
1255
1256
1257
1258
1259
1260
   */
  static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
  {
  	int cpu;
  	unsigned long flags;
  	struct rcu_node *rnp;
  
  	rcu_for_each_leaf_node(rsp, rnp) {
6cf100812   Paul E. McKenney   rcu: Add transiti...
1261
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
b637a328b   Paul E. McKenney   rcu: Print remote...
1262
  		if (rnp->qsmask != 0) {
bc75e9998   Mark Rutland   rcu: Correctly ha...
1263
1264
1265
  			for_each_leaf_node_possible_cpu(rnp, cpu)
  				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
  					dump_cpu_task(cpu);
b637a328b   Paul E. McKenney   rcu: Print remote...
1266
  		}
67c583a7d   Boqun Feng   RCU: Privatize rc...
1267
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
b637a328b   Paul E. McKenney   rcu: Print remote...
1268
1269
  	}
  }
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
  /*
   * If too much time has passed in the current grace period, and if
   * so configured, go kick the relevant kthreads.
   */
  static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
  {
  	unsigned long j;
  
  	if (!rcu_kick_kthreads)
  		return;
  	j = READ_ONCE(rsp->jiffies_kick_kthreads);
  	if (time_after(jiffies, j) && rsp->gp_kthread) {
  		WARN_ONCE(1, "Kicking %s grace-period kthread
  ", rsp->name);
5dffed1e5   Paul E. McKenney   rcu: Dump ftrace ...
1284
  		rcu_ftrace_dump(DUMP_ALL);
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1285
1286
1287
1288
  		wake_up_process(rsp->gp_kthread);
  		WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
  	}
  }
088e9d253   Daniel Bristot de Oliveira   rcu: sysctl: Pani...
1289
1290
1291
1292
1293
1294
  static inline void panic_on_rcu_stall(void)
  {
  	if (sysctl_panic_on_rcu_stall)
  		panic("RCU Stall
  ");
  }
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1295
  static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1296
1297
1298
1299
  {
  	int cpu;
  	long delta;
  	unsigned long flags;
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1300
1301
  	unsigned long gpa;
  	unsigned long j;
285fe2948   Paul E. McKenney   rcu: Fix detectio...
1302
  	int ndetected = 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1303
  	struct rcu_node *rnp = rcu_get_root(rsp);
53bb857c3   Paul E. McKenney   rcu: Dump number ...
1304
  	long totqlen = 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1305

8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1306
1307
1308
1309
  	/* Kick and suppress, if so configured. */
  	rcu_stall_kick_kthreads(rsp);
  	if (rcu_cpu_stall_suppress)
  		return;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1310
  	/* Only let one CPU complain about others per time interval. */
6cf100812   Paul E. McKenney   rcu: Add transiti...
1311
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1312
  	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
fc2219d49   Paul E. McKenney   rcu: Clean up cod...
1313
  	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
1314
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1315
1316
  		return;
  	}
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1317
1318
  	WRITE_ONCE(rsp->jiffies_stall,
  		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
67c583a7d   Boqun Feng   RCU: Privatize rc...
1319
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1320

8cdd32a91   Paul E. McKenney   rcu: refer RCU CP...
1321
1322
1323
1324
1325
  	/*
  	 * OK, time to rat on our buddy...
  	 * See Documentation/RCU/stallwarn.txt for info on how to debug
  	 * RCU CPU stall warnings.
  	 */
d7f3e2073   Paul E. McKenney   rcu: Convert rcut...
1326
  	pr_err("INFO: %s detected stalls on CPUs/tasks:",
4300aa642   Paul E. McKenney   rcu: improve RCU ...
1327
  	       rsp->name);
a858af287   Paul E. McKenney   rcu: Print schedu...
1328
  	print_cpu_stall_info_begin();
a0b6c9a78   Paul E. McKenney   rcu: Clean up cod...
1329
  	rcu_for_each_leaf_node(rsp, rnp) {
6cf100812   Paul E. McKenney   rcu: Add transiti...
1330
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
9bc8b5586   Paul E. McKenney   rcu: Suppress NMI...
1331
  		ndetected += rcu_print_task_stall(rnp);
c8020a67e   Paul E. McKenney   rcu: Protect rcu_...
1332
  		if (rnp->qsmask != 0) {
bc75e9998   Mark Rutland   rcu: Correctly ha...
1333
1334
1335
  			for_each_leaf_node_possible_cpu(rnp, cpu)
  				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
  					print_cpu_stall_info(rsp, cpu);
c8020a67e   Paul E. McKenney   rcu: Protect rcu_...
1336
1337
1338
  					ndetected++;
  				}
  		}
67c583a7d   Boqun Feng   RCU: Privatize rc...
1339
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1340
  	}
a858af287   Paul E. McKenney   rcu: Print schedu...
1341

a858af287   Paul E. McKenney   rcu: Print schedu...
1342
  	print_cpu_stall_info_end();
53bb857c3   Paul E. McKenney   rcu: Dump number ...
1343
1344
  	for_each_possible_cpu(cpu)
  		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
83ebe63ea   Paul E. McKenney   rcu: Print negati...
1345
1346
  	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)
  ",
eee058826   Paul E. McKenney   rcu: Add grace-pe...
1347
  	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
83ebe63ea   Paul E. McKenney   rcu: Print negati...
1348
  	       (long)rsp->gpnum, (long)rsp->completed, totqlen);
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1349
  	if (ndetected) {
b637a328b   Paul E. McKenney   rcu: Print remote...
1350
  		rcu_dump_cpu_stacks(rsp);
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1351
  	} else {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1352
1353
  		if (READ_ONCE(rsp->gpnum) != gpnum ||
  		    READ_ONCE(rsp->completed) == gpnum) {
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1354
1355
1356
1357
  			pr_err("INFO: Stall ended before state dump start
  ");
  		} else {
  			j = jiffies;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1358
  			gpa = READ_ONCE(rsp->gp_activity);
237a0f219   Paul E. McKenney   rcu: Detect stall...
1359
1360
  			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx
  ",
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1361
  			       rsp->name, j - gpa, j, gpa,
237a0f219   Paul E. McKenney   rcu: Detect stall...
1362
1363
  			       jiffies_till_next_fqs,
  			       rcu_get_root(rsp)->qsmask);
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1364
1365
1366
1367
  			/* In this case, the current CPU might be at fault. */
  			sched_show_task(current);
  		}
  	}
c1dc0b9c0   Ingo Molnar   debug lockups: Im...
1368

4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
1369
  	/* Complain about tasks blocking the grace period. */
1ed509a22   Paul E. McKenney   rcu: Add RCU_CPU_...
1370
  	rcu_print_detail_task_stall(rsp);
fb81a44b8   Paul E. McKenney   rcu: Add GP-kthre...
1371
  	rcu_check_gp_kthread_starvation(rsp);
088e9d253   Daniel Bristot de Oliveira   rcu: sysctl: Pani...
1372
  	panic_on_rcu_stall();
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
1373
  	force_quiescent_state(rsp);  /* Kick them all. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1374
1375
1376
1377
  }
  
  static void print_cpu_stall(struct rcu_state *rsp)
  {
53bb857c3   Paul E. McKenney   rcu: Dump number ...
1378
  	int cpu;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1379
1380
  	unsigned long flags;
  	struct rcu_node *rnp = rcu_get_root(rsp);
53bb857c3   Paul E. McKenney   rcu: Dump number ...
1381
  	long totqlen = 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1382

8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1383
1384
1385
1386
  	/* Kick and suppress, if so configured. */
  	rcu_stall_kick_kthreads(rsp);
  	if (rcu_cpu_stall_suppress)
  		return;
8cdd32a91   Paul E. McKenney   rcu: refer RCU CP...
1387
1388
1389
1390
1391
  	/*
  	 * OK, time to rat on ourselves...
  	 * See Documentation/RCU/stallwarn.txt for info on how to debug
  	 * RCU CPU stall warnings.
  	 */
d7f3e2073   Paul E. McKenney   rcu: Convert rcut...
1392
  	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
a858af287   Paul E. McKenney   rcu: Print schedu...
1393
1394
1395
  	print_cpu_stall_info_begin();
  	print_cpu_stall_info(rsp, smp_processor_id());
  	print_cpu_stall_info_end();
53bb857c3   Paul E. McKenney   rcu: Dump number ...
1396
1397
  	for_each_possible_cpu(cpu)
  		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
83ebe63ea   Paul E. McKenney   rcu: Print negati...
1398
1399
1400
1401
  	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)
  ",
  		jiffies - rsp->gp_start,
  		(long)rsp->gpnum, (long)rsp->completed, totqlen);
fb81a44b8   Paul E. McKenney   rcu: Add GP-kthre...
1402
1403
  
  	rcu_check_gp_kthread_starvation(rsp);
bc1dce514   Paul E. McKenney   rcu: Don't use NM...
1404
  	rcu_dump_cpu_stacks(rsp);
c1dc0b9c0   Ingo Molnar   debug lockups: Im...
1405

6cf100812   Paul E. McKenney   rcu: Add transiti...
1406
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1407
1408
1409
  	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
  		WRITE_ONCE(rsp->jiffies_stall,
  			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
67c583a7d   Boqun Feng   RCU: Privatize rc...
1410
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
c1dc0b9c0   Ingo Molnar   debug lockups: Im...
1411

088e9d253   Daniel Bristot de Oliveira   rcu: sysctl: Pani...
1412
  	panic_on_rcu_stall();
b021fe3e2   Peter Zijlstra   sched, rcu: Make ...
1413
1414
1415
1416
1417
1418
1419
1420
  	/*
  	 * Attempt to revive the RCU machinery by forcing a context switch.
  	 *
  	 * A context switch would normally allow the RCU state machine to make
  	 * progress and it could be we're stuck in kernel space without context
  	 * switches for an entirely unreasonable amount of time.
  	 */
  	resched_cpu(smp_processor_id());
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1421
1422
1423
1424
  }
  
  static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
  {
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1425
1426
1427
  	unsigned long completed;
  	unsigned long gpnum;
  	unsigned long gps;
bad6e1393   Paul E. McKenney   rcu: get rid of s...
1428
1429
  	unsigned long j;
  	unsigned long js;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1430
  	struct rcu_node *rnp;
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1431
1432
  	if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
  	    !rcu_gp_in_progress(rsp))
c68de2097   Paul E. McKenney   rcu: disable CPU ...
1433
  		return;
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
1434
  	rcu_stall_kick_kthreads(rsp);
cb1e78cfa   Paul E. McKenney   rcu: Remove ACCES...
1435
  	j = jiffies;
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
  
  	/*
  	 * Lots of memory barriers to reject false positives.
  	 *
  	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
  	 * then rsp->gp_start, and finally rsp->completed.  These values
  	 * are updated in the opposite order with memory barriers (or
  	 * equivalent) during grace-period initialization and cleanup.
  	 * Now, a false positive can occur if we get an new value of
  	 * rsp->gp_start and a old value of rsp->jiffies_stall.  But given
  	 * the memory barriers, the only way that this can happen is if one
  	 * grace period ends and another starts between these two fetches.
  	 * Detect this by comparing rsp->completed with the previous fetch
  	 * from rsp->gpnum.
  	 *
  	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
  	 * and rsp->gp_start suffice to forestall false positives.
  	 */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1454
  	gpnum = READ_ONCE(rsp->gpnum);
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1455
  	smp_rmb(); /* Pick up ->gpnum first... */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1456
  	js = READ_ONCE(rsp->jiffies_stall);
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1457
  	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1458
  	gps = READ_ONCE(rsp->gp_start);
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1459
  	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1460
  	completed = READ_ONCE(rsp->completed);
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1461
1462
1463
1464
  	if (ULONG_CMP_GE(completed, gpnum) ||
  	    ULONG_CMP_LT(j, js) ||
  	    ULONG_CMP_GE(gps, js))
  		return; /* No stall or GP completed since entering function. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1465
  	rnp = rdp->mynode;
c96ea7cfd   Paul E. McKenney   rcu: Avoid spurio...
1466
  	if (rcu_gp_in_progress(rsp) &&
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1467
  	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1468
1469
1470
  
  		/* We haven't checked in, so go dump stack. */
  		print_cpu_stall(rsp);
bad6e1393   Paul E. McKenney   rcu: get rid of s...
1471
1472
  	} else if (rcu_gp_in_progress(rsp) &&
  		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1473

bad6e1393   Paul E. McKenney   rcu: get rid of s...
1474
  		/* They had a few time units to dump stack, so complain. */
6ccd2ecd4   Paul E. McKenney   rcu: Improve diag...
1475
  		print_other_cpu_stall(rsp, gpnum);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1476
1477
  	}
  }
53d84e004   Paul E. McKenney   rcu: permit suppr...
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
  /**
   * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
   *
   * Set the stall-warning timeout way off into the future, thus preventing
   * any RCU CPU stall-warning messages from appearing in the current set of
   * RCU grace periods.
   *
   * The caller must disable hard irqs.
   */
  void rcu_cpu_stall_reset(void)
  {
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
1489
1490
1491
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp)
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1492
  		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
53d84e004   Paul E. McKenney   rcu: permit suppr...
1493
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
1494
  /*
d3f3f3f25   Paul E. McKenney   rcu: Abstract def...
1495
1496
1497
   * Initialize the specified rcu_data structure's default callback list
   * to empty.  The default callback list is the one that is not used by
   * no-callbacks CPUs.
3f5d3ea64   Paul E. McKenney   rcu: Consolidate ...
1498
   */
d3f3f3f25   Paul E. McKenney   rcu: Abstract def...
1499
  static void init_default_callback_list(struct rcu_data *rdp)
3f5d3ea64   Paul E. McKenney   rcu: Consolidate ...
1500
1501
1502
1503
1504
1505
1506
1507
1508
  {
  	int i;
  
  	rdp->nxtlist = NULL;
  	for (i = 0; i < RCU_NEXT_SIZE; i++)
  		rdp->nxttail[i] = &rdp->nxtlist;
  }
  
  /*
d3f3f3f25   Paul E. McKenney   rcu: Abstract def...
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
   * Initialize the specified rcu_data structure's callback list to empty.
   */
  static void init_callback_list(struct rcu_data *rdp)
  {
  	if (init_nocb_callback_list(rdp))
  		return;
  	init_default_callback_list(rdp);
  }
  
  /*
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
   * Determine the value that ->completed will have at the end of the
   * next subsequent grace period.  This is used to tag callbacks so that
   * a CPU can invoke callbacks in a timely fashion even if that CPU has
   * been dyntick-idle for an extended period with callbacks under the
   * influence of RCU_FAST_NO_HZ.
   *
   * The caller must hold rnp->lock with interrupts disabled.
   */
  static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
  				       struct rcu_node *rnp)
  {
  	/*
  	 * If RCU is idle, we just wait for the next grace period.
  	 * But we can only be sure that RCU is idle if we are looking
  	 * at the root rcu_node structure -- otherwise, a new grace
  	 * period might have started, but just not yet gotten around
  	 * to initializing the current non-root rcu_node structure.
  	 */
  	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
  		return rnp->completed + 1;
  
  	/*
  	 * Otherwise, wait for a possible partial grace period and
  	 * then the subsequent full grace period.
  	 */
  	return rnp->completed + 2;
  }
  
  /*
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1548
1549
1550
1551
   * Trace-event helper function for rcu_start_future_gp() and
   * rcu_nocb_wait_gp().
   */
  static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
e66c33d57   Steven Rostedt (Red Hat)   rcu: Add const an...
1552
  				unsigned long c, const char *s)
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1553
1554
1555
1556
1557
1558
1559
1560
1561
  {
  	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
  				      rnp->completed, c, rnp->level,
  				      rnp->grplo, rnp->grphi, s);
  }
  
  /*
   * Start some future grace period, as needed to handle newly arrived
   * callbacks.  The required future grace periods are recorded in each
48a7639ce   Paul E. McKenney   rcu: Make callers...
1562
1563
   * rcu_node structure's ->need_future_gp field.  Returns true if there
   * is reason to awaken the grace-period kthread.
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1564
1565
1566
   *
   * The caller must hold the specified rcu_node structure's ->lock.
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1567
1568
1569
  static bool __maybe_unused
  rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
  		    unsigned long *c_out)
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1570
1571
1572
  {
  	unsigned long c;
  	int i;
48a7639ce   Paul E. McKenney   rcu: Make callers...
1573
  	bool ret = false;
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1574
1575
1576
1577
1578
1579
1580
  	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
  
  	/*
  	 * Pick up grace-period number for new callbacks.  If this
  	 * grace period is already marked as needed, return to the caller.
  	 */
  	c = rcu_cbs_completed(rdp->rsp, rnp);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1581
  	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1582
  	if (rnp->need_future_gp[c & 0x1]) {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1583
  		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
48a7639ce   Paul E. McKenney   rcu: Make callers...
1584
  		goto out;
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1585
1586
1587
1588
1589
1590
1591
  	}
  
  	/*
  	 * If either this rcu_node structure or the root rcu_node structure
  	 * believe that a grace period is in progress, then we must wait
  	 * for the one following, which is in "c".  Because our request
  	 * will be noticed at the end of the current grace period, we don't
48bd8e9b8   Pranith Kumar   rcu: Check both r...
1592
1593
1594
1595
1596
1597
1598
  	 * need to explicitly start one.  We only do the lockless check
  	 * of rnp_root's fields if the current rcu_node structure thinks
  	 * there is no grace period in flight, and because we hold rnp->lock,
  	 * the only possible change is when rnp_root's two fields are
  	 * equal, in which case rnp_root->gpnum might be concurrently
  	 * incremented.  But that is OK, as it will just result in our
  	 * doing some extra useless work.
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1599
1600
  	 */
  	if (rnp->gpnum != rnp->completed ||
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1601
  	    READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1602
  		rnp->need_future_gp[c & 0x1]++;
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1603
  		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
48a7639ce   Paul E. McKenney   rcu: Make callers...
1604
  		goto out;
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1605
1606
1607
1608
1609
1610
1611
  	}
  
  	/*
  	 * There might be no grace period in progress.  If we don't already
  	 * hold it, acquire the root rcu_node structure's lock in order to
  	 * start one (if needed).
  	 */
2a67e741b   Peter Zijlstra   rcu: Create trans...
1612
1613
  	if (rnp != rnp_root)
  		raw_spin_lock_rcu_node(rnp_root);
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
  
  	/*
  	 * Get a new grace-period number.  If there really is no grace
  	 * period in progress, it will be smaller than the one we obtained
  	 * earlier.  Adjust callbacks as needed.  Note that even no-CBs
  	 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
  	 */
  	c = rcu_cbs_completed(rdp->rsp, rnp_root);
  	for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
  		if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
  			rdp->nxtcompleted[i] = c;
  
  	/*
  	 * If the needed for the required grace period is already
  	 * recorded, trace and leave.
  	 */
  	if (rnp_root->need_future_gp[c & 0x1]) {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1631
  		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1632
1633
1634
1635
1636
1637
1638
1639
  		goto unlock_out;
  	}
  
  	/* Record the need for the future grace period. */
  	rnp_root->need_future_gp[c & 0x1]++;
  
  	/* If a grace period is not already in progress, start one. */
  	if (rnp_root->gpnum != rnp_root->completed) {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1640
  		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1641
  	} else {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1642
  		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
48a7639ce   Paul E. McKenney   rcu: Make callers...
1643
  		ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1644
1645
1646
  	}
  unlock_out:
  	if (rnp != rnp_root)
67c583a7d   Boqun Feng   RCU: Privatize rc...
1647
  		raw_spin_unlock_rcu_node(rnp_root);
48a7639ce   Paul E. McKenney   rcu: Make callers...
1648
1649
1650
1651
  out:
  	if (c_out != NULL)
  		*c_out = c;
  	return ret;
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
  }
  
  /*
   * Clean up any old requests for the just-ended grace period.  Also return
   * whether any additional grace periods have been requested.  Also invoke
   * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
   * waiting for this grace period to complete.
   */
  static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
  {
  	int c = rnp->completed;
  	int needmore;
  	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1665
1666
  	rnp->need_future_gp[c & 0x1] = 0;
  	needmore = rnp->need_future_gp[(c + 1) & 0x1];
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1667
1668
  	trace_rcu_future_gp(rnp, rdp, c,
  			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1669
1670
1671
1672
  	return needmore;
  }
  
  /*
48a7639ce   Paul E. McKenney   rcu: Make callers...
1673
1674
1675
1676
1677
1678
1679
1680
1681
   * Awaken the grace-period kthread for the specified flavor of RCU.
   * Don't do a self-awaken, and don't bother awakening when there is
   * nothing for the grace-period kthread to do (as in several CPUs
   * raced to awaken, and we lost), and finally don't try to awaken
   * a kthread that has not yet been created.
   */
  static void rcu_gp_kthread_wake(struct rcu_state *rsp)
  {
  	if (current == rsp->gp_kthread ||
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1682
  	    !READ_ONCE(rsp->gp_flags) ||
48a7639ce   Paul E. McKenney   rcu: Make callers...
1683
1684
  	    !rsp->gp_kthread)
  		return;
abedf8e24   Paul Gortmaker   rcu: Use simple w...
1685
  	swake_up(&rsp->gp_wq);
48a7639ce   Paul E. McKenney   rcu: Make callers...
1686
1687
1688
  }
  
  /*
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1689
1690
1691
1692
1693
1694
   * If there is room, assign a ->completed number to any callbacks on
   * this CPU that have not already been assigned.  Also accelerate any
   * callbacks that were previously assigned a ->completed number that has
   * since proven to be too conservative, which can happen if callbacks get
   * assigned a ->completed number while RCU is idle, but with reference to
   * a non-root rcu_node structure.  This function is idempotent, so it does
48a7639ce   Paul E. McKenney   rcu: Make callers...
1695
1696
   * not hurt to call it repeatedly.  Returns an flag saying that we should
   * awaken the RCU grace-period kthread.
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1697
1698
1699
   *
   * The caller must hold rnp->lock with interrupts disabled.
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1700
  static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1701
1702
1703
1704
  			       struct rcu_data *rdp)
  {
  	unsigned long c;
  	int i;
48a7639ce   Paul E. McKenney   rcu: Make callers...
1705
  	bool ret;
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1706
1707
1708
  
  	/* If the CPU has no callbacks, nothing to do. */
  	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
48a7639ce   Paul E. McKenney   rcu: Make callers...
1709
  		return false;
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
  
  	/*
  	 * Starting from the sublist containing the callbacks most
  	 * recently assigned a ->completed number and working down, find the
  	 * first sublist that is not assignable to an upcoming grace period.
  	 * Such a sublist has something in it (first two tests) and has
  	 * a ->completed number assigned that will complete sooner than
  	 * the ->completed number for newly arrived callbacks (last test).
  	 *
  	 * The key point is that any later sublist can be assigned the
  	 * same ->completed number as the newly arrived callbacks, which
  	 * means that the callbacks in any of these later sublist can be
  	 * grouped into a single sublist, whether or not they have already
  	 * been assigned a ->completed number.
  	 */
  	c = rcu_cbs_completed(rsp, rnp);
  	for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
  		if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
  		    !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
  			break;
  
  	/*
  	 * If there are no sublist for unassigned callbacks, leave.
  	 * At the same time, advance "i" one sublist, so that "i" will
  	 * index into the sublist where all the remaining callbacks should
  	 * be grouped into.
  	 */
  	if (++i >= RCU_NEXT_TAIL)
48a7639ce   Paul E. McKenney   rcu: Make callers...
1738
  		return false;
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
  
  	/*
  	 * Assign all subsequent callbacks' ->completed number to the next
  	 * full grace period and group them all in the sublist initially
  	 * indexed by "i".
  	 */
  	for (; i <= RCU_NEXT_TAIL; i++) {
  		rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
  		rdp->nxtcompleted[i] = c;
  	}
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
1749
  	/* Record any needed additional grace periods. */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1750
  	ret = rcu_start_future_gp(rnp, rdp, NULL);
6d4b418c7   Paul E. McKenney   rcu: Trace callba...
1751
1752
1753
  
  	/* Trace depending on how much we were able to accelerate. */
  	if (!*rdp->nxttail[RCU_WAIT_TAIL])
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1754
  		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
6d4b418c7   Paul E. McKenney   rcu: Trace callba...
1755
  	else
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1756
  		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
48a7639ce   Paul E. McKenney   rcu: Make callers...
1757
  	return ret;
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1758
1759
1760
1761
1762
1763
1764
1765
  }
  
  /*
   * Move any callbacks whose grace period has completed to the
   * RCU_DONE_TAIL sublist, then compact the remaining sublists and
   * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
   * sublist.  This function is idempotent, so it does not hurt to
   * invoke it repeatedly.  As long as it is not invoked -too- often...
48a7639ce   Paul E. McKenney   rcu: Make callers...
1766
   * Returns true if the RCU grace-period kthread needs to be awakened.
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1767
1768
1769
   *
   * The caller must hold rnp->lock with interrupts disabled.
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1770
  static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1771
1772
1773
1774
1775
1776
  			    struct rcu_data *rdp)
  {
  	int i, j;
  
  	/* If the CPU has no callbacks, nothing to do. */
  	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
48a7639ce   Paul E. McKenney   rcu: Make callers...
1777
  		return false;
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
  
  	/*
  	 * Find all callbacks whose ->completed numbers indicate that they
  	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
  	 */
  	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
  		if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
  			break;
  		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
  	}
  	/* Clean up any sublist tail pointers that were misordered above. */
  	for (j = RCU_WAIT_TAIL; j < i; j++)
  		rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
  
  	/* Copy down callbacks to fill in empty sublists. */
  	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
  		if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
  			break;
  		rdp->nxttail[j] = rdp->nxttail[i];
  		rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
  	}
  
  	/* Classify any remaining callbacks. */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1801
  	return rcu_accelerate_cbs(rsp, rnp, rdp);
dc35c8934   Paul E. McKenney   rcu: Tag callback...
1802
1803
1804
  }
  
  /*
ba9fbe955   Paul E. McKenney   rcu: Merge __rcu_...
1805
1806
1807
   * Update CPU-local rcu_data state to record the beginnings and ends of
   * grace periods.  The caller must hold the ->lock of the leaf rcu_node
   * structure corresponding to the current CPU, and must have irqs disabled.
48a7639ce   Paul E. McKenney   rcu: Make callers...
1808
   * Returns true if the grace-period kthread needs to be awakened.
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1809
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1810
1811
  static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
  			      struct rcu_data *rdp)
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1812
  {
48a7639ce   Paul E. McKenney   rcu: Make callers...
1813
  	bool ret;
3563a438f   Paul E. McKenney   rcu: Avoid redund...
1814
  	bool need_gp;
48a7639ce   Paul E. McKenney   rcu: Make callers...
1815

ba9fbe955   Paul E. McKenney   rcu: Merge __rcu_...
1816
  	/* Handle the ends of any preceding grace periods first. */
e3663b102   Paul E. McKenney   rcu: Handle gpnum...
1817
  	if (rdp->completed == rnp->completed &&
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1818
  	    !unlikely(READ_ONCE(rdp->gpwrap))) {
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1819

ba9fbe955   Paul E. McKenney   rcu: Merge __rcu_...
1820
  		/* No grace period end, so just accelerate recent callbacks. */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1821
  		ret = rcu_accelerate_cbs(rsp, rnp, rdp);
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1822

dc35c8934   Paul E. McKenney   rcu: Tag callback...
1823
1824
1825
  	} else {
  
  		/* Advance callbacks. */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1826
  		ret = rcu_advance_cbs(rsp, rnp, rdp);
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1827
1828
1829
  
  		/* Remember that we saw this grace-period completion. */
  		rdp->completed = rnp->completed;
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1830
  		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
d09b62dfa   Paul E. McKenney   rcu: Fix synchron...
1831
  	}
398ebe600   Paul E. McKenney   rcu: Make __note_...
1832

7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1833
  	if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
6eaef633d   Paul E. McKenney   rcu: Move code to...
1834
1835
1836
1837
1838
1839
  		/*
  		 * If the current grace period is waiting for this CPU,
  		 * set up to detect a quiescent state, otherwise don't
  		 * go looking for one.
  		 */
  		rdp->gpnum = rnp->gpnum;
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1840
  		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
3563a438f   Paul E. McKenney   rcu: Avoid redund...
1841
1842
  		need_gp = !!(rnp->qsmask & rdp->grpmask);
  		rdp->cpu_no_qs.b.norm = need_gp;
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
1843
  		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
3563a438f   Paul E. McKenney   rcu: Avoid redund...
1844
  		rdp->core_needs_qs = need_gp;
6eaef633d   Paul E. McKenney   rcu: Move code to...
1845
  		zero_cpu_stall_ticks(rdp);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1846
  		WRITE_ONCE(rdp->gpwrap, false);
6eaef633d   Paul E. McKenney   rcu: Move code to...
1847
  	}
48a7639ce   Paul E. McKenney   rcu: Make callers...
1848
  	return ret;
6eaef633d   Paul E. McKenney   rcu: Move code to...
1849
  }
d34ea3221   Paul E. McKenney   rcu: Rename note_...
1850
  static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
6eaef633d   Paul E. McKenney   rcu: Move code to...
1851
1852
  {
  	unsigned long flags;
48a7639ce   Paul E. McKenney   rcu: Make callers...
1853
  	bool needwake;
6eaef633d   Paul E. McKenney   rcu: Move code to...
1854
1855
1856
1857
  	struct rcu_node *rnp;
  
  	local_irq_save(flags);
  	rnp = rdp->mynode;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1858
1859
1860
  	if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
  	     rdp->completed == READ_ONCE(rnp->completed) &&
  	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
2a67e741b   Peter Zijlstra   rcu: Create trans...
1861
  	    !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
6eaef633d   Paul E. McKenney   rcu: Move code to...
1862
1863
1864
  		local_irq_restore(flags);
  		return;
  	}
48a7639ce   Paul E. McKenney   rcu: Make callers...
1865
  	needwake = __note_gp_changes(rsp, rnp, rdp);
67c583a7d   Boqun Feng   RCU: Privatize rc...
1866
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
48a7639ce   Paul E. McKenney   rcu: Make callers...
1867
1868
  	if (needwake)
  		rcu_gp_kthread_wake(rsp);
6eaef633d   Paul E. McKenney   rcu: Move code to...
1869
  }
0f41c0dda   Paul E. McKenney   rcu: Provide diag...
1870
1871
1872
1873
1874
1875
  static void rcu_gp_slow(struct rcu_state *rsp, int delay)
  {
  	if (delay > 0 &&
  	    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
  		schedule_timeout_uninterruptible(delay);
  }
6eaef633d   Paul E. McKenney   rcu: Move code to...
1876
  /*
45fed3e7c   Paul E. McKenney   rcu: Make rcu_gp_...
1877
   * Initialize a new grace period.  Return false if no grace period required.
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1878
   */
45fed3e7c   Paul E. McKenney   rcu: Make rcu_gp_...
1879
  static bool rcu_gp_init(struct rcu_state *rsp)
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1880
  {
0aa04b055   Paul E. McKenney   rcu: Process offl...
1881
  	unsigned long oldmask;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1882
  	struct rcu_data *rdp;
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1883
  	struct rcu_node *rnp = rcu_get_root(rsp);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1884

7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1885
  	WRITE_ONCE(rsp->gp_activity, jiffies);
2a67e741b   Peter Zijlstra   rcu: Create trans...
1886
  	raw_spin_lock_irq_rcu_node(rnp);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1887
  	if (!READ_ONCE(rsp->gp_flags)) {
f7be82093   Paul E. McKenney   rcu: Improve grac...
1888
  		/* Spurious wakeup, tell caller to go back to sleep.  */
67c583a7d   Boqun Feng   RCU: Privatize rc...
1889
  		raw_spin_unlock_irq_rcu_node(rnp);
45fed3e7c   Paul E. McKenney   rcu: Make rcu_gp_...
1890
  		return false;
f7be82093   Paul E. McKenney   rcu: Improve grac...
1891
  	}
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1892
  	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1893

f7be82093   Paul E. McKenney   rcu: Improve grac...
1894
1895
1896
1897
1898
  	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
  		/*
  		 * Grace period already in progress, don't start another.
  		 * Not supposed to be able to happen.
  		 */
67c583a7d   Boqun Feng   RCU: Privatize rc...
1899
  		raw_spin_unlock_irq_rcu_node(rnp);
45fed3e7c   Paul E. McKenney   rcu: Make rcu_gp_...
1900
  		return false;
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1901
  	}
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1902
  	/* Advance to a new grace period and initialize state. */
26cdfedf6   Paul E. McKenney   rcu: Reject memor...
1903
  	record_gp_stall_check_time(rsp);
765a3f4fe   Paul E. McKenney   rcu: Provide grac...
1904
1905
  	/* Record GP times before starting GP, hence smp_store_release(). */
  	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1906
  	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
67c583a7d   Boqun Feng   RCU: Privatize rc...
1907
  	raw_spin_unlock_irq_rcu_node(rnp);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1908

7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1909
  	/*
0aa04b055   Paul E. McKenney   rcu: Process offl...
1910
1911
1912
1913
1914
1915
  	 * Apply per-leaf buffered online and offline operations to the
  	 * rcu_node tree.  Note that this new grace period need not wait
  	 * for subsequent online CPUs, and that quiescent-state forcing
  	 * will handle subsequent offline CPUs.
  	 */
  	rcu_for_each_leaf_node(rsp, rnp) {
0f41c0dda   Paul E. McKenney   rcu: Provide diag...
1916
  		rcu_gp_slow(rsp, gp_preinit_delay);
2a67e741b   Peter Zijlstra   rcu: Create trans...
1917
  		raw_spin_lock_irq_rcu_node(rnp);
0aa04b055   Paul E. McKenney   rcu: Process offl...
1918
1919
1920
  		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
  		    !rnp->wait_blkd_tasks) {
  			/* Nothing to do on this leaf rcu_node structure. */
67c583a7d   Boqun Feng   RCU: Privatize rc...
1921
  			raw_spin_unlock_irq_rcu_node(rnp);
0aa04b055   Paul E. McKenney   rcu: Process offl...
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
  			continue;
  		}
  
  		/* Record old state, apply changes to ->qsmaskinit field. */
  		oldmask = rnp->qsmaskinit;
  		rnp->qsmaskinit = rnp->qsmaskinitnext;
  
  		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
  		if (!oldmask != !rnp->qsmaskinit) {
  			if (!oldmask) /* First online CPU for this rcu_node. */
  				rcu_init_new_rnp(rnp);
  			else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
  				rnp->wait_blkd_tasks = true;
  			else /* Last offline CPU and can propagate. */
  				rcu_cleanup_dead_rnp(rnp);
  		}
  
  		/*
  		 * If all waited-on tasks from prior grace period are
  		 * done, and if all this rcu_node structure's CPUs are
  		 * still offline, propagate up the rcu_node tree and
  		 * clear ->wait_blkd_tasks.  Otherwise, if one of this
  		 * rcu_node structure's CPUs has since come back online,
  		 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
  		 * checks for this, so just call it unconditionally).
  		 */
  		if (rnp->wait_blkd_tasks &&
  		    (!rcu_preempt_has_tasks(rnp) ||
  		     rnp->qsmaskinit)) {
  			rnp->wait_blkd_tasks = false;
  			rcu_cleanup_dead_rnp(rnp);
  		}
67c583a7d   Boqun Feng   RCU: Privatize rc...
1954
  		raw_spin_unlock_irq_rcu_node(rnp);
0aa04b055   Paul E. McKenney   rcu: Process offl...
1955
  	}
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1956
1957
1958
1959
1960
1961
1962
1963
  
  	/*
  	 * Set the quiescent-state-needed bits in all the rcu_node
  	 * structures for all currently online CPUs in breadth-first order,
  	 * starting from the root rcu_node structure, relying on the layout
  	 * of the tree within the rsp->node[] array.  Note that other CPUs
  	 * will access only the leaves of the hierarchy, thus seeing that no
  	 * grace period is in progress, at least until the corresponding
590d1757b   Paul E. McKenney   rcu: Fix outdated...
1964
  	 * leaf node has been initialized.
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1965
1966
1967
1968
1969
  	 *
  	 * The grace period cannot complete until the initialization
  	 * process finishes, because this kthread handles both.
  	 */
  	rcu_for_each_node_breadth_first(rsp, rnp) {
0f41c0dda   Paul E. McKenney   rcu: Provide diag...
1970
  		rcu_gp_slow(rsp, gp_init_delay);
2a67e741b   Peter Zijlstra   rcu: Create trans...
1971
  		raw_spin_lock_irq_rcu_node(rnp);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1972
  		rdp = this_cpu_ptr(rsp->rda);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1973
1974
  		rcu_preempt_check_blocked_tasks(rnp);
  		rnp->qsmask = rnp->qsmaskinit;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1975
  		WRITE_ONCE(rnp->gpnum, rsp->gpnum);
3f47da0f3   Lai Jiangshan   rcu_tree: Avoid t...
1976
  		if (WARN_ON_ONCE(rnp->completed != rsp->completed))
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1977
  			WRITE_ONCE(rnp->completed, rsp->completed);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1978
  		if (rnp == rdp->mynode)
48a7639ce   Paul E. McKenney   rcu: Make callers...
1979
  			(void)__note_gp_changes(rsp, rnp, rdp);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1980
1981
1982
1983
  		rcu_preempt_boost_start_gp(rnp);
  		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
  					    rnp->level, rnp->grplo,
  					    rnp->grphi, rnp->qsmask);
67c583a7d   Boqun Feng   RCU: Privatize rc...
1984
  		raw_spin_unlock_irq_rcu_node(rnp);
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
1985
  		cond_resched_rcu_qs();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1986
  		WRITE_ONCE(rsp->gp_activity, jiffies);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1987
  	}
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1988

45fed3e7c   Paul E. McKenney   rcu: Make rcu_gp_...
1989
  	return true;
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1990
  }
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
1991

7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
1992
  /*
b9a425cfc   Paul E. McKenney   rcu: Pull out wai...
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
   * Helper function for wait_event_interruptible_timeout() wakeup
   * at force-quiescent-state time.
   */
  static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
  {
  	struct rcu_node *rnp = rcu_get_root(rsp);
  
  	/* Someone like call_rcu() requested a force-quiescent-state scan. */
  	*gfp = READ_ONCE(rsp->gp_flags);
  	if (*gfp & RCU_GP_FLAG_FQS)
  		return true;
  
  	/* The current grace period has completed. */
  	if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
  		return true;
  
  	return false;
  }
  
  /*
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2013
2014
   * Do one round of quiescent-state forcing.
   */
77f81fe08   Petr Mladek   rcu: Finish foldi...
2015
  static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2016
  {
217af2a2f   Paul E. McKenney   nohz_full: Add fu...
2017
2018
  	bool isidle = false;
  	unsigned long maxj;
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2019
  	struct rcu_node *rnp = rcu_get_root(rsp);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2020
  	WRITE_ONCE(rsp->gp_activity, jiffies);
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2021
  	rsp->n_force_qs++;
77f81fe08   Petr Mladek   rcu: Finish foldi...
2022
  	if (first_time) {
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2023
  		/* Collect dyntick-idle snapshots. */
0edd1b178   Paul E. McKenney   nohz_full: Add fu...
2024
  		if (is_sysidle_rcu_state(rsp)) {
e02b2edfa   Pranith Kumar   rcu: Use true/fal...
2025
  			isidle = true;
0edd1b178   Paul E. McKenney   nohz_full: Add fu...
2026
2027
  			maxj = jiffies - ULONG_MAX / 4;
  		}
217af2a2f   Paul E. McKenney   nohz_full: Add fu...
2028
2029
  		force_qs_rnp(rsp, dyntick_save_progress_counter,
  			     &isidle, &maxj);
0edd1b178   Paul E. McKenney   nohz_full: Add fu...
2030
  		rcu_sysidle_report_gp(rsp, isidle, maxj);
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2031
2032
  	} else {
  		/* Handle dyntick-idle and offline CPUs. */
675da67f2   Paul E. McKenney   rcu: Fixes to NO_...
2033
  		isidle = true;
217af2a2f   Paul E. McKenney   nohz_full: Add fu...
2034
  		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2035
2036
  	}
  	/* Clear flag to prevent immediate re-entry. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2037
  	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2a67e741b   Peter Zijlstra   rcu: Create trans...
2038
  		raw_spin_lock_irq_rcu_node(rnp);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2039
2040
  		WRITE_ONCE(rsp->gp_flags,
  			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2041
  		raw_spin_unlock_irq_rcu_node(rnp);
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2042
  	}
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2043
2044
2045
  }
  
  /*
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2046
2047
   * Clean up after the old grace period.
   */
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2048
  static void rcu_gp_cleanup(struct rcu_state *rsp)
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2049
2050
  {
  	unsigned long gp_duration;
48a7639ce   Paul E. McKenney   rcu: Make callers...
2051
  	bool needgp = false;
dae6e64d2   Paul E. McKenney   rcu: Introduce pr...
2052
  	int nocb = 0;
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2053
2054
  	struct rcu_data *rdp;
  	struct rcu_node *rnp = rcu_get_root(rsp);
abedf8e24   Paul Gortmaker   rcu: Use simple w...
2055
  	struct swait_queue_head *sq;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2056

7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2057
  	WRITE_ONCE(rsp->gp_activity, jiffies);
2a67e741b   Peter Zijlstra   rcu: Create trans...
2058
  	raw_spin_lock_irq_rcu_node(rnp);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2059
2060
2061
  	gp_duration = jiffies - rsp->gp_start;
  	if (gp_duration > rsp->gp_max)
  		rsp->gp_max = gp_duration;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2062

7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2063
2064
2065
2066
2067
2068
2069
  	/*
  	 * We know the grace period is complete, but to everyone else
  	 * it appears to still be ongoing.  But it is also the case
  	 * that to everyone else it looks like there is nothing that
  	 * they can do to advance the grace period.  It is therefore
  	 * safe for us to drop the lock in order to mark the grace
  	 * period as completed in all of the rcu_node structures.
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2070
  	 */
67c583a7d   Boqun Feng   RCU: Privatize rc...
2071
  	raw_spin_unlock_irq_rcu_node(rnp);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2072

5d4b86594   Paul E. McKenney   rcu: Fix day-zero...
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
  	/*
  	 * Propagate new ->completed value to rcu_node structures so
  	 * that other CPUs don't have to wait until the start of the next
  	 * grace period to process their callbacks.  This also avoids
  	 * some nasty RCU grace-period initialization races by forcing
  	 * the end of the current grace period to be completely recorded in
  	 * all of the rcu_node structures before the beginning of the next
  	 * grace period is recorded in any of the rcu_node structures.
  	 */
  	rcu_for_each_node_breadth_first(rsp, rnp) {
2a67e741b   Peter Zijlstra   rcu: Create trans...
2083
  		raw_spin_lock_irq_rcu_node(rnp);
5c60d25fa   Paul E. McKenney   rcu: Add diagnost...
2084
2085
  		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
  		WARN_ON_ONCE(rnp->qsmask);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2086
  		WRITE_ONCE(rnp->completed, rsp->gpnum);
b11cc5760   Paul E. McKenney   rcu: Accelerate R...
2087
2088
  		rdp = this_cpu_ptr(rsp->rda);
  		if (rnp == rdp->mynode)
48a7639ce   Paul E. McKenney   rcu: Make callers...
2089
  			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
78e4bc34e   Paul E. McKenney   rcu: Fix and comm...
2090
  		/* smp_mb() provided by prior unlock-lock pair. */
0446be489   Paul E. McKenney   rcu: Abstract rcu...
2091
  		nocb += rcu_future_gp_cleanup(rsp, rnp);
065bb78c5   Daniel Wagner   rcu: Do not call ...
2092
  		sq = rcu_nocb_gp_get(rnp);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2093
  		raw_spin_unlock_irq_rcu_node(rnp);
065bb78c5   Daniel Wagner   rcu: Do not call ...
2094
  		rcu_nocb_gp_cleanup(sq);
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
2095
  		cond_resched_rcu_qs();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2096
  		WRITE_ONCE(rsp->gp_activity, jiffies);
0f41c0dda   Paul E. McKenney   rcu: Provide diag...
2097
  		rcu_gp_slow(rsp, gp_cleanup_delay);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2098
  	}
5d4b86594   Paul E. McKenney   rcu: Fix day-zero...
2099
  	rnp = rcu_get_root(rsp);
2a67e741b   Peter Zijlstra   rcu: Create trans...
2100
  	raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
dae6e64d2   Paul E. McKenney   rcu: Introduce pr...
2101
  	rcu_nocb_gp_set(rnp, nocb);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2102

765a3f4fe   Paul E. McKenney   rcu: Provide grac...
2103
  	/* Declare grace period done. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2104
  	WRITE_ONCE(rsp->completed, rsp->gpnum);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2105
  	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
77f81fe08   Petr Mladek   rcu: Finish foldi...
2106
  	rsp->gp_state = RCU_GP_IDLE;
5d4b86594   Paul E. McKenney   rcu: Fix day-zero...
2107
  	rdp = this_cpu_ptr(rsp->rda);
48a7639ce   Paul E. McKenney   rcu: Make callers...
2108
2109
2110
  	/* Advance CBs to reduce false positives below. */
  	needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
  	if (needgp || cpu_needs_another_gp(rsp, rdp)) {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2111
  		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
bb311eccb   Paul E. McKenney   rcu: Add tracing ...
2112
  		trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2113
  				       READ_ONCE(rsp->gpnum),
bb311eccb   Paul E. McKenney   rcu: Add tracing ...
2114
2115
  				       TPS("newreq"));
  	}
67c583a7d   Boqun Feng   RCU: Privatize rc...
2116
  	raw_spin_unlock_irq_rcu_node(rnp);
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2117
2118
2119
2120
2121
2122
2123
  }
  
  /*
   * Body of kthread that handles grace periods.
   */
  static int __noreturn rcu_gp_kthread(void *arg)
  {
77f81fe08   Petr Mladek   rcu: Finish foldi...
2124
  	bool first_gp_fqs;
88d6df612   Paul E. McKenney   rcu: Prevent spur...
2125
  	int gf;
d40011f60   Paul E. McKenney   rcu: Control grac...
2126
  	unsigned long j;
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2127
  	int ret;
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2128
2129
  	struct rcu_state *rsp = arg;
  	struct rcu_node *rnp = rcu_get_root(rsp);
5871968d5   Paul E. McKenney   rcu: Tighten up a...
2130
  	rcu_bind_gp_kthread();
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2131
2132
2133
2134
  	for (;;) {
  
  		/* Handle grace-period start. */
  		for (;;) {
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2135
  			trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2136
  					       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2137
  					       TPS("reqwait"));
afea227fd   Paul E. McKenney   rcutorture: Expor...
2138
  			rsp->gp_state = RCU_GP_WAIT_GPS;
abedf8e24   Paul Gortmaker   rcu: Use simple w...
2139
  			swait_event_interruptible(rsp->gp_wq,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2140
  						 READ_ONCE(rsp->gp_flags) &
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2141
  						 RCU_GP_FLAG_INIT);
319362c90   Paul E. McKenney   rcu: Provide more...
2142
  			rsp->gp_state = RCU_GP_DONE_GPS;
78e4bc34e   Paul E. McKenney   rcu: Fix and comm...
2143
  			/* Locking provides needed memory barrier. */
f7be82093   Paul E. McKenney   rcu: Improve grac...
2144
  			if (rcu_gp_init(rsp))
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2145
  				break;
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
2146
  			cond_resched_rcu_qs();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2147
  			WRITE_ONCE(rsp->gp_activity, jiffies);
73a860cd5   Paul E. McKenney   rcu: Replace flus...
2148
  			WARN_ON(signal_pending(current));
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2149
  			trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2150
  					       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2151
  					       TPS("reqwaitsig"));
7fdefc10e   Paul E. McKenney   rcu: Break up rcu...
2152
  		}
cabc49c1f   Paul E. McKenney   rcu: Move RCU gra...
2153

4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2154
  		/* Handle quiescent-state forcing. */
77f81fe08   Petr Mladek   rcu: Finish foldi...
2155
  		first_gp_fqs = true;
d40011f60   Paul E. McKenney   rcu: Control grac...
2156
2157
2158
2159
2160
  		j = jiffies_till_first_fqs;
  		if (j > HZ) {
  			j = HZ;
  			jiffies_till_first_fqs = HZ;
  		}
88d6df612   Paul E. McKenney   rcu: Prevent spur...
2161
  		ret = 0;
cabc49c1f   Paul E. McKenney   rcu: Move RCU gra...
2162
  		for (;;) {
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
2163
  			if (!ret) {
88d6df612   Paul E. McKenney   rcu: Prevent spur...
2164
  				rsp->jiffies_force_qs = jiffies + j;
8c7c4829a   Paul E. McKenney   rcu: Awaken grace...
2165
2166
2167
  				WRITE_ONCE(rsp->jiffies_kick_kthreads,
  					   jiffies + 3 * j);
  			}
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2168
  			trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2169
  					       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2170
  					       TPS("fqswait"));
afea227fd   Paul E. McKenney   rcutorture: Expor...
2171
  			rsp->gp_state = RCU_GP_WAIT_FQS;
abedf8e24   Paul Gortmaker   rcu: Use simple w...
2172
  			ret = swait_event_interruptible_timeout(rsp->gp_wq,
b9a425cfc   Paul E. McKenney   rcu: Pull out wai...
2173
  					rcu_gp_fqs_check_wake(rsp, &gf), j);
32bb1c799   Paul E. McKenney   rcu: Rename RCU_G...
2174
  			rsp->gp_state = RCU_GP_DOING_FQS;
78e4bc34e   Paul E. McKenney   rcu: Fix and comm...
2175
  			/* Locking provides needed memory barriers. */
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2176
  			/* If grace period done, leave loop. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2177
  			if (!READ_ONCE(rnp->qsmask) &&
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2178
  			    !rcu_preempt_blocked_readers_cgp(rnp))
cabc49c1f   Paul E. McKenney   rcu: Move RCU gra...
2179
  				break;
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2180
  			/* If time for quiescent-state forcing, do it. */
88d6df612   Paul E. McKenney   rcu: Prevent spur...
2181
2182
  			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
  			    (gf & RCU_GP_FLAG_FQS)) {
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2183
  				trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2184
  						       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2185
  						       TPS("fqsstart"));
77f81fe08   Petr Mladek   rcu: Finish foldi...
2186
2187
  				rcu_gp_fqs(rsp, first_gp_fqs);
  				first_gp_fqs = false;
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2188
  				trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2189
  						       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2190
  						       TPS("fqsend"));
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
2191
  				cond_resched_rcu_qs();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2192
  				WRITE_ONCE(rsp->gp_activity, jiffies);
fcfd0a237   Paul E. McKenney   rcu: Make FQS sch...
2193
2194
2195
2196
2197
2198
2199
2200
2201
  				ret = 0; /* Force full wait till next FQS. */
  				j = jiffies_till_next_fqs;
  				if (j > HZ) {
  					j = HZ;
  					jiffies_till_next_fqs = HZ;
  				} else if (j < 1) {
  					j = 1;
  					jiffies_till_next_fqs = 1;
  				}
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2202
2203
  			} else {
  				/* Deal with stray signal. */
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
2204
  				cond_resched_rcu_qs();
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2205
  				WRITE_ONCE(rsp->gp_activity, jiffies);
73a860cd5   Paul E. McKenney   rcu: Replace flus...
2206
  				WARN_ON(signal_pending(current));
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2207
  				trace_rcu_grace_period(rsp->name,
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2208
  						       READ_ONCE(rsp->gpnum),
63c4db78e   Paul E. McKenney   rcu: Add tracing ...
2209
  						       TPS("fqswaitsig"));
fcfd0a237   Paul E. McKenney   rcu: Make FQS sch...
2210
2211
2212
2213
2214
2215
  				ret = 1; /* Keep old FQS timing. */
  				j = jiffies;
  				if (time_after(jiffies, rsp->jiffies_force_qs))
  					j = 1;
  				else
  					j = rsp->jiffies_force_qs - j;
d40011f60   Paul E. McKenney   rcu: Control grac...
2216
  			}
cabc49c1f   Paul E. McKenney   rcu: Move RCU gra...
2217
  		}
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2218
2219
  
  		/* Handle grace-period end. */
319362c90   Paul E. McKenney   rcu: Provide more...
2220
  		rsp->gp_state = RCU_GP_CLEANUP;
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2221
  		rcu_gp_cleanup(rsp);
319362c90   Paul E. McKenney   rcu: Provide more...
2222
  		rsp->gp_state = RCU_GP_CLEANED;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2223
  	}
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2224
2225
2226
  }
  
  /*
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2227
2228
   * Start a new RCU grace period if warranted, re-initializing the hierarchy
   * in preparation for detecting the next grace period.  The caller must hold
b8462084a   Paul E. McKenney   rcu: Push lock re...
2229
   * the root node's ->lock and hard irqs must be disabled.
e56014000   Paul E. McKenney   rcu: Simplify off...
2230
2231
2232
2233
   *
   * Note that it is legal for a dying CPU (which is marked as offline) to
   * invoke this function.  This can happen when the dying CPU reports its
   * quiescent state.
48a7639ce   Paul E. McKenney   rcu: Make callers...
2234
2235
   *
   * Returns true if the grace-period kthread must be awakened.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2236
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2237
  static bool
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
2238
2239
  rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
  		      struct rcu_data *rdp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2240
  {
b8462084a   Paul E. McKenney   rcu: Push lock re...
2241
  	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
afe24b122   Paul E. McKenney   rcu: Move propaga...
2242
  		/*
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2243
  		 * Either we have not yet spawned the grace-period
62da19212   Paul E. McKenney   rcu: Accelerate c...
2244
2245
  		 * task, this CPU does not need another grace period,
  		 * or a grace period is already in progress.
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
2246
  		 * Either way, don't start a new grace period.
afe24b122   Paul E. McKenney   rcu: Move propaga...
2247
  		 */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2248
  		return false;
afe24b122   Paul E. McKenney   rcu: Move propaga...
2249
  	}
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2250
2251
  	WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
  	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
bb311eccb   Paul E. McKenney   rcu: Add tracing ...
2252
  			       TPS("newreq"));
62da19212   Paul E. McKenney   rcu: Accelerate c...
2253

016a8d5be   Steven Rostedt   rcu: Don't call w...
2254
2255
  	/*
  	 * We can't do wakeups while holding the rnp->lock, as that
1eafd31c6   Paul E. McKenney   rcu: Avoid redund...
2256
  	 * could cause possible deadlocks with the rq->lock. Defer
48a7639ce   Paul E. McKenney   rcu: Make callers...
2257
  	 * the wakeup to our caller.
016a8d5be   Steven Rostedt   rcu: Don't call w...
2258
  	 */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2259
  	return true;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2260
2261
2262
  }
  
  /*
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
2263
2264
2265
2266
2267
   * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
   * callbacks.  Note that rcu_start_gp_advanced() cannot do this because it
   * is invoked indirectly from rcu_advance_cbs(), which would result in
   * endless recursion -- or would do so if it wasn't for the self-deadlock
   * that is encountered beforehand.
48a7639ce   Paul E. McKenney   rcu: Make callers...
2268
2269
   *
   * Returns true if the grace-period kthread needs to be awakened.
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
2270
   */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2271
  static bool rcu_start_gp(struct rcu_state *rsp)
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
2272
2273
2274
  {
  	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
  	struct rcu_node *rnp = rcu_get_root(rsp);
48a7639ce   Paul E. McKenney   rcu: Make callers...
2275
  	bool ret = false;
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
2276
2277
2278
2279
2280
2281
2282
2283
2284
  
  	/*
  	 * If there is no grace period in progress right now, any
  	 * callbacks we have up to this point will be satisfied by the
  	 * next grace period.  Also, advancing the callbacks reduces the
  	 * probability of false positives from cpu_needs_another_gp()
  	 * resulting in pointless grace periods.  So, advance callbacks
  	 * then start the grace period!
  	 */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2285
2286
2287
  	ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
  	ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
  	return ret;
910ee45db   Paul E. McKenney   rcu: Make rcu_acc...
2288
2289
2290
  }
  
  /*
8994515cf   Paul E. McKenney   rcu: Update rcu_r...
2291
2292
2293
2294
2295
2296
2297
   * Report a full set of quiescent states to the specified rcu_state data
   * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
   * kthread if another grace period is required.  Whether we wake
   * the grace-period kthread or it awakens itself for the next round
   * of quiescent-state forcing, that kthread will clean up after the
   * just-completed grace period.  Note that the caller must hold rnp->lock,
   * which is released before return.
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
2298
   */
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2299
  static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
fc2219d49   Paul E. McKenney   rcu: Clean up cod...
2300
  	__releases(rcu_get_root(rsp)->lock)
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
2301
  {
fc2219d49   Paul E. McKenney   rcu: Clean up cod...
2302
  	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
cd73ca21c   Paul E. McKenney   rcu: Force wakeup...
2303
  	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2304
  	raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
94d447767   Jisheng Zhang   rcu: Use rcu_gp_k...
2305
  	rcu_gp_kthread_wake(rsp);
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
2306
2307
2308
  }
  
  /*
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2309
2310
2311
   * Similar to rcu_report_qs_rdp(), for which it is a helper function.
   * Allows quiescent states for a group of CPUs to be reported at one go
   * to the specified rcu_node structure, though all the CPUs in the group
654e95334   Paul E. McKenney   rcu: Associate qu...
2312
2313
2314
2315
2316
   * must be represented by the same rcu_node structure (which need not be a
   * leaf rcu_node structure, though it often will be).  The gps parameter
   * is the grace-period snapshot, which means that the quiescent states
   * are valid only if rnp->gpnum is equal to gps.  That structure's lock
   * must be held upon entry, and it is released before return.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2317
2318
   */
  static void
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2319
  rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
654e95334   Paul E. McKenney   rcu: Associate qu...
2320
  		  struct rcu_node *rnp, unsigned long gps, unsigned long flags)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2321
2322
  	__releases(rnp->lock)
  {
654e95334   Paul E. McKenney   rcu: Associate qu...
2323
  	unsigned long oldmask = 0;
28ecd5802   Paul E. McKenney   rcu: Add WARN_ON_...
2324
  	struct rcu_node *rnp_c;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2325
2326
  	/* Walk up the rcu_node hierarchy. */
  	for (;;) {
654e95334   Paul E. McKenney   rcu: Associate qu...
2327
  		if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2328

654e95334   Paul E. McKenney   rcu: Associate qu...
2329
2330
2331
2332
  			/*
  			 * Our bit has already been cleared, or the
  			 * relevant grace period is already over, so done.
  			 */
67c583a7d   Boqun Feng   RCU: Privatize rc...
2333
  			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2334
2335
  			return;
  		}
654e95334   Paul E. McKenney   rcu: Associate qu...
2336
  		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2337
  		rnp->qsmask &= ~mask;
d4c08f2ac   Paul E. McKenney   rcu: Add grace-pe...
2338
2339
2340
2341
  		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
  						 mask, rnp->qsmask, rnp->level,
  						 rnp->grplo, rnp->grphi,
  						 !!rnp->gp_tasks);
27f4d2805   Paul E. McKenney   rcu: priority boo...
2342
  		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2343
2344
  
  			/* Other bits still set at this level, so done. */
67c583a7d   Boqun Feng   RCU: Privatize rc...
2345
  			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2346
2347
2348
2349
2350
2351
2352
2353
2354
  			return;
  		}
  		mask = rnp->grpmask;
  		if (rnp->parent == NULL) {
  
  			/* No more levels.  Exit loop holding root lock. */
  
  			break;
  		}
67c583a7d   Boqun Feng   RCU: Privatize rc...
2355
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
28ecd5802   Paul E. McKenney   rcu: Add WARN_ON_...
2356
  		rnp_c = rnp;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2357
  		rnp = rnp->parent;
2a67e741b   Peter Zijlstra   rcu: Create trans...
2358
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
654e95334   Paul E. McKenney   rcu: Associate qu...
2359
  		oldmask = rnp_c->qsmask;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2360
2361
2362
2363
  	}
  
  	/*
  	 * Get here if we are the last CPU to pass through a quiescent
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2364
  	 * state for this grace period.  Invoke rcu_report_qs_rsp()
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
2365
  	 * to clean up and start the next grace period if one is needed.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2366
  	 */
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2367
  	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2368
2369
2370
  }
  
  /*
cc99a310c   Paul E. McKenney   rcu: Move rcu_rep...
2371
2372
2373
2374
2375
2376
   * Record a quiescent state for all tasks that were previously queued
   * on the specified rcu_node structure and that were blocking the current
   * RCU grace period.  The caller must hold the specified rnp->lock with
   * irqs disabled, and this lock is released upon return, but irqs remain
   * disabled.
   */
0aa04b055   Paul E. McKenney   rcu: Process offl...
2377
  static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
cc99a310c   Paul E. McKenney   rcu: Move rcu_rep...
2378
2379
2380
  				      struct rcu_node *rnp, unsigned long flags)
  	__releases(rnp->lock)
  {
654e95334   Paul E. McKenney   rcu: Associate qu...
2381
  	unsigned long gps;
cc99a310c   Paul E. McKenney   rcu: Move rcu_rep...
2382
2383
  	unsigned long mask;
  	struct rcu_node *rnp_p;
a77da14ce   Paul E. McKenney   rcu: Yet another ...
2384
2385
  	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
  	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
2386
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
cc99a310c   Paul E. McKenney   rcu: Move rcu_rep...
2387
2388
2389
2390
2391
2392
  		return;  /* Still need more quiescent states! */
  	}
  
  	rnp_p = rnp->parent;
  	if (rnp_p == NULL) {
  		/*
a77da14ce   Paul E. McKenney   rcu: Yet another ...
2393
2394
  		 * Only one rcu_node structure in the tree, so don't
  		 * try to report up to its nonexistent parent!
cc99a310c   Paul E. McKenney   rcu: Move rcu_rep...
2395
2396
2397
2398
  		 */
  		rcu_report_qs_rsp(rsp, flags);
  		return;
  	}
654e95334   Paul E. McKenney   rcu: Associate qu...
2399
2400
  	/* Report up the rest of the hierarchy, tracking current ->gpnum. */
  	gps = rnp->gpnum;
cc99a310c   Paul E. McKenney   rcu: Move rcu_rep...
2401
  	mask = rnp->grpmask;
67c583a7d   Boqun Feng   RCU: Privatize rc...
2402
  	raw_spin_unlock_rcu_node(rnp);	/* irqs remain disabled. */
2a67e741b   Peter Zijlstra   rcu: Create trans...
2403
  	raw_spin_lock_rcu_node(rnp_p);	/* irqs already disabled. */
654e95334   Paul E. McKenney   rcu: Associate qu...
2404
  	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
cc99a310c   Paul E. McKenney   rcu: Move rcu_rep...
2405
2406
2407
  }
  
  /*
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2408
   * Record a quiescent state for the specified CPU to that CPU's rcu_data
4b455dc3e   Paul E. McKenney   rcu: Catch up rcu...
2409
   * structure.  This must be called from the specified CPU.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2410
2411
   */
  static void
d7d6a11e8   Paul E. McKenney   rcu: Simplify qui...
2412
  rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2413
2414
2415
  {
  	unsigned long flags;
  	unsigned long mask;
48a7639ce   Paul E. McKenney   rcu: Make callers...
2416
  	bool needwake;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2417
2418
2419
  	struct rcu_node *rnp;
  
  	rnp = rdp->mynode;
2a67e741b   Peter Zijlstra   rcu: Create trans...
2420
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
5b74c4589   Paul E. McKenney   rcu: Make ->cpu_n...
2421
  	if ((rdp->cpu_no_qs.b.norm &&
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
2422
2423
2424
  	     rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
  	    rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
  	    rdp->gpwrap) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2425
2426
  
  		/*
e4cc1f22b   Paul E. McKenney   rcu: Simplify qui...
2427
2428
2429
2430
  		 * The grace period in which this quiescent state was
  		 * recorded has ended, so don't report it upwards.
  		 * We will instead need a new quiescent state that lies
  		 * within the current grace period.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2431
  		 */
5b74c4589   Paul E. McKenney   rcu: Make ->cpu_n...
2432
  		rdp->cpu_no_qs.b.norm = true;	/* need qs for new gp. */
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
2433
  		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2434
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2435
2436
2437
2438
  		return;
  	}
  	mask = rdp->grpmask;
  	if ((rnp->qsmask & mask) == 0) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
2439
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2440
  	} else {
bb53e416e   Paul E. McKenney   rcu: Assign false...
2441
  		rdp->core_needs_qs = false;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2442
2443
2444
2445
2446
  
  		/*
  		 * This GP can't end until cpu checks in, so all of our
  		 * callbacks can be processed during the next GP.
  		 */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2447
  		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2448

654e95334   Paul E. McKenney   rcu: Associate qu...
2449
2450
  		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
  		/* ^^^ Released rnp->lock */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2451
2452
  		if (needwake)
  			rcu_gp_kthread_wake(rsp);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
  	}
  }
  
  /*
   * Check to see if there is a new grace period of which this CPU
   * is not yet aware, and if so, set up local rcu_data state for it.
   * Otherwise, see if this CPU has just passed through its first
   * quiescent state for this grace period, and record that fact if so.
   */
  static void
  rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
  {
05eb552bf   Paul E. McKenney   rcu: Move redunda...
2465
2466
  	/* Check for grace-period ends and beginnings. */
  	note_gp_changes(rsp, rdp);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2467
2468
2469
2470
2471
  
  	/*
  	 * Does this CPU still need to do its part for current grace period?
  	 * If no, return and let the other CPUs do their part as well.
  	 */
97c668b8e   Paul E. McKenney   rcu: Rename qs_pe...
2472
  	if (!rdp->core_needs_qs)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2473
2474
2475
2476
2477
2478
  		return;
  
  	/*
  	 * Was there a quiescent state since the beginning of the grace
  	 * period? If no, then exit and wait for the next call.
  	 */
5b74c4589   Paul E. McKenney   rcu: Make ->cpu_n...
2479
  	if (rdp->cpu_no_qs.b.norm &&
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
2480
  	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2481
  		return;
d3f6bad39   Paul E. McKenney   rcu: Rename "quie...
2482
2483
2484
2485
  	/*
  	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
  	 * judge of that).
  	 */
d7d6a11e8   Paul E. McKenney   rcu: Simplify qui...
2486
  	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2487
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2488
  /*
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2489
2490
   * Send the specified CPU's RCU callbacks to the orphanage.  The
   * specified CPU must be offline, and the caller must hold the
7b2e6011f   Paul E. McKenney   rcu: Rename ->ono...
2491
   * ->orphan_lock.
e74f4c456   Paul E. McKenney   rcu: Make hot-unp...
2492
   */
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2493
2494
2495
  static void
  rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
  			  struct rcu_node *rnp, struct rcu_data *rdp)
e74f4c456   Paul E. McKenney   rcu: Make hot-unp...
2496
  {
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2497
  	/* No-CBs CPUs do not have orphanable callbacks. */
ea46351ce   Paul E. McKenney   rcu: Eliminate HO...
2498
  	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2499
  		return;
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2500
2501
  	/*
  	 * Orphan the callbacks.  First adjust the counts.  This is safe
abfd6e58a   Paul E. McKenney   rcu: Fix comment ...
2502
2503
  	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
  	 * cannot be running now.  Thus no memory barrier is required.
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2504
  	 */
a50c3af91   Paul E. McKenney   rcu: Don't make c...
2505
  	if (rdp->nxtlist != NULL) {
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2506
2507
2508
  		rsp->qlen_lazy += rdp->qlen_lazy;
  		rsp->qlen += rdp->qlen;
  		rdp->n_cbs_orphaned += rdp->qlen;
a50c3af91   Paul E. McKenney   rcu: Don't make c...
2509
  		rdp->qlen_lazy = 0;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2510
  		WRITE_ONCE(rdp->qlen, 0);
a50c3af91   Paul E. McKenney   rcu: Don't make c...
2511
2512
2513
  	}
  
  	/*
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2514
2515
2516
2517
2518
2519
2520
  	 * Next, move those callbacks still needing a grace period to
  	 * the orphanage, where some other CPU will pick them up.
  	 * Some of the callbacks might have gone partway through a grace
  	 * period, but that is too bad.  They get to start over because we
  	 * cannot assume that grace periods are synchronized across CPUs.
  	 * We don't bother updating the ->nxttail[] array yet, instead
  	 * we just reset the whole thing later on.
a50c3af91   Paul E. McKenney   rcu: Don't make c...
2521
  	 */
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2522
2523
2524
2525
  	if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
  		*rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
  		rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
  		*rdp->nxttail[RCU_DONE_TAIL] = NULL;
a50c3af91   Paul E. McKenney   rcu: Don't make c...
2526
2527
2528
  	}
  
  	/*
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2529
2530
2531
  	 * Then move the ready-to-invoke callbacks to the orphanage,
  	 * where some other CPU will pick them up.  These will not be
  	 * required to pass though another grace period: They are done.
a50c3af91   Paul E. McKenney   rcu: Don't make c...
2532
  	 */
e56014000   Paul E. McKenney   rcu: Simplify off...
2533
  	if (rdp->nxtlist != NULL) {
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2534
2535
  		*rsp->orphan_donetail = rdp->nxtlist;
  		rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
e56014000   Paul E. McKenney   rcu: Simplify off...
2536
  	}
e74f4c456   Paul E. McKenney   rcu: Make hot-unp...
2537

b33078b60   Paul E. McKenney   rcu: Consolidate ...
2538
2539
2540
2541
  	/*
  	 * Finally, initialize the rcu_data structure's list to empty and
  	 * disallow further callbacks on this CPU.
  	 */
3f5d3ea64   Paul E. McKenney   rcu: Consolidate ...
2542
  	init_callback_list(rdp);
b33078b60   Paul E. McKenney   rcu: Consolidate ...
2543
  	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2544
2545
2546
2547
  }
  
  /*
   * Adopt the RCU callbacks from the specified rcu_state structure's
7b2e6011f   Paul E. McKenney   rcu: Rename ->ono...
2548
   * orphanage.  The caller must hold the ->orphan_lock.
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2549
   */
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2550
  static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2551
2552
  {
  	int i;
fa07a58f7   Christoph Lameter   rcu: Replace __th...
2553
  	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2554

3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2555
  	/* No-CBs CPUs are handled specially. */
ea46351ce   Paul E. McKenney   rcu: Eliminate HO...
2556
2557
  	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
  	    rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2558
  		return;
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2559
2560
2561
2562
  	/* Do the accounting first. */
  	rdp->qlen_lazy += rsp->qlen_lazy;
  	rdp->qlen += rsp->qlen;
  	rdp->n_cbs_adopted += rsp->qlen;
8f5af6f1f   Paul E. McKenney   rcu: RCU_FAST_NO_...
2563
2564
  	if (rsp->qlen_lazy != rsp->qlen)
  		rcu_idle_count_callbacks_posted();
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
  	rsp->qlen_lazy = 0;
  	rsp->qlen = 0;
  
  	/*
  	 * We do not need a memory barrier here because the only way we
  	 * can get here if there is an rcu_barrier() in flight is if
  	 * we are the task doing the rcu_barrier().
  	 */
  
  	/* First adopt the ready-to-invoke callbacks. */
  	if (rsp->orphan_donelist != NULL) {
  		*rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
  		*rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
  		for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
  			if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
  				rdp->nxttail[i] = rsp->orphan_donetail;
  		rsp->orphan_donelist = NULL;
  		rsp->orphan_donetail = &rsp->orphan_donelist;
  	}
  
  	/* And then adopt the callbacks that still need a grace period. */
  	if (rsp->orphan_nxtlist != NULL) {
  		*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
  		rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
  		rsp->orphan_nxtlist = NULL;
  		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
  	}
  }
  
  /*
   * Trace the fact that this CPU is going offline.
   */
  static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  {
  	RCU_TRACE(unsigned long mask);
  	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
  	RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
ea46351ce   Paul E. McKenney   rcu: Eliminate HO...
2602
2603
  	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
  		return;
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2604
  	RCU_TRACE(mask = rdp->grpmask);
e56014000   Paul E. McKenney   rcu: Simplify off...
2605
2606
  	trace_rcu_grace_period(rsp->name,
  			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2607
  			       TPS("cpuofl"));
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2608
2609
2610
  }
  
  /*
8af3a5e78   Paul E. McKenney   rcu: Abstract rcu...
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
   * All CPUs for the specified rcu_node structure have gone offline,
   * and all tasks that were preempted within an RCU read-side critical
   * section while running on one of those CPUs have since exited their RCU
   * read-side critical section.  Some other CPU is reporting this fact with
   * the specified rcu_node structure's ->lock held and interrupts disabled.
   * This function therefore goes up the tree of rcu_node structures,
   * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
   * the leaf rcu_node structure's ->qsmaskinit field has already been
   * updated
   *
   * This function does check that the specified rcu_node structure has
   * all CPUs offline and no blocked tasks, so it is OK to invoke it
   * prematurely.  That said, invoking it after the fact will cost you
   * a needless lock acquisition.  So once it has done its work, don't
   * invoke it again.
   */
  static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
  {
  	long mask;
  	struct rcu_node *rnp = rnp_leaf;
ea46351ce   Paul E. McKenney   rcu: Eliminate HO...
2631
2632
  	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
  	    rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
8af3a5e78   Paul E. McKenney   rcu: Abstract rcu...
2633
2634
2635
2636
2637
2638
  		return;
  	for (;;) {
  		mask = rnp->grpmask;
  		rnp = rnp->parent;
  		if (!rnp)
  			break;
2a67e741b   Peter Zijlstra   rcu: Create trans...
2639
  		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
8af3a5e78   Paul E. McKenney   rcu: Abstract rcu...
2640
  		rnp->qsmaskinit &= ~mask;
0aa04b055   Paul E. McKenney   rcu: Process offl...
2641
  		rnp->qsmask &= ~mask;
8af3a5e78   Paul E. McKenney   rcu: Abstract rcu...
2642
  		if (rnp->qsmaskinit) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
2643
2644
  			raw_spin_unlock_rcu_node(rnp);
  			/* irqs remain disabled. */
8af3a5e78   Paul E. McKenney   rcu: Abstract rcu...
2645
2646
  			return;
  		}
67c583a7d   Boqun Feng   RCU: Privatize rc...
2647
  		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
8af3a5e78   Paul E. McKenney   rcu: Abstract rcu...
2648
2649
2650
2651
  	}
  }
  
  /*
e56014000   Paul E. McKenney   rcu: Simplify off...
2652
   * The CPU has been completely removed, and some other CPU is reporting
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2653
2654
   * this fact from process context.  Do the remainder of the cleanup,
   * including orphaning the outgoing CPU's RCU callbacks, and also
1331e7a1b   Paul E. McKenney   rcu: Remove _rcu_...
2655
2656
   * adopting them.  There can only be one CPU hotplug operation at a time,
   * so no other CPU can be attempting to update rcu_cpu_kthread_task.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2657
   */
e56014000   Paul E. McKenney   rcu: Simplify off...
2658
  static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2659
  {
2036d94a7   Paul E. McKenney   rcu: Rework detec...
2660
  	unsigned long flags;
e56014000   Paul E. McKenney   rcu: Simplify off...
2661
  	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2662
  	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
e56014000   Paul E. McKenney   rcu: Simplify off...
2663

ea46351ce   Paul E. McKenney   rcu: Eliminate HO...
2664
2665
  	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
  		return;
2036d94a7   Paul E. McKenney   rcu: Rework detec...
2666
  	/* Adjust any no-longer-needed kthreads. */
5d01bbd11   Thomas Gleixner   rcu: Yield simpler
2667
  	rcu_boost_kthread_setaffinity(rnp, -1);
2036d94a7   Paul E. McKenney   rcu: Rework detec...
2668

b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2669
  	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
78043c467   Paul E. McKenney   rcu: Put all orph...
2670
  	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2671
  	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2672
  	rcu_adopt_orphan_cbs(rsp, flags);
a8f4cbadf   Paul E. McKenney   rcu: Shorten irq-...
2673
  	raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2674

cf01537ec   Paul E. McKenney   rcu: Add check fo...
2675
2676
2677
2678
  	WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
  		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p
  ",
  		  cpu, rdp->qlen, rdp->nxtlist);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2679
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2680
2681
2682
2683
  /*
   * Invoke any RCU callbacks that have made it to the end of their grace
   * period.  Thottle as specified by rdp->blimit.
   */
37c72e56f   Paul E. McKenney   rcu: Prevent RCU ...
2684
  static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2685
2686
2687
  {
  	unsigned long flags;
  	struct rcu_head *next, *list, **tail;
878d7439d   Eric Dumazet   rcu: Fix batch-li...
2688
2689
  	long bl, count, count_lazy;
  	int i;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2690

dc35c8934   Paul E. McKenney   rcu: Tag callback...
2691
  	/* If no callbacks are ready, just return. */
29c00b4a1   Paul E. McKenney   rcu: Add event-tr...
2692
  	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
486e25934   Paul E. McKenney   rcu: Avoid waking...
2693
  		trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2694
  		trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist),
4968c300e   Paul E. McKenney   rcu: Augment rcu_...
2695
2696
  				    need_resched(), is_idle_task(current),
  				    rcu_is_callbacks_kthread());
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2697
  		return;
29c00b4a1   Paul E. McKenney   rcu: Add event-tr...
2698
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2699
2700
2701
2702
2703
2704
  
  	/*
  	 * Extract the list of ready callbacks, disabling to prevent
  	 * races with call_rcu() from interrupt handlers.
  	 */
  	local_irq_save(flags);
8146c4e2e   Paul E. McKenney   rcu: Check for ca...
2705
  	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
29c00b4a1   Paul E. McKenney   rcu: Add event-tr...
2706
  	bl = rdp->blimit;
486e25934   Paul E. McKenney   rcu: Avoid waking...
2707
  	trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2708
2709
2710
2711
  	list = rdp->nxtlist;
  	rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
  	*rdp->nxttail[RCU_DONE_TAIL] = NULL;
  	tail = rdp->nxttail[RCU_DONE_TAIL];
b41772abe   Paul E. McKenney   rcu: Stop rcu_do_...
2712
2713
2714
  	for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
  		if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
  			rdp->nxttail[i] = &rdp->nxtlist;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2715
2716
2717
  	local_irq_restore(flags);
  
  	/* Invoke callbacks. */
486e25934   Paul E. McKenney   rcu: Avoid waking...
2718
  	count = count_lazy = 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2719
2720
2721
  	while (list) {
  		next = list->next;
  		prefetch(next);
551d55a94   Mathieu Desnoyers   tree/tiny rcu: Ad...
2722
  		debug_rcu_head_unqueue(list);
486e25934   Paul E. McKenney   rcu: Avoid waking...
2723
2724
  		if (__rcu_reclaim(rsp->name, list))
  			count_lazy++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2725
  		list = next;
dff1672d9   Paul E. McKenney   rcu: Keep invokin...
2726
2727
2728
2729
  		/* Stop only if limit reached and CPU has something to do. */
  		if (++count >= bl &&
  		    (need_resched() ||
  		     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2730
2731
2732
2733
  			break;
  	}
  
  	local_irq_save(flags);
4968c300e   Paul E. McKenney   rcu: Augment rcu_...
2734
2735
2736
  	trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
  			    is_idle_task(current),
  			    rcu_is_callbacks_kthread());
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2737
2738
  
  	/* Update count, and requeue any remaining callbacks. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2739
2740
2741
  	if (list != NULL) {
  		*tail = rdp->nxtlist;
  		rdp->nxtlist = list;
b41772abe   Paul E. McKenney   rcu: Stop rcu_do_...
2742
2743
2744
  		for (i = 0; i < RCU_NEXT_SIZE; i++)
  			if (&rdp->nxtlist == rdp->nxttail[i])
  				rdp->nxttail[i] = tail;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2745
2746
2747
  			else
  				break;
  	}
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2748
2749
  	smp_mb(); /* List handling before counting for rcu_barrier(). */
  	rdp->qlen_lazy -= count_lazy;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2750
  	WRITE_ONCE(rdp->qlen, rdp->qlen - count);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
2751
  	rdp->n_cbs_invoked += count;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2752
2753
2754
2755
  
  	/* Reinstate batch limit if we have worked down the excess. */
  	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
  		rdp->blimit = blimit;
37c72e56f   Paul E. McKenney   rcu: Prevent RCU ...
2756
2757
2758
2759
2760
2761
  	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
  	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
  		rdp->qlen_last_fqs_check = 0;
  		rdp->n_force_qs_snap = rsp->n_force_qs;
  	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
  		rdp->qlen_last_fqs_check = rdp->qlen;
cfca92797   Paul E. McKenney   rcu: Introduce ch...
2762
  	WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
37c72e56f   Paul E. McKenney   rcu: Prevent RCU ...
2763

64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2764
  	local_irq_restore(flags);
e0f23060a   Paul E. McKenney   rcu: Update comme...
2765
  	/* Re-invoke RCU core processing if there are callbacks remaining. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2766
  	if (cpu_has_callbacks_ready_to_invoke(rdp))
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2767
  		invoke_rcu_core();
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2768
2769
2770
2771
2772
  }
  
  /*
   * Check to see if this CPU is in a non-context-switch quiescent state
   * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
e0f23060a   Paul E. McKenney   rcu: Update comme...
2773
   * Also schedule RCU core processing.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2774
   *
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
2775
   * This function must be called from hardirq context.  It is normally
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2776
2777
2778
   * invoked from the scheduling-clock interrupt.  If rcu_pending returns
   * false, there is no point in invoking rcu_check_callbacks().
   */
c3377c2da   Paul E. McKenney   rcu: Remove "cpu"...
2779
  void rcu_check_callbacks(int user)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2780
  {
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2781
  	trace_rcu_utilization(TPS("Start scheduler-tick"));
a858af287   Paul E. McKenney   rcu: Print schedu...
2782
  	increment_cpu_stall_ticks();
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
2783
  	if (user || rcu_is_cpu_rrupt_from_idle()) {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2784
2785
2786
2787
2788
  
  		/*
  		 * Get here if this CPU took its interrupt from user
  		 * mode or from the idle loop, and if this is not a
  		 * nested interrupt.  In this case, the CPU is in
d6714c22b   Paul E. McKenney   rcu: Renamings to...
2789
  		 * a quiescent state, so note it.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2790
2791
  		 *
  		 * No memory barrier is required here because both
d6714c22b   Paul E. McKenney   rcu: Renamings to...
2792
2793
2794
  		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
  		 * variables that other CPUs neither access nor modify,
  		 * at least not while the corresponding CPU is online.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2795
  		 */
284a8c93a   Paul E. McKenney   rcu: Per-CPU oper...
2796
2797
  		rcu_sched_qs();
  		rcu_bh_qs();
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2798
2799
2800
2801
2802
2803
2804
  
  	} else if (!in_softirq()) {
  
  		/*
  		 * Get here if this CPU did not take its interrupt from
  		 * softirq, in other words, if it is not interrupting
  		 * a rcu_bh read-side critical section.  This is an _bh
d6714c22b   Paul E. McKenney   rcu: Renamings to...
2805
  		 * critical section, so note it.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2806
  		 */
284a8c93a   Paul E. McKenney   rcu: Per-CPU oper...
2807
  		rcu_bh_qs();
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2808
  	}
86aea0e6e   Paul E. McKenney   rcu: Remove "cpu"...
2809
  	rcu_preempt_check_callbacks();
e3950ecd5   Paul E. McKenney   rcu: Remove "cpu"...
2810
  	if (rcu_pending())
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2811
  		invoke_rcu_core();
8315f4229   Paul E. McKenney   rcu: Add call_rcu...
2812
2813
  	if (user)
  		rcu_note_voluntary_context_switch(current);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2814
  	trace_rcu_utilization(TPS("End scheduler-tick"));
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2815
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2816
2817
2818
  /*
   * Scan the leaf rcu_node structures, processing dyntick state for any that
   * have not yet encountered a quiescent state, using the function specified.
27f4d2805   Paul E. McKenney   rcu: priority boo...
2819
2820
   * Also initiate boosting for any threads blocked on the root rcu_node.
   *
ee47eb9f4   Paul E. McKenney   rcu: Remove leg o...
2821
   * The caller must have suppressed start of new grace periods.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2822
   */
217af2a2f   Paul E. McKenney   nohz_full: Add fu...
2823
2824
2825
2826
  static void force_qs_rnp(struct rcu_state *rsp,
  			 int (*f)(struct rcu_data *rsp, bool *isidle,
  				  unsigned long *maxj),
  			 bool *isidle, unsigned long *maxj)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2827
  {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2828
2829
2830
  	int cpu;
  	unsigned long flags;
  	unsigned long mask;
a0b6c9a78   Paul E. McKenney   rcu: Clean up cod...
2831
  	struct rcu_node *rnp;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2832

a0b6c9a78   Paul E. McKenney   rcu: Clean up cod...
2833
  	rcu_for_each_leaf_node(rsp, rnp) {
bde6c3aa9   Paul E. McKenney   rcu: Provide cond...
2834
  		cond_resched_rcu_qs();
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2835
  		mask = 0;
2a67e741b   Peter Zijlstra   rcu: Create trans...
2836
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
a0b6c9a78   Paul E. McKenney   rcu: Clean up cod...
2837
  		if (rnp->qsmask == 0) {
a77da14ce   Paul E. McKenney   rcu: Yet another ...
2838
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
  			if (rcu_state_p == &rcu_sched_state ||
  			    rsp != rcu_state_p ||
  			    rcu_preempt_blocked_readers_cgp(rnp)) {
  				/*
  				 * No point in scanning bits because they
  				 * are all zero.  But we might need to
  				 * priority-boost blocked readers.
  				 */
  				rcu_initiate_boost(rnp, flags);
  				/* rcu_initiate_boost() releases rnp->lock */
  				continue;
  			}
  			if (rnp->parent &&
  			    (rnp->parent->qsmask & rnp->grpmask)) {
  				/*
  				 * Race between grace-period
  				 * initialization and task exiting RCU
  				 * read-side critical section: Report.
  				 */
  				rcu_report_unblock_qs_rnp(rsp, rnp, flags);
  				/* rcu_report_unblock_qs_rnp() rlses ->lock */
  				continue;
  			}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2861
  		}
bc75e9998   Mark Rutland   rcu: Correctly ha...
2862
2863
  		for_each_leaf_node_possible_cpu(rnp, cpu) {
  			unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
0edd1b178   Paul E. McKenney   nohz_full: Add fu...
2864
  			if ((rnp->qsmask & bit) != 0) {
0edd1b178   Paul E. McKenney   nohz_full: Add fu...
2865
2866
2867
  				if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
  					mask |= bit;
  			}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2868
  		}
45f014c52   Paul E. McKenney   rcu: Remove redun...
2869
  		if (mask != 0) {
654e95334   Paul E. McKenney   rcu: Associate qu...
2870
2871
  			/* Idle/offline CPUs, report (releases rnp->lock. */
  			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
0aa04b055   Paul E. McKenney   rcu: Process offl...
2872
2873
  		} else {
  			/* Nothing to do here, so just drop the lock. */
67c583a7d   Boqun Feng   RCU: Privatize rc...
2874
  			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2875
  		}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2876
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2877
2878
2879
2880
2881
2882
  }
  
  /*
   * Force quiescent states on reluctant CPUs, and also detect which
   * CPUs are in dyntick-idle mode.
   */
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2883
  static void force_quiescent_state(struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2884
2885
  {
  	unsigned long flags;
394f2769a   Paul E. McKenney   rcu: Prevent forc...
2886
2887
2888
2889
2890
  	bool ret;
  	struct rcu_node *rnp;
  	struct rcu_node *rnp_old = NULL;
  
  	/* Funnel through hierarchy to reduce memory contention. */
d860d4032   Shan Wei   rcu: Use __this_c...
2891
  	rnp = __this_cpu_read(rsp->rda->mynode);
394f2769a   Paul E. McKenney   rcu: Prevent forc...
2892
  	for (; rnp != NULL; rnp = rnp->parent) {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2893
  		ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
394f2769a   Paul E. McKenney   rcu: Prevent forc...
2894
2895
2896
2897
  		      !raw_spin_trylock(&rnp->fqslock);
  		if (rnp_old != NULL)
  			raw_spin_unlock(&rnp_old->fqslock);
  		if (ret) {
a792563bd   Paul E. McKenney   rcu: Eliminate re...
2898
  			rsp->n_force_qs_lh++;
394f2769a   Paul E. McKenney   rcu: Prevent forc...
2899
2900
2901
2902
2903
  			return;
  		}
  		rnp_old = rnp;
  	}
  	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2904

394f2769a   Paul E. McKenney   rcu: Prevent forc...
2905
  	/* Reached the root of the rcu_node tree, acquire lock. */
2a67e741b   Peter Zijlstra   rcu: Create trans...
2906
  	raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
394f2769a   Paul E. McKenney   rcu: Prevent forc...
2907
  	raw_spin_unlock(&rnp_old->fqslock);
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2908
  	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
a792563bd   Paul E. McKenney   rcu: Eliminate re...
2909
  		rsp->n_force_qs_lh++;
67c583a7d   Boqun Feng   RCU: Privatize rc...
2910
  		raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
2911
  		return;  /* Someone beat us to it. */
46a1e34ed   Paul E. McKenney   rcu: Make force_q...
2912
  	}
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2913
  	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2914
  	raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
94d447767   Jisheng Zhang   rcu: Use rcu_gp_k...
2915
  	rcu_gp_kthread_wake(rsp);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2916
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2917
  /*
e0f23060a   Paul E. McKenney   rcu: Update comme...
2918
2919
2920
   * This does the RCU core processing work for the specified rcu_state
   * and rcu_data structures.  This may be called only from the CPU to
   * whom the rdp belongs.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2921
2922
   */
  static void
1bca8cf1a   Paul E. McKenney   rcu: Remove unnee...
2923
  __rcu_process_callbacks(struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2924
2925
  {
  	unsigned long flags;
48a7639ce   Paul E. McKenney   rcu: Make callers...
2926
  	bool needwake;
fa07a58f7   Christoph Lameter   rcu: Replace __th...
2927
  	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2928

2e5975580   Paul E. McKenney   rcu: Simplify RCU...
2929
  	WARN_ON_ONCE(rdp->beenonline == 0);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2930
2931
2932
2933
  	/* Update RCU state based on any recent quiescent states. */
  	rcu_check_quiescent_state(rsp, rdp);
  
  	/* Does this CPU require a not-yet-started grace period? */
dc35c8934   Paul E. McKenney   rcu: Tag callback...
2934
  	local_irq_save(flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2935
  	if (cpu_needs_another_gp(rsp, rdp)) {
6cf100812   Paul E. McKenney   rcu: Add transiti...
2936
  		raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
48a7639ce   Paul E. McKenney   rcu: Make callers...
2937
  		needwake = rcu_start_gp(rsp);
67c583a7d   Boqun Feng   RCU: Privatize rc...
2938
  		raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
48a7639ce   Paul E. McKenney   rcu: Make callers...
2939
2940
  		if (needwake)
  			rcu_gp_kthread_wake(rsp);
dc35c8934   Paul E. McKenney   rcu: Tag callback...
2941
2942
  	} else {
  		local_irq_restore(flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2943
2944
2945
  	}
  
  	/* If there are callbacks ready, invoke them. */
09223371d   Shaohua Li   rcu: Use softirq ...
2946
  	if (cpu_has_callbacks_ready_to_invoke(rdp))
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2947
  		invoke_rcu_callbacks(rsp, rdp);
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2948
2949
2950
  
  	/* Do any needed deferred wakeups of rcuo kthreads. */
  	do_nocb_deferred_wakeup(rdp);
09223371d   Shaohua Li   rcu: Use softirq ...
2951
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2952
  /*
e0f23060a   Paul E. McKenney   rcu: Update comme...
2953
   * Do RCU core processing for the current CPU.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2954
   */
0766f788e   Emese Revfy   latent_entropy: M...
2955
  static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2956
  {
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
2957
  	struct rcu_state *rsp;
bfa00b4c4   Paul E. McKenney   rcu: Prevent offl...
2958
2959
  	if (cpu_is_offline(smp_processor_id()))
  		return;
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2960
  	trace_rcu_utilization(TPS("Start RCU core"));
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
2961
2962
  	for_each_rcu_flavor(rsp)
  		__rcu_process_callbacks(rsp);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
2963
  	trace_rcu_utilization(TPS("End RCU core"));
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2964
  }
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
2965
  /*
e0f23060a   Paul E. McKenney   rcu: Update comme...
2966
2967
2968
   * Schedule RCU callback invocation.  If the specified type of RCU
   * does not support RCU priority boosting, just do a direct call,
   * otherwise wake up the per-CPU kernel kthread.  Note that because we
924df8a01   Paul E. McKenney   rcu: Fix invoke_r...
2969
   * are running on the current CPU with softirqs disabled, the
e0f23060a   Paul E. McKenney   rcu: Update comme...
2970
   * rcu_cpu_kthread_task cannot disappear out from under us.
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
2971
   */
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2972
  static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
2973
  {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2974
  	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
b0d304172   Paul E. McKenney   rcu: Prevent RCU ...
2975
  		return;
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2976
2977
  	if (likely(!rsp->boost)) {
  		rcu_do_batch(rsp, rdp);
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
2978
2979
  		return;
  	}
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2980
  	invoke_rcu_callbacks_kthread();
a26ac2455   Paul E. McKenney   rcu: move TREE_RC...
2981
  }
a46e0899e   Paul E. McKenney   rcu: use softirq ...
2982
  static void invoke_rcu_core(void)
09223371d   Shaohua Li   rcu: Use softirq ...
2983
  {
b0f740360   Paul E. McKenney   rcu: Avoid invoki...
2984
2985
  	if (cpu_online(smp_processor_id()))
  		raise_softirq(RCU_SOFTIRQ);
09223371d   Shaohua Li   rcu: Use softirq ...
2986
  }
29154c57e   Paul E. McKenney   rcu: Split RCU co...
2987
2988
2989
2990
2991
  /*
   * Handle any core-RCU processing required by a call_rcu() invocation.
   */
  static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
  			    struct rcu_head *head, unsigned long flags)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
2992
  {
48a7639ce   Paul E. McKenney   rcu: Make callers...
2993
  	bool needwake;
62fde6edf   Paul E. McKenney   rcu: Make __call_...
2994
2995
2996
2997
  	/*
  	 * If called from an extended quiescent state, invoke the RCU
  	 * core in order to force a re-evaluation of RCU's idleness.
  	 */
9910affa8   Yao Dongdong   rcu: Remove redun...
2998
  	if (!rcu_is_watching())
62fde6edf   Paul E. McKenney   rcu: Make __call_...
2999
  		invoke_rcu_core();
a16b7a693   Paul E. McKenney   rcu: Prevent __ca...
3000
  	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
29154c57e   Paul E. McKenney   rcu: Split RCU co...
3001
  	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2655d57ef   Paul E. McKenney   rcu: prevent call...
3002
  		return;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3003

37c72e56f   Paul E. McKenney   rcu: Prevent RCU ...
3004
3005
3006
3007
3008
3009
3010
  	/*
  	 * Force the grace period if too many callbacks or too long waiting.
  	 * Enforce hysteresis, and don't invoke force_quiescent_state()
  	 * if some other CPU has recently done so.  Also, don't bother
  	 * invoking force_quiescent_state() if the newly enqueued callback
  	 * is the only one waiting for a grace period to complete.
  	 */
2655d57ef   Paul E. McKenney   rcu: prevent call...
3011
  	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
b52573d27   Paul E. McKenney   rcu: reduce __cal...
3012
3013
  
  		/* Are we ignoring a completed grace period? */
470716fc0   Paul E. McKenney   rcu: Switch calle...
3014
  		note_gp_changes(rsp, rdp);
b52573d27   Paul E. McKenney   rcu: reduce __cal...
3015
3016
3017
  
  		/* Start a new grace period if one not already started. */
  		if (!rcu_gp_in_progress(rsp)) {
b52573d27   Paul E. McKenney   rcu: reduce __cal...
3018
  			struct rcu_node *rnp_root = rcu_get_root(rsp);
2a67e741b   Peter Zijlstra   rcu: Create trans...
3019
  			raw_spin_lock_rcu_node(rnp_root);
48a7639ce   Paul E. McKenney   rcu: Make callers...
3020
  			needwake = rcu_start_gp(rsp);
67c583a7d   Boqun Feng   RCU: Privatize rc...
3021
  			raw_spin_unlock_rcu_node(rnp_root);
48a7639ce   Paul E. McKenney   rcu: Make callers...
3022
3023
  			if (needwake)
  				rcu_gp_kthread_wake(rsp);
b52573d27   Paul E. McKenney   rcu: reduce __cal...
3024
3025
3026
3027
3028
  		} else {
  			/* Give the grace period a kick. */
  			rdp->blimit = LONG_MAX;
  			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
  			    *rdp->nxttail[RCU_DONE_TAIL] != head)
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
3029
  				force_quiescent_state(rsp);
b52573d27   Paul E. McKenney   rcu: reduce __cal...
3030
3031
3032
  			rdp->n_force_qs_snap = rsp->n_force_qs;
  			rdp->qlen_last_fqs_check = rdp->qlen;
  		}
4cdfc175c   Paul E. McKenney   rcu: Move quiesce...
3033
  	}
29154c57e   Paul E. McKenney   rcu: Split RCU co...
3034
  }
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3035
  /*
ae1501845   Paul E. McKenney   rcu: Make call_rc...
3036
3037
3038
3039
3040
3041
3042
   * RCU callback function to leak a callback.
   */
  static void rcu_leak_callback(struct rcu_head *rhp)
  {
  }
  
  /*
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3043
3044
3045
3046
3047
   * Helper function for call_rcu() and friends.  The cpu argument will
   * normally be -1, indicating "currently running CPU".  It may specify
   * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
   * is expected to specify a CPU.
   */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3048
  static void
b6a4ae766   Boqun Feng   rcu: Use rcu_call...
3049
  __call_rcu(struct rcu_head *head, rcu_callback_t func,
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3050
  	   struct rcu_state *rsp, int cpu, bool lazy)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3051
3052
3053
  {
  	unsigned long flags;
  	struct rcu_data *rdp;
1146edcbe   Paul E. McKenney   rcu: Loosen __cal...
3054
  	WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
ae1501845   Paul E. McKenney   rcu: Make call_rc...
3055
3056
  	if (debug_rcu_head_queue(head)) {
  		/* Probable double call_rcu(), so leak the callback. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
3057
  		WRITE_ONCE(head->func, rcu_leak_callback);
ae1501845   Paul E. McKenney   rcu: Make call_rc...
3058
3059
3060
3061
  		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback
  ");
  		return;
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3062
3063
  	head->func = func;
  	head->next = NULL;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3064
3065
3066
3067
3068
3069
3070
  	/*
  	 * Opportunistically note grace-period endings and beginnings.
  	 * Note that we might see a beginning right after we see an
  	 * end, but never vice versa, since this CPU has to pass through
  	 * a quiescent state betweentimes.
  	 */
  	local_irq_save(flags);
394f99a90   Lai Jiangshan   rcu: simplify the...
3071
  	rdp = this_cpu_ptr(rsp->rda);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3072
3073
  
  	/* Add the callback to our list. */
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3074
3075
3076
3077
3078
  	if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
  		int offline;
  
  		if (cpu != -1)
  			rdp = per_cpu_ptr(rsp->rda, cpu);
143da9c2f   Paul E. McKenney   rcu: Prevent earl...
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
  		if (likely(rdp->mynode)) {
  			/* Post-boot, so this should be for a no-CBs CPU. */
  			offline = !__call_rcu_nocb(rdp, head, lazy, flags);
  			WARN_ON_ONCE(offline);
  			/* Offline CPU, _call_rcu() illegal, leak callback.  */
  			local_irq_restore(flags);
  			return;
  		}
  		/*
  		 * Very early boot, before rcu_init().  Initialize if needed
  		 * and then drop through to queue the callback.
  		 */
  		BUG_ON(cpu != -1);
34404ca8f   Paul E. McKenney   rcu: Move early-b...
3092
  		WARN_ON_ONCE(!rcu_is_watching());
143da9c2f   Paul E. McKenney   rcu: Prevent earl...
3093
3094
  		if (!likely(rdp->nxtlist))
  			init_default_callback_list(rdp);
0d8ee37e2   Paul E. McKenney   rcu: Disallow cal...
3095
  	}
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
3096
  	WRITE_ONCE(rdp->qlen, rdp->qlen + 1);
486e25934   Paul E. McKenney   rcu: Avoid waking...
3097
3098
  	if (lazy)
  		rdp->qlen_lazy++;
c57afe80d   Paul E. McKenney   rcu: Make RCU_FAS...
3099
3100
  	else
  		rcu_idle_count_callbacks_posted();
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3101
3102
3103
  	smp_mb();  /* Count before adding callback for rcu_barrier(). */
  	*rdp->nxttail[RCU_NEXT_TAIL] = head;
  	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
2655d57ef   Paul E. McKenney   rcu: prevent call...
3104

d4c08f2ac   Paul E. McKenney   rcu: Add grace-pe...
3105
3106
  	if (__is_kfree_rcu_offset((unsigned long)func))
  		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
486e25934   Paul E. McKenney   rcu: Avoid waking...
3107
  					 rdp->qlen_lazy, rdp->qlen);
d4c08f2ac   Paul E. McKenney   rcu: Add grace-pe...
3108
  	else
486e25934   Paul E. McKenney   rcu: Avoid waking...
3109
  		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
d4c08f2ac   Paul E. McKenney   rcu: Add grace-pe...
3110

29154c57e   Paul E. McKenney   rcu: Split RCU co...
3111
3112
  	/* Go handle any RCU core processing required. */
  	__call_rcu_core(rsp, rdp, head, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3113
3114
3115
3116
  	local_irq_restore(flags);
  }
  
  /*
d6714c22b   Paul E. McKenney   rcu: Renamings to...
3117
   * Queue an RCU-sched callback for invocation after a grace period.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3118
   */
b6a4ae766   Boqun Feng   rcu: Use rcu_call...
3119
  void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3120
  {
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3121
  	__call_rcu(head, func, &rcu_sched_state, -1, 0);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3122
  }
d6714c22b   Paul E. McKenney   rcu: Renamings to...
3123
  EXPORT_SYMBOL_GPL(call_rcu_sched);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3124
3125
  
  /*
486e25934   Paul E. McKenney   rcu: Avoid waking...
3126
   * Queue an RCU callback for invocation after a quicker grace period.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3127
   */
b6a4ae766   Boqun Feng   rcu: Use rcu_call...
3128
  void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3129
  {
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3130
  	__call_rcu(head, func, &rcu_bh_state, -1, 0);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3131
3132
  }
  EXPORT_SYMBOL_GPL(call_rcu_bh);
6d8133919   Paul E. McKenney   rcu: Document why...
3133
  /*
495aa969d   Andreea-Cristina Bernat   rcu: Consolidate ...
3134
3135
3136
3137
3138
3139
3140
   * Queue an RCU callback for lazy invocation after a grace period.
   * This will likely be later named something like "call_rcu_lazy()",
   * but this change will require some way of tagging the lazy RCU
   * callbacks in the list of pending callbacks. Until then, this
   * function may only be called from __kfree_rcu().
   */
  void kfree_call_rcu(struct rcu_head *head,
b6a4ae766   Boqun Feng   rcu: Use rcu_call...
3141
  		    rcu_callback_t func)
495aa969d   Andreea-Cristina Bernat   rcu: Consolidate ...
3142
  {
e534165bb   Uma Sharma   rcu: Variable nam...
3143
  	__call_rcu(head, func, rcu_state_p, -1, 1);
495aa969d   Andreea-Cristina Bernat   rcu: Consolidate ...
3144
3145
3146
3147
  }
  EXPORT_SYMBOL_GPL(kfree_call_rcu);
  
  /*
6d8133919   Paul E. McKenney   rcu: Document why...
3148
3149
3150
3151
3152
3153
3154
   * Because a context switch is a grace period for RCU-sched and RCU-bh,
   * any blocking grace-period wait automatically implies a grace period
   * if there is only one CPU online at any point time during execution
   * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
   * occasionally incorrectly indicate that there are multiple CPUs online
   * when there was in fact only one the whole time, as this just adds
   * some overhead: RCU still operates correctly.
6d8133919   Paul E. McKenney   rcu: Document why...
3155
3156
3157
   */
  static inline int rcu_blocking_is_gp(void)
  {
95f0c1de3   Paul E. McKenney   rcu: Disable pree...
3158
  	int ret;
6d8133919   Paul E. McKenney   rcu: Document why...
3159
  	might_sleep();  /* Check for RCU read-side critical section. */
95f0c1de3   Paul E. McKenney   rcu: Disable pree...
3160
3161
3162
3163
  	preempt_disable();
  	ret = num_online_cpus() <= 1;
  	preempt_enable();
  	return ret;
6d8133919   Paul E. McKenney   rcu: Document why...
3164
  }
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3165
3166
3167
3168
3169
3170
3171
3172
3173
3174
3175
3176
  /**
   * synchronize_sched - wait until an rcu-sched grace period has elapsed.
   *
   * Control will return to the caller some time after a full rcu-sched
   * grace period has elapsed, in other words after all currently executing
   * rcu-sched read-side critical sections have completed.   These read-side
   * critical sections are delimited by rcu_read_lock_sched() and
   * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
   * local_irq_disable(), and so on may be used in place of
   * rcu_read_lock_sched().
   *
   * This means that all preempt_disable code sequences, including NMI and
f0a0e6f28   Paul E. McKenney   rcu: Clarify memo...
3177
3178
3179
3180
3181
3182
3183
3184
3185
3186
3187
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
   * non-threaded hardware-interrupt handlers, in progress on entry will
   * have completed before this primitive returns.  However, this does not
   * guarantee that softirq handlers will have completed, since in some
   * kernels, these handlers can run in process context, and can block.
   *
   * Note that this guarantee implies further memory-ordering guarantees.
   * On systems with more than one CPU, when synchronize_sched() returns,
   * each CPU is guaranteed to have executed a full memory barrier since the
   * end of its last RCU-sched read-side critical section whose beginning
   * preceded the call to synchronize_sched().  In addition, each CPU having
   * an RCU read-side critical section that extends beyond the return from
   * synchronize_sched() is guaranteed to have executed a full memory barrier
   * after the beginning of synchronize_sched() and before the beginning of
   * that RCU read-side critical section.  Note that these guarantees include
   * CPUs that are offline, idle, or executing in user mode, as well as CPUs
   * that are executing in the kernel.
   *
   * Furthermore, if CPU A invoked synchronize_sched(), which returned
   * to its caller on CPU B, then both CPU A and CPU B are guaranteed
   * to have executed a full memory barrier during the execution of
   * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
   * again only if the system has more than one CPU).
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3199
3200
3201
3202
3203
3204
3205
3206
3207
   *
   * This primitive provides the guarantees made by the (now removed)
   * synchronize_kernel() API.  In contrast, synchronize_rcu() only
   * guarantees that rcu_read_lock() sections will have completed.
   * In "classic RCU", these two guarantees happen to be one and
   * the same, but can differ in realtime RCU implementations.
   */
  void synchronize_sched(void)
  {
f78f5b90c   Paul E. McKenney   rcu: Rename rcu_l...
3208
3209
3210
3211
  	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
  			 lock_is_held(&rcu_lock_map) ||
  			 lock_is_held(&rcu_sched_lock_map),
  			 "Illegal synchronize_sched() in RCU-sched read-side critical section");
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3212
3213
  	if (rcu_blocking_is_gp())
  		return;
5afff48bd   Paul E. McKenney   rcu: Update from ...
3214
  	if (rcu_gp_is_expedited())
3705b88db   Antti P Miettinen   rcu: Add a module...
3215
3216
3217
  		synchronize_sched_expedited();
  	else
  		wait_rcu_gp(call_rcu_sched);
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
  }
  EXPORT_SYMBOL_GPL(synchronize_sched);
  
  /**
   * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
   *
   * Control will return to the caller some time after a full rcu_bh grace
   * period has elapsed, in other words after all currently executing rcu_bh
   * read-side critical sections have completed.  RCU read-side critical
   * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
   * and may be nested.
f0a0e6f28   Paul E. McKenney   rcu: Clarify memo...
3229
3230
3231
   *
   * See the description of synchronize_sched() for more detailed information
   * on memory ordering guarantees.
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3232
3233
3234
   */
  void synchronize_rcu_bh(void)
  {
f78f5b90c   Paul E. McKenney   rcu: Rename rcu_l...
3235
3236
3237
3238
  	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
  			 lock_is_held(&rcu_lock_map) ||
  			 lock_is_held(&rcu_sched_lock_map),
  			 "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3239
3240
  	if (rcu_blocking_is_gp())
  		return;
5afff48bd   Paul E. McKenney   rcu: Update from ...
3241
  	if (rcu_gp_is_expedited())
3705b88db   Antti P Miettinen   rcu: Add a module...
3242
3243
3244
  		synchronize_rcu_bh_expedited();
  	else
  		wait_rcu_gp(call_rcu_bh);
6ebb237be   Paul E. McKenney   rcu: Re-arrange c...
3245
3246
  }
  EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
765a3f4fe   Paul E. McKenney   rcu: Provide grac...
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257
3258
3259
3260
3261
3262
3263
3264
3265
3266
  /**
   * get_state_synchronize_rcu - Snapshot current RCU state
   *
   * Returns a cookie that is used by a later call to cond_synchronize_rcu()
   * to determine whether or not a full grace period has elapsed in the
   * meantime.
   */
  unsigned long get_state_synchronize_rcu(void)
  {
  	/*
  	 * Any prior manipulation of RCU-protected data must happen
  	 * before the load from ->gpnum.
  	 */
  	smp_mb();  /* ^^^ */
  
  	/*
  	 * Make sure this load happens before the purportedly
  	 * time-consuming work between get_state_synchronize_rcu()
  	 * and cond_synchronize_rcu().
  	 */
e534165bb   Uma Sharma   rcu: Variable nam...
3267
  	return smp_load_acquire(&rcu_state_p->gpnum);
765a3f4fe   Paul E. McKenney   rcu: Provide grac...
3268
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
  }
  EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
  
  /**
   * cond_synchronize_rcu - Conditionally wait for an RCU grace period
   *
   * @oldstate: return value from earlier call to get_state_synchronize_rcu()
   *
   * If a full RCU grace period has elapsed since the earlier call to
   * get_state_synchronize_rcu(), just return.  Otherwise, invoke
   * synchronize_rcu() to wait for a full grace period.
   *
   * Yes, this function does not take counter wrap into account.  But
   * counter wrap is harmless.  If the counter wraps, we have waited for
   * more than 2 billion grace periods (and way more on a 64-bit system!),
   * so waiting for one additional grace period should be just fine.
   */
  void cond_synchronize_rcu(unsigned long oldstate)
  {
  	unsigned long newstate;
  
  	/*
  	 * Ensure that this load happens before any RCU-destructive
  	 * actions the caller might carry out after we return.
  	 */
e534165bb   Uma Sharma   rcu: Variable nam...
3293
  	newstate = smp_load_acquire(&rcu_state_p->completed);
765a3f4fe   Paul E. McKenney   rcu: Provide grac...
3294
3295
3296
3297
  	if (ULONG_CMP_GE(oldstate, newstate))
  		synchronize_rcu();
  }
  EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
24560056d   Paul E. McKenney   rcu: Add RCU-sche...
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307
3308
3309
3310
3311
3312
3313
3314
3315
3316
3317
3318
3319
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
  /**
   * get_state_synchronize_sched - Snapshot current RCU-sched state
   *
   * Returns a cookie that is used by a later call to cond_synchronize_sched()
   * to determine whether or not a full grace period has elapsed in the
   * meantime.
   */
  unsigned long get_state_synchronize_sched(void)
  {
  	/*
  	 * Any prior manipulation of RCU-protected data must happen
  	 * before the load from ->gpnum.
  	 */
  	smp_mb();  /* ^^^ */
  
  	/*
  	 * Make sure this load happens before the purportedly
  	 * time-consuming work between get_state_synchronize_sched()
  	 * and cond_synchronize_sched().
  	 */
  	return smp_load_acquire(&rcu_sched_state.gpnum);
  }
  EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
  
  /**
   * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
   *
   * @oldstate: return value from earlier call to get_state_synchronize_sched()
   *
   * If a full RCU-sched grace period has elapsed since the earlier call to
   * get_state_synchronize_sched(), just return.  Otherwise, invoke
   * synchronize_sched() to wait for a full grace period.
   *
   * Yes, this function does not take counter wrap into account.  But
   * counter wrap is harmless.  If the counter wraps, we have waited for
   * more than 2 billion grace periods (and way more on a 64-bit system!),
   * so waiting for one additional grace period should be just fine.
   */
  void cond_synchronize_sched(unsigned long oldstate)
  {
  	unsigned long newstate;
  
  	/*
  	 * Ensure that this load happens before any RCU-destructive
  	 * actions the caller might carry out after we return.
  	 */
  	newstate = smp_load_acquire(&rcu_sched_state.completed);
  	if (ULONG_CMP_GE(oldstate, newstate))
  		synchronize_sched();
  }
  EXPORT_SYMBOL_GPL(cond_synchronize_sched);
28f00767e   Paul E. McKenney   rcu: Abstract seq...
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359
3360
3361
3362
3363
3364
3365
3366
3367
3368
  /* Adjust sequence number for start of update-side operation. */
  static void rcu_seq_start(unsigned long *sp)
  {
  	WRITE_ONCE(*sp, *sp + 1);
  	smp_mb(); /* Ensure update-side operation after counter increment. */
  	WARN_ON_ONCE(!(*sp & 0x1));
  }
  
  /* Adjust sequence number for end of update-side operation. */
  static void rcu_seq_end(unsigned long *sp)
  {
  	smp_mb(); /* Ensure update-side operation before counter increment. */
  	WRITE_ONCE(*sp, *sp + 1);
  	WARN_ON_ONCE(*sp & 0x1);
  }
  
  /* Take a snapshot of the update side's sequence number. */
  static unsigned long rcu_seq_snap(unsigned long *sp)
  {
  	unsigned long s;
28f00767e   Paul E. McKenney   rcu: Abstract seq...
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
3379
3380
3381
  	s = (READ_ONCE(*sp) + 3) & ~0x1;
  	smp_mb(); /* Above access must not bleed into critical section. */
  	return s;
  }
  
  /*
   * Given a snapshot from rcu_seq_snap(), determine whether or not a
   * full update-side operation has occurred.
   */
  static bool rcu_seq_done(unsigned long *sp, unsigned long s)
  {
  	return ULONG_CMP_GE(READ_ONCE(*sp), s);
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3382
3383
3384
3385
3386
3387
3388
3389
3390
  /*
   * Check to see if there is any immediate RCU-related work to be done
   * by the current CPU, for the specified type of RCU, returning 1 if so.
   * The checks are in order of increasing expense: checks that can be
   * carried out against CPU-local state are performed first.  However,
   * we must check for CPU stalls first, else we might not get a chance.
   */
  static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  {
2f51f9884   Paul E. McKenney   rcu: Eliminate __...
3391
  	struct rcu_node *rnp = rdp->mynode;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3392
3393
3394
3395
  	rdp->n_rcu_pending++;
  
  	/* Check for CPU stalls, if enabled. */
  	check_cpu_stall(rsp, rdp);
a096932f0   Paul E. McKenney   rcu: Don't activa...
3396
3397
3398
  	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
  	if (rcu_nohz_full_cpu(rsp))
  		return 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3399
  	/* Is the RCU core waiting for a quiescent state from this CPU? */
5c51dd734   Paul E. McKenney   rcu: Prevent earl...
3400
  	if (rcu_scheduler_fully_active &&
5b74c4589   Paul E. McKenney   rcu: Make ->cpu_n...
3401
  	    rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
3402
  	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
97c668b8e   Paul E. McKenney   rcu: Rename qs_pe...
3403
3404
  		rdp->n_rp_core_needs_qs++;
  	} else if (rdp->core_needs_qs &&
5b74c4589   Paul E. McKenney   rcu: Make ->cpu_n...
3405
  		   (!rdp->cpu_no_qs.b.norm ||
5cd37193c   Paul E. McKenney   rcu: Make cond_re...
3406
  		    rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
d21670aca   Paul E. McKenney   rcu: reduce the n...
3407
  		rdp->n_rp_report_qs++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3408
  		return 1;
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3409
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3410
3411
  
  	/* Does this CPU have callbacks ready to invoke? */
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3412
3413
  	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
  		rdp->n_rp_cb_ready++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3414
  		return 1;
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3415
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3416
3417
  
  	/* Has RCU gone idle with this CPU needing another grace period? */
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3418
3419
  	if (cpu_needs_another_gp(rsp, rdp)) {
  		rdp->n_rp_cpu_needs_gp++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3420
  		return 1;
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3421
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3422
3423
  
  	/* Has another RCU grace period completed?  */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
3424
  	if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3425
  		rdp->n_rp_gp_completed++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3426
  		return 1;
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3427
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3428
3429
  
  	/* Has a new RCU grace period started? */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
3430
3431
  	if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
  	    unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3432
  		rdp->n_rp_gp_started++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3433
  		return 1;
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3434
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3435

96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
3436
3437
3438
3439
3440
  	/* Does this CPU need a deferred NOCB wakeup? */
  	if (rcu_nocb_need_deferred_wakeup(rdp)) {
  		rdp->n_rp_nocb_defer_wakeup++;
  		return 1;
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3441
  	/* nothing to do */
7ba5c840e   Paul E. McKenney   rcu: Add __rcu_pe...
3442
  	rdp->n_rp_need_nothing++;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3443
3444
3445
3446
3447
3448
3449
3450
  	return 0;
  }
  
  /*
   * Check to see if there is any immediate RCU-related work to be done
   * by the current CPU, returning 1 if so.  This function is part of the
   * RCU implementation; it is -not- an exported member of the RCU API.
   */
e3950ecd5   Paul E. McKenney   rcu: Remove "cpu"...
3451
  static int rcu_pending(void)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3452
  {
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
3453
3454
3455
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp)
e3950ecd5   Paul E. McKenney   rcu: Remove "cpu"...
3456
  		if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
3457
3458
  			return 1;
  	return 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3459
3460
3461
  }
  
  /*
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
3462
3463
3464
   * Return true if the specified CPU has any callback.  If all_lazy is
   * non-NULL, store an indication of whether all callbacks are lazy.
   * (If there are no callbacks, all of them are deemed to be lazy.)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3465
   */
82072c4fc   Nicholas Mc Guire   rcu: Change funct...
3466
  static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3467
  {
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
3468
3469
3470
  	bool al = true;
  	bool hc = false;
  	struct rcu_data *rdp;
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
3471
  	struct rcu_state *rsp;
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
3472
  	for_each_rcu_flavor(rsp) {
aa6da5140   Paul E. McKenney   rcu: Remove "cpu"...
3473
  		rdp = this_cpu_ptr(rsp->rda);
69c8d28c9   Paul E. McKenney   rcu: Micro-optimi...
3474
3475
3476
3477
  		if (!rdp->nxtlist)
  			continue;
  		hc = true;
  		if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
3478
  			al = false;
69c8d28c9   Paul E. McKenney   rcu: Micro-optimi...
3479
3480
  			break;
  		}
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
3481
3482
3483
3484
  	}
  	if (all_lazy)
  		*all_lazy = al;
  	return hc;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3485
  }
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3486
  /*
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3487
3488
3489
   * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
   * the compiler is expected to optimize this away.
   */
e66c33d57   Steven Rostedt (Red Hat)   rcu: Add const an...
3490
  static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3491
3492
3493
3494
3495
3496
3497
  			       int cpu, unsigned long done)
  {
  	trace_rcu_barrier(rsp->name, s, cpu,
  			  atomic_read(&rsp->barrier_cpu_count), done);
  }
  
  /*
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3498
3499
3500
   * RCU callback function for _rcu_barrier().  If we are last, wake
   * up the task executing _rcu_barrier().
   */
24ebbca8e   Paul E. McKenney   rcu: Move rcu_bar...
3501
  static void rcu_barrier_callback(struct rcu_head *rhp)
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3502
  {
24ebbca8e   Paul E. McKenney   rcu: Move rcu_bar...
3503
3504
  	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
  	struct rcu_state *rsp = rdp->rsp;
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3505
  	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3506
  		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence);
7db74df88   Paul E. McKenney   rcu: Move rcu_bar...
3507
  		complete(&rsp->barrier_completion);
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3508
  	} else {
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3509
  		_rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence);
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3510
  	}
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3511
3512
3513
3514
3515
3516
3517
  }
  
  /*
   * Called with preemption disabled, and from cross-cpu IRQ context.
   */
  static void rcu_barrier_func(void *type)
  {
037b64ed0   Paul E. McKenney   rcu: Place pointe...
3518
  	struct rcu_state *rsp = type;
fa07a58f7   Christoph Lameter   rcu: Replace __th...
3519
  	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3520

4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3521
  	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence);
24ebbca8e   Paul E. McKenney   rcu: Move rcu_bar...
3522
  	atomic_inc(&rsp->barrier_cpu_count);
06668efa9   Paul E. McKenney   rcu: Move _rcu_ba...
3523
  	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3524
  }
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3525
3526
3527
3528
  /*
   * Orchestrate the specified type of RCU barrier, waiting for all
   * RCU callbacks of the specified type to complete.
   */
037b64ed0   Paul E. McKenney   rcu: Place pointe...
3529
  static void _rcu_barrier(struct rcu_state *rsp)
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3530
  {
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3531
  	int cpu;
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3532
  	struct rcu_data *rdp;
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3533
  	unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3534

4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3535
  	_rcu_barrier_trace(rsp, "Begin", -1, s);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3536

e74f4c456   Paul E. McKenney   rcu: Make hot-unp...
3537
  	/* Take mutex to serialize concurrent rcu_barrier() requests. */
7be7f0be9   Paul E. McKenney   rcu: Move rcu_bar...
3538
  	mutex_lock(&rsp->barrier_mutex);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3539

4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3540
3541
3542
  	/* Did someone else do our work for us? */
  	if (rcu_seq_done(&rsp->barrier_sequence, s)) {
  		_rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence);
cf3a9c484   Paul E. McKenney   rcu: Increase rcu...
3543
3544
3545
3546
  		smp_mb(); /* caller's subsequent code after above check. */
  		mutex_unlock(&rsp->barrier_mutex);
  		return;
  	}
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3547
3548
3549
  	/* Mark the start of the barrier operation. */
  	rcu_seq_start(&rsp->barrier_sequence);
  	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3550

d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3551
  	/*
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3552
3553
  	 * Initialize the count to one rather than to zero in order to
  	 * avoid a too-soon return to zero in case of a short grace period
1331e7a1b   Paul E. McKenney   rcu: Remove _rcu_...
3554
3555
  	 * (or preemption of this task).  Exclude CPU-hotplug operations
  	 * to ensure that no offline CPU has callbacks queued.
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3556
  	 */
7db74df88   Paul E. McKenney   rcu: Move rcu_bar...
3557
  	init_completion(&rsp->barrier_completion);
24ebbca8e   Paul E. McKenney   rcu: Move rcu_bar...
3558
  	atomic_set(&rsp->barrier_cpu_count, 1);
1331e7a1b   Paul E. McKenney   rcu: Remove _rcu_...
3559
  	get_online_cpus();
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3560
3561
  
  	/*
1331e7a1b   Paul E. McKenney   rcu: Remove _rcu_...
3562
3563
3564
  	 * Force each CPU with callbacks to register a new callback.
  	 * When that callback is invoked, we will know that all of the
  	 * corresponding CPU's preceding callbacks have been invoked.
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3565
  	 */
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3566
  	for_each_possible_cpu(cpu) {
d1e43fa5f   Frederic Weisbecker   nohz: Ensure full...
3567
  		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3568
  			continue;
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3569
  		rdp = per_cpu_ptr(rsp->rda, cpu);
d1e43fa5f   Frederic Weisbecker   nohz: Ensure full...
3570
  		if (rcu_is_nocb_cpu(cpu)) {
d7e299339   Paul E. McKenney   rcu: Make rcu_bar...
3571
3572
  			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
  				_rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3573
  						   rsp->barrier_sequence);
d7e299339   Paul E. McKenney   rcu: Make rcu_bar...
3574
3575
  			} else {
  				_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3576
  						   rsp->barrier_sequence);
41050a009   Paul E. McKenney   rcu: Fix rcu_barr...
3577
  				smp_mb__before_atomic();
d7e299339   Paul E. McKenney   rcu: Make rcu_bar...
3578
3579
3580
3581
  				atomic_inc(&rsp->barrier_cpu_count);
  				__call_rcu(&rdp->barrier_head,
  					   rcu_barrier_callback, rsp, cpu, 0);
  			}
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
3582
  		} else if (READ_ONCE(rdp->qlen)) {
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3583
  			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3584
  					   rsp->barrier_sequence);
037b64ed0   Paul E. McKenney   rcu: Place pointe...
3585
  			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3586
  		} else {
a83eff0a8   Paul E. McKenney   rcu: Add tracing ...
3587
  			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3588
  					   rsp->barrier_sequence);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3589
3590
  		}
  	}
1331e7a1b   Paul E. McKenney   rcu: Remove _rcu_...
3591
  	put_online_cpus();
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3592
3593
3594
3595
3596
  
  	/*
  	 * Now that we have an rcu_barrier_callback() callback on each
  	 * CPU, and thus each counted, remove the initial count.
  	 */
24ebbca8e   Paul E. McKenney   rcu: Move rcu_bar...
3597
  	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
7db74df88   Paul E. McKenney   rcu: Move rcu_bar...
3598
  		complete(&rsp->barrier_completion);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3599
3600
  
  	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
7db74df88   Paul E. McKenney   rcu: Move rcu_bar...
3601
  	wait_for_completion(&rsp->barrier_completion);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3602

4f525a528   Paul E. McKenney   rcu: Apply rcu_se...
3603
3604
3605
  	/* Mark the end of the barrier operation. */
  	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence);
  	rcu_seq_end(&rsp->barrier_sequence);
b1420f1c8   Paul E. McKenney   rcu: Make rcu_bar...
3606
  	/* Other rcu_barrier() invocations can now safely proceed. */
7be7f0be9   Paul E. McKenney   rcu: Move rcu_bar...
3607
  	mutex_unlock(&rsp->barrier_mutex);
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3608
  }
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3609
3610
3611
3612
3613
3614
  
  /**
   * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
   */
  void rcu_barrier_bh(void)
  {
037b64ed0   Paul E. McKenney   rcu: Place pointe...
3615
  	_rcu_barrier(&rcu_bh_state);
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3616
3617
3618
3619
3620
3621
3622
3623
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  
  /**
   * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
   */
  void rcu_barrier_sched(void)
  {
037b64ed0   Paul E. McKenney   rcu: Place pointe...
3624
  	_rcu_barrier(&rcu_sched_state);
d0ec774cb   Paul E. McKenney   rcu: Move rcu_bar...
3625
3626
  }
  EXPORT_SYMBOL_GPL(rcu_barrier_sched);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3627
  /*
0aa04b055   Paul E. McKenney   rcu: Process offl...
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
   * Propagate ->qsinitmask bits up the rcu_node tree to account for the
   * first CPU in a given leaf rcu_node structure coming online.  The caller
   * must hold the corresponding leaf rcu_node ->lock with interrrupts
   * disabled.
   */
  static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
  {
  	long mask;
  	struct rcu_node *rnp = rnp_leaf;
  
  	for (;;) {
  		mask = rnp->grpmask;
  		rnp = rnp->parent;
  		if (rnp == NULL)
  			return;
6cf100812   Paul E. McKenney   rcu: Add transiti...
3643
  		raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
0aa04b055   Paul E. McKenney   rcu: Process offl...
3644
  		rnp->qsmaskinit |= mask;
67c583a7d   Boqun Feng   RCU: Privatize rc...
3645
  		raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
0aa04b055   Paul E. McKenney   rcu: Process offl...
3646
3647
3648
3649
  	}
  }
  
  /*
27569620c   Paul E. McKenney   rcu: Split hierar...
3650
   * Do boot-time initialization of a CPU's per-CPU RCU data.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3651
   */
27569620c   Paul E. McKenney   rcu: Split hierar...
3652
3653
  static void __init
  rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3654
3655
  {
  	unsigned long flags;
394f99a90   Lai Jiangshan   rcu: simplify the...
3656
  	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
27569620c   Paul E. McKenney   rcu: Split hierar...
3657
3658
3659
  	struct rcu_node *rnp = rcu_get_root(rsp);
  
  	/* Set up local state, ensuring consistent view of global state. */
6cf100812   Paul E. McKenney   rcu: Add transiti...
3660
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
bc75e9998   Mark Rutland   rcu: Correctly ha...
3661
  	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
27569620c   Paul E. McKenney   rcu: Split hierar...
3662
  	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
29e37d814   Paul E. McKenney   rcu: Allow nestin...
3663
  	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
9b2e4f188   Paul E. McKenney   rcu: Track idlene...
3664
  	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
27569620c   Paul E. McKenney   rcu: Split hierar...
3665
  	rdp->cpu = cpu;
d4c08f2ac   Paul E. McKenney   rcu: Add grace-pe...
3666
  	rdp->rsp = rsp;
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
3667
  	rcu_boot_init_nocb_percpu_data(rdp);
67c583a7d   Boqun Feng   RCU: Privatize rc...
3668
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
27569620c   Paul E. McKenney   rcu: Split hierar...
3669
3670
3671
3672
3673
3674
3675
  }
  
  /*
   * Initialize a CPU's per-CPU RCU data.  Note that only one online or
   * offline event can be happening at a given time.  Note also that we
   * can accept some slop in the rsp->completed access due to the fact
   * that this CPU cannot possibly have any RCU callbacks in flight yet.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3676
   */
49fb4c629   Paul Gortmaker   rcu: delete __cpu...
3677
  static void
9b67122ae   Iulia Manda   rcu: Remove unuse...
3678
  rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3679
3680
  {
  	unsigned long flags;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3681
  	unsigned long mask;
394f99a90   Lai Jiangshan   rcu: simplify the...
3682
  	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3683
3684
3685
  	struct rcu_node *rnp = rcu_get_root(rsp);
  
  	/* Set up local state, ensuring consistent view of global state. */
6cf100812   Paul E. McKenney   rcu: Add transiti...
3686
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
37c72e56f   Paul E. McKenney   rcu: Prevent RCU ...
3687
3688
  	rdp->qlen_last_fqs_check = 0;
  	rdp->n_force_qs_snap = rsp->n_force_qs;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3689
  	rdp->blimit = blimit;
39c8d313c   Paul E. McKenney   rcu: Avoid clobbe...
3690
3691
  	if (!rdp->nxtlist)
  		init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
29e37d814   Paul E. McKenney   rcu: Allow nestin...
3692
  	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
2333210b2   Paul E. McKenney   nohz_full: Add rc...
3693
  	rcu_sysidle_init_percpu_data(rdp->dynticks);
c92b131bd   Paul E. McKenney   rcu: Remove dynti...
3694
3695
  	atomic_set(&rdp->dynticks->dynticks,
  		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
67c583a7d   Boqun Feng   RCU: Privatize rc...
3696
  	raw_spin_unlock_rcu_node(rnp);		/* irqs remain disabled. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3697

0aa04b055   Paul E. McKenney   rcu: Process offl...
3698
3699
3700
3701
3702
  	/*
  	 * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
  	 * propagation up the rcu_node tree will happen at the beginning
  	 * of the next grace period.
  	 */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3703
3704
  	rnp = rdp->mynode;
  	mask = rdp->grpmask;
2a67e741b   Peter Zijlstra   rcu: Create trans...
3705
  	raw_spin_lock_rcu_node(rnp);		/* irqs already disabled. */
b9585e940   Paul E. McKenney   rcu: Consolidate ...
3706
3707
3708
  	if (!rdp->beenonline)
  		WRITE_ONCE(rsp->ncpus, READ_ONCE(rsp->ncpus) + 1);
  	rdp->beenonline = true;	 /* We have now been online. */
0aa04b055   Paul E. McKenney   rcu: Process offl...
3709
3710
  	rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
  	rdp->completed = rnp->completed;
5b74c4589   Paul E. McKenney   rcu: Make ->cpu_n...
3711
  	rdp->cpu_no_qs.b.norm = true;
a738eec6c   Paul E. McKenney   rcu: Correctly in...
3712
  	rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
97c668b8e   Paul E. McKenney   rcu: Rename qs_pe...
3713
  	rdp->core_needs_qs = false;
0aa04b055   Paul E. McKenney   rcu: Process offl...
3714
  	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
67c583a7d   Boqun Feng   RCU: Privatize rc...
3715
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3716
  }
4df837425   Thomas Gleixner   rcu: Convert rcut...
3717
  int rcutree_prepare_cpu(unsigned int cpu)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3718
  {
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
3719
3720
3721
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp)
9b67122ae   Iulia Manda   rcu: Remove unuse...
3722
  		rcu_init_percpu_data(cpu, rsp);
4df837425   Thomas Gleixner   rcu: Convert rcut...
3723
3724
3725
3726
3727
3728
3729
3730
3731
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
3758
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
  
  	rcu_prepare_kthreads(cpu);
  	rcu_spawn_all_nocb_kthreads(cpu);
  
  	return 0;
  }
  
  static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
  {
  	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
  
  	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
  }
  
  int rcutree_online_cpu(unsigned int cpu)
  {
  	sync_sched_exp_online_cleanup(cpu);
  	rcutree_affinity_setting(cpu, -1);
  	return 0;
  }
  
  int rcutree_offline_cpu(unsigned int cpu)
  {
  	rcutree_affinity_setting(cpu, cpu);
  	return 0;
  }
  
  
  int rcutree_dying_cpu(unsigned int cpu)
  {
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp)
  		rcu_cleanup_dying_cpu(rsp);
  	return 0;
  }
  
  int rcutree_dead_cpu(unsigned int cpu)
  {
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp) {
  		rcu_cleanup_dead_cpu(cpu, rsp);
  		do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
  	}
  	return 0;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3769
  }
7ec99de36   Paul E. McKenney   rcu: Provide exac...
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785
3786
3787
3788
3789
3790
3791
3792
3793
3794
  /*
   * Mark the specified CPU as being online so that subsequent grace periods
   * (both expedited and normal) will wait on it.  Note that this means that
   * incoming CPUs are not allowed to use RCU read-side critical sections
   * until this function is called.  Failing to observe this restriction
   * will result in lockdep splats.
   */
  void rcu_cpu_starting(unsigned int cpu)
  {
  	unsigned long flags;
  	unsigned long mask;
  	struct rcu_data *rdp;
  	struct rcu_node *rnp;
  	struct rcu_state *rsp;
  
  	for_each_rcu_flavor(rsp) {
  		rdp = this_cpu_ptr(rsp->rda);
  		rnp = rdp->mynode;
  		mask = rdp->grpmask;
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
  		rnp->qsmaskinitnext |= mask;
  		rnp->expmaskinitnext |= mask;
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  	}
  }
27d50c7ee   Thomas Gleixner   rcu: Make CPU_DYI...
3795
3796
3797
3798
3799
  #ifdef CONFIG_HOTPLUG_CPU
  /*
   * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
   * function.  We now remove it from the rcu_node tree's ->qsmaskinit
   * bit masks.
710d60cbf   Linus Torvalds   Merge branch 'smp...
3800
3801
3802
   * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
   * function.  We now remove it from the rcu_node tree's ->qsmaskinit
   * bit masks.
27d50c7ee   Thomas Gleixner   rcu: Make CPU_DYI...
3803
3804
3805
3806
3807
3808
3809
   */
  static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
  {
  	unsigned long flags;
  	unsigned long mask;
  	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
27d50c7ee   Thomas Gleixner   rcu: Make CPU_DYI...
3810
3811
3812
3813
  	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
  	mask = rdp->grpmask;
  	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
  	rnp->qsmaskinitnext &= ~mask;
710d60cbf   Linus Torvalds   Merge branch 'smp...
3814
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
27d50c7ee   Thomas Gleixner   rcu: Make CPU_DYI...
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
  }
  
  void rcu_report_dead(unsigned int cpu)
  {
  	struct rcu_state *rsp;
  
  	/* QS for any half-done expedited RCU-sched GP. */
  	preempt_disable();
  	rcu_report_exp_rdp(&rcu_sched_state,
  			   this_cpu_ptr(rcu_sched_state.rda), true);
  	preempt_enable();
  	for_each_rcu_flavor(rsp)
  		rcu_cleanup_dying_idle_cpu(cpu, rsp);
  }
  #endif
d1d74d14e   Borislav Petkov   rcu: Expedite gra...
3830
3831
3832
3833
3834
3835
3836
  static int rcu_pm_notify(struct notifier_block *self,
  			 unsigned long action, void *hcpu)
  {
  	switch (action) {
  	case PM_HIBERNATION_PREPARE:
  	case PM_SUSPEND_PREPARE:
  		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
5afff48bd   Paul E. McKenney   rcu: Update from ...
3837
  			rcu_expedite_gp();
d1d74d14e   Borislav Petkov   rcu: Expedite gra...
3838
3839
3840
  		break;
  	case PM_POST_HIBERNATION:
  	case PM_POST_SUSPEND:
5afff48bd   Paul E. McKenney   rcu: Update from ...
3841
3842
  		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
  			rcu_unexpedite_gp();
d1d74d14e   Borislav Petkov   rcu: Expedite gra...
3843
3844
3845
3846
3847
3848
  		break;
  	default:
  		break;
  	}
  	return NOTIFY_OK;
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3849
  /*
9386c0b75   Paul E. McKenney   rcu: Rationalize ...
3850
   * Spawn the kthreads that handle each RCU flavor's grace periods.
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3851
3852
3853
3854
   */
  static int __init rcu_spawn_gp_kthread(void)
  {
  	unsigned long flags;
a94844b22   Paul E. McKenney   rcu: Optionally r...
3855
  	int kthread_prio_in = kthread_prio;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3856
3857
  	struct rcu_node *rnp;
  	struct rcu_state *rsp;
a94844b22   Paul E. McKenney   rcu: Optionally r...
3858
  	struct sched_param sp;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3859
  	struct task_struct *t;
a94844b22   Paul E. McKenney   rcu: Optionally r...
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
3870
  	/* Force priority into range. */
  	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
  		kthread_prio = 1;
  	else if (kthread_prio < 0)
  		kthread_prio = 0;
  	else if (kthread_prio > 99)
  		kthread_prio = 99;
  	if (kthread_prio != kthread_prio_in)
  		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d
  ",
  			 kthread_prio, kthread_prio_in);
9386c0b75   Paul E. McKenney   rcu: Rationalize ...
3871
  	rcu_scheduler_fully_active = 1;
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3872
  	for_each_rcu_flavor(rsp) {
a94844b22   Paul E. McKenney   rcu: Optionally r...
3873
  		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3874
3875
  		BUG_ON(IS_ERR(t));
  		rnp = rcu_get_root(rsp);
6cf100812   Paul E. McKenney   rcu: Add transiti...
3876
  		raw_spin_lock_irqsave_rcu_node(rnp, flags);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3877
  		rsp->gp_kthread = t;
a94844b22   Paul E. McKenney   rcu: Optionally r...
3878
3879
3880
3881
  		if (kthread_prio) {
  			sp.sched_priority = kthread_prio;
  			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
  		}
67c583a7d   Boqun Feng   RCU: Privatize rc...
3882
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
e11f13355   Peter Zijlstra   rcu: Move wakeup ...
3883
  		wake_up_process(t);
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3884
  	}
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
3885
  	rcu_spawn_nocb_kthreads();
9386c0b75   Paul E. McKenney   rcu: Rationalize ...
3886
  	rcu_spawn_boost_kthreads();
b3dbec76e   Paul E. McKenney   rcu: Move RCU gra...
3887
3888
3889
3890
3891
  	return 0;
  }
  early_initcall(rcu_spawn_gp_kthread);
  
  /*
90687fc3c   Paul E. McKenney   rcu: Narrow early...
3892
3893
3894
3895
3896
3897
3898
3899
   * This function is invoked towards the end of the scheduler's
   * initialization process.  Before this is called, the idle task might
   * contain synchronous grace-period primitives (during which time, this idle
   * task is booting the system, and such primitives are no-ops).  After this
   * function is called, any synchronous grace-period primitives are run as
   * expedited, with the requesting task driving the grace period forward.
   * A later core_initcall() rcu_exp_runtime_mode() will switch to full
   * runtime RCU functionality.
bbad93798   Paul E. McKenney   rcu: slim down rc...
3900
3901
3902
3903
3904
   */
  void rcu_scheduler_starting(void)
  {
  	WARN_ON(num_online_cpus() != 1);
  	WARN_ON(nr_context_switches() > 0);
90687fc3c   Paul E. McKenney   rcu: Narrow early...
3905
3906
3907
  	rcu_test_sync_prims();
  	rcu_scheduler_active = RCU_SCHEDULER_INIT;
  	rcu_test_sync_prims();
bbad93798   Paul E. McKenney   rcu: slim down rc...
3908
3909
3910
  }
  
  /*
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3911
   * Compute the per-level fanout, either using the exact fanout specified
7fa270010   Paul E. McKenney   rcu: Convert CONF...
3912
   * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3913
   */
199977bff   Alexander Gordeev   rcu: Remove unnec...
3914
  static void __init rcu_init_levelspread(int *levelspread, const int *levelcnt)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3915
  {
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3916
  	int i;
7fa270010   Paul E. McKenney   rcu: Convert CONF...
3917
  	if (rcu_fanout_exact) {
199977bff   Alexander Gordeev   rcu: Remove unnec...
3918
  		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
662924057   Paul E. McKenney   rcu: Use IS_ENABL...
3919
  		for (i = rcu_num_lvls - 2; i >= 0; i--)
199977bff   Alexander Gordeev   rcu: Remove unnec...
3920
  			levelspread[i] = RCU_FANOUT;
662924057   Paul E. McKenney   rcu: Use IS_ENABL...
3921
3922
3923
3924
3925
3926
  	} else {
  		int ccur;
  		int cprv;
  
  		cprv = nr_cpu_ids;
  		for (i = rcu_num_lvls - 1; i >= 0; i--) {
199977bff   Alexander Gordeev   rcu: Remove unnec...
3927
3928
  			ccur = levelcnt[i];
  			levelspread[i] = (cprv + ccur - 1) / ccur;
662924057   Paul E. McKenney   rcu: Use IS_ENABL...
3929
3930
  			cprv = ccur;
  		}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3931
3932
  	}
  }
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3933
3934
3935
3936
  
  /*
   * Helper function for rcu_init() that initializes one rcu_state structure.
   */
a87f203e2   Paul E. McKenney   rcu: Eliminate un...
3937
  static void __init rcu_init_one(struct rcu_state *rsp)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3938
  {
cb0071023   Alexander Gordeev   rcu: Limit count ...
3939
3940
  	static const char * const buf[] = RCU_NODE_NAME_INIT;
  	static const char * const fqs[] = RCU_FQS_NAME_INIT;
3dc5dbe9a   Paul E. McKenney   rcu: Move lock_cl...
3941
3942
  	static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
  	static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
3943
  	static u8 fl_mask = 0x1;
199977bff   Alexander Gordeev   rcu: Remove unnec...
3944
3945
3946
  
  	int levelcnt[RCU_NUM_LVLS];		/* # nodes in each level. */
  	int levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3947
3948
3949
3950
  	int cpustride = 1;
  	int i;
  	int j;
  	struct rcu_node *rnp;
05b84aec4   Alexander Gordeev   rcu: Limit rcu_ca...
3951
  	BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
b6407e863   Paul E. McKenney   rcu: Give differe...
3952

3eaaaf6cd   Paul E. McKenney   rcu: Shut up spur...
3953
3954
3955
  	/* Silence gcc 4.8 false positive about array index out of range. */
  	if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
  		panic("rcu_init_one: rcu_num_lvls out of range");
4930521ae   Paul E. McKenney   rcu: Silence comp...
3956

64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3957
  	/* Initialize the level-tracking arrays. */
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3958
  	for (i = 0; i < rcu_num_lvls; i++)
199977bff   Alexander Gordeev   rcu: Remove unnec...
3959
  		levelcnt[i] = num_rcu_lvl[i];
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3960
  	for (i = 1; i < rcu_num_lvls; i++)
199977bff   Alexander Gordeev   rcu: Remove unnec...
3961
3962
  		rsp->level[i] = rsp->level[i - 1] + levelcnt[i - 1];
  	rcu_init_levelspread(levelspread, levelcnt);
4a81e8328   Paul E. McKenney   rcu: Reduce overh...
3963
3964
  	rsp->flavor_mask = fl_mask;
  	fl_mask <<= 1;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3965
3966
  
  	/* Initialize the elements themselves, starting from the leaves. */
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
3967
  	for (i = rcu_num_lvls - 1; i >= 0; i--) {
199977bff   Alexander Gordeev   rcu: Remove unnec...
3968
  		cpustride *= levelspread[i];
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3969
  		rnp = rsp->level[i];
199977bff   Alexander Gordeev   rcu: Remove unnec...
3970
  		for (j = 0; j < levelcnt[i]; j++, rnp++) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
3971
3972
  			raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
  			lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
b6407e863   Paul E. McKenney   rcu: Give differe...
3973
  						   &rcu_node_class[i], buf[i]);
394f2769a   Paul E. McKenney   rcu: Prevent forc...
3974
3975
3976
  			raw_spin_lock_init(&rnp->fqslock);
  			lockdep_set_class_and_name(&rnp->fqslock,
  						   &rcu_fqs_class[i], fqs[i]);
25d30cf42   Paul E. McKenney   rcu: Adjust for u...
3977
3978
  			rnp->gpnum = rsp->gpnum;
  			rnp->completed = rsp->completed;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3979
3980
3981
3982
  			rnp->qsmask = 0;
  			rnp->qsmaskinit = 0;
  			rnp->grplo = j * cpustride;
  			rnp->grphi = (j + 1) * cpustride - 1;
595f3900f   Himangi Saraogi   rcu: Replace NR_C...
3983
3984
  			if (rnp->grphi >= nr_cpu_ids)
  				rnp->grphi = nr_cpu_ids - 1;
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3985
3986
3987
3988
3989
  			if (i == 0) {
  				rnp->grpnum = 0;
  				rnp->grpmask = 0;
  				rnp->parent = NULL;
  			} else {
199977bff   Alexander Gordeev   rcu: Remove unnec...
3990
  				rnp->grpnum = j % levelspread[i - 1];
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3991
3992
  				rnp->grpmask = 1UL << rnp->grpnum;
  				rnp->parent = rsp->level[i - 1] +
199977bff   Alexander Gordeev   rcu: Remove unnec...
3993
  					      j / levelspread[i - 1];
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
3994
3995
  			}
  			rnp->level = i;
12f5f524c   Paul E. McKenney   rcu: merge TREE_P...
3996
  			INIT_LIST_HEAD(&rnp->blkd_tasks);
dae6e64d2   Paul E. McKenney   rcu: Introduce pr...
3997
  			rcu_init_one_nocb(rnp);
f6a12f34a   Paul E. McKenney   rcu: Enforce expe...
3998
3999
  			init_waitqueue_head(&rnp->exp_wq[0]);
  			init_waitqueue_head(&rnp->exp_wq[1]);
3b5f668e7   Paul E. McKenney   rcu: Overlap wake...
4000
4001
  			init_waitqueue_head(&rnp->exp_wq[2]);
  			init_waitqueue_head(&rnp->exp_wq[3]);
f6a12f34a   Paul E. McKenney   rcu: Enforce expe...
4002
  			spin_lock_init(&rnp->exp_lock);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
4003
4004
  		}
  	}
0c34029ab   Lai Jiangshan   rcu: move some co...
4005

abedf8e24   Paul Gortmaker   rcu: Use simple w...
4006
4007
  	init_swait_queue_head(&rsp->gp_wq);
  	init_swait_queue_head(&rsp->expedited_wq);
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4008
  	rnp = rsp->level[rcu_num_lvls - 1];
0c34029ab   Lai Jiangshan   rcu: move some co...
4009
  	for_each_possible_cpu(i) {
4a90a0681   Paul E. McKenney   rcu: permit disco...
4010
  		while (i > rnp->grphi)
0c34029ab   Lai Jiangshan   rcu: move some co...
4011
  			rnp++;
394f99a90   Lai Jiangshan   rcu: simplify the...
4012
  		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
0c34029ab   Lai Jiangshan   rcu: move some co...
4013
4014
  		rcu_boot_init_percpu_data(i, rsp);
  	}
6ce75a232   Paul E. McKenney   rcu: Introduce fo...
4015
  	list_add(&rsp->flavors, &rcu_struct_flavors);
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
4016
  }
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4017
4018
  /*
   * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4102adab9   Paul E. McKenney   rcu: Move RCU-rel...
4019
   * replace the definitions in tree.h because those are needed to size
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4020
4021
4022
4023
   * the ->node array in the rcu_state structure.
   */
  static void __init rcu_init_geometry(void)
  {
026ad2835   Paul E. McKenney   rcu: Drive quiesc...
4024
  	ulong d;
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4025
  	int i;
05b84aec4   Alexander Gordeev   rcu: Limit rcu_ca...
4026
  	int rcu_capacity[RCU_NUM_LVLS];
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4027

026ad2835   Paul E. McKenney   rcu: Drive quiesc...
4028
4029
4030
4031
4032
4033
4034
4035
4036
4037
4038
4039
  	/*
  	 * Initialize any unspecified boot parameters.
  	 * The default values of jiffies_till_first_fqs and
  	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
  	 * value, which is a function of HZ, then adding one for each
  	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
  	 */
  	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
  	if (jiffies_till_first_fqs == ULONG_MAX)
  		jiffies_till_first_fqs = d;
  	if (jiffies_till_next_fqs == ULONG_MAX)
  		jiffies_till_next_fqs = d;
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4040
  	/* If the compile-time values are accurate, just leave. */
47d631af5   Paul E. McKenney   rcu: Make RCU abl...
4041
  	if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
b17c7035f   Paul E. McKenney   rcu: Shrink RCU b...
4042
  	    nr_cpu_ids == NR_CPUS)
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4043
  		return;
394790981   Paul E. McKenney   rcu: Let the worl...
4044
4045
4046
  	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d
  ",
  		rcu_fanout_leaf, nr_cpu_ids);
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4047
4048
  
  	/*
ee968ac61   Paul E. McKenney   rcu: Eliminate pa...
4049
4050
4051
4052
  	 * The boot-time rcu_fanout_leaf parameter must be at least two
  	 * and cannot exceed the number of bits in the rcu_node masks.
  	 * Complain and fall back to the compile-time values if this
  	 * limit is exceeded.
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4053
  	 */
ee968ac61   Paul E. McKenney   rcu: Eliminate pa...
4054
  	if (rcu_fanout_leaf < 2 ||
75cf15a4c   Alexander Gordeev   rcu: Panic if RCU...
4055
  	    rcu_fanout_leaf > sizeof(unsigned long) * 8) {
13bd64947   Paul E. McKenney   rcu: Reset rcu_fa...
4056
  		rcu_fanout_leaf = RCU_FANOUT_LEAF;
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4057
4058
4059
  		WARN_ON(1);
  		return;
  	}
75cf15a4c   Alexander Gordeev   rcu: Panic if RCU...
4060
  	/*
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4061
  	 * Compute number of nodes that can be handled an rcu_node tree
9618138b0   Alexander Gordeev   rcu: Simplify rcu...
4062
  	 * with the given number of levels.
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4063
  	 */
9618138b0   Alexander Gordeev   rcu: Simplify rcu...
4064
  	rcu_capacity[0] = rcu_fanout_leaf;
05b84aec4   Alexander Gordeev   rcu: Limit rcu_ca...
4065
  	for (i = 1; i < RCU_NUM_LVLS; i++)
05c5df31a   Paul E. McKenney   rcu: Make RCU abl...
4066
  		rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4067
4068
  
  	/*
75cf15a4c   Alexander Gordeev   rcu: Panic if RCU...
4069
  	 * The tree must be able to accommodate the configured number of CPUs.
ee968ac61   Paul E. McKenney   rcu: Eliminate pa...
4070
  	 * If this limit is exceeded, fall back to the compile-time values.
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4071
  	 */
ee968ac61   Paul E. McKenney   rcu: Eliminate pa...
4072
4073
4074
4075
4076
  	if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
  		rcu_fanout_leaf = RCU_FANOUT_LEAF;
  		WARN_ON(1);
  		return;
  	}
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4077

679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4078
  	/* Calculate the number of levels in the tree. */
9618138b0   Alexander Gordeev   rcu: Simplify rcu...
4079
  	for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4080
  	}
9618138b0   Alexander Gordeev   rcu: Simplify rcu...
4081
  	rcu_num_lvls = i + 1;
679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4082

f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4083
  	/* Calculate the number of rcu_nodes at each level of the tree. */
679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4084
  	for (i = 0; i < rcu_num_lvls; i++) {
9618138b0   Alexander Gordeev   rcu: Simplify rcu...
4085
  		int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4086
4087
  		num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
  	}
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4088
4089
4090
  
  	/* Calculate the total number of rcu_node structures. */
  	rcu_num_nodes = 0;
679f9858b   Alexander Gordeev   rcu: Cleanup rcu_...
4091
  	for (i = 0; i < rcu_num_lvls; i++)
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4092
  		rcu_num_nodes += num_rcu_lvl[i];
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4093
  }
a3dc2948c   Paul E. McKenney   rcu: Enable diagn...
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113
4114
4115
4116
4117
  /*
   * Dump out the structure of the rcu_node combining tree associated
   * with the rcu_state structure referenced by rsp.
   */
  static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
  {
  	int level = 0;
  	struct rcu_node *rnp;
  
  	pr_info("rcu_node tree layout dump
  ");
  	pr_info(" ");
  	rcu_for_each_node_breadth_first(rsp, rnp) {
  		if (rnp->level != level) {
  			pr_cont("
  ");
  			pr_info(" ");
  			level = rnp->level;
  		}
  		pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
  	}
  	pr_cont("
  ");
  }
9f680ab41   Paul E. McKenney   rcu: Eliminate un...
4118
  void __init rcu_init(void)
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
4119
  {
017c42613   Paul E. McKenney   rcu: Fix sparse w...
4120
  	int cpu;
9f680ab41   Paul E. McKenney   rcu: Eliminate un...
4121

476276781   Paul E. McKenney   rcu: Move early b...
4122
  	rcu_early_boot_tests();
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
4123
  	rcu_bootup_announce();
f885b7f2b   Paul E. McKenney   rcu: Control RCU_...
4124
  	rcu_init_geometry();
a87f203e2   Paul E. McKenney   rcu: Eliminate un...
4125
4126
  	rcu_init_one(&rcu_bh_state);
  	rcu_init_one(&rcu_sched_state);
a3dc2948c   Paul E. McKenney   rcu: Enable diagn...
4127
4128
  	if (dump_tree)
  		rcu_dump_rcu_node_tree(&rcu_sched_state);
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
4129
  	__rcu_init_preempt();
b5b393601   Jiang Fang   rcu: Fix spacing ...
4130
  	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
9f680ab41   Paul E. McKenney   rcu: Eliminate un...
4131
4132
4133
4134
4135
4136
  
  	/*
  	 * We don't need protection against CPU-hotplug here because
  	 * this is called early in boot, before either interrupts
  	 * or the scheduler are operational.
  	 */
d1d74d14e   Borislav Petkov   rcu: Expedite gra...
4137
  	pm_notifier(rcu_pm_notify, 0);
7ec99de36   Paul E. McKenney   rcu: Provide exac...
4138
  	for_each_online_cpu(cpu) {
4df837425   Thomas Gleixner   rcu: Convert rcut...
4139
  		rcutree_prepare_cpu(cpu);
7ec99de36   Paul E. McKenney   rcu: Provide exac...
4140
4141
  		rcu_cpu_starting(cpu);
  	}
64db4cfff   Paul E. McKenney   "Tree RCU": scala...
4142
  }
3549c2bc2   Paul E. McKenney   rcu: Move expedit...
4143
  #include "tree_exp.h"
4102adab9   Paul E. McKenney   rcu: Move RCU-rel...
4144
  #include "tree_plugin.h"