kernel/rcu/tree_plugin.h

  /* SPDX-License-Identifier: GPL-2.0+ */
  /*
   * Read-Copy Update mechanism for mutual exclusion (tree-based version)
   * Internal non-public definitions that provide either classic
   * or preemptible semantics.
   *
   * Copyright Red Hat, 2009
   * Copyright IBM Corporation, 2009
   *
   * Author: Ingo Molnar <mingo@elte.hu>
   *	   Paul E. McKenney <paulmck@linux.ibm.com>
   */
  #include "../locking/rtmutex_common.h"

  #ifdef CONFIG_RCU_NOCB_CPU
  static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
  static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
  #endif /* #ifdef CONFIG_RCU_NOCB_CPU */

  /*
   * Check the RCU kernel configuration parameters and print informative
   * messages about anything out of the ordinary.
   */
  static void __init rcu_bootup_announce_oddness(void)
  {
  	if (IS_ENABLED(CONFIG_RCU_TRACE))
  		pr_info("\tRCU event tracing is enabled.\n");
  	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
  	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
  		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
  			RCU_FANOUT);
  	if (rcu_fanout_exact)
  		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
  	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
  		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
  	if (IS_ENABLED(CONFIG_PROVE_RCU))
  		pr_info("\tRCU lockdep checking is enabled.\n");
  	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
  		pr_info("\tRCU strict (and thus non-scalable) grace periods enabled.\n");
  	if (RCU_NUM_LVLS >= 4)
  		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
  	if (RCU_FANOUT_LEAF != 16)
  		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
  			RCU_FANOUT_LEAF);
  	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
  		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
  			rcu_fanout_leaf);
  	if (nr_cpu_ids != NR_CPUS)
  		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
  #ifdef CONFIG_RCU_BOOST
  	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
  		kthread_prio, CONFIG_RCU_BOOST_DELAY);
  #endif
  	if (blimit != DEFAULT_RCU_BLIMIT)
  		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
  	if (qhimark != DEFAULT_RCU_QHIMARK)
  		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
  	if (qlowmark != DEFAULT_RCU_QLOMARK)
  		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
  	if (qovld != DEFAULT_RCU_QOVLD)
  		pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld);
  	if (jiffies_till_first_fqs != ULONG_MAX)
  		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
  	if (jiffies_till_next_fqs != ULONG_MAX)
  		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
  	if (jiffies_till_sched_qs != ULONG_MAX)
  		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
  	if (rcu_kick_kthreads)
  		pr_info("\tKick kthreads if too-long grace period.\n");
  	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
  		pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
  	if (gp_preinit_delay)
  		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
  	if (gp_init_delay)
  		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
  	if (gp_cleanup_delay)
  		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
  	if (!use_softirq)
  		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
  	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
  		pr_info("\tRCU debug extended QS entry/exit.\n");
  	rcupdate_announce_bootup_oddness();
  }

  #ifdef CONFIG_PREEMPT_RCU

  static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
  static void rcu_read_unlock_special(struct task_struct *t);

  /*
   * Tell them what RCU they are running.
   */
  static void __init rcu_bootup_announce(void)
  {
  	pr_info("Preemptible hierarchical RCU implementation.\n");
  	rcu_bootup_announce_oddness();
  }

  /* Flags for rcu_preempt_ctxt_queue() decision table. */
  #define RCU_GP_TASKS	0x8
  #define RCU_EXP_TASKS	0x4
  #define RCU_GP_BLKD	0x2
  #define RCU_EXP_BLKD	0x1
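
  /*
   * For example, blkd_state == RCU_GP_TASKS + RCU_EXP_BLKD in
   * rcu_preempt_ctxt_queue() below means that some task queued earlier is
   * already blocking the current normal grace period and that the newly
   * preempted task is blocking the current expedited grace period.
   */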
  
  /*
   * Queues a task preempted within an RCU-preempt read-side critical
   * section into the appropriate location within the ->blkd_tasks list,
   * depending on the states of any ongoing normal and expedited grace
   * periods.  The ->gp_tasks pointer indicates which element the normal
   * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
   * indicates which element the expedited grace period is waiting on (again,
   * NULL if none).  If a grace period is waiting on a given element in the
   * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
   * adding a task to the tail of the list blocks any grace period that is
   * already waiting on one of the elements.  In contrast, adding a task
   * to the head of the list won't block any grace period that is already
   * waiting on one of the elements.
   *
   * This queuing is imprecise, and can sometimes make an ongoing grace
   * period wait for a task that is not strictly speaking blocking it.
   * Given the choice, we needlessly block a normal grace period rather than
   * blocking an expedited grace period.
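   * For example, a task that blocks only the expedited grace period is
   * queued behind ->gp_tasks at the tail of the list, so an already-waiting
   * normal grace period can end up waiting on it as well.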
   *
   * Note that an endless sequence of expedited grace periods still cannot
   * indefinitely postpone a normal grace period.  Eventually, all of the
   * fixed number of preempted tasks blocking the normal grace period that are
   * not also blocking the expedited grace period will resume and complete
   * their RCU read-side critical sections.  At that point, the ->gp_tasks
   * pointer will equal the ->exp_tasks pointer, at which point the end of
   * the corresponding expedited grace period will also be the end of the
   * normal grace period.
   */
  static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
  	__releases(rnp->lock) /* But leaves rrupts disabled. */
  {
  	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
  			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
  			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
  			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
  	struct task_struct *t = current;

  	raw_lockdep_assert_held_rcu_node(rnp);
  	WARN_ON_ONCE(rdp->mynode != rnp);
  	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
  	/* RCU better not be waiting on newly onlined CPUs! */
  	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
  		     rdp->grpmask);

  	/*
  	 * Decide where to queue the newly blocked task.  In theory,
  	 * this could be an if-statement.  In practice, when I tried
  	 * that, it was quite messy.
  	 */
  	switch (blkd_state) {
  	case 0:
  	case                RCU_EXP_TASKS:
  	case                RCU_EXP_TASKS + RCU_GP_BLKD:
  	case RCU_GP_TASKS:
  	case RCU_GP_TASKS + RCU_EXP_TASKS:
  
  		/*
  		 * Blocking neither GP, or first task blocking the normal
  		 * GP but not blocking the already-waiting expedited GP.
  		 * Queue at the head of the list to avoid unnecessarily
  		 * blocking the already-waiting GPs.
  		 */
  		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
  		break;
  
  	case                                              RCU_EXP_BLKD:
  	case                                RCU_GP_BLKD:
  	case                                RCU_GP_BLKD + RCU_EXP_BLKD:
  	case RCU_GP_TASKS +                               RCU_EXP_BLKD:
  	case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
  	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
  
  		/*
  		 * First task arriving that blocks either GP, or first task
  		 * arriving that blocks the expedited GP (with the normal
  		 * GP already waiting), or a task arriving that blocks
  		 * both GPs with both GPs already waiting.  Queue at the
  		 * tail of the list to avoid any GP waiting on any of the
  		 * already queued tasks that are not blocking it.
  		 */
  		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
  		break;
  
  	case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
  	case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
  	case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:
  
  		/*
  		 * Second or subsequent task blocking the expedited GP.
  		 * The task either does not block the normal GP, or is the
  		 * first task blocking the normal GP.  Queue just after
  		 * the first task blocking the expedited GP.
  		 */
  		list_add(&t->rcu_node_entry, rnp->exp_tasks);
  		break;
  
  	case RCU_GP_TASKS +                 RCU_GP_BLKD:
  	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
  
  		/*
  		 * Second or subsequent task blocking the normal GP.
  		 * The task does not block the expedited GP. Queue just
  		 * after the first task blocking the normal GP.
  		 */
  		list_add(&t->rcu_node_entry, rnp->gp_tasks);
  		break;
  
  	default:
  
  		/* Yet another exercise in excessive paranoia. */
  		WARN_ON_ONCE(1);
  		break;
  	}
  
  	/*
  	 * We have now queued the task.  If it was the first one to
  	 * block either grace period, update the ->gp_tasks and/or
  	 * ->exp_tasks pointers, respectively, to reference the newly
  	 * blocked tasks.
  	 */
  	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
  		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
  		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
  	}
  	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
  		WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
  	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
  		     !(rnp->qsmask & rdp->grpmask));
  	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
  		     !(rnp->expmask & rdp->grpmask));
  	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

  	/*
  	 * Report the quiescent state for the expedited GP.  This expedited
  	 * GP should not be able to end until we report, so there should be
  	 * no need to check for a subsequent expedited GP.  (Though we are
  	 * still in a quiescent state in any case.)
  	 */
  	if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
  		rcu_report_exp_rdp(rdp);
  	else
  		WARN_ON_ONCE(rdp->exp_deferred_qs);
  }

  /*
   * Record a preemptible-RCU quiescent state for the specified CPU.
   * Note that this does not necessarily mean that the task currently running
   * on the CPU is in a quiescent state:  Instead, it means that the current
   * grace period need not wait on any RCU read-side critical section that
   * starts later on this CPU.  It also means that if the current task is
   * in an RCU read-side critical section, it has already added itself to
   * some leaf rcu_node structure's ->blkd_tasks list.  In addition to the
   * current task, there might be any number of other tasks blocked while
   * in an RCU read-side critical section.
   *
   * Callers to this function must disable preemption.
   */
  static void rcu_qs(void)
  {
  	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
  	if (__this_cpu_read(rcu_data.cpu_no_qs.s)) {
  		trace_rcu_grace_period(TPS("rcu_preempt"),
  				       __this_cpu_read(rcu_data.gp_seq),
  				       TPS("cpuqs"));
  		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
  		barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
  		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
  	}
  }
  
  /*
   * We have entered the scheduler, and the current task might soon be
   * context-switched away from.  If this task is in an RCU read-side
   * critical section, we will no longer be able to rely on the CPU to
   * record that fact, so we enqueue the task on the blkd_tasks list.
   * The task will dequeue itself when it exits the outermost enclosing
   * RCU read-side critical section.  Therefore, the current grace period
   * cannot be permitted to complete until the blkd_tasks list entries
   * predating the current grace period drain, in other words, until
   * rnp->gp_tasks becomes NULL.
   *
   * Caller must disable interrupts.
   */
  void rcu_note_context_switch(bool preempt)
  {
  	struct task_struct *t = current;
  	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
  	struct rcu_node *rnp;

  	trace_rcu_utilization(TPS("Start context switch"));
  	lockdep_assert_irqs_disabled();
  	WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
  	if (rcu_preempt_depth() > 0 &&
  	    !t->rcu_read_unlock_special.b.blocked) {
  
  		/* Possibly blocking in an RCU read-side critical section. */
  		rnp = rdp->mynode;
  		raw_spin_lock_rcu_node(rnp);
  		t->rcu_read_unlock_special.b.blocked = true;
  		t->rcu_blocked_node = rnp;
  
  		/*
  		 * Verify the CPU's sanity, trace the preemption, and
  		 * then queue the task as required based on the states
  		 * of any ongoing and expedited grace periods.
  		 */
  		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
  		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
  		trace_rcu_preempt_task(rcu_state.name,
  				       t->pid,
  				       (rnp->qsmask & rdp->grpmask)
  				       ? rnp->gp_seq
  				       : rcu_seq_snap(&rnp->gp_seq));
  		rcu_preempt_ctxt_queue(rnp, rdp);
  	} else {
  		rcu_preempt_deferred_qs(t);
  	}
  
  	/*
  	 * Either we were not in an RCU read-side critical section to
  	 * begin with, or we have now recorded that critical section
  	 * globally.  Either way, we can now note a quiescent state
  	 * for this CPU.  Again, if we were in an RCU read-side critical
  	 * section, and if that critical section was blocking the current
  	 * grace period, then the fact that the task has been enqueued
  	 * means that we continue to block the current grace period.
  	 */
  	rcu_qs();
  	if (rdp->exp_deferred_qs)
  		rcu_report_exp_rdp(rdp);
  	rcu_tasks_qs(current, preempt);
  	trace_rcu_utilization(TPS("End context switch"));
  }
  EXPORT_SYMBOL_GPL(rcu_note_context_switch);
  
  /*
   * Check for preempted RCU readers blocking the current grace period
   * for the specified rcu_node structure.  If the caller needs a reliable
   * answer, it must hold the rcu_node's ->lock.
   */
  static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
  {
  	return READ_ONCE(rnp->gp_tasks) != NULL;
  }

  /* limit value for ->rcu_read_lock_nesting. */
  #define RCU_NEST_PMAX (INT_MAX / 2)
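
  /* Wrappers for the current task's ->rcu_read_lock_nesting counter. */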
  static void rcu_preempt_read_enter(void)
  {
  	current->rcu_read_lock_nesting++;
  }

  static int rcu_preempt_read_exit(void)
  {
  	return --current->rcu_read_lock_nesting;
  }
  
  static void rcu_preempt_depth_set(int val)
  {
  	current->rcu_read_lock_nesting = val;
  }

  /*
   * Preemptible RCU implementation for rcu_read_lock().
   * Just increment ->rcu_read_lock_nesting, shared state will be updated
   * if we block.
   */
  void __rcu_read_lock(void)
  {
  	rcu_preempt_read_enter();
  	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
  		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
  	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
  		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
  	barrier();  /* critical section after entry code. */
  }
  EXPORT_SYMBOL_GPL(__rcu_read_lock);
  
  /*
   * Preemptible RCU implementation for rcu_read_unlock().
   * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
   * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
   * invoke rcu_read_unlock_special() to clean up after a context switch
   * in an RCU read-side critical section and other special cases.
   */
  void __rcu_read_unlock(void)
  {
  	struct task_struct *t = current;
  	if (rcu_preempt_read_exit() == 0) {
  		barrier();  /* critical section before exit code. */
  		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
  			rcu_read_unlock_special(t);
  	}
  	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
  		int rrln = rcu_preempt_depth();

  		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
  	}
  }
  EXPORT_SYMBOL_GPL(__rcu_read_unlock);
  
  /*
   * Advance a ->blkd_tasks-list pointer to the next entry, instead
   * returning NULL if at the end of the list.
   */
  static struct list_head *rcu_next_node_entry(struct task_struct *t,
  					     struct rcu_node *rnp)
  {
  	struct list_head *np;
  
  	np = t->rcu_node_entry.next;
  	if (np == &rnp->blkd_tasks)
  		np = NULL;
  	return np;
  }
  
  /*
   * Return true if the specified rcu_node structure has tasks that were
   * preempted within an RCU read-side critical section.
   */
  static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
  {
  	return !list_empty(&rnp->blkd_tasks);
  }
  
  /*
   * Report deferred quiescent states.  The deferral time can
   * be quite short, for example, in the case of the call from
   * rcu_read_unlock_special().
   */
  static void
  rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
  {
  	bool empty_exp;
  	bool empty_norm;
  	bool empty_exp_now;
  	struct list_head *np;
  	bool drop_boost_mutex = false;
  	struct rcu_data *rdp;
  	struct rcu_node *rnp;
  	union rcu_special special;

  	/*
  	 * If RCU core is waiting for this CPU to exit its critical section,
  	 * report the fact that it has exited.  Because irqs are disabled,
  	 * t->rcu_read_unlock_special cannot change.
  	 */
  	special = t->rcu_read_unlock_special;
  	rdp = this_cpu_ptr(&rcu_data);
  	if (!special.s && !rdp->exp_deferred_qs) {
  		local_irq_restore(flags);
  		return;
  	}
  	t->rcu_read_unlock_special.s = 0;
  	if (special.b.need_qs) {
  		if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
  			rcu_report_qs_rdp(rdp);
  			udelay(rcu_unlock_delay);
  		} else {
  			rcu_qs();
  		}
  	}

  	/*
  	 * Respond to a request by an expedited grace period for a
  	 * quiescent state from this CPU.  Note that requests from
  	 * tasks are handled when removing the task from the
  	 * blocked-tasks list below.
  	 */
  	if (rdp->exp_deferred_qs)
  		rcu_report_exp_rdp(rdp);

  	/* Clean up if blocked during RCU read-side critical section. */
  	if (special.b.blocked) {

  		/*
  		 * Remove this task from the list it blocked on.  The task
  		 * now remains queued on the rcu_node corresponding to the
  		 * CPU it first blocked on, so there is no longer any need
  		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
  		 */
  		rnp = t->rcu_blocked_node;
  		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
  		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
  		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
  		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
  		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
  			     (!empty_norm || rnp->qsmask));
  		empty_exp = sync_rcu_exp_done(rnp);
  		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
  		np = rcu_next_node_entry(t, rnp);
  		list_del_init(&t->rcu_node_entry);
  		t->rcu_blocked_node = NULL;
  		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
  						rnp->gp_seq, t->pid);
  		if (&t->rcu_node_entry == rnp->gp_tasks)
  			WRITE_ONCE(rnp->gp_tasks, np);
  		if (&t->rcu_node_entry == rnp->exp_tasks)
  			WRITE_ONCE(rnp->exp_tasks, np);
  		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
  			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
  			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
  			if (&t->rcu_node_entry == rnp->boost_tasks)
  				WRITE_ONCE(rnp->boost_tasks, np);
  		}
  
  		/*
  		 * If this was the last task on the current list, and if
  		 * we aren't waiting on any CPUs, report the quiescent state.
  		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
  		 * so we must take a snapshot of the expedited state.
  		 */
  		empty_exp_now = sync_rcu_exp_done(rnp);
  		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
  			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
  							 rnp->gp_seq,
  							 0, rnp->qsmask,
  							 rnp->level,
  							 rnp->grplo,
  							 rnp->grphi,
  							 !!rnp->gp_tasks);
  			rcu_report_unblock_qs_rnp(rnp, flags);
  		} else {
  			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  		}

  		/* Unboost if we were boosted. */
  		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
  			rt_mutex_futex_unlock(&rnp->boost_mtx);

  		/*
  		 * If this was the last task on the expedited lists,
  		 * then we need to report up the rcu_node hierarchy.
  		 */
  		if (!empty_exp && empty_exp_now)
  			rcu_report_exp_rnp(rnp, true);
  	} else {
  		local_irq_restore(flags);
  	}
  }

  /*
   * Is a deferred quiescent-state pending, and are we also not in
   * an RCU read-side critical section?  It is the caller's responsibility
   * to ensure it is otherwise safe to report any deferred quiescent
   * states.  The reason for this is that it is safe to report a
   * quiescent state during context switch even though preemption
   * is disabled.  This function cannot be expected to understand these
   * nuances, so the caller must handle them.
   */
  static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
  {
  	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
  		READ_ONCE(t->rcu_read_unlock_special.s)) &&
  	       rcu_preempt_depth() == 0;
  }
  
  /*
   * Report a deferred quiescent state if needed and safe to do so.
   * As with rcu_preempt_need_deferred_qs(), "safe" involves only
   * not being in an RCU read-side critical section.  The caller must
   * evaluate safety in terms of interrupt, softirq, and preemption
   * disabling.
   */
  static void rcu_preempt_deferred_qs(struct task_struct *t)
  {
  	unsigned long flags;
  
  	if (!rcu_preempt_need_deferred_qs(t))
  		return;
  	local_irq_save(flags);
  	rcu_preempt_deferred_qs_irqrestore(t, flags);
  }
  
  /*
   * Minimal handler to give the scheduler a chance to re-evaluate.
   */
  static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
  {
  	struct rcu_data *rdp;
  
  	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
  	rdp->defer_qs_iw_pending = false;
  }
  
  /*
   * Handle special cases during rcu_read_unlock(), such as needing to
   * notify RCU core processing or task having blocked during the RCU
   * read-side critical section.
   */
  static void rcu_read_unlock_special(struct task_struct *t)
  {
  	unsigned long flags;
  	bool preempt_bh_were_disabled =
  			!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
  	bool irqs_were_disabled;
  
  	/* NMI handlers cannot block and cannot safely manipulate state. */
  	if (in_nmi())
  		return;
  
  	local_irq_save(flags);
  	irqs_were_disabled = irqs_disabled_flags(flags);
  	if (preempt_bh_were_disabled || irqs_were_disabled) {
  		bool exp;
  		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
  		struct rcu_node *rnp = rdp->mynode;
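
  		// Set "exp" if an expedited grace period is waiting on this
  		// task or CPU, in which case the deferred quiescent state
  		// should be reported as soon as it is safe to do so.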
  		exp = (t->rcu_blocked_node &&
  		       READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
  		      (rdp->grpmask & READ_ONCE(rnp->expmask));
  		// Need to defer quiescent state until everything is enabled.
  		if (use_softirq && (in_irq() || (exp && !irqs_were_disabled))) {
  			// Using softirq, safe to awaken, and either the
  			// wakeup is free or there is an expedited GP.
  			raise_softirq_irqoff(RCU_SOFTIRQ);
  		} else {
  			// Enabling BH or preempt does reschedule, so...
  			// Also if no expediting, slow is OK.
  			// Plus nohz_full CPUs eventually get tick enabled.
  			set_tsk_need_resched(current);
  			set_preempt_need_resched();
  			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
  			    !rdp->defer_qs_iw_pending && exp) {
  				// Get scheduler to re-evaluate and call hooks.
  				// If !IRQ_WORK, FQS scan will eventually IPI.
  				init_irq_work(&rdp->defer_qs_iw,
  					      rcu_preempt_deferred_qs_handler);
  				rdp->defer_qs_iw_pending = true;
  				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
  			}
  		}
  		local_irq_restore(flags);
  		return;
  	}
  	rcu_preempt_deferred_qs_irqrestore(t, flags);
  }
  
  /*
   * Check that the list of blocked tasks for the newly completed grace
   * period is in fact empty.  It is a serious bug to complete a grace
   * period that still has RCU readers blocked!  This function must be
   * invoked -before- updating this rnp's ->gp_seq.
   *
   * Also, if there are blocked tasks on the list, they automatically
   * block the newly created grace period, so set up ->gp_tasks accordingly.
   */
  static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  {
  	struct task_struct *t;

  	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
  	raw_lockdep_assert_held_rcu_node(rnp);
  	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
  		dump_blkd_tasks(rnp, 10);
  	if (rcu_preempt_has_tasks(rnp) &&
  	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
  		WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
  		t = container_of(rnp->gp_tasks, struct task_struct,
  				 rcu_node_entry);
  		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
  						rnp->gp_seq, t->pid);
  	}
  	WARN_ON_ONCE(rnp->qsmask);
  }

  /*
   * Check for a quiescent state from the current CPU, including voluntary
   * context switches for Tasks RCU.  When a task blocks, the task is
   * recorded in the corresponding CPU's rcu_node structure, which is checked
   * elsewhere, hence this function need only check for quiescent states
   * related to the current CPU, not to those related to tasks.
   */
  static void rcu_flavor_sched_clock_irq(int user)
  {
  	struct task_struct *t = current;

  	if (user || rcu_is_cpu_rrupt_from_idle()) {
  		rcu_note_voluntary_context_switch(current);
  	}
  	if (rcu_preempt_depth() > 0 ||
  	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
  		/* No QS, force context switch if deferred. */
  		if (rcu_preempt_need_deferred_qs(t)) {
  			set_tsk_need_resched(t);
  			set_preempt_need_resched();
  		}
  	} else if (rcu_preempt_need_deferred_qs(t)) {
  		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
  		return;
  	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
  		rcu_qs(); /* Report immediate QS. */
  		return;
  	}

  	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
  	if (rcu_preempt_depth() > 0 &&
  	    __this_cpu_read(rcu_data.core_needs_qs) &&
  	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
  	    !t->rcu_read_unlock_special.b.need_qs &&
  	    time_after(jiffies, rcu_state.gp_start + HZ))
  		t->rcu_read_unlock_special.b.need_qs = true;
  }

  /*
   * Check for a task exiting while in a preemptible-RCU read-side
   * critical section, clean up if so.  No need to issue warnings, as
   * debug_check_no_locks_held() already does this if lockdep is enabled.
   * Besides, if this function does anything other than just immediately
   * return, there was a bug of some sort.  Spewing warnings from this
   * function is like as not to simply obscure important prior warnings.
   */
  void exit_rcu(void)
  {
  	struct task_struct *t = current;

  	if (unlikely(!list_empty(&current->rcu_node_entry))) {
  		rcu_preempt_depth_set(1);
  		barrier();
  		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
  	} else if (unlikely(rcu_preempt_depth())) {
  		rcu_preempt_depth_set(1);
  	} else {
  		return;
  	}
  	__rcu_read_unlock();
  	rcu_preempt_deferred_qs(current);
  }

  /*
   * Dump the blocked-tasks state, but limit the list dump to the
   * specified number of elements.
   */
  static void
  dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
  {
  	int cpu;
  	int i;
  	struct list_head *lhp;
  	bool onl;
  	struct rcu_data *rdp;
  	struct rcu_node *rnp1;

  	raw_lockdep_assert_held_rcu_node(rnp);
  	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
  		__func__, rnp->grplo, rnp->grphi, rnp->level,
  		(long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
  	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
  		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
  			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
  	pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
  		__func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
  		READ_ONCE(rnp->exp_tasks));
  	pr_info("%s: ->blkd_tasks", __func__);
  	i = 0;
  	list_for_each(lhp, &rnp->blkd_tasks) {
  		pr_cont(" %p", lhp);
  		if (++i >= ncheck)
  			break;
  	}
  	pr_cont("\n");
  	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
  		rdp = per_cpu_ptr(&rcu_data, cpu);
  		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
  		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
  			cpu, ".o"[onl],
  			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
  			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
  	}
  }

  #else /* #ifdef CONFIG_PREEMPT_RCU */
  
  /*
   * If strict grace periods are enabled, and if the calling
   * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
   * report that quiescent state and, if requested, spin for a bit.
   */
  void rcu_read_unlock_strict(void)
  {
  	struct rcu_data *rdp;
  
  	if (!IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ||
  	   irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
  		return;
  	rdp = this_cpu_ptr(&rcu_data);
  	rcu_report_qs_rdp(rdp);
  	udelay(rcu_unlock_delay);
  }
  EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);
  
  /*
   * Tell them what RCU they are running.
   */
  static void __init rcu_bootup_announce(void)
  {
  	pr_info("Hierarchical RCU implementation.\n");
  	rcu_bootup_announce_oddness();
  }

  /*
   * Note a quiescent state for PREEMPTION=n.  Because we do not need to know
   * how many quiescent states passed, just if there was at least one since
   * the start of the grace period, this just sets a flag.  The caller must
   * have disabled preemption.
   */
  static void rcu_qs(void)
  {
  	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
  	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
  		return;
  	trace_rcu_grace_period(TPS("rcu_sched"),
  			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
  	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
  	if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
  		return;
  	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, false);
  	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
  }

  /*
   * Register an urgently needed quiescent state.  If there is an
   * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
   * dyntick-idle quiescent state visible to other CPUs, which will in
   * some cases serve for expedited as well as normal grace periods.
   * Either way, register a lightweight quiescent state.
   */
  void rcu_all_qs(void)
  {
  	unsigned long flags;

  	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
  		return;
  	preempt_disable();
  	/* Load rcu_urgent_qs before other flags. */
  	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
  		preempt_enable();
  		return;
  	}
  	this_cpu_write(rcu_data.rcu_urgent_qs, false);
  	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
  		local_irq_save(flags);
  		rcu_momentary_dyntick_idle();
  		local_irq_restore(flags);
  	}
  	rcu_qs();
  	preempt_enable();
  }
  EXPORT_SYMBOL_GPL(rcu_all_qs);

  /*
   * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
   */
  void rcu_note_context_switch(bool preempt)
  {
  	trace_rcu_utilization(TPS("Start context switch"));
  	rcu_qs();
  	/* Load rcu_urgent_qs before other flags. */
  	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
  		goto out;
  	this_cpu_write(rcu_data.rcu_urgent_qs, false);
  	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
  		rcu_momentary_dyntick_idle();
  	rcu_tasks_qs(current, preempt);
  out:
  	trace_rcu_utilization(TPS("End context switch"));
  }
  EXPORT_SYMBOL_GPL(rcu_note_context_switch);

  /*
   * Because preemptible RCU does not exist, there are never any preempted
   * RCU readers.
   */
  static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
  {
  	return 0;
  }

  /*
   * Because there is no preemptible RCU, there can be no readers blocked.
   */
  static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
  {
  	return false;
  }

  /*
   * Because there is no preemptible RCU, there can be no deferred quiescent
   * states.
   */
  static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
  {
  	return false;
  }
  static void rcu_preempt_deferred_qs(struct task_struct *t) { }
  
  /*
6cc68793e   Paul E. McKenney   rcu: fix spelling
913
   * Because there is no preemptible RCU, there can be no readers blocked,
49e291266   Paul E. McKenney   rcu: Fix thinko, ...
914
915
   * so there is no need to check for blocked tasks.  Just check for
   * bogus qsmask values.
b0e165c03   Paul E. McKenney   rcu: Add debug ch...
916
   */
81ab59a3a   Paul E. McKenney   rcu: Remove rsp p...
917
  static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
b0e165c03   Paul E. McKenney   rcu: Add debug ch...
918
  {
49e291266   Paul E. McKenney   rcu: Fix thinko, ...
919
  	WARN_ON_ONCE(rnp->qsmask);
b0e165c03   Paul E. McKenney   rcu: Add debug ch...
920
  }
dd5d19baf   Paul E. McKenney   rcu: Create rcutr...
921
  /*
c98cac603   Paul E. McKenney   rcu: Rename rcu_c...
922
923
   * Check to see if this CPU is in a non-context-switch quiescent state,
   * namely user mode and idle loop.
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
924
   */
c98cac603   Paul E. McKenney   rcu: Rename rcu_c...
925
  static void rcu_flavor_sched_clock_irq(int user)
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
926
  {
45975c7d2   Paul E. McKenney   rcu: Define RCU-s...
927
  	if (user || rcu_is_cpu_rrupt_from_idle()) {
f41d911f8   Paul E. McKenney   rcu: Merge preemp...
928

45975c7d2   Paul E. McKenney   rcu: Define RCU-s...
929
930
931
932
933
934
935
936
937
938
939
940
941
942
  		/*
  		 * Get here if this CPU took its interrupt from user
  		 * mode or from the idle loop, and if this is not a
  		 * nested interrupt.  In this case, the CPU is in
  		 * a quiescent state, so note it.
  		 *
  		 * No memory barrier is required here because rcu_qs()
  		 * references only CPU-local variables that other CPUs
  		 * neither access nor modify, at least not while the
  		 * corresponding CPU is online.
  		 */
  
  		rcu_qs();
  	}
e74f4c456   Paul E. McKenney   rcu: Make hot-unp...
943
  }
e74f4c456   Paul E. McKenney   rcu: Make hot-unp...
944

2439b696c   Paul E. McKenney   rcu: Shrink TINY_...
945
946
947
948
949
950
951
  /*
   * Because preemptible RCU does not exist, tasks cannot possibly exit
   * while in preemptible RCU read-side critical sections.
   */
  void exit_rcu(void)
  {
  }
4bc8d5557   Paul E. McKenney   rcu: Add debuggin...
952
953
954
  /*
   * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
   */
577389423   Paul E. McKenney   rcu: Add CPU onli...
955
  static void
81ab59a3a   Paul E. McKenney   rcu: Remove rsp p...
956
  dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
4bc8d5557   Paul E. McKenney   rcu: Add debuggin...
957
958
959
  {
  	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
  }
28f6569ab   Pranith Kumar   rcu: Remove redun...
960
  #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
8bd93a2c5   Paul E. McKenney   rcu: Accelerate g...
961

48d07c04b   Sebastian Andrzej Siewior   rcu: Enable elimi...
962
963
964
965
966
  /*
   * If boosting, set rcuc kthreads to realtime priority.
   */
  static void rcu_cpu_kthread_setup(unsigned int cpu)
  {
27f4d2805   Paul E. McKenney   rcu: priority boo...
967
  #ifdef CONFIG_RCU_BOOST
48d07c04b   Sebastian Andrzej Siewior   rcu: Enable elimi...
968
  	struct sched_param sp;
27f4d2805   Paul E. McKenney   rcu: priority boo...
969

48d07c04b   Sebastian Andrzej Siewior   rcu: Enable elimi...
970
971
972
  	sp.sched_priority = kthread_prio;
  	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
  #endif /* #ifdef CONFIG_RCU_BOOST */
5d01bbd11   Thomas Gleixner   rcu: Yield simpler
973
  }
48d07c04b   Sebastian Andrzej Siewior   rcu: Enable elimi...
974
  #ifdef CONFIG_RCU_BOOST
27f4d2805   Paul E. McKenney   rcu: priority boo...
975
976
977
978
979
980
981
982
983
984
985
  /*
   * Carry out RCU priority boosting on the task indicated by ->exp_tasks
   * or ->boost_tasks, advancing the pointer to the next task in the
   * ->blkd_tasks list.
   *
   * Note that irqs must be enabled: boosting the task can block.
   * Returns 1 if there are more tasks needing to be boosted.
   */
  static int rcu_boost(struct rcu_node *rnp)
  {
  	unsigned long flags;
27f4d2805   Paul E. McKenney   rcu: priority boo...
986
987
  	struct task_struct *t;
  	struct list_head *tb;
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
988
989
  	if (READ_ONCE(rnp->exp_tasks) == NULL &&
  	    READ_ONCE(rnp->boost_tasks) == NULL)
27f4d2805   Paul E. McKenney   rcu: priority boo...
990
  		return 0;  /* Nothing left to boost. */
2a67e741b   Peter Zijlstra   rcu: Create trans...
991
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
27f4d2805   Paul E. McKenney   rcu: priority boo...
992
993
994
995
996
997
  
  	/*
  	 * Recheck under the lock: all tasks in need of boosting
  	 * might exit their RCU read-side critical sections on their own.
  	 */
  	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
998
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
27f4d2805   Paul E. McKenney   rcu: priority boo...
999
1000
1001
1002
1003
1004
1005
1006
1007
  		return 0;
  	}
  
  	/*
  	 * Preferentially boost tasks blocking expedited grace periods.
  	 * This cannot starve the normal grace periods because a second
  	 * expedited grace period must boost all blocked tasks, including
  	 * those blocking the pre-existing normal grace period.
  	 */
bec06785f   Paul E. McKenney   rcu: Remove obsol...
1008
  	if (rnp->exp_tasks != NULL)
27f4d2805   Paul E. McKenney   rcu: priority boo...
1009
  		tb = rnp->exp_tasks;
bec06785f   Paul E. McKenney   rcu: Remove obsol...
1010
  	else
27f4d2805   Paul E. McKenney   rcu: priority boo...
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
  		tb = rnp->boost_tasks;
  
  	/*
  	 * We boost task t by manufacturing an rt_mutex that appears to
  	 * be held by task t.  We leave a pointer to that rt_mutex where
  	 * task t can find it, and task t will release the mutex when it
  	 * exits its outermost RCU read-side critical section.  Then
  	 * simply acquiring this artificial rt_mutex will boost task
  	 * t's priority.  (Thanks to tglx for suggesting this approach!)
  	 *
  	 * Note that task t must acquire rnp->lock to remove itself from
  	 * the ->blkd_tasks list, which it will do from exit() if from
  	 * nowhere else.  We therefore are guaranteed that task t will
  	 * stay around at least until we drop rnp->lock.  Note that
  	 * rnp->lock also resolves races between our priority boosting
  	 * and task t's exiting its outermost RCU read-side critical
  	 * section.
  	 */
  	t = container_of(tb, struct task_struct, rcu_node_entry);
abaa93d9e   Paul E. McKenney   rcu: Simplify pri...
1030
  	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
67c583a7d   Boqun Feng   RCU: Privatize rc...
1031
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
abaa93d9e   Paul E. McKenney   rcu: Simplify pri...
1032
1033
1034
  	/* Lock only for side effect: boosts task t's priority. */
  	rt_mutex_lock(&rnp->boost_mtx);
  	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
27f4d2805   Paul E. McKenney   rcu: priority boo...
1035

7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1036
1037
  	return READ_ONCE(rnp->exp_tasks) != NULL ||
  	       READ_ONCE(rnp->boost_tasks) != NULL;
27f4d2805   Paul E. McKenney   rcu: priority boo...
1038
1039
1040
  }
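  /*
   * A loosely analogous userspace sketch of the priority-inheritance idea
   * used above: a POSIX PTHREAD_PRIO_INHERIT mutex boosts a low-priority
   * holder while a higher-priority thread blocks on it, much as acquiring
   * rnp->boost_mtx boosts the preempted reader that is made to appear to
   * hold it.  Illustration only (glibc PI mutexes, not the kernel's
   * rt_mutex proxy locking); setting the threads' real-time priorities,
   * which is what makes the boost observable, is omitted for brevity.
   */
  #include <pthread.h>
  #include <stddef.h>

  static pthread_mutex_t boost_mtx;

  static void *holder(void *unused)
  {
  	(void)unused;
  	pthread_mutex_lock(&boost_mtx);
  	/* ... long-running read-side work; inherits any waiter's priority ... */
  	pthread_mutex_unlock(&boost_mtx);
  	return NULL;
  }

  int main(void)
  {
  	pthread_mutexattr_t attr;
  	pthread_t t;

  	pthread_mutexattr_init(&attr);
  	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
  	pthread_mutex_init(&boost_mtx, &attr);
  	pthread_create(&t, NULL, holder, NULL);
  	pthread_mutex_lock(&boost_mtx);	/* blocking here boosts the holder */
  	pthread_mutex_unlock(&boost_mtx);
  	pthread_join(t, NULL);
  	return 0;
  }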
  
  /*
bc17ea109   Paul E. McKenney   rcu: Fix obsolete...
1041
   * Priority-boosting kthread, one per leaf rcu_node.
27f4d2805   Paul E. McKenney   rcu: priority boo...
1042
1043
1044
1045
1046
1047
   */
  static int rcu_boost_kthread(void *arg)
  {
  	struct rcu_node *rnp = (struct rcu_node *)arg;
  	int spincnt = 0;
  	int more2boost;
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1048
  	trace_rcu_utilization(TPS("Start boost kthread@init"));
27f4d2805   Paul E. McKenney   rcu: priority boo...
1049
  	for (;;) {
3ca3b0e2c   Paul E. McKenney   rcu: Add *_ONCE()...
1050
  		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1051
  		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
065a6db12   Paul E. McKenney   rcu: Add READ_ONC...
1052
1053
  		rcu_wait(READ_ONCE(rnp->boost_tasks) ||
  			 READ_ONCE(rnp->exp_tasks));
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1054
  		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
3ca3b0e2c   Paul E. McKenney   rcu: Add *_ONCE()...
1055
  		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
27f4d2805   Paul E. McKenney   rcu: priority boo...
1056
1057
1058
1059
1060
1061
  		more2boost = rcu_boost(rnp);
  		if (more2boost)
  			spincnt++;
  		else
  			spincnt = 0;
  		if (spincnt > 10) {
3ca3b0e2c   Paul E. McKenney   rcu: Add *_ONCE()...
1062
  			WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1063
  			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
a9352f72d   Paul E. McKenney   rcu: Priority-boo...
1064
  			schedule_timeout_idle(2);
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1065
  			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
27f4d2805   Paul E. McKenney   rcu: priority boo...
1066
1067
1068
  			spincnt = 0;
  		}
  	}
1217ed1ba   Paul E. McKenney   rcu: permit rcu_r...
1069
  	/* NOTREACHED */
f7f7bac9c   Steven Rostedt (Red Hat)   rcu: Have the RCU...
1070
  	trace_rcu_utilization(TPS("End boost kthread@notreached"));
27f4d2805   Paul E. McKenney   rcu: priority boo...
1071
1072
1073
1074
1075
1076
1077
1078
1079
  	return 0;
  }
  
  /*
   * Check to see if it is time to start boosting RCU readers that are
   * blocking the current grace period, and, if so, tell the per-rcu_node
   * kthread to start boosting them.  If there is an expedited grace
   * period in progress, it is always time to boost.
   *
b065a8535   Paul E. McKenney   rcu: Fix obsolete...
1080
1081
1082
   * The caller must hold rnp->lock, which this function releases.
   * The ->boost_kthread_task is immortal, so we don't need to worry
   * about it going away.
27f4d2805   Paul E. McKenney   rcu: priority boo...
1083
   */
1217ed1ba   Paul E. McKenney   rcu: permit rcu_r...
1084
  static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
615e41c60   Pranith Kumar   rcu: Fix a sparse...
1085
  	__releases(rnp->lock)
27f4d2805   Paul E. McKenney   rcu: priority boo...
1086
  {
a32e01ee6   Matthew Wilcox   rcu: Use wrapper ...
1087
  	raw_lockdep_assert_held_rcu_node(rnp);
0ea1f2ebe   Paul E. McKenney   rcu: Add boosting...
1088
  	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
67c583a7d   Boqun Feng   RCU: Privatize rc...
1089
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
27f4d2805   Paul E. McKenney   rcu: priority boo...
1090
  		return;
0ea1f2ebe   Paul E. McKenney   rcu: Add boosting...
1091
  	}
27f4d2805   Paul E. McKenney   rcu: priority boo...
1092
1093
1094
1095
  	if (rnp->exp_tasks != NULL ||
  	    (rnp->gp_tasks != NULL &&
  	     rnp->boost_tasks == NULL &&
  	     rnp->qsmask == 0 &&
7b2413111   Paul E. McKenney   rcu: Convert rcu_...
1096
  	     (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld))) {
27f4d2805   Paul E. McKenney   rcu: priority boo...
1097
  		if (rnp->exp_tasks == NULL)
5822b8126   Paul E. McKenney   rcu: Add WRITE_ON...
1098
  			WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
67c583a7d   Boqun Feng   RCU: Privatize rc...
1099
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
a2badefa8   Paul E. McKenney   rcu: Eliminate re...
1100
  		rcu_wake_cond(rnp->boost_kthread_task,
3ca3b0e2c   Paul E. McKenney   rcu: Add *_ONCE()...
1101
  			      READ_ONCE(rnp->boost_kthread_status));
1217ed1ba   Paul E. McKenney   rcu: permit rcu_r...
1102
  	} else {
67c583a7d   Boqun Feng   RCU: Privatize rc...
1103
  		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1217ed1ba   Paul E. McKenney   rcu: permit rcu_r...
1104
  	}
27f4d2805   Paul E. McKenney   rcu: priority boo...
1105
  }
0f962a5e7   Paul E. McKenney   rcu: Force per-rc...
1106
  /*
dff1672d9   Paul E. McKenney   rcu: Keep invokin...
1107
1108
1109
1110
1111
   * Is the current CPU running the RCU-callbacks kthread?
   * Caller must have preemption disabled.
   */
  static bool rcu_is_callbacks_kthread(void)
  {
37f62d7cf   Paul E. McKenney   rcu: Move rcu_cpu...
1112
  	return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;
dff1672d9   Paul E. McKenney   rcu: Keep invokin...
1113
  }
27f4d2805   Paul E. McKenney   rcu: priority boo...
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
  #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
  
  /*
   * Do priority-boost accounting for the start of a new grace period.
   */
  static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  {
  	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
  }
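  /*
   * A minimal userspace sketch of the RCU_BOOST_DELAY_JIFFIES arithmetic
   * above.  CONFIG_RCU_BOOST_DELAY=500 and HZ=250 are illustrative values
   * assumed for this example, not taken from this file.
   */
  #include <stdio.h>

  #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

  int main(void)
  {
  	int rcu_boost_delay_ms = 500;	/* hypothetical CONFIG_RCU_BOOST_DELAY */
  	int hz = 250;			/* hypothetical HZ */
  	int delay_jiffies = DIV_ROUND_UP(rcu_boost_delay_ms * hz, 1000);

  	/* 500 ms * 250 Hz / 1000 = 125 jiffies before boosting begins. */
  	printf("boost delay: %d jiffies\n", delay_jiffies);
  	return 0;
  }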
  
  /*
27f4d2805   Paul E. McKenney   rcu: priority boo...
1125
1126
1127
1128
   * Create an RCU-boost kthread for the specified node if one does not
   * already exist.  We only create this kthread for preemptible RCU.
   */
3545832fc   Byungchul Park   rcu: Change retur...
1129
  static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
27f4d2805   Paul E. McKenney   rcu: priority boo...
1130
  {
6dbfdc140   Paul E. McKenney   rcu: Remove rsp p...
1131
  	int rnp_index = rnp - rcu_get_root();
27f4d2805   Paul E. McKenney   rcu: priority boo...
1132
1133
1134
  	unsigned long flags;
  	struct sched_param sp;
  	struct task_struct *t;
6dbfdc140   Paul E. McKenney   rcu: Remove rsp p...
1135
  	if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
3545832fc   Byungchul Park   rcu: Change retur...
1136
  		return;
5d01bbd11   Thomas Gleixner   rcu: Yield simpler
1137

0aa04b055   Paul E. McKenney   rcu: Process offl...
1138
  	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
3545832fc   Byungchul Park   rcu: Change retur...
1139
  		return;
5d01bbd11   Thomas Gleixner   rcu: Yield simpler
1140

6dbfdc140   Paul E. McKenney   rcu: Remove rsp p...
1141
  	rcu_state.boost = 1;
3545832fc   Byungchul Park   rcu: Change retur...
1142

27f4d2805   Paul E. McKenney   rcu: priority boo...
1143
  	if (rnp->boost_kthread_task != NULL)
3545832fc   Byungchul Park   rcu: Change retur...
1144
  		return;
27f4d2805   Paul E. McKenney   rcu: priority boo...
1145
  	t = kthread_create(rcu_boost_kthread, (void *)rnp,
5b61b0baa   Mike Galbraith   rcu: Wire up RCU_...
1146
  			   "rcub/%d", rnp_index);
3545832fc   Byungchul Park   rcu: Change retur...
1147
1148
  	if (WARN_ON_ONCE(IS_ERR(t)))
  		return;
2a67e741b   Peter Zijlstra   rcu: Create trans...
1149
  	raw_spin_lock_irqsave_rcu_node(rnp, flags);
27f4d2805   Paul E. McKenney   rcu: priority boo...
1150
  	rnp->boost_kthread_task = t;
67c583a7d   Boqun Feng   RCU: Privatize rc...
1151
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
21871d7ef   Clark Williams   rcu: Unify boost ...
1152
  	sp.sched_priority = kthread_prio;
27f4d2805   Paul E. McKenney   rcu: priority boo...
1153
  	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
9a4327369   Paul E. McKenney   rcu: Simplify cur...
1154
  	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
27f4d2805   Paul E. McKenney   rcu: priority boo...
1155
  }
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1156
1157
1158
1159
1160
1161
1162
1163
1164
  /*
   * Set the per-rcu_node kthread's affinity to cover all CPUs that are
   * served by the rcu_node in question.  The CPU hotplug lock is still
   * held, so the value of rnp->qsmaskinit will be stable.
   *
   * We don't include outgoingcpu in the affinity set; use -1 if there is
   * no outgoing CPU.  If there are no CPUs left in the affinity set,
   * this function allows the kthread to execute on any CPU.
   */
5d01bbd11   Thomas Gleixner   rcu: Yield simpler
1165
  static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1166
  {
5d01bbd11   Thomas Gleixner   rcu: Yield simpler
1167
  	struct task_struct *t = rnp->boost_kthread_task;
0aa04b055   Paul E. McKenney   rcu: Process offl...
1168
  	unsigned long mask = rcu_rnp_online_cpus(rnp);
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1169
1170
  	cpumask_var_t cm;
  	int cpu;
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1171

5d01bbd11   Thomas Gleixner   rcu: Yield simpler
1172
  	if (!t)
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1173
  		return;
5d01bbd11   Thomas Gleixner   rcu: Yield simpler
1174
  	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1175
  		return;
bc75e9998   Mark Rutland   rcu: Correctly ha...
1176
1177
1178
  	for_each_leaf_node_possible_cpu(rnp, cpu)
  		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
  		    cpu != outgoingcpu)
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1179
  			cpumask_set_cpu(cpu, cm);
5d0b02497   Paul E. McKenney   rcu: Don't bother...
1180
  	if (cpumask_weight(cm) == 0)
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1181
  		cpumask_setall(cm);
5d01bbd11   Thomas Gleixner   rcu: Yield simpler
1182
  	set_cpus_allowed_ptr(t, cm);
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1183
1184
  	free_cpumask_var(cm);
  }
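  /*
   * A minimal userspace sketch of the affinity computation above, assuming
   * four online CPUs (0-3) with CPU 2 going offline.  It uses glibc's
   * cpu_set_t rather than the kernel's cpumask_var_t; the CPU numbers are
   * made up for illustration.
   */
  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>

  int main(void)
  {
  	cpu_set_t cm;
  	int outgoingcpu = 2;	/* -1 would mean "no CPU is going away" */
  	int cpu;

  	CPU_ZERO(&cm);
  	for (cpu = 0; cpu < 4; cpu++)
  		if (cpu != outgoingcpu)
  			CPU_SET(cpu, &cm);	/* skip the outgoing CPU */
  	if (CPU_COUNT(&cm) == 0)		/* nothing left: allow all */
  		for (cpu = 0; cpu < 4; cpu++)
  			CPU_SET(cpu, &cm);
  	/* sched_setaffinity() could then apply cm to the kthread's PID. */
  	printf("kthread may run on %d CPUs\n", CPU_COUNT(&cm));
  	return 0;
  }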
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1185
  /*
9386c0b75   Paul E. McKenney   rcu: Rationalize ...
1186
   * Spawn boost kthreads -- called as soon as the scheduler is running.
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1187
   */
9386c0b75   Paul E. McKenney   rcu: Rationalize ...
1188
  static void __init rcu_spawn_boost_kthreads(void)
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1189
  {
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1190
  	struct rcu_node *rnp;
aedf4ba98   Paul E. McKenney   rcu: Remove rsp p...
1191
  	rcu_for_each_leaf_node(rnp)
3545832fc   Byungchul Park   rcu: Change retur...
1192
  		rcu_spawn_one_boost_kthread(rnp);
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1193
  }
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1194

49fb4c629   Paul Gortmaker   rcu: delete __cpu...
1195
  static void rcu_prepare_kthreads(int cpu)
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1196
  {
da1df50d1   Paul E. McKenney   rcu: Remove rcu_s...
1197
  	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1198
1199
1200
  	struct rcu_node *rnp = rdp->mynode;
  
  	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
62ab70724   Paul E. McKenney   rcu: Use smp_hotp...
1201
  	if (rcu_scheduler_fully_active)
3545832fc   Byungchul Park   rcu: Change retur...
1202
  		rcu_spawn_one_boost_kthread(rnp);
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1203
  }
27f4d2805   Paul E. McKenney   rcu: priority boo...
1204
  #else /* #ifdef CONFIG_RCU_BOOST */
1217ed1ba   Paul E. McKenney   rcu: permit rcu_r...
1205
  static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
615e41c60   Pranith Kumar   rcu: Fix a sparse...
1206
  	__releases(rnp->lock)
27f4d2805   Paul E. McKenney   rcu: priority boo...
1207
  {
67c583a7d   Boqun Feng   RCU: Privatize rc...
1208
  	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
27f4d2805   Paul E. McKenney   rcu: priority boo...
1209
  }
dff1672d9   Paul E. McKenney   rcu: Keep invokin...
1210
1211
1212
1213
  static bool rcu_is_callbacks_kthread(void)
  {
  	return false;
  }
27f4d2805   Paul E. McKenney   rcu: priority boo...
1214
1215
1216
  static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  {
  }
5d01bbd11   Thomas Gleixner   rcu: Yield simpler
1217
  static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1218
1219
  {
  }
9386c0b75   Paul E. McKenney   rcu: Rationalize ...
1220
  static void __init rcu_spawn_boost_kthreads(void)
b0d304172   Paul E. McKenney   rcu: Prevent RCU ...
1221
  {
b0d304172   Paul E. McKenney   rcu: Prevent RCU ...
1222
  }
b0d304172   Paul E. McKenney   rcu: Prevent RCU ...
1223

49fb4c629   Paul Gortmaker   rcu: delete __cpu...
1224
  static void rcu_prepare_kthreads(int cpu)
f8b7fc6b5   Paul E. McKenney   rcu: Move RCU_BOO...
1225
1226
  {
  }
27f4d2805   Paul E. McKenney   rcu: priority boo...
1227
  #endif /* #else #ifdef CONFIG_RCU_BOOST */
8bd93a2c5   Paul E. McKenney   rcu: Accelerate g...
1228
1229
1230
  #if !defined(CONFIG_RCU_FAST_NO_HZ)
  
  /*
0bd55c693   Paul E. McKenney   rcu/nohz: Turn of...
1231
1232
1233
1234
   * Check to see if any future non-offloaded RCU-related work will need
   * to be done by the current CPU, even if none need be done immediately,
   * returning 1 if so.  This function is part of the RCU implementation;
   * it is -not- an exported member of the RCU API.
8bd93a2c5   Paul E. McKenney   rcu: Accelerate g...
1235
   *
0ae86a272   Paul E. McKenney   rcu: Clean up fla...
1236
1237
   * Because we do not have RCU_FAST_NO_HZ, just check whether or not this
   * CPU has RCU callbacks queued.
8bd93a2c5   Paul E. McKenney   rcu: Accelerate g...
1238
   */
c1ad348b4   Thomas Gleixner   tick: Nohz: Rewor...
1239
  int rcu_needs_cpu(u64 basemono, u64 *nextevt)
8bd93a2c5   Paul E. McKenney   rcu: Accelerate g...
1240
  {
c1ad348b4   Thomas Gleixner   tick: Nohz: Rewor...
1241
  	*nextevt = KTIME_MAX;
0bd55c693   Paul E. McKenney   rcu/nohz: Turn of...
1242
1243
  	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
  	       !rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist);
7cb924990   Paul E. McKenney   rcu: Permit dynti...
1244
1245
1246
1247
1248
1249
  }
  
  /*
   * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
   * after it.
   */
8fa7845df   Paul E. McKenney   rcu: Remove "cpu"...
1250
  static void rcu_cleanup_after_idle(void)
7cb924990   Paul E. McKenney   rcu: Permit dynti...
1251
1252
1253
1254
  {
  }
  
  /*
a858af287   Paul E. McKenney   rcu: Print schedu...
1255
   * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
aea1b35e2   Paul E. McKenney   rcu: Allow dyntic...
1256
1257
   * is nothing.
   */
198bbf812   Paul E. McKenney   rcu: Remove "cpu"...
1258
  static void rcu_prepare_for_idle(void)
aea1b35e2   Paul E. McKenney   rcu: Allow dyntic...
1259
1260
  {
  }
8bd93a2c5   Paul E. McKenney   rcu: Accelerate g...
1261
  #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
f23f7fa1c   Paul E. McKenney   rcu: Adaptive dyn...
1262
1263
1264
  /*
   * This code is invoked when a CPU goes idle, at which point we want
   * to have the CPU do everything required for RCU so that it can enter
77a40f970   Joel Fernandes (Google)   rcu: Remove kfree...
1265
   * the energy-efficient dyntick-idle mode.
f23f7fa1c   Paul E. McKenney   rcu: Adaptive dyn...
1266
   *
77a40f970   Joel Fernandes (Google)   rcu: Remove kfree...
1267
   * The following preprocessor symbol controls this:
f23f7fa1c   Paul E. McKenney   rcu: Adaptive dyn...
1268
   *
f23f7fa1c   Paul E. McKenney   rcu: Adaptive dyn...
1269
1270
1271
1272
1273
1274
1275
1276
   * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
   *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
   *	is sized to be roughly one RCU grace period.  Those energy-efficiency
   *	benchmarkers who might otherwise be tempted to set this to a large
   *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
   *	system.  And if you are -that- concerned about energy efficiency,
   *	just power the system down and be done with it!
   *
77a40f970   Joel Fernandes (Google)   rcu: Remove kfree...
1277
   * The value below works well in practice.  If future workloads require
f23f7fa1c   Paul E. McKenney   rcu: Adaptive dyn...
1278
1279
1280
   * adjustment, it can be converted into a kernel config parameter, though
   * making the state machine smarter might be a better option.
   */
e84c48ae3   Paul E. McKenney   rcu: Round FAST_N...
1281
  #define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
f23f7fa1c   Paul E. McKenney   rcu: Adaptive dyn...
1282

5e44ce35a   Paul E. McKenney   rcu: Export RCU_F...
1283
1284
  static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
  module_param(rcu_idle_gp_delay, int, 0644);
486e25934   Paul E. McKenney   rcu: Avoid waking...
1285

486e25934   Paul E. McKenney   rcu: Avoid waking...
1286
  /*
0ae86a272   Paul E. McKenney   rcu: Clean up fla...
1287
1288
1289
   * Try to advance callbacks on the current CPU, but only if it has been
   * a while since the last time we did so.  Afterwards, if there are any
   * callbacks ready for immediate invocation, return true.
486e25934   Paul E. McKenney   rcu: Avoid waking...
1290
   */
f1f399d12   Paul E. McKenney   rcu: Optimize RCU...
1291
  static bool __maybe_unused rcu_try_advance_all_cbs(void)
486e25934   Paul E. McKenney   rcu: Avoid waking...
1292
  {
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1293
  	bool cbs_ready = false;
5998a75ad   Paul E. McKenney   rcu: Switch last ...
1294
  	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1295
  	struct rcu_node *rnp;
486e25934   Paul E. McKenney   rcu: Avoid waking...
1296

c229828ca   Paul E. McKenney   rcu: Throttle rcu...
1297
  	/* Exit early if we advanced recently. */
5998a75ad   Paul E. McKenney   rcu: Switch last ...
1298
  	if (jiffies == rdp->last_advance_all)
d0bc90fd3   Pranith Kumar   rcu: Return bool ...
1299
  		return false;
5998a75ad   Paul E. McKenney   rcu: Switch last ...
1300
  	rdp->last_advance_all = jiffies;
c229828ca   Paul E. McKenney   rcu: Throttle rcu...
1301

b97d23c51   Paul E. McKenney   rcu: Remove for_e...
1302
  	rnp = rdp->mynode;
486e25934   Paul E. McKenney   rcu: Avoid waking...
1303

b97d23c51   Paul E. McKenney   rcu: Remove for_e...
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
  	/*
  	 * Don't bother checking unless a grace period has
  	 * completed since we last checked and there are
  	 * callbacks not yet ready to invoke.
  	 */
  	if ((rcu_seq_completed_gp(rdp->gp_seq,
  				  rcu_seq_current(&rnp->gp_seq)) ||
  	     unlikely(READ_ONCE(rdp->gpwrap))) &&
  	    rcu_segcblist_pend_cbs(&rdp->cblist))
  		note_gp_changes(rdp);
  
  	if (rcu_segcblist_ready_cbs(&rdp->cblist))
  		cbs_ready = true;
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1317
  	return cbs_ready;
486e25934   Paul E. McKenney   rcu: Avoid waking...
1318
1319
1320
  }
  
  /*
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1321
1322
   * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
   * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
77a40f970   Joel Fernandes (Google)   rcu: Remove kfree...
1323
   * caller what to set the timeout to.
aa9b16306   Paul E. McKenney   rcu: Precompute R...
1324
   *
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1325
   * The caller must have disabled interrupts.
aa9b16306   Paul E. McKenney   rcu: Precompute R...
1326
   */
c1ad348b4   Thomas Gleixner   tick: Nohz: Rewor...
1327
  int rcu_needs_cpu(u64 basemono, u64 *nextevt)
aa9b16306   Paul E. McKenney   rcu: Precompute R...
1328
  {
5998a75ad   Paul E. McKenney   rcu: Switch last ...
1329
  	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
c1ad348b4   Thomas Gleixner   tick: Nohz: Rewor...
1330
  	unsigned long dj;
aa9b16306   Paul E. McKenney   rcu: Precompute R...
1331

b04db8e19   Frederic Weisbecker   rcu: Use lockdep ...
1332
  	lockdep_assert_irqs_disabled();
3382adbc1   Paul E. McKenney   rcu: Eliminate a ...
1333

0bd55c693   Paul E. McKenney   rcu/nohz: Turn of...
1334
1335
1336
  	/* If no non-offloaded callbacks, RCU doesn't need the CPU. */
  	if (rcu_segcblist_empty(&rdp->cblist) ||
  	    rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist)) {
c1ad348b4   Thomas Gleixner   tick: Nohz: Rewor...
1337
  		*nextevt = KTIME_MAX;
aa9b16306   Paul E. McKenney   rcu: Precompute R...
1338
1339
  		return 0;
  	}
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1340
1341
1342
1343
1344
  
  	/* Attempt to advance callbacks. */
  	if (rcu_try_advance_all_cbs()) {
  		/* Some ready to invoke, so initiate later invocation. */
  		invoke_rcu_core();
aa9b16306   Paul E. McKenney   rcu: Precompute R...
1345
1346
  		return 1;
  	}
5998a75ad   Paul E. McKenney   rcu: Switch last ...
1347
  	rdp->last_accelerate = jiffies;
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1348

77a40f970   Joel Fernandes (Google)   rcu: Remove kfree...
1349
1350
  	/* Request timer and round. */
  	dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies;
c1ad348b4   Thomas Gleixner   tick: Nohz: Rewor...
1351
  	*nextevt = basemono + dj * TICK_NSEC;
aa9b16306   Paul E. McKenney   rcu: Precompute R...
1352
1353
1354
1355
  	return 0;
  }
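  /*
   * A worked sketch of the timeout rounding above, assuming the default
   * rcu_idle_gp_delay of 4 and a few made-up jiffies values.  ROUND_UP()
   * below gives the same result as the kernel's round_up() for the
   * power-of-two alignment used here.
   */
  #include <stdio.h>

  #define ROUND_UP(x, y) ((((x) + (y) - 1) / (y)) * (y))

  int main(void)
  {
  	unsigned long delay = 4;	/* rcu_idle_gp_delay */
  	unsigned long jiffies_now[] = { 1000, 1001, 1003 };
  	int i;

  	for (i = 0; i < 3; i++) {
  		unsigned long j = jiffies_now[i];
  		unsigned long dj = ROUND_UP(delay + j, delay) - j;

  		/* dj is between delay and 2*delay - 1, so idle CPUs wake at
  		 * aligned times and can share the resulting grace period. */
  		printf("jiffies=%lu -> sleep %lu more jiffies\n", j, dj);
  	}
  	return 0;
  }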
  
  /*
77a40f970   Joel Fernandes (Google)   rcu: Remove kfree...
1356
1357
1358
1359
   * Prepare a CPU for idle from an RCU perspective.  The first major task is to
   * sense whether nohz mode has been enabled or disabled via sysfs.  The second
   * major task is to accelerate (that is, assign grace-period numbers to) any
   * recently arrived callbacks.
aea1b35e2   Paul E. McKenney   rcu: Allow dyntic...
1360
1361
   *
   * The caller must have disabled interrupts.
8bd93a2c5   Paul E. McKenney   rcu: Accelerate g...
1362
   */
198bbf812   Paul E. McKenney   rcu: Remove "cpu"...
1363
  static void rcu_prepare_for_idle(void)
8bd93a2c5   Paul E. McKenney   rcu: Accelerate g...
1364
  {
48a7639ce   Paul E. McKenney   rcu: Make callers...
1365
  	bool needwake;
0fd79e752   Paul E. McKenney   rcu: Switch ->tic...
1366
  	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1367
  	struct rcu_node *rnp;
9d2ad2430   Paul E. McKenney   rcu: Make RCU_FAS...
1368
  	int tne;
b04db8e19   Frederic Weisbecker   rcu: Use lockdep ...
1369
  	lockdep_assert_irqs_disabled();
ce5215c13   Paul E. McKenney   rcu/nocb: Use sep...
1370
  	if (rcu_segcblist_is_offloaded(&rdp->cblist))
3382adbc1   Paul E. McKenney   rcu: Eliminate a ...
1371
  		return;
9d2ad2430   Paul E. McKenney   rcu: Make RCU_FAS...
1372
  	/* Handle nohz enablement switches conservatively. */
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
1373
  	tne = READ_ONCE(tick_nohz_active);
0fd79e752   Paul E. McKenney   rcu: Switch ->tic...
1374
  	if (tne != rdp->tick_nohz_enabled_snap) {
260e1e4fd   Paul E. McKenney   rcu: Discard sepa...
1375
  		if (!rcu_segcblist_empty(&rdp->cblist))
9d2ad2430   Paul E. McKenney   rcu: Make RCU_FAS...
1376
  			invoke_rcu_core(); /* force nohz to see update. */
0fd79e752   Paul E. McKenney   rcu: Switch ->tic...
1377
  		rdp->tick_nohz_enabled_snap = tne;
9d2ad2430   Paul E. McKenney   rcu: Make RCU_FAS...
1378
1379
1380
1381
  		return;
  	}
  	if (!tne)
  		return;
f511fc624   Paul E. McKenney   rcu: Ensure that ...
1382

3084f2f80   Paul E. McKenney   rcu: Go dyntick-i...
1383
  	/*
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1384
1385
  	 * If we have not yet accelerated this jiffy, accelerate all
  	 * callbacks on this CPU.
3084f2f80   Paul E. McKenney   rcu: Go dyntick-i...
1386
  	 */
5998a75ad   Paul E. McKenney   rcu: Switch last ...
1387
  	if (rdp->last_accelerate == jiffies)
aea1b35e2   Paul E. McKenney   rcu: Allow dyntic...
1388
  		return;
5998a75ad   Paul E. McKenney   rcu: Switch last ...
1389
  	rdp->last_accelerate = jiffies;
b97d23c51   Paul E. McKenney   rcu: Remove for_e...
1390
  	if (rcu_segcblist_pend_cbs(&rdp->cblist)) {
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1391
  		rnp = rdp->mynode;
2a67e741b   Peter Zijlstra   rcu: Create trans...
1392
  		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
02f501423   Paul E. McKenney   rcu: Remove rsp p...
1393
  		needwake = rcu_accelerate_cbs(rnp, rdp);
67c583a7d   Boqun Feng   RCU: Privatize rc...
1394
  		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
48a7639ce   Paul E. McKenney   rcu: Make callers...
1395
  		if (needwake)
532c00c97   Paul E. McKenney   rcu: Remove rsp p...
1396
  			rcu_gp_kthread_wake();
77e38ed34   Paul E. McKenney   rcu: RCU_FAST_NO_...
1397
  	}
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1398
  }
3084f2f80   Paul E. McKenney   rcu: Go dyntick-i...
1399

c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1400
1401
1402
1403
1404
  /*
   * Clean up for exit from idle.  Attempt to advance callbacks based on
   * any grace periods that elapsed while the CPU was idle, and if any
   * callbacks are now ready to invoke, initiate invocation.
   */
8fa7845df   Paul E. McKenney   rcu: Remove "cpu"...
1405
  static void rcu_cleanup_after_idle(void)
c0f4dfd4f   Paul E. McKenney   rcu: Make RCU_FAS...
1406
  {
ce5215c13   Paul E. McKenney   rcu/nocb: Use sep...
1407
  	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
b04db8e19   Frederic Weisbecker   rcu: Use lockdep ...
1408
  	lockdep_assert_irqs_disabled();
ce5215c13   Paul E. McKenney   rcu/nocb: Use sep...
1409
  	if (rcu_segcblist_is_offloaded(&rdp->cblist))
aea1b35e2   Paul E. McKenney   rcu: Allow dyntic...
1410
  		return;
7a497c963   Paul E. McKenney   rcu: Remove redun...
1411
1412
  	if (rcu_try_advance_all_cbs())
  		invoke_rcu_core();
8bd93a2c5   Paul E. McKenney   rcu: Accelerate g...
1413
1414
1415
  }
  
  #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
a858af287   Paul E. McKenney   rcu: Print schedu...
1416

3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1417
1418
1419
1420
  #ifdef CONFIG_RCU_NOCB_CPU
  
  /*
   * Offload callback processing from the boot-time-specified set of CPUs
a9fefdb25   Paul E. McKenney   rcu: Update NOCB ...
1421
1422
1423
   * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
   * created that pull the callbacks from the corresponding CPU, wait for
   * a grace period to elapse, and invoke the callbacks.  These kthreads
6484fe54b   Paul E. McKenney   rcu/nocb: Update ...
1424
1425
1426
1427
   * are organized into GP kthreads, which manage incoming callbacks, wait for
   * grace periods, and awaken CB kthreads, and the CB kthreads, which only
   * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
   * do a wake_up() on their GP kthread when they insert a callback into any
a9fefdb25   Paul E. McKenney   rcu: Update NOCB ...
1428
1429
1430
   * empty list, unless the rcu_nocb_poll boot parameter has been specified,
   * in which case each kthread actively polls its CPU.  (Which isn't so great
   * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1431
1432
1433
1434
1435
   *
   * This is intended to be used in conjunction with Frederic Weisbecker's
   * adaptive-idle work, which would seriously reduce OS jitter on CPUs
   * running CPU-bound user-mode computations.
   *
a9fefdb25   Paul E. McKenney   rcu: Update NOCB ...
1436
1437
1438
   * Offloading of callbacks can also be used as an energy-efficiency
   * measure because CPUs with no RCU callbacks queued are more aggressive
   * about entering dyntick-idle mode.
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1439
   */
497e42600   Paul E. McKenney   rcu: Report error...
1440
1441
1442
1443
1444
1445
  /*
   * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
   * The string after the "rcu_nocbs=" is either "all" for all CPUs, or a
   * comma-separated list of CPUs and/or CPU ranges.  If an invalid list is
   * given, a warning is emitted and all CPUs are offloaded.
   */
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1446
1447
1448
  static int __init rcu_nocb_setup(char *str)
  {
  	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
da8739f23   Paul E. McKenney   rcu: Allow rcu_no...
1449
1450
1451
  	if (!strcasecmp(str, "all"))
  		cpumask_setall(rcu_nocb_mask);
  	else
497e42600   Paul E. McKenney   rcu: Report error...
1452
1453
1454
1455
1456
  		if (cpulist_parse(str, rcu_nocb_mask)) {
  			pr_warn("rcu_nocbs= bad CPU range, all CPUs set
  ");
  			cpumask_setall(rcu_nocb_mask);
  		}
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1457
1458
1459
  	return 1;
  }
  __setup("rcu_nocbs=", rcu_nocb_setup);
1b0048a44   Paul Gortmaker   rcu: Make rcu_noc...
1460
1461
  static int __init parse_rcu_nocb_poll(char *arg)
  {
5455a7f6a   Nicholas Mc Guire   rcu: Use true/fal...
1462
  	rcu_nocb_poll = true;
1b0048a44   Paul Gortmaker   rcu: Make rcu_noc...
1463
1464
1465
  	return 0;
  }
  early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
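  /*
   * Example usage (CPU numbers purely illustrative): booting with
   * "rcu_nocbs=1-7,15" offloads callbacks for CPUs 1-7 and 15, while
   * "rcu_nocbs=all" offloads every CPU.  Adding "rcu_nocb_poll" makes the
   * offload kthreads poll rather than wait to be awakened.
   */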
34ed62461   Paul E. McKenney   rcu: Remove restr...
1466
  /*
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1467
1468
1469
   * Don't bother bypassing ->cblist if the call_rcu() rate is low.
   * After all, the main point of bypassing is to avoid lock contention
   * on ->nocb_lock, which can only happen at high call_rcu() rates.
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1470
   */
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1471
1472
1473
1474
1475
1476
1477
1478
1479
  int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
  module_param(nocb_nobypass_lim_per_jiffy, int, 0);
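  /*
   * A worked sketch of the default limit above for a few illustrative HZ
   * values; in each case roughly 16000 call_rcu() invocations per second
   * per CPU may go straight to ->cblist before the bypass list is used.
   */
  #include <stdio.h>

  int main(void)
  {
  	int hz_values[] = { 100, 250, 1000 };	/* illustrative HZ settings */
  	int i;

  	for (i = 0; i < 3; i++) {
  		int hz = hz_values[i];
  		int lim = 16 * 1000 / hz;	/* nocb_nobypass_lim_per_jiffy */

  		printf("HZ=%d: %d direct enqueues per jiffy (%d/sec)\n",
  		       hz, lim, lim * hz);
  	}
  	return 0;
  }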
  
  /*
   * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
   * lock isn't immediately available, increment ->nocb_lock_contended to
   * flag the contention.
   */
  static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
9ced45480   Jules Irenge   rcu: Add missing ...
1480
  	__acquires(&rdp->nocb_bypass_lock)
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1481
  {
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
1482
  	lockdep_assert_irqs_disabled();
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1483
  	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
1484
1485
  		return;
  	atomic_inc(&rdp->nocb_lock_contended);
6aacd88d1   Paul E. McKenney   rcu/nocb: EXP Che...
1486
  	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
1487
  	smp_mb__after_atomic(); /* atomic_inc() before lock. */
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1488
  	raw_spin_lock(&rdp->nocb_bypass_lock);
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
  	smp_mb__before_atomic(); /* atomic_dec() after lock. */
  	atomic_dec(&rdp->nocb_lock_contended);
  }
  
  /*
   * Spinwait until the specified rcu_data structure's ->nocb_lock is
   * not contended.  Please note that this is extremely special-purpose,
   * relying on the fact that at most two kthreads and one CPU contend for
   * this lock, and also that the two kthreads are guaranteed to have frequent
   * grace-period-duration time intervals between successive acquisitions
   * of the lock.  This allows us to use an extremely simple throttling
   * mechanism, and further to apply it only to the CPU doing floods of
   * call_rcu() invocations.  Don't try this at home!
   */
  static void rcu_nocb_wait_contended(struct rcu_data *rdp)
  {
6aacd88d1   Paul E. McKenney   rcu/nocb: EXP Che...
1505
1506
  	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
  	while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
1507
  		cpu_relax();
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1508
1509
1510
  }
  
  /*
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
   * Conditionally acquire the specified rcu_data structure's
   * ->nocb_bypass_lock.
   */
  static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
  {
  	lockdep_assert_irqs_disabled();
  	return raw_spin_trylock(&rdp->nocb_bypass_lock);
  }
  
  /*
   * Release the specified rcu_data structure's ->nocb_bypass_lock.
   */
  static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
92c0b889f   Jules Irenge   rcu/nocb: Add mis...
1524
  	__releases(&rdp->nocb_bypass_lock)
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
  {
  	lockdep_assert_irqs_disabled();
  	raw_spin_unlock(&rdp->nocb_bypass_lock);
  }
  
  /*
   * Acquire the specified rcu_data structure's ->nocb_lock, but only
   * if it corresponds to a no-CBs CPU.
   */
  static void rcu_nocb_lock(struct rcu_data *rdp)
  {
  	lockdep_assert_irqs_disabled();
  	if (!rcu_segcblist_is_offloaded(&rdp->cblist))
  		return;
  	raw_spin_lock(&rdp->nocb_lock);
  }
  
  /*
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
   * Release the specified rcu_data structure's ->nocb_lock, but only
   * if it corresponds to a no-CBs CPU.
   */
  static void rcu_nocb_unlock(struct rcu_data *rdp)
  {
  	if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
  		lockdep_assert_irqs_disabled();
  		raw_spin_unlock(&rdp->nocb_lock);
  	}
  }
  
  /*
   * Release the specified rcu_data structure's ->nocb_lock and restore
   * interrupts, but only if it corresponds to a no-CBs CPU.
   */
  static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
  				       unsigned long flags)
  {
  	if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
  		lockdep_assert_irqs_disabled();
  		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
  	} else {
  		local_irq_restore(flags);
  	}
  }
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1568
1569
1570
1571
  /* Lockdep check that ->cblist may be safely accessed. */
  static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
  {
  	lockdep_assert_irqs_disabled();
13817dd58   Paul E. McKenney   rcu: Tighten rcu_...
1572
  	if (rcu_segcblist_is_offloaded(&rdp->cblist))
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1573
1574
  		lockdep_assert_held(&rdp->nocb_lock);
  }
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1575
  /*
0446be489   Paul E. McKenney   rcu: Abstract rcu...
1576
1577
   * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
   * grace period.
dae6e64d2   Paul E. McKenney   rcu: Introduce pr...
1578
   */
abedf8e24   Paul Gortmaker   rcu: Use simple w...
1579
  static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
dae6e64d2   Paul E. McKenney   rcu: Introduce pr...
1580
  {
abedf8e24   Paul Gortmaker   rcu: Use simple w...
1581
  	swake_up_all(sq);
dae6e64d2   Paul E. McKenney   rcu: Introduce pr...
1582
  }
abedf8e24   Paul Gortmaker   rcu: Use simple w...
1583
  static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
065bb78c5   Daniel Wagner   rcu: Do not call ...
1584
  {
e0da2374c   Paul E. McKenney   rcu: Move rcu_noc...
1585
  	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
065bb78c5   Daniel Wagner   rcu: Do not call ...
1586
  }
dae6e64d2   Paul E. McKenney   rcu: Introduce pr...
1587
  static void rcu_init_one_nocb(struct rcu_node *rnp)
34ed62461   Paul E. McKenney   rcu: Remove restr...
1588
  {
abedf8e24   Paul Gortmaker   rcu: Use simple w...
1589
1590
  	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
  	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
34ed62461   Paul E. McKenney   rcu: Remove restr...
1591
  }
24342c963   Liu Ping Fan   rcu: Fix incorrec...
1592
  /* Is the specified CPU a no-CBs CPU? */
d1e43fa5f   Frederic Weisbecker   nohz: Ensure full...
1593
  bool rcu_is_nocb_cpu(int cpu)
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1594
  {
84b12b752   Rakib Mullick   rcu: Remove have_...
1595
  	if (cpumask_available(rcu_nocb_mask))
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1596
1597
1598
1599
1600
  		return cpumask_test_cpu(cpu, rcu_nocb_mask);
  	return false;
  }
  
  /*
6484fe54b   Paul E. McKenney   rcu/nocb: Update ...
1601
   * Kick the GP kthread for this NOCB group.  Caller holds ->nocb_lock
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
1602
   * and this function releases it.
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1603
   */
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1604
  static void wake_nocb_gp(struct rcu_data *rdp, bool force,
5f675ba6e   Paul E. McKenney   rcu/nocb: Rename ...
1605
  			   unsigned long flags)
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
1606
  	__releases(rdp->nocb_lock)
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1607
  {
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1608
  	bool needwake = false;
5f675ba6e   Paul E. McKenney   rcu/nocb: Rename ...
1609
  	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1610

8be6e1b15   Paul E. McKenney   rcu: Use timer as...
1611
  	lockdep_assert_held(&rdp->nocb_lock);
5f675ba6e   Paul E. McKenney   rcu/nocb: Rename ...
1612
  	if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1613
1614
  		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
  				    TPS("AlreadyAwake"));
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
1615
  		rcu_nocb_unlock_irqrestore(rdp, flags);
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1616
  		return;
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
1617
  	}
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1618
1619
1620
1621
  	del_timer(&rdp->nocb_timer);
  	rcu_nocb_unlock_irqrestore(rdp, flags);
  	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
  	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1622
  		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1623
1624
  		needwake = true;
  		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1625
  	}
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1626
1627
1628
  	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
  	if (needwake)
  		wake_up_process(rdp_gp->nocb_gp_kthread);
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1629
1630
1631
  }
  
  /*
6484fe54b   Paul E. McKenney   rcu/nocb: Update ...
1632
1633
   * Arrange to wake the GP kthread for this NOCB group at some future
   * time when it is safe to do so.
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
1634
   */
0d52a6652   Paul E. McKenney   rcu/nocb: Rename ...
1635
1636
  static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
  			       const char *reason)
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
1637
  {
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
1638
1639
  	if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
  		mod_timer(&rdp->nocb_timer, jiffies + 1);
383e13328   Paul E. McKenney   rcu/nocb: Never d...
1640
1641
  	if (rdp->nocb_defer_wakeup < waketype)
  		WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
88d1bead8   Paul E. McKenney   rcu: Remove rcu_d...
1642
  	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
d7e299339   Paul E. McKenney   rcu: Make rcu_bar...
1643
1644
1645
  }
  
  /*
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
   * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
   * However, if there is a callback to be enqueued and if ->nocb_bypass
   * proves to be initially empty, just return false because the no-CB GP
   * kthread may need to be awakened in this case.
   *
   * Note that this function always returns true if rhp is NULL.
   */
  static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
  				     unsigned long j)
  {
  	struct rcu_cblist rcl;
  
  	WARN_ON_ONCE(!rcu_segcblist_is_offloaded(&rdp->cblist));
  	rcu_lockdep_assert_cblist_protected(rdp);
  	lockdep_assert_held(&rdp->nocb_bypass_lock);
  	if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
  		raw_spin_unlock(&rdp->nocb_bypass_lock);
  		return false;
  	}
  	/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
  	if (rhp)
  		rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
  	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
  	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
  	WRITE_ONCE(rdp->nocb_bypass_first, j);
  	rcu_nocb_bypass_unlock(rdp);
  	return true;
  }
  
  /*
   * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
   * However, if there is a callback to be enqueued and if ->nocb_bypass
   * proves to be initially empty, just return false because the no-CB GP
   * kthread may need to be awakened in this case.
   *
   * Note that this function always returns true if rhp is NULL.
   */
  static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
  				  unsigned long j)
  {
  	if (!rcu_segcblist_is_offloaded(&rdp->cblist))
  		return true;
  	rcu_lockdep_assert_cblist_protected(rdp);
  	rcu_nocb_bypass_lock(rdp);
  	return rcu_nocb_do_flush_bypass(rdp, rhp, j);
  }
  
  /*
   * If the ->nocb_bypass_lock is immediately available, flush the
   * ->nocb_bypass queue into ->cblist.
   */
  static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
  {
  	rcu_lockdep_assert_cblist_protected(rdp);
  	if (!rcu_segcblist_is_offloaded(&rdp->cblist) ||
  	    !rcu_nocb_bypass_trylock(rdp))
  		return;
  	WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
  }
  
  /*
   * See whether it is appropriate to use the ->nocb_bypass list in order
   * to control contention on ->nocb_lock.  A limited number of direct
   * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
   * is non-empty, further callbacks must be placed into ->nocb_bypass,
   * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
   * back to direct use of ->cblist.  However, ->nocb_bypass should not be
   * used if ->cblist is empty, because otherwise callbacks can be stranded
   * on ->nocb_bypass because we cannot count on the current CPU ever again
   * invoking call_rcu().  The general rule is that if ->nocb_bypass is
   * non-empty, the corresponding no-CBs grace-period kthread must not be
   * in an indefinite sleep state.
   *
   * Finally, it is not permitted to use the bypass during early boot,
   * as doing so would confuse the auto-initialization code.  Besides
   * which, there is no point in worrying about lock contention while
   * there is only one CPU in operation.
   */
  static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
  				bool *was_alldone, unsigned long flags)
  {
  	unsigned long c;
  	unsigned long cur_gp_seq;
  	unsigned long j = jiffies;
  	long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
  
  	if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
  		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
  		return false; /* Not offloaded, no bypassing. */
  	}
  	lockdep_assert_irqs_disabled();
  
  	// Don't use ->nocb_bypass during early boot.
  	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
  		rcu_nocb_lock(rdp);
  		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
  		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
  		return false;
  	}
  
  	// If we have advanced to a new jiffy, reset counts to allow
  	// moving back from ->nocb_bypass to ->cblist.
  	if (j == rdp->nocb_nobypass_last) {
  		c = rdp->nocb_nobypass_count + 1;
  	} else {
  		WRITE_ONCE(rdp->nocb_nobypass_last, j);
  		c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
  		if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
  				 nocb_nobypass_lim_per_jiffy))
  			c = 0;
  		else if (c > nocb_nobypass_lim_per_jiffy)
  			c = nocb_nobypass_lim_per_jiffy;
  	}
  	WRITE_ONCE(rdp->nocb_nobypass_count, c);
  
  	// If there hasn't yet been all that many ->cblist enqueues
  	// this jiffy, tell the caller to enqueue onto ->cblist.  But flush
  	// ->nocb_bypass first.
  	if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
  		rcu_nocb_lock(rdp);
  		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
  		if (*was_alldone)
  			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
  					    TPS("FirstQ"));
  		WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
  		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
  		return false; // Caller must enqueue the callback.
  	}
  
  	// If ->nocb_bypass has been used too long or is too full,
  	// flush ->nocb_bypass to ->cblist.
  	if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
  	    ncbs >= qhimark) {
  		rcu_nocb_lock(rdp);
  		if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
  			*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
  			if (*was_alldone)
  				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
  						    TPS("FirstQ"));
  			WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
  			return false; // Caller must enqueue the callback.
  		}
  		if (j != rdp->nocb_gp_adv_time &&
  		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
  		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
  			rcu_advance_cbs_nowake(rdp->mynode, rdp);
  			rdp->nocb_gp_adv_time = j;
  		}
  		rcu_nocb_unlock_irqrestore(rdp, flags);
  		return true; // Callback already enqueued.
  	}
  
  	// We need to use the bypass.
  	rcu_nocb_wait_contended(rdp);
  	rcu_nocb_bypass_lock(rdp);
  	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
  	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
  	rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
  	if (!ncbs) {
  		WRITE_ONCE(rdp->nocb_bypass_first, j);
  		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
  	}
  	rcu_nocb_bypass_unlock(rdp);
  	smp_mb(); /* Order enqueue before wake. */
  	if (ncbs) {
  		local_irq_restore(flags);
  	} else {
  		// No-CBs GP kthread might be indefinitely asleep, if so, wake.
  		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
  		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
  			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
  					    TPS("FirstBQwake"));
  			__call_rcu_nocb_wake(rdp, true, flags);
  		} else {
  			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
  					    TPS("FirstBQnoWake"));
  			rcu_nocb_unlock_irqrestore(rdp, flags);
  		}
  	}
  	return true; // Callback already enqueued.
  }
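  /*
   * A minimal userspace sketch of the per-jiffy rate-limiting counter
   * logic above.  The limit of 4 and the arrival pattern are made up for
   * illustration; the decay/clamp rules mirror rcu_nocb_try_bypass().
   */
  #include <stdio.h>

  int main(void)
  {
  	const unsigned long lim = 4;	/* stand-in for nocb_nobypass_lim_per_jiffy */
  	unsigned long count = 0, last = 0;
  	/* Jiffy at which each hypothetical call_rcu() arrives. */
  	unsigned long arrivals[] = { 1, 1, 1, 1, 1, 1, 2, 2, 3 };
  	int i;

  	for (i = 0; i < 9; i++) {
  		unsigned long j = arrivals[i];

  		if (j == last) {
  			count++;
  		} else {	/* new jiffy: decay by the per-jiffy limit */
  			last = j;
  			if (count < lim)
  				count = 0;
  			else if (count - lim > lim)
  				count = lim;
  			else
  				count -= lim;
  		}
  		printf("jiffy %lu: count=%lu -> %s\n", j, count,
  		       count < lim ? "enqueue directly" : "use bypass");
  	}
  	return 0;
  }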
  
  /*
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1829
1830
   * Awaken the no-CBs grace-period kthread if needed, either due to it
   * legitimately being asleep or due to overload conditions.
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1831
1832
1833
   *
   * If warranted, also wake up the kthread servicing this CPU's queues.
   */
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1834
1835
1836
  static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
  				 unsigned long flags)
  				 __releases(rdp->nocb_lock)
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1837
  {
296181d78   Paul E. McKenney   rcu/nocb: Reduce ...
1838
1839
  	unsigned long cur_gp_seq;
  	unsigned long j;
ce0a825e4   Paul E. McKenney   rcu/nocb: Make __...
1840
  	long len;
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1841
  	struct task_struct *t;
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1842
  	// If we are being polled or there is no kthread, just leave.
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
1843
  	t = READ_ONCE(rdp->nocb_gp_kthread);
25e03a74e   Paul E. McKenney   Merge branch 'gp....
1844
  	if (rcu_nocb_poll || !t) {
88d1bead8   Paul E. McKenney   rcu: Remove rcu_d...
1845
  		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
9261dd0da   Paul E. McKenney   rcu: Add tracing ...
1846
  				    TPS("WakeNotPoll"));
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1847
  		rcu_nocb_unlock_irqrestore(rdp, flags);
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1848
  		return;
9261dd0da   Paul E. McKenney   rcu: Add tracing ...
1849
  	}
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1850
1851
1852
  	// Need to actually do a wakeup.
  	len = rcu_segcblist_n_cbs(&rdp->cblist);
  	if (was_alldone) {
aeeacd9d8   Paul E. McKenney   rcu/nocb: Enable ...
1853
  		rdp->qlen_last_fqs_check = len;
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
1854
  		if (!irqs_disabled_flags(flags)) {
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1855
  			/* ... if queue was empty ... */
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1856
  			wake_nocb_gp(rdp, false, flags);
88d1bead8   Paul E. McKenney   rcu: Remove rcu_d...
1857
  			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
1858
1859
  					    TPS("WakeEmpty"));
  		} else {
0d52a6652   Paul E. McKenney   rcu/nocb: Rename ...
1860
1861
  			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
  					   TPS("WakeEmptyIsDeferred"));
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1862
  			rcu_nocb_unlock_irqrestore(rdp, flags);
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
1863
  		}
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1864
  	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1865
  		/* ... or if many callbacks queued. */
aeeacd9d8   Paul E. McKenney   rcu/nocb: Enable ...
1866
  		rdp->qlen_last_fqs_check = len;
296181d78   Paul E. McKenney   rcu/nocb: Reduce ...
1867
1868
1869
1870
  		j = jiffies;
  		if (j != rdp->nocb_gp_adv_time &&
  		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
  		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
faca5c250   Paul E. McKenney   rcu/nocb: Uncondi...
1871
  			rcu_advance_cbs_nowake(rdp->mynode, rdp);
296181d78   Paul E. McKenney   rcu/nocb: Reduce ...
1872
1873
  			rdp->nocb_gp_adv_time = j;
  		}
f48fe4c58   Paul E. McKenney   rcu/nocb: Don't w...
1874
1875
1876
1877
  		smp_mb(); /* Enqueue before timer_pending(). */
  		if ((rdp->nocb_cb_sleep ||
  		     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
  		    !timer_pending(&rdp->nocb_bypass_timer))
273f03406   Paul E. McKenney   rcu/nocb: Avoid s...
1878
1879
  			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
  					   TPS("WakeOvfIsDeferred"));
273f03406   Paul E. McKenney   rcu/nocb: Avoid s...
1880
  		rcu_nocb_unlock_irqrestore(rdp, flags);
9261dd0da   Paul E. McKenney   rcu: Add tracing ...
1881
  	} else {
88d1bead8   Paul E. McKenney   rcu: Remove rcu_d...
1882
  		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1883
  		rcu_nocb_unlock_irqrestore(rdp, flags);
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1884
1885
1886
  	}
  	return;
  }
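  /*
   * To summarize the wakeup policy above: no wakeup while polling or when
   * there is no GP kthread; an immediate or deferred "WakeEmpty" when the
   * queue was previously empty; a deferred, forced "WakeOvf" when more than
   * qhimark callbacks have accumulated since the last check; and otherwise
   * no wakeup at all ("WakeNot").
   */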
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1887
1888
1889
1890
1891
1892
1893
1894
  /* Wake up the no-CBs GP kthread to flush ->nocb_bypass. */
  static void do_nocb_bypass_wakeup_timer(struct timer_list *t)
  {
  	unsigned long flags;
  	struct rcu_data *rdp = from_timer(rdp, t, nocb_bypass_timer);
  
  	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
  	rcu_nocb_lock_irqsave(rdp, flags);
f48fe4c58   Paul E. McKenney   rcu/nocb: Don't w...
1895
  	smp_mb__after_spinlock(); /* Timer expire before wakeup. */
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1896
1897
  	__call_rcu_nocb_wake(rdp, true, flags);
  }
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
1898
  /*
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1899
1900
   * No-CBs GP kthreads come here to wait for additional callbacks to show up
   * or for grace periods to end.
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1901
   */
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
1902
  static void nocb_gp_wait(struct rcu_data *my_rdp)
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1903
  {
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1904
1905
  	bool bypass = false;
  	long bypass_ncbs;
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1906
1907
  	int __maybe_unused cpu = my_rdp->cpu;
  	unsigned long cur_gp_seq;
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
1908
  	unsigned long flags;
b8889c9c8   Dan Carpenter   rcu: Fix uninitia...
1909
  	bool gotcbs = false;
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1910
  	unsigned long j = jiffies;
969974e5c   Paul E. McKenney   rcu/nocb: Suppres...
1911
  	bool needwait_gp = false; // This prevents actual uninitialized use.
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1912
1913
  	bool needwake;
  	bool needwake_gp;
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1914
  	struct rcu_data *rdp;
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1915
  	struct rcu_node *rnp;
969974e5c   Paul E. McKenney   rcu/nocb: Suppres...
1916
  	unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
3d05031ae   Paul E. McKenney   rcu: Make nocb_gp...
1917
  	bool wasempty = false;
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1918
1919
  
  	/*
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1920
1921
1922
  	 * Each pass through the following loop checks for CBs and for the
  	 * nearest grace period (if any) to wait for next.  The CB kthreads
  	 * and the global grace-period kthread are awakened if needed.
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1923
  	 */
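  	/*
  	 * Note: the loop below walks this GP kthread's group via the
  	 * ->nocb_next_cb_rdp list, starting with the leader's own rcu_data.
  	 */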
4569c5ee9   Paul E. McKenney   rcu/nocb: Add a w...
1924
  	WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
58bf6f77c   Paul E. McKenney   rcu/nocb: Rename ...
1925
  	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
  		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
  		rcu_nocb_lock_irqsave(rdp, flags);
  		bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
  		if (bypass_ncbs &&
  		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
  		     bypass_ncbs > 2 * qhimark)) {
  			// Bypass full or old, so flush it.
  			(void)rcu_nocb_try_flush_bypass(rdp, j);
  			bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
  		} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
  			rcu_nocb_unlock_irqrestore(rdp, flags);
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1937
  			continue; /* No callbacks here, try next. */
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1938
1939
1940
1941
1942
1943
  		}
  		if (bypass_ncbs) {
  			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
  					    TPS("Bypass"));
  			bypass = true;
  		}
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1944
  		rnp = rdp->mynode;
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
  		if (bypass) {  // Avoid race with first bypass CB.
  			WRITE_ONCE(my_rdp->nocb_defer_wakeup,
  				   RCU_NOCB_WAKE_NOT);
  			del_timer(&my_rdp->nocb_timer);
  		}
  		// Advance callbacks if helpful and low contention.
  		needwake_gp = false;
  		if (!rcu_segcblist_restempty(&rdp->cblist,
  					     RCU_NEXT_READY_TAIL) ||
  		    (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
  		     rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
  			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
  			needwake_gp = rcu_advance_cbs(rnp, rdp);
3d05031ae   Paul E. McKenney   rcu: Make nocb_gp...
1958
1959
  			wasempty = rcu_segcblist_restempty(&rdp->cblist,
  							   RCU_NEXT_READY_TAIL);
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1960
1961
  			raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
  		}
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1962
  		// Need to wait on some grace period?
3d05031ae   Paul E. McKenney   rcu: Make nocb_gp...
1963
1964
  		WARN_ON_ONCE(wasempty &&
  			     !rcu_segcblist_restempty(&rdp->cblist,
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1965
  						      RCU_NEXT_READY_TAIL));
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1966
1967
1968
1969
1970
  		if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
  			if (!needwait_gp ||
  			    ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
  				wait_gp_seq = cur_gp_seq;
  			needwait_gp = true;
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1971
1972
  			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
  					    TPS("NeedWaitGP"));
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
1973
  		}
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1974
1975
1976
1977
1978
1979
  		if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
  			needwake = rdp->nocb_cb_sleep;
  			WRITE_ONCE(rdp->nocb_cb_sleep, false);
  			smp_mb(); /* CB invocation -after- GP end. */
  		} else {
  			needwake = false;
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
1980
  		}
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
1981
  		rcu_nocb_unlock_irqrestore(rdp, flags);
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1982
  		if (needwake) {
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
1983
  			swake_up_one(&rdp->nocb_cb_wq);
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1984
  			gotcbs = true;
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
1985
  		}
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1986
1987
1988
  		if (needwake_gp)
  			rcu_gp_kthread_wake();
  	}
f7a81b12d   Paul E. McKenney   rcu/nocb: Print n...
1989
1990
1991
  	my_rdp->nocb_gp_bypass = bypass;
  	my_rdp->nocb_gp_gp = needwait_gp;
  	my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
1992
1993
1994
1995
1996
1997
1998
  	if (bypass && !rcu_nocb_poll) {
  		// At least one child with non-empty ->nocb_bypass, so set
  		// timer in order to avoid stranding its callbacks.
  		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
  		mod_timer(&my_rdp->nocb_bypass_timer, j + 2);
  		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
  	}
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
1999
2000
2001
2002
  	if (rcu_nocb_poll) {
  		/* Polling, so trace if first poll in the series. */
  		if (gotcbs)
  			trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
f5ca34643   Paul E. McKenney   rcu: No-CBs-relat...
2003
  		schedule_timeout_idle(1);
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2004
2005
2006
2007
2008
  	} else if (!needwait_gp) {
  		/* Wait for callbacks to appear. */
  		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
  		swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
  				!READ_ONCE(my_rdp->nocb_gp_sleep));
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
2009
  		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
  	} else {
  		rnp = my_rdp->mynode;
  		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
  		swait_event_interruptible_exclusive(
  			rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
  			rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
  			!READ_ONCE(my_rdp->nocb_gp_sleep));
  		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
  	}
  	if (!rcu_nocb_poll) {
4fd8c5f15   Paul E. McKenney   rcu/nocb: Reduce ...
2020
  		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
2021
2022
  		if (bypass)
  			del_timer(&my_rdp->nocb_bypass_timer);
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2023
  		WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
4fd8c5f15   Paul E. McKenney   rcu/nocb: Reduce ...
2024
  		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2025
  	}
f7a81b12d   Paul E. McKenney   rcu/nocb: Print n...
2026
  	my_rdp->nocb_gp_seq = -1;
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2027
  	WARN_ON(signal_pending(current));
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2028
  }
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2029

12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
  /*
   * No-CBs grace-period-wait kthread.  There is one of these per group
   * of CPUs, but only once at least one CPU in that group has come online
   * at least once since boot.  This kthread checks for newly posted
   * callbacks from any of the CPUs it is responsible for, waits for a
   * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
   * that then have callback-invocation work to do.
   */
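  /*
   * In the process listing, each of these kthreads shows up as
   * "rcuog/<cpu>", where <cpu> is the group's leader CPU (see
   * rcu_spawn_one_nocb_kthread() below).
   */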
  static int rcu_nocb_gp_kthread(void *arg)
  {
  	struct rcu_data *rdp = arg;
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2041
  	for (;;) {
f7a81b12d   Paul E. McKenney   rcu/nocb: Print n...
2042
  		WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2043
  		nocb_gp_wait(rdp);
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2044
2045
  		cond_resched_tasks_rcu_qs();
  	}
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2046
  	return 0;
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2047
2048
2049
  }
  
  /*
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2050
2051
   * Invoke any ready callbacks from the corresponding no-CBs CPU,
   * then, if there are no more, wait for more to appear.
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2052
   */
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2053
  static void nocb_cb_wait(struct rcu_data *rdp)
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2054
  {
1d5a81c18   Paul E. McKenney   rcu/nocb: Reduce ...
2055
  	unsigned long cur_gp_seq;
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2056
2057
2058
2059
2060
2061
2062
2063
2064
2065
2066
  	unsigned long flags;
  	bool needwake_gp = false;
  	struct rcu_node *rnp = rdp->mynode;
  
  	local_irq_save(flags);
  	rcu_momentary_dyntick_idle();
  	local_irq_restore(flags);
  	local_bh_disable();
  	rcu_do_batch(rdp);
  	local_bh_enable();
  	lockdep_assert_irqs_enabled();
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
2067
  	rcu_nocb_lock_irqsave(rdp, flags);
1d5a81c18   Paul E. McKenney   rcu/nocb: Reduce ...
2068
2069
2070
  	if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
  	    rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
  	    raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
523bddd55   Paul E. McKenney   rcu/nocb: Reduce ...
2071
2072
2073
  		needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
  		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
  	}
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2074
  	if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
2075
  		rcu_nocb_unlock_irqrestore(rdp, flags);
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2076
2077
2078
2079
  		if (needwake_gp)
  			rcu_gp_kthread_wake();
  		return;
  	}
f7c9a9b66   Paul E. McKenney   rcu/nocb: Rename ...
2080
  	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2081
  	WRITE_ONCE(rdp->nocb_cb_sleep, true);
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
2082
  	rcu_nocb_unlock_irqrestore(rdp, flags);
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2083
2084
  	if (needwake_gp)
  		rcu_gp_kthread_wake();
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2085
  	swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2086
2087
2088
2089
  				 !READ_ONCE(rdp->nocb_cb_sleep));
  	if (!smp_load_acquire(&rdp->nocb_cb_sleep)) { /* VVV */
  		/* ^^^ Ensure CB invocation follows _sleep test. */
  		return;
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2090
  	}
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2091
2092
  	WARN_ON(signal_pending(current));
  	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2093
2094
2095
  }
  
  /*
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2096
2097
   * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
   * nocb_cb_wait() to do the dirty work.
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2098
   */
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2099
  static int rcu_nocb_cb_kthread(void *arg)
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2100
  {
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2101
  	struct rcu_data *rdp = arg;
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2102
2103
  	// Each pass through this loop does one callback batch, and,
  	// if there are no more ready callbacks, waits for them.
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2104
  	for (;;) {
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2105
2106
  		nocb_cb_wait(rdp);
  		cond_resched_tasks_rcu_qs();
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2107
2108
2109
  	}
  	return 0;
  }
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2110
  /* Is a deferred wakeup of rcu_nocb_kthread() required? */
9fdd3bc90   Paul E. McKenney   rcu: Break more c...
2111
  static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2112
  {
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2113
  	return READ_ONCE(rdp->nocb_defer_wakeup);
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2114
2115
2116
  }
  
  /* Do a deferred wakeup of rcu_nocb_kthread(). */
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
2117
  static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2118
  {
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
2119
  	unsigned long flags;
9fdd3bc90   Paul E. McKenney   rcu: Break more c...
2120
  	int ndw;
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
2121
  	rcu_nocb_lock_irqsave(rdp, flags);
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
2122
  	if (!rcu_nocb_need_deferred_wakeup(rdp)) {
81c0b3d72   Paul E. McKenney   rcu/nocb: Avoid -...
2123
  		rcu_nocb_unlock_irqrestore(rdp, flags);
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2124
  		return;
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
2125
  	}
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2126
  	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
511324e46   Paul E. McKenney   rcu: Use RCU_NOCB...
2127
  	WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2128
  	wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
88d1bead8   Paul E. McKenney   rcu: Remove rcu_d...
2129
  	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2130
  }
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
2131
  /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
fd30b717b   Kees Cook   rcu: Convert time...
2132
  static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
2133
  {
fd30b717b   Kees Cook   rcu: Convert time...
2134
2135
2136
  	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
  
  	do_nocb_deferred_wakeup_common(rdp);
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
  }
  
  /*
   * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
   * This means we do an inexact common-case check.  Note that if
   * we miss, ->nocb_timer will eventually clean things up.
   */
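  /*
   * The "inexact" part: ->nocb_defer_wakeup is read here without holding
   * ->nocb_lock; do_nocb_deferred_wakeup_common() re-checks it under the
   * lock before doing the actual wakeup.
   */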
  static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
  {
  	if (rcu_nocb_need_deferred_wakeup(rdp))
  		do_nocb_deferred_wakeup_common(rdp);
  }
f4579fc57   Paul E. McKenney   rcu: Fix attempt ...
2149
2150
2151
  void __init rcu_init_nohz(void)
  {
  	int cpu;
ef1262062   Paul E. McKenney   rcu: Don't alloca...
2152
  	bool need_rcu_nocb_mask = false;
e83e73f5b   Paul E. McKenney   rcu/nocb: Leave -...
2153
  	struct rcu_data *rdp;
f4579fc57   Paul E. McKenney   rcu: Fix attempt ...
2154

f4579fc57   Paul E. McKenney   rcu: Fix attempt ...
2155
2156
2157
2158
  #if defined(CONFIG_NO_HZ_FULL)
  	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
  		need_rcu_nocb_mask = true;
  #endif /* #if defined(CONFIG_NO_HZ_FULL) */
84b12b752   Rakib Mullick   rcu: Remove have_...
2159
  	if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
949cccdbe   Pranith Kumar   rcu: Check the re...
2160
2161
2162
2163
2164
  		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
  			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.
  ");
  			return;
  		}
f4579fc57   Paul E. McKenney   rcu: Fix attempt ...
2165
  	}
84b12b752   Rakib Mullick   rcu: Remove have_...
2166
  	if (!cpumask_available(rcu_nocb_mask))
f4579fc57   Paul E. McKenney   rcu: Fix attempt ...
2167
  		return;
f4579fc57   Paul E. McKenney   rcu: Fix attempt ...
2168
2169
2170
2171
2172
2173
  #if defined(CONFIG_NO_HZ_FULL)
  	if (tick_nohz_full_running)
  		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
  #endif /* #if defined(CONFIG_NO_HZ_FULL) */
  
  	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
ef1262062   Paul E. McKenney   rcu: Don't alloca...
2174
2175
  		pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.
  ");
f4579fc57   Paul E. McKenney   rcu: Fix attempt ...
2176
2177
2178
  		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
  			    rcu_nocb_mask);
  	}
3016611ee   Paul E. McKenney   rcu: Fix CPU offl...
2179
2180
2181
2182
2183
2184
2185
  	if (cpumask_empty(rcu_nocb_mask))
  		pr_info("\tOffload RCU callbacks from CPUs: (none).
  ");
  	else
  		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.
  ",
  			cpumask_pr_args(rcu_nocb_mask));
f4579fc57   Paul E. McKenney   rcu: Fix attempt ...
2186
2187
2188
  	if (rcu_nocb_poll)
  		pr_info("\tPoll for callbacks from no-CBs CPUs.
  ");
e83e73f5b   Paul E. McKenney   rcu/nocb: Leave -...
2189
2190
2191
2192
2193
2194
  	for_each_cpu(cpu, rcu_nocb_mask) {
  		rdp = per_cpu_ptr(&rcu_data, cpu);
  		if (rcu_segcblist_empty(&rdp->cblist))
  			rcu_segcblist_init(&rdp->cblist);
  		rcu_segcblist_offload(&rdp->cblist);
  	}
b97d23c51   Paul E. McKenney   rcu: Remove for_e...
2195
  	rcu_organize_nocb_kthreads();
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2196
  }
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2197
2198
2199
  /* Initialize per-rcu_data variables for no-CBs CPUs. */
  static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
  {
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2200
2201
  	init_swait_queue_head(&rdp->nocb_cb_wq);
  	init_swait_queue_head(&rdp->nocb_gp_wq);
8be6e1b15   Paul E. McKenney   rcu: Use timer as...
2202
  	raw_spin_lock_init(&rdp->nocb_lock);
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
2203
  	raw_spin_lock_init(&rdp->nocb_bypass_lock);
4fd8c5f15   Paul E. McKenney   rcu/nocb: Reduce ...
2204
  	raw_spin_lock_init(&rdp->nocb_gp_lock);
fd30b717b   Kees Cook   rcu: Convert time...
2205
  	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
2206
2207
  	timer_setup(&rdp->nocb_bypass_timer, do_nocb_bypass_wakeup_timer, 0);
  	rcu_cblist_init(&rdp->nocb_bypass);
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2208
  }
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2209
2210
  /*
   * If the specified CPU is a no-CBs CPU that does not already have its
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2211
2212
   * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
   * for this CPU's group has not yet been created, spawn it as well.
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2213
   */
4580b0541   Paul E. McKenney   rcu: Remove rsp p...
2214
  static void rcu_spawn_one_nocb_kthread(int cpu)
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2215
  {
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2216
2217
  	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
  	struct rcu_data *rdp_gp;
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2218
2219
2220
2221
2222
2223
  	struct task_struct *t;
  
  	/*
  	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
  	 * then nothing to do.
  	 */
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2224
  	if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2225
  		return;
6484fe54b   Paul E. McKenney   rcu/nocb: Update ...
2226
  	/* If we didn't spawn the GP kthread first, reorganize! */
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2227
2228
2229
2230
2231
2232
2233
2234
  	rdp_gp = rdp->nocb_gp_rdp;
  	if (!rdp_gp->nocb_gp_kthread) {
  		t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
  				"rcuog/%d", rdp_gp->cpu);
  		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior
  ", __func__))
  			return;
  		WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2235
  	}
0ae86a272   Paul E. McKenney   rcu: Clean up fla...
2236
  	/* Spawn the kthread for this CPU. */
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2237
  	t = kthread_run(rcu_nocb_cb_kthread, rdp,
4580b0541   Paul E. McKenney   rcu: Remove rsp p...
2238
  			"rcuo%c/%d", rcu_state.abbr, cpu);
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2239
2240
  	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior
  ", __func__))
9213784b4   Paul E. McKenney   rcu: Eliminate BU...
2241
  		return;
12f54c3a8   Paul E. McKenney   rcu/nocb: Provide...
2242
2243
  	WRITE_ONCE(rdp->nocb_cb_kthread, t);
  	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2244
2245
2246
2247
  }
  
  /*
   * If the specified CPU is a no-CBs CPU that does not already have its
ad368d15b   Paul E. McKenney   rcu: Rename and c...
2248
   * rcuo kthread, spawn it.
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2249
   */
ad368d15b   Paul E. McKenney   rcu: Rename and c...
2250
  static void rcu_spawn_cpu_nocb_kthread(int cpu)
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2251
  {
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2252
  	if (rcu_scheduler_fully_active)
b97d23c51   Paul E. McKenney   rcu: Remove for_e...
2253
  		rcu_spawn_one_nocb_kthread(cpu);
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
  }
  
  /*
   * Once the scheduler is running, spawn rcuo kthreads for all online
   * no-CBs CPUs.  This assumes that the early_initcall()s happen before
   * non-boot CPUs come online -- if this changes, we will need to add
   * some mutual exclusion.
   */
  static void __init rcu_spawn_nocb_kthreads(void)
  {
  	int cpu;
  
  	for_each_online_cpu(cpu)
ad368d15b   Paul E. McKenney   rcu: Rename and c...
2267
  		rcu_spawn_cpu_nocb_kthread(cpu);
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2268
  }
6484fe54b   Paul E. McKenney   rcu/nocb: Update ...
2269
  /* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
f7c612b00   Paul E. McKenney   rcu/nocb: Rename ...
2270
2271
  static int rcu_nocb_gp_stride = -1;
  module_param(rcu_nocb_gp_stride, int, 0444);
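  /*
   * Worked example of the -1 default (illustrative numbers only): with
   * nr_cpu_ids == 64, int_sqrt(64) == 8, so the stride becomes 64 / 8 == 8,
   * that is, one rcuog GP kthread per group of eight CPUs.
   */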
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2272
2273
  
  /*
6484fe54b   Paul E. McKenney   rcu/nocb: Update ...
2274
   * Initialize GP-CB relationships for all no-CBs CPUs.
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2275
   */
4580b0541   Paul E. McKenney   rcu: Remove rsp p...
2276
  static void __init rcu_organize_nocb_kthreads(void)
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2277
2278
  {
  	int cpu;
18cd8c93e   Paul E. McKenney   rcu/nocb: Print g...
2279
  	bool firsttime = true;
610dea36d   Stefan Reiter   rcu/nocb: Fix dum...
2280
2281
  	bool gotnocbs = false;
  	bool gotnocbscbs = true;
f7c612b00   Paul E. McKenney   rcu/nocb: Rename ...
2282
  	int ls = rcu_nocb_gp_stride;
6484fe54b   Paul E. McKenney   rcu/nocb: Update ...
2283
  	int nl = 0;  /* Next GP kthread. */
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2284
  	struct rcu_data *rdp;
0bdc33dae   Paul E. McKenney   rcu/nocb: Rename ...
2285
  	struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2286
  	struct rcu_data *rdp_prev = NULL;
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2287

84b12b752   Rakib Mullick   rcu: Remove have_...
2288
  	if (!cpumask_available(rcu_nocb_mask))
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2289
  		return;
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2290
  	if (ls == -1) {
9fcb09bdd   Paul E. McKenney   rcu/nocb: Round d...
2291
  		ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
f7c612b00   Paul E. McKenney   rcu/nocb: Rename ...
2292
  		rcu_nocb_gp_stride = ls;
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2293
2294
2295
  	}
  
  	/*
9831ce3bb   Paul E. McKenney   rcu: Fix comment ...
2296
2297
2298
  	 * Each pass through this loop sets up one rcu_data structure.
  	 * Should the corresponding CPU come online in the future, then
  	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2299
  	 */
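  	/*
  	 * Illustrative grouping (assuming ls == 8 and all CPUs offloaded):
  	 * CPUs 0-7 point their ->nocb_gp_rdp at CPU 0's rcu_data, CPUs 8-15
  	 * at CPU 8's, and so on, because nl advances to
  	 * DIV_ROUND_UP(cpu + 1, ls) * ls each time a new leader is chosen.
  	 */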
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2300
  	for_each_cpu(cpu, rcu_nocb_mask) {
da1df50d1   Paul E. McKenney   rcu: Remove rcu_s...
2301
  		rdp = per_cpu_ptr(&rcu_data, cpu);
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2302
  		if (rdp->cpu >= nl) {
6484fe54b   Paul E. McKenney   rcu/nocb: Update ...
2303
  			/* New GP kthread, set up for CBs & next GP. */
610dea36d   Stefan Reiter   rcu/nocb: Fix dum...
2304
  			gotnocbs = true;
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2305
  			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
58bf6f77c   Paul E. McKenney   rcu/nocb: Rename ...
2306
  			rdp->nocb_gp_rdp = rdp;
0bdc33dae   Paul E. McKenney   rcu/nocb: Rename ...
2307
  			rdp_gp = rdp;
610dea36d   Stefan Reiter   rcu/nocb: Fix dum...
2308
2309
2310
2311
2312
2313
2314
2315
2316
2317
  			if (dump_tree) {
  				if (!firsttime)
  					pr_cont("%s
  ", gotnocbscbs
  							? "" : " (self only)");
  				gotnocbscbs = false;
  				firsttime = false;
  				pr_alert("%s: No-CB GP kthread CPU %d:",
  					 __func__, cpu);
  			}
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2318
  		} else {
6484fe54b   Paul E. McKenney   rcu/nocb: Update ...
2319
  			/* Another CB kthread, link to previous GP kthread. */
610dea36d   Stefan Reiter   rcu/nocb: Fix dum...
2320
  			gotnocbscbs = true;
0bdc33dae   Paul E. McKenney   rcu/nocb: Rename ...
2321
  			rdp->nocb_gp_rdp = rdp_gp;
58bf6f77c   Paul E. McKenney   rcu/nocb: Rename ...
2322
  			rdp_prev->nocb_next_cb_rdp = rdp;
610dea36d   Stefan Reiter   rcu/nocb: Fix dum...
2323
2324
  			if (dump_tree)
  				pr_cont(" %d", cpu);
fbce7497e   Paul E. McKenney   rcu: Parallelize ...
2325
2326
  		}
  		rdp_prev = rdp;
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2327
  	}
610dea36d   Stefan Reiter   rcu/nocb: Fix dum...
2328
2329
2330
  	if (gotnocbs && dump_tree)
  		pr_cont("%s
  ", gotnocbscbs ? "" : " (self only)");
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2331
  }
5ab7ab836   Paul E. McKenney   rcutorture: Affin...
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341
  /*
   * Bind the current task to the offloaded CPUs.  If there are no offloaded
   * CPUs, leave the task unbound.  Splat if the bind attempt fails.
   */
  void rcu_bind_current_to_nocb(void)
  {
  	if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
  		WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
  }
  EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
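  /*
   * Illustrative use (hypothetical caller, not part of this file): a test
   * kthread that must avoid disturbing non-offloaded CPUs can call this
   * right after it starts running, for example:
   *
   *	static int my_nocb_bound_kthread(void *arg)
   *	{
   *		rcu_bind_current_to_nocb();
   *		while (!kthread_should_stop())
   *			schedule_timeout_interruptible(HZ);
   *		return 0;
   *	}
   */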
f7a81b12d   Paul E. McKenney   rcu/nocb: Print n...
2342
2343
2344
2345
2346
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
  /*
   * Dump out nocb grace-period kthread state for the specified rcu_data
   * structure.
   */
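  /*
   * Note on the single-character state flags below: an expression such as
   * "kK"[!!rdp->nocb_gp_kthread] indexes a two-character string literal,
   * printing the lowercase letter when the condition is false and the
   * uppercase letter when it is true.
   */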
  static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
  {
  	struct rcu_node *rnp = rdp->mynode;
  
  	pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu
  ",
  		rdp->cpu,
  		"kK"[!!rdp->nocb_gp_kthread],
  		"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
  		"dD"[!!rdp->nocb_defer_wakeup],
  		"tT"[timer_pending(&rdp->nocb_timer)],
  		"bB"[timer_pending(&rdp->nocb_bypass_timer)],
  		"sS"[!!rdp->nocb_gp_sleep],
  		".W"[swait_active(&rdp->nocb_gp_wq)],
  		".W"[swait_active(&rnp->nocb_gp_wq[0])],
  		".W"[swait_active(&rnp->nocb_gp_wq[1])],
  		".B"[!!rdp->nocb_gp_bypass],
  		".G"[!!rdp->nocb_gp_gp],
  		(long)rdp->nocb_gp_seq,
  		rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops));
  }
  
  /* Dump out nocb kthread state for the specified rcu_data structure. */
  static void show_rcu_nocb_state(struct rcu_data *rdp)
  {
  	struct rcu_segcblist *rsclp = &rdp->cblist;
  	bool waslocked;
  	bool wastimer;
  	bool wassleep;
  
  	if (rdp->nocb_gp_rdp == rdp)
  		show_rcu_nocb_gp_state(rdp);
  
  	pr_info("   CB %d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%c%c%c q%ld
  ",
  		rdp->cpu, rdp->nocb_gp_rdp->cpu,
  		"kK"[!!rdp->nocb_cb_kthread],
  		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
  		"cC"[!!atomic_read(&rdp->nocb_lock_contended)],
  		"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
  		"sS"[!!rdp->nocb_cb_sleep],
  		".W"[swait_active(&rdp->nocb_cb_wq)],
  		jiffies - rdp->nocb_bypass_first,
  		jiffies - rdp->nocb_nobypass_last,
  		rdp->nocb_nobypass_count,
  		".D"[rcu_segcblist_ready_cbs(rsclp)],
  		".W"[!rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)],
  		".R"[!rcu_segcblist_restempty(rsclp, RCU_WAIT_TAIL)],
  		".N"[!rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL)],
  		".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
  		rcu_segcblist_n_cbs(&rdp->cblist));
  
  	/* It is OK for GP kthreads to have GP state. */
  	if (rdp->nocb_gp_rdp == rdp)
  		return;
  
  	waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
2130c6b4f   Paul E. McKenney   nocb: Remove show...
2403
  	wastimer = timer_pending(&rdp->nocb_bypass_timer);
f7a81b12d   Paul E. McKenney   rcu/nocb: Print n...
2404
  	wassleep = swait_active(&rdp->nocb_gp_wq);
2130c6b4f   Paul E. McKenney   nocb: Remove show...
2405
  	if (!rdp->nocb_gp_sleep && !waslocked && !wastimer && !wassleep)
f7a81b12d   Paul E. McKenney   rcu/nocb: Print n...
2406
  		return;  /* Nothing untoward. */
e082c7b38   Paul E. McKenney   nocb: Clarify RCU...
2407
2408
  	pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c%c %c
  ",
f7a81b12d   Paul E. McKenney   rcu/nocb: Print n...
2409
2410
2411
2412
2413
2414
  		"lL"[waslocked],
  		"dD"[!!rdp->nocb_defer_wakeup],
  		"tT"[wastimer],
  		"sS"[!!rdp->nocb_gp_sleep],
  		".W"[wassleep]);
  }
34ed62461   Paul E. McKenney   rcu: Remove restr...
2415
  #else /* #ifdef CONFIG_RCU_NOCB_CPU */
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2416
2417
  /* No ->nocb_lock to acquire.  */
  static void rcu_nocb_lock(struct rcu_data *rdp)
d7e299339   Paul E. McKenney   rcu: Make rcu_bar...
2418
  {
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
  }
  
  /* No ->nocb_lock to release.  */
  static void rcu_nocb_unlock(struct rcu_data *rdp)
  {
  }
  
  /* No ->nocb_lock to release.  */
  static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
  				       unsigned long flags)
  {
  	local_irq_restore(flags);
d7e299339   Paul E. McKenney   rcu: Make rcu_bar...
2431
  }
d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
2432
2433
2434
2435
2436
  /* Lockdep check that ->cblist may be safely accessed. */
  static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
  {
  	lockdep_assert_irqs_disabled();
  }
abedf8e24   Paul Gortmaker   rcu: Use simple w...
2437
  static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2438
  {
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2439
  }
abedf8e24   Paul Gortmaker   rcu: Use simple w...
2440
  static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
065bb78c5   Daniel Wagner   rcu: Do not call ...
2441
2442
2443
  {
  	return NULL;
  }
dae6e64d2   Paul E. McKenney   rcu: Introduce pr...
2444
2445
2446
  static void rcu_init_one_nocb(struct rcu_node *rnp)
  {
  }
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2447

d1b222c6b   Paul E. McKenney   rcu/nocb: Add byp...
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
  static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
  				  unsigned long j)
  {
  	return true;
  }
  
  static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
  				bool *was_alldone, unsigned long flags)
  {
  	return false;
  }
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2459
2460
  static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
  				 unsigned long flags)
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2461
  {
5d6742b37   Paul E. McKenney   rcu/nocb: Use rcu...
2462
  	WARN_ON_ONCE(1);  /* Should be dead code! */
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2463
  }
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2464
2465
2466
  static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
  {
  }
9fdd3bc90   Paul E. McKenney   rcu: Break more c...
2467
  static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
96d3fd0d3   Paul E. McKenney   rcu: Break call_r...
2468
2469
2470
2471
2472
2473
2474
  {
  	return false;
  }
  
  static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
  {
  }
ad368d15b   Paul E. McKenney   rcu: Rename and c...
2475
  static void rcu_spawn_cpu_nocb_kthread(int cpu)
35ce7f29a   Paul E. McKenney   rcu: Create rcuo ...
2476
2477
2478
2479
  {
  }
  
  static void __init rcu_spawn_nocb_kthreads(void)
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2480
2481
  {
  }
f7a81b12d   Paul E. McKenney   rcu/nocb: Print n...
2482
2483
2484
  static void show_rcu_nocb_state(struct rcu_data *rdp)
  {
  }
3fbfbf7a3   Paul E. McKenney   rcu: Add callback...
2485
  #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
65d798f0f   Paul E. McKenney   rcu: Kick adaptiv...
2486
2487
  
  /*
a096932f0   Paul E. McKenney   rcu: Don't activa...
2488
2489
2490
2491
2492
2493
   * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
   * grace-period kthread will do force_quiescent_state() processing?
   * The idea is to avoid waking up RCU core processing on such a
   * CPU unless the grace period has extended for too long.
   *
   * This code relies on the fact that all NO_HZ_FULL CPUs are also
52e2bb958   Paul Bolle   rcu: Disambiguate...
2494
   * CONFIG_RCU_NOCB_CPU CPUs.
a096932f0   Paul E. McKenney   rcu: Don't activa...
2495
   */
4580b0541   Paul E. McKenney   rcu: Remove rsp p...
2496
  static bool rcu_nohz_full_cpu(void)
a096932f0   Paul E. McKenney   rcu: Don't activa...
2497
2498
2499
  {
  #ifdef CONFIG_NO_HZ_FULL
  	if (tick_nohz_full_cpu(smp_processor_id()) &&
de8e87305   Paul E. McKenney   rcu: Remove rsp p...
2500
  	    (!rcu_gp_in_progress() ||
e2f3ccfa6   Paul E. McKenney   rcu: Convert rcu_...
2501
  	     time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
5ce035fb7   Joe Perches   rcu: tree_plugin:...
2502
  		return true;
a096932f0   Paul E. McKenney   rcu: Don't activa...
2503
  #endif /* #ifdef CONFIG_NO_HZ_FULL */
5ce035fb7   Joe Perches   rcu: tree_plugin:...
2504
  	return false;
a096932f0   Paul E. McKenney   rcu: Don't activa...
2505
  }
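  /*
   * Put differently, rcu_nohz_full_cpu() asks that this nohz_full CPU be
   * left alone whenever no grace period is in progress or the current
   * grace period is less than roughly one second (HZ jiffies) old.
   */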
5057f55e5   Paul E. McKenney   rcu: Bind RCU gra...
2506
2507
  
  /*
265f5f28f   Paul E. McKenney   rcu: Update rcu_b...
2508
   * Bind the RCU grace-period kthreads to the housekeeping CPU.
5057f55e5   Paul E. McKenney   rcu: Bind RCU gra...
2509
2510
2511
   */
  static void rcu_bind_gp_kthread(void)
  {
c0f489d2c   Paul E. McKenney   rcu: Bind grace-p...
2512
  	if (!tick_nohz_full_enabled())
5057f55e5   Paul E. McKenney   rcu: Bind RCU gra...
2513
  		return;
de201559d   Frederic Weisbecker   sched/isolation: ...
2514
  	housekeeping_affine(current, HK_FLAG_RCU);
5057f55e5   Paul E. McKenney   rcu: Bind RCU gra...
2515
  }
176f8f7a5   Paul E. McKenney   rcu: Make TASKS_R...
2516
2517
  
  /* Record the current task on dyntick-idle entry. */
ff5c4f5ca   Thomas Gleixner   rcu/tree: Mark th...
2518
  static void noinstr rcu_dynticks_task_enter(void)
176f8f7a5   Paul E. McKenney   rcu: Make TASKS_R...
2519
2520
  {
  #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2521
  	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
176f8f7a5   Paul E. McKenney   rcu: Make TASKS_R...
2522
2523
2524
2525
  #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
  }
  
  /* Record no current task on dyntick-idle exit. */
ff5c4f5ca   Thomas Gleixner   rcu/tree: Mark th...
2526
  static void noinstr rcu_dynticks_task_exit(void)
176f8f7a5   Paul E. McKenney   rcu: Make TASKS_R...
2527
2528
  {
  #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
7d0ae8086   Paul E. McKenney   rcu: Convert ACCE...
2529
  	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
176f8f7a5   Paul E. McKenney   rcu: Make TASKS_R...
2530
2531
  #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
  }
7d0c9c50c   Paul E. McKenney   rcu-tasks: Avoid ...
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
  
  /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
  static void rcu_dynticks_task_trace_enter(void)
  {
  #ifdef CONFIG_TASKS_RCU_TRACE
  	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
  		current->trc_reader_special.b.need_mb = true;
  #endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
  }
  
  /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
  static void rcu_dynticks_task_trace_exit(void)
  {
  #ifdef CONFIG_TASKS_RCU_TRACE
  	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
  		current->trc_reader_special.b.need_mb = false;
  #endif /* #ifdef CONFIG_TASKS_RCU_TRACE */
  }