kernel/rcu/tree.h

  /*
   * Read-Copy Update mechanism for mutual exclusion (tree-based version)
   * Internal non-public definitions.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
   * along with this program; if not, you can access it online at
   * http://www.gnu.org/licenses/gpl-2.0.html.
   *
   * Copyright IBM Corporation, 2008
   *
   * Author: Ingo Molnar <mingo@elte.hu>
   *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
   */
  
  #include <linux/cache.h>
  #include <linux/spinlock.h>
  #include <linux/threads.h>
  #include <linux/cpumask.h>
  #include <linux/seqlock.h>
  #include <linux/swait.h>
  #include <linux/stop_machine.h>
  
  /*
   * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
   * CONFIG_RCU_FANOUT_LEAF.
   * In theory, it should be possible to add more levels straightforwardly.
   * In practice, this did work well going from three levels to four.
   * Of course, your mileage may vary.
   */

  #ifdef CONFIG_RCU_FANOUT
  #define RCU_FANOUT CONFIG_RCU_FANOUT
  #else /* #ifdef CONFIG_RCU_FANOUT */
  # ifdef CONFIG_64BIT
  # define RCU_FANOUT 64
  # else
  # define RCU_FANOUT 32
  # endif
  #endif /* #else #ifdef CONFIG_RCU_FANOUT */
  #ifdef CONFIG_RCU_FANOUT_LEAF
  #define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
  #else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
  # ifdef CONFIG_64BIT
  # define RCU_FANOUT_LEAF 64
  # else
  # define RCU_FANOUT_LEAF 32
  # endif
  #endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */
  
  #define RCU_FANOUT_1	      (RCU_FANOUT_LEAF)
  #define RCU_FANOUT_2	      (RCU_FANOUT_1 * RCU_FANOUT)
  #define RCU_FANOUT_3	      (RCU_FANOUT_2 * RCU_FANOUT)
  #define RCU_FANOUT_4	      (RCU_FANOUT_3 * RCU_FANOUT)
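/*
 * Worked example (editorial, not part of the original header): with the
 * 64-bit defaults RCU_FANOUT_LEAF = 64 and RCU_FANOUT = 64, these expand
 * to RCU_FANOUT_1 = 64, RCU_FANOUT_2 = 4096, RCU_FANOUT_3 = 262144, and
 * RCU_FANOUT_4 = 16777216.  NR_CPUS = 4096 therefore selects the
 * two-level shape below: one root plus DIV_ROUND_UP(4096, 64) = 64 leaves.
 */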

  #if NR_CPUS <= RCU_FANOUT_1
  #  define RCU_NUM_LVLS	      1
  #  define NUM_RCU_LVL_0	      1
  #  define NUM_RCU_NODES	      NUM_RCU_LVL_0
  #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
  #  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
  #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
  #elif NR_CPUS <= RCU_FANOUT_2
  #  define RCU_NUM_LVLS	      2
  #  define NUM_RCU_LVL_0	      1
  #  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
  #  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
  #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
  #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
  #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
  #elif NR_CPUS <= RCU_FANOUT_3
  #  define RCU_NUM_LVLS	      3
  #  define NUM_RCU_LVL_0	      1
  #  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
  #  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
  #  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
  #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
  #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
  #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
  #elif NR_CPUS <= RCU_FANOUT_4
  #  define RCU_NUM_LVLS	      4
  #  define NUM_RCU_LVL_0	      1
  #  define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
  #  define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
  #  define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
  #  define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
  #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
  #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
  #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
  #else
  # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
  #endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

  extern int rcu_num_lvls;
  extern int rcu_num_nodes;
  /*
   * Dynticks per-CPU state.
   */
  struct rcu_dynticks {
  	long long dynticks_nesting; /* Track irq/process nesting level. */
  				    /* Process level is worth LLONG_MAX/2. */
  	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
  	atomic_t dynticks;	    /* Even value for idle, else odd. */
  #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
  	long long dynticks_idle_nesting;
  				    /* irq/process nesting level from idle. */
  	atomic_t dynticks_idle;	    /* Even value for idle, else odd. */
  				    /*  "Idle" excludes userspace execution. */
  	unsigned long dynticks_idle_jiffies;
  				    /* End of last non-NMI non-idle period. */
  #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
  #ifdef CONFIG_RCU_FAST_NO_HZ
  	bool all_lazy;		    /* Are all CPU's CBs lazy? */
  	unsigned long nonlazy_posted;
  				    /* # times non-lazy CBs posted to CPU. */
  	unsigned long nonlazy_posted_snap;
  				    /* idle-period nonlazy_posted snapshot. */
  	unsigned long last_accelerate;
  				    /* Last jiffy CBs were accelerated. */
  	unsigned long last_advance_all;
  				    /* Last jiffy CBs were all advanced. */
  	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
  #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
  };
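/*
 * Editorial sketch (not in the original header): per the "Even value for
 * idle, else odd" convention above, the low bit of ->dynticks is enough
 * to test idleness from a snapshot.  rcu_dynticks_is_idle_example() is
 * hypothetical.
 */
static inline bool rcu_dynticks_is_idle_example(struct rcu_dynticks *rdtp)
{
	return !(atomic_read(&rdtp->dynticks) & 0x1);
}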
  /* RCU's kthread states for tracing. */
  #define RCU_KTHREAD_STOPPED  0
  #define RCU_KTHREAD_RUNNING  1
  #define RCU_KTHREAD_WAITING  2
  #define RCU_KTHREAD_OFFCPU   3
  #define RCU_KTHREAD_YIELDING 4
  #define RCU_KTHREAD_MAX      4

  /*
   * Definition for node within the RCU grace-period-detection hierarchy.
   */
  struct rcu_node {
  	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
  					/*  some rcu_state fields as well as */
  					/*  following. */
  	unsigned long gpnum;	/* Current grace period for this node. */
  				/*  This will either be equal to or one */
  				/*  behind the root rcu_node's gpnum. */
  	unsigned long completed; /* Last GP completed for this node. */
  				/*  This will either be equal to or one */
  				/*  behind the root rcu_node's gpnum. */
  	unsigned long qsmask;	/* CPUs or groups that need to switch in */
  				/*  order for current grace period to proceed.*/
  				/*  In leaf rcu_node, each bit corresponds to */
  				/*  an rcu_data structure, otherwise, each */
  				/*  bit corresponds to a child rcu_node */
  				/*  structure. */
  	unsigned long qsmaskinit;
  				/* Per-GP initial value for qsmask. */
  				/*  Initialized from ->qsmaskinitnext at the */
  				/*  beginning of each grace period. */
  	unsigned long qsmaskinitnext;
  				/* Online CPUs for next grace period. */
  	unsigned long expmask;	/* CPUs or groups that need to check in */
  				/*  to allow the current expedited GP */
  				/*  to complete. */
  	unsigned long expmaskinit;
  				/* Per-GP initial values for expmask. */
  				/*  Initialized from ->expmaskinitnext at the */
  				/*  beginning of each expedited GP. */
  	unsigned long expmaskinitnext;
  				/* Online CPUs for next expedited GP. */
  				/*  Any CPU that has ever been online will */
  				/*  have its bit set. */
  	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
  				/*  Only one bit will be set in this mask. */
  	int	grplo;		/* lowest-numbered CPU or group here. */
  	int	grphi;		/* highest-numbered CPU or group here. */
  	u8	grpnum;		/* CPU/group number for next level up. */
  	u8	level;		/* root is at level 0. */
  	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
  				/*  exit RCU read-side critical sections */
  				/*  before propagating offline up the */
  				/*  rcu_node tree? */
  	struct rcu_node *parent;
  	struct list_head blkd_tasks;
  				/* Tasks blocked in RCU read-side critical */
  				/*  section.  Tasks are placed at the head */
  				/*  of this list and age towards the tail. */
  	struct list_head *gp_tasks;
  				/* Pointer to the first task blocking the */
  				/*  current grace period, or NULL if there */
  				/*  is no such task. */
  	struct list_head *exp_tasks;
  				/* Pointer to the first task blocking the */
  				/*  current expedited grace period, or NULL */
  				/*  if there is no such task.  If there */
  				/*  is no current expedited grace period, */
				/*  then there can be no such task. */
  	struct list_head *boost_tasks;
  				/* Pointer to first task that needs to be */
  				/*  priority boosted, or NULL if no priority */
  				/*  boosting is needed for this rcu_node */
  				/*  structure.  If there are no tasks */
  				/*  queued on this rcu_node structure that */
  				/*  are blocking the current grace period, */
  				/*  there can be no such task. */
  	struct rt_mutex boost_mtx;
  				/* Used only for the priority-boosting */
  				/*  side effect, not as a lock. */
  	unsigned long boost_time;
  				/* When to start boosting (jiffies). */
  	struct task_struct *boost_kthread_task;
  				/* kthread that takes care of priority */
  				/*  boosting for this rcu_node structure. */
  	unsigned int boost_kthread_status;
  				/* State of boost_kthread_task for tracing. */
  	unsigned long n_tasks_boosted;
  				/* Total number of tasks boosted. */
  	unsigned long n_exp_boosts;
  				/* Number of tasks boosted for expedited GP. */
  	unsigned long n_normal_boosts;
  				/* Number of tasks boosted for normal GP. */
  	unsigned long n_balk_blkd_tasks;
  				/* Refused to boost: no blocked tasks. */
  	unsigned long n_balk_exp_gp_tasks;
  				/* Refused to boost: nothing blocking GP. */
  	unsigned long n_balk_boost_tasks;
  				/* Refused to boost: already boosting. */
  	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU read-side critical section still running. */
  	unsigned long n_balk_notyet;
  				/* Refused to boost: not yet time. */
  	unsigned long n_balk_nos;
  				/* Refused to boost: not sure why, though. */
  				/*  This can happen due to race conditions. */
  #ifdef CONFIG_RCU_NOCB_CPU
  	struct swait_queue_head nocb_gp_wq[2];
  				/* Place for rcu_nocb_kthread() to wait GP. */
  #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
  	int need_future_gp[2];
  				/* Counts of upcoming no-CB GP requests. */
  	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

  	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
  	unsigned long exp_seq_rq;
  	wait_queue_head_t exp_wq[4];
  } ____cacheline_internodealigned_in_smp;
  /*
   * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
   * are indexed relative to this interval rather than the global CPU ID space.
   * This generates the bit for a CPU in node-local masks.
   */
  #define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))
  
  /*
   * Do a full breadth-first scan of the rcu_node structures for the
   * specified rcu_state structure.
   */
  #define rcu_for_each_node_breadth_first(rsp, rnp) \
  	for ((rnp) = &(rsp)->node[0]; \
  	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

  /*
   * Do a breadth-first scan of the non-leaf rcu_node structures for the
   * specified rcu_state structure.  Note that if there is a singleton
   * rcu_node tree with but one rcu_node structure, this loop is a no-op.
   */
  #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
  	for ((rnp) = &(rsp)->node[0]; \
  	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
  
  /*
   * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
   * structure.  Note that if there is a singleton rcu_node tree with but
   * one rcu_node structure, this loop -will- visit the rcu_node structure.
   * It is still a leaf node, even if it is also the root node.
   */
  #define rcu_for_each_leaf_node(rsp, rnp) \
  	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
  	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

  /*
   * Iterate over all possible CPUs in a leaf RCU node.
   */
  #define for_each_leaf_node_possible_cpu(rnp, cpu) \
  	for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
  	     cpu <= rnp->grphi; \
  	     cpu = cpumask_next((cpu), cpu_possible_mask))
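
/*
 * Editorial sketch combining the two leaf-node helpers above; the
 * function name and pr_info() reporting are illustrative only.
 */
static inline void rcu_print_holdout_cpus_example(struct rcu_node *rnp)
{
	int cpu;

	for_each_leaf_node_possible_cpu(rnp, cpu)
		if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
			pr_info("CPU %d still owes a quiescent state\n", cpu);
}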
  
  /*
   * Union to allow "aggregate OR" operation on the need for a quiescent
   * state by the normal and expedited grace periods.
   */
  union rcu_noqs {
  	struct {
  		u8 norm;
  		u8 exp;
  	} b; /* Bits. */
  	u16 s; /* Set of bits, aggregate OR here. */
  };
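
/*
 * Editorial sketch: thanks to the ->s overlay, a single load answers
 * "is a quiescent state still needed for either GP type?"; the helper
 * name is hypothetical.
 */
static inline bool rcu_any_qs_needed_example(union rcu_noqs nq)
{
	return nq.s != 0;	/* Nonzero iff .b.norm or .b.exp is set. */
}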
  /* Index values for nxttail array in struct rcu_data. */
  #define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
  #define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
  #define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
  #define RCU_NEXT_TAIL		3
  #define RCU_NEXT_SIZE		4
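
/*
 * Editorial illustration of the indices above: when the list is empty,
 * ->nxtlist is NULL and all four ->nxttail[] entries point at &->nxtlist;
 * enqueuing a callback stores it through *nxttail[RCU_NEXT_TAIL] and then
 * advances that tail to the new callback's ->next pointer.
 */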
  
  /* Per-CPU data for read-copy update. */
  struct rcu_data {
  	/* 1) quiescent-state and grace-period handling : */
  	unsigned long	completed;	/* Track rsp->completed gp number */
  					/*  in order to detect GP end. */
  	unsigned long	gpnum;		/* Highest gp number that this CPU */
  					/*  is aware of having started. */
  	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
  					/*  for rcu_all_qs() invocations. */
  	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
  	bool		core_needs_qs;	/* Core waits for quiesc state. */
  	bool		beenonline;	/* CPU online at least once. */
  	bool		gpwrap;		/* Possible gpnum/completed wrap. */
  	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
  	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
  	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
  					/*  ticks this CPU has handled */
  					/*  during and after the last grace */
					/*  period it is aware of. */
  
  	/* 2) batch handling */
  	/*
  	 * If nxtlist is not NULL, it is partitioned as follows.
  	 * Any of the partitions might be empty, in which case the
  	 * pointer to that partition will be equal to the pointer for
  	 * the following partition.  When the list is empty, all of
  	 * the nxttail elements point to the ->nxtlist pointer itself,
  	 * which in that case is NULL.
  	 *
  	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
  	 *	Entries that batch # <= ->completed
  	 *	The grace period for these entries has completed, and
  	 *	the other grace-period-completed entries may be moved
  	 *	here temporarily in rcu_process_callbacks().
  	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
  	 *	Entries that batch # <= ->completed - 1: waiting for current GP
  	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
  	 *	Entries known to have arrived before current GP ended
  	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
  	 *	Entries that might have arrived after current GP ended
  	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
  	 *	always be NULL, as this is the end of the list.
  	 */
  	struct rcu_head *nxtlist;
  	struct rcu_head **nxttail[RCU_NEXT_SIZE];
  	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
  					/* grace periods for sublists. */
  	long		qlen_lazy;	/* # of lazy queued callbacks */
  	long		qlen;		/* # of queued callbacks, incl lazy */
  	long		qlen_last_fqs_check;
  					/* qlen at last check for QS forcing */
  	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
  	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
  	unsigned long   n_cbs_orphaned; /* RCU cbs orphaned by dying CPU */
  	unsigned long   n_cbs_adopted;  /* RCU cbs adopted from dying CPU */
  	unsigned long	n_force_qs_snap;
  					/* did other CPU force QS recently? */
  	long		blimit;		/* Upper limit on a processed batch */
  	/* 3) dynticks interface. */
  	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
  	int dynticks_snap;		/* Per-GP tracking for dynticks. */
  
  	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
  	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
  	unsigned long offline_fqs;	/* Kicked due to being offline. */
  	unsigned long cond_resched_completed;
  					/* Grace period that needs help */
  					/*  from cond_resched(). */
  
  	/* 5) __rcu_pending() statistics. */
  	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
  	unsigned long n_rp_core_needs_qs;
  	unsigned long n_rp_report_qs;
  	unsigned long n_rp_cb_ready;
  	unsigned long n_rp_cpu_needs_gp;
  	unsigned long n_rp_gp_completed;
  	unsigned long n_rp_gp_started;
  	unsigned long n_rp_nocb_defer_wakeup;
  	unsigned long n_rp_need_nothing;

  	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
  	struct rcu_head barrier_head;
  #ifdef CONFIG_RCU_FAST_NO_HZ
  	struct rcu_head oom_head;
  #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
  	atomic_long_t exp_workdone0;	/* # done by workqueue. */
  	atomic_long_t exp_workdone1;	/* # done by others #1. */
  	atomic_long_t exp_workdone2;	/* # done by others #2. */
  	atomic_long_t exp_workdone3;	/* # done by others #3. */

  	/* 7) Callback offloading. */
  #ifdef CONFIG_RCU_NOCB_CPU
  	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
  	struct rcu_head **nocb_tail;
  	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
  	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
  	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
  	struct rcu_head **nocb_follower_tail;
  	struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
  	struct task_struct *nocb_kthread;
  	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
  
  	/* The following fields are used by the leader, hence own cacheline. */
  	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
  					/* CBs waiting for GP. */
  	struct rcu_head **nocb_gp_tail;
  	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
  	struct rcu_data *nocb_next_follower;
  					/* Next follower in wakeup chain. */
  
	/* The following fields are used by the follower, hence new cacheline. */
  	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
  					/* Leader CPU takes GP-end wakeups. */
  #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
  	/* 8) RCU CPU stall data. */
  	unsigned int softirq_snap;	/* Snapshot of softirq activity. */

  	int cpu;
  	struct rcu_state *rsp;
  };
  /* Values for nocb_defer_wakeup field in struct rcu_data. */
  #define RCU_NOGP_WAKE_NOT	0
  #define RCU_NOGP_WAKE		1
  #define RCU_NOGP_WAKE_FORCE	2
  #define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs */
					/*  and jiffies_till_next_fqs. */

  #define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
  					/*  delay between bouts of */
  					/*  quiescent-state forcing. */
  
  #define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
  					/*  at least one scheduling clock */
  					/*  irq before ratting on them. */

  #define rcu_wait(cond)							\
  do {									\
  	for (;;) {							\
  		set_current_state(TASK_INTERRUPTIBLE);			\
  		if (cond)						\
  			break;						\
  		schedule();						\
  	}								\
  	__set_current_state(TASK_RUNNING);				\
  } while (0)
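
/*
 * Editorial usage sketch: a grace-period kthread might park itself until
 * another context sets a flag, re-checking after every wakeup so that
 * spurious wakeups are harmless, e.g.:
 *
 *	rcu_wait(READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_INIT);
 */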
  
  /*
   * RCU global state, including node hierarchy.  This hierarchy is
   * represented in "heap" form in a dense array.  The root (first level)
   * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
   * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
   * and the third level in ->node[m+1] and following (->node[m+1] referenced
   * by ->level[2]).  The number of levels is determined by the number of
   * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
   * consisting of a single rcu_node.
   */
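/*
 * Concrete layout (editorial example continuing the arithmetic above):
 * with NR_CPUS = 4096 and 64-way fanout, ->node[] holds 65 structures;
 * ->level[0] points at ->node[0] (the root) and ->level[1] points at
 * ->node[1], the first of 64 leaves covering 64 CPUs each.
 */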
  struct rcu_state {
  	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
  	struct rcu_node *level[RCU_NUM_LVLS + 1];
  						/* Hierarchy levels (+1 to */
  						/*  shut bogus gcc warning) */
  	u8 flavor_mask;				/* bit in flavor mask. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */
  	call_rcu_func_t call;			/* call_rcu() flavor. */
  	int ncpus;				/* # CPUs seen so far. */
  
  	/* The following fields are guarded by the root rcu_node's lock. */
  	u8	boost ____cacheline_internodealigned_in_smp;
  						/* Subject to priority boost. */
  	unsigned long gpnum;			/* Current gp number. */
  	unsigned long completed;		/* # of last completed gp. */
  	struct task_struct *gp_kthread;		/* Task for grace periods. */
  	struct swait_queue_head gp_wq;		/* Where GP task waits. */
  	short gp_flags;				/* Commands for GP task. */
  	short gp_state;				/* GP kthread sleep state. */

  	/* End of fields guarded by root rcu_node's lock. */

  	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
  						/* Protect following fields. */
  	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
  						/*  need a grace period. */
  	struct rcu_head **orphan_nxttail;	/* Tail of above. */
  	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
  						/*  are ready to invoke. */
  	struct rcu_head **orphan_donetail;	/* Tail of above. */
  	long qlen_lazy;				/* Number of lazy callbacks. */
  	long qlen;				/* Total number of callbacks. */
  	/* End of fields guarded by orphan_lock. */

  	struct mutex barrier_mutex;		/* Guards barrier fields. */
  	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
  	struct completion barrier_completion;	/* Wake at barrier end. */
  	unsigned long barrier_sequence;		/* ++ at start and end of */
  						/*  _rcu_barrier(). */
  	/* End of fields guarded by barrier_mutex. */
  	struct mutex exp_mutex;			/* Serialize expedited GP. */
  	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
  	unsigned long expedited_sequence;	/* Take a ticket. */
  	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
  	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
  	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
  	int ncpus_snap;				/* # CPUs seen last time. */

  	unsigned long jiffies_force_qs;		/* Time at which to invoke */
  						/*  force_quiescent_state(). */
  	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
  						/*  kthreads, if configured. */
  	unsigned long n_force_qs;		/* Number of calls to */
  						/*  force_quiescent_state(). */
  	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
  						/*  due to lock unavailable. */
  	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
  						/*  due to no GP active. */
  	unsigned long gp_start;			/* Time at which GP started, */
  						/*  but in jiffies. */
  	unsigned long gp_activity;		/* Time of last GP kthread */
  						/*  activity in jiffies. */
  	unsigned long jiffies_stall;		/* Time at which to check */
  						/*  for CPU stalls. */
  	unsigned long jiffies_resched;		/* Time at which to resched */
  						/*  a reluctant CPU. */
  	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
  						/*  GP start. */
  	unsigned long gp_max;			/* Maximum GP duration in */
  						/*  jiffies. */
  	const char *name;			/* Name of structure. */
  	char abbr;				/* Abbreviated name. */
  	struct list_head flavors;		/* List of RCU flavors. */
  };
  /* Values for rcu_state structure's gp_flags field. */
  #define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
  #define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
  /* Values for rcu_state structure's gp_state field. */
  #define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
  #define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
  #define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
  #define RCU_GP_WAIT_FQS  3	/* Wait for force-quiescent-state time. */
  #define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
  #define RCU_GP_CLEANUP   5	/* Grace-period cleanup started. */
  #define RCU_GP_CLEANED   6	/* Grace-period cleanup complete. */

  #ifndef RCU_TREE_NONCORE
  static const char * const gp_state_names[] = {
  	"RCU_GP_IDLE",
  	"RCU_GP_WAIT_GPS",
  	"RCU_GP_DONE_GPS",
  	"RCU_GP_WAIT_FQS",
  	"RCU_GP_DOING_FQS",
  	"RCU_GP_CLEANUP",
  	"RCU_GP_CLEANED",
  };
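
/*
 * Editorial sketch: tracing/torture code can map a ->gp_state value to
 * its name via the table above; gp_state_getname_example() is
 * hypothetical.
 */
static inline const char *gp_state_getname_example(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}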
  #endif /* #ifndef RCU_TREE_NONCORE */
  extern struct list_head rcu_struct_flavors;
  
  /* Sequence through rcu_state structures for each RCU flavor. */
  #define for_each_rcu_flavor(rsp) \
  	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
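
/*
 * Editorial usage sketch: walk every registered flavor, as the core code
 * does for per-flavor initialization; the counting here is illustrative.
 */
static inline int rcu_count_flavors_example(void)
{
	struct rcu_state *rsp;
	int nflavors = 0;

	for_each_rcu_flavor(rsp)
		nflavors++;
	return nflavors;
}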
  /*
   * RCU implementation internal declarations:
   */
  extern struct rcu_state rcu_sched_state;
  
  extern struct rcu_state rcu_bh_state;

  #ifdef CONFIG_PREEMPT_RCU
  extern struct rcu_state rcu_preempt_state;
  #endif /* #ifdef CONFIG_PREEMPT_RCU */

  #ifdef CONFIG_RCU_BOOST
  DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
  DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
  DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
  DECLARE_PER_CPU(char, rcu_cpu_has_work);
  #endif /* #ifdef CONFIG_RCU_BOOST */
  #ifndef RCU_TREE_NONCORE

  /* Forward declarations for rcutree_plugin.h */
  static void rcu_bootup_announce(void);
  static void rcu_preempt_note_context_switch(void);
  static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
  #ifdef CONFIG_HOTPLUG_CPU
  static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
  #endif /* #ifdef CONFIG_HOTPLUG_CPU */
  static void rcu_print_detail_task_stall(struct rcu_state *rsp);
  static int rcu_print_task_stall(struct rcu_node *rnp);
  static int rcu_print_task_exp_stall(struct rcu_node *rnp);
  static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
  static void rcu_preempt_check_callbacks(void);
  void call_rcu(struct rcu_head *head, rcu_callback_t func);
  static void __init __rcu_init_preempt(void);
  static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
  static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
  static void invoke_rcu_callbacks_kthread(void);
  static bool rcu_is_callbacks_kthread(void);
  #ifdef CONFIG_RCU_BOOST
  static void rcu_preempt_do_callbacks(void);
  static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
  						 struct rcu_node *rnp);
  #endif /* #ifdef CONFIG_RCU_BOOST */
  static void __init rcu_spawn_boost_kthreads(void);
  static void rcu_prepare_kthreads(int cpu);
  static void rcu_cleanup_after_idle(void);
  static void rcu_prepare_for_idle(void);
  static void rcu_idle_count_callbacks_posted(void);
  static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
  static void print_cpu_stall_info_begin(void);
  static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
  static void print_cpu_stall_info_end(void);
  static void zero_cpu_stall_ticks(struct rcu_data *rdp);
  static void increment_cpu_stall_ticks(void);
  static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
  static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
  static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
  static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
  static void rcu_init_one_nocb(struct rcu_node *rnp);
  static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
  			    bool lazy, unsigned long flags);
  static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
  				      struct rcu_data *rdp,
  				      unsigned long flags);
  static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
  static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
  static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
  static void rcu_spawn_all_nocb_kthreads(int cpu);
  static void __init rcu_spawn_nocb_kthreads(void);
  #ifdef CONFIG_RCU_NOCB_CPU
  static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
  #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
  static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
  static bool init_nocb_callback_list(struct rcu_data *rdp);
  static void rcu_sysidle_enter(int irq);
  static void rcu_sysidle_exit(int irq);
  static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
  				  unsigned long *maxj);
  static bool is_sysidle_rcu_state(struct rcu_state *rsp);
  static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
  				  unsigned long maxj);
  static void rcu_bind_gp_kthread(void);
  static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
  static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
  static void rcu_dynticks_task_enter(void);
  static void rcu_dynticks_task_exit(void);

  #endif /* #ifndef RCU_TREE_NONCORE */
  
  #ifdef CONFIG_RCU_TRACE
  /* Read out queue lengths for tracing. */
  static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
  {
  #ifdef CONFIG_RCU_NOCB_CPU
  	*ql = atomic_long_read(&rdp->nocb_q_count);
  	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
  #else /* #ifdef CONFIG_RCU_NOCB_CPU */
  	*ql = 0;
  	*qll = 0;
  #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
  }
  #endif /* #ifdef CONFIG_RCU_TRACE */
  
  /*
   * Place this after a lock-acquisition primitive to guarantee that
   * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
   * if the UNLOCK and LOCK are executed by the same CPU or if the
   * UNLOCK and LOCK operate on the same lock variable.
   */
  #ifdef CONFIG_PPC
  #define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
  #else /* #ifdef CONFIG_PPC */
  #define smp_mb__after_unlock_lock()	do { } while (0)
  #endif /* #else #ifdef CONFIG_PPC */
  
  /*
   * Wrappers for the rcu_node::lock acquire and release.
   *
   * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
   *
   * In order to restore full ordering between tree levels, augment the regular
   * lock acquire functions with smp_mb__after_unlock_lock().
   *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
   */
  static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
  {
  	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
  	smp_mb__after_unlock_lock();
  }
  static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
  {
  	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
  }
  static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
  {
  	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
  	smp_mb__after_unlock_lock();
  }
  static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
  {
  	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
  }
  
  #define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
  do {									\
  	typecheck(unsigned long, flags);				\
  	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
  	smp_mb__after_unlock_lock();					\
  } while (0)
  
  #define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
  do {									\
  	typecheck(unsigned long, flags);				\
  	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
  } while (0)
  
  static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
  {
  	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
  
  	if (locked)
  		smp_mb__after_unlock_lock();
  	return locked;
  }
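
/*
 * Editorial usage sketch: callers pair the acquire/release wrappers so
 * that the acquisition side supplies smp_mb__after_unlock_lock(); the
 * qsmask update below is purely illustrative.
 */
static inline void rcu_clear_qsmask_example(struct rcu_node *rnp,
					    unsigned long mask)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->qsmask &= ~mask;	/* Hypothetical update under ->lock. */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}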