kernel/rcutree.h
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/irq_work.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this did work well going from three levels to four.
 * Of course, your mileage may vary.
 */
#define MAX_RCU_LVLS 4
#define RCU_FANOUT_1	      (CONFIG_RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	      (RCU_FANOUT_1 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_3	      (RCU_FANOUT_2 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_4	      (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
# define RCU_NUM_LVLS	      1
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      (NR_CPUS)
# define NUM_RCU_LVL_2	      0
# define NUM_RCU_LVL_3	      0
# define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_2
# define RCU_NUM_LVLS	      2
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_LVL_2	      (NR_CPUS)
# define NUM_RCU_LVL_3	      0
# define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_3
# define RCU_NUM_LVLS	      3
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_LVL_3	      (NR_CPUS)
# define NUM_RCU_LVL_4	      0
#elif NR_CPUS <= RCU_FANOUT_4
# define RCU_NUM_LVLS	      4
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
# define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_LVL_4	      (NR_CPUS)
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)

extern int rcu_num_lvls;
extern int rcu_num_nodes;
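
/*
 * Editor's illustrative example (not part of the original file): with
 * hypothetical values CONFIG_RCU_FANOUT_LEAF=16, CONFIG_RCU_FANOUT=64,
 * and NR_CPUS=4096, the ladder above selects the three-level case:
 *
 *	RCU_FANOUT_1 = 16, RCU_FANOUT_2 = 1024, RCU_FANOUT_3 = 65536
 *	NUM_RCU_LVL_0 = 1
 *	NUM_RCU_LVL_1 = DIV_ROUND_UP(4096, 1024) = 4
 *	NUM_RCU_LVL_2 = DIV_ROUND_UP(4096, 16)   = 256
 *	NUM_RCU_LVL_3 = 4096
 *	RCU_SUM = 1 + 4 + 256 + 4096 = 4357
 *	NUM_RCU_NODES = RCU_SUM - NR_CPUS = 261
 *
 * That is, 261 rcu_node structures would cover 4096 possible CPUs.
 */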

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting;	/* Track irq/process nesting level. */
					/* Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
	atomic_t dynticks;		/* Even value for idle, else odd. */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;			/* Are all CPU's CBs lazy? */
	unsigned long nonlazy_posted;
					/* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
					/* idle-period nonlazy_posted snapshot. */
	unsigned long last_accelerate;
					/* Last jiffy CBs were accelerated. */
	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
				/*  rcu_state fields as well as following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed. */
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long expmask;	/* Groups that have ->blkd_tasks */
				/*  elements that need to drain to allow the */
				/*  current expedited grace period to */
				/*  complete (only for TREE_PREEMPT_RCU). */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask & expmask. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_NOCB_CPU
	wait_queue_head_t nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	int need_future_gp[2];
				/* Counts of upcoming no-CB GP requests. */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
} ____cacheline_internodealigned_in_smp;

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
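
/*
 * Editor's illustrative sketch (not from the original file): a caller
 * holding a hypothetical rcu_state pointer "rsp" might walk the leaves
 * with the iterator above roughly as follows:
 *
 *	struct rcu_node *rnp;
 *	unsigned long flags;
 *
 *	rcu_for_each_leaf_node(rsp, rnp) {
 *		raw_spin_lock_irqsave(&rnp->lock, flags);
 *		... examine rnp->qsmask, rnp->grplo, rnp->grphi ...
 *		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 *	}
 */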

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	bool		passed_quiesce;	/* User-mode/idle loop etc. */
	bool		qs_pending;	/* Core waits for quiesc state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		preemptible;	/* Preemptible RCU? */
	struct rcu_node	*mynode;	/* This CPU's leaf of hierarchy */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
#ifdef CONFIG_RCU_CPU_STALL_INFO
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
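	/*
	 * Editor's illustrative sketch (not part of the original file):
	 * when every segment is empty, all four nxttail[] entries point
	 * at &nxtlist, which is itself NULL.  Appending a new callback
	 * "head" (with head->next already set to NULL) to the last
	 * segment is conceptually:
	 *
	 *	*rdp->nxttail[RCU_NEXT_TAIL] = head;
	 *	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
	 */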
	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
					/* grace periods for sublists. */
	long		qlen_lazy;	/* # of lazy queued callbacks */
	long		qlen;		/* # of queued callbacks, incl lazy */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
	unsigned long	n_cbs_orphaned;	/* RCU cbs orphaned by dying CPU */
	unsigned long	n_cbs_adopted;	/* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_qs_pending;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier() and OOM callbacks. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for kthread */
	atomic_long_t nocb_q_count_lazy; /*  (approximate). */
	int nocb_p_count;		/* # CBs being invoked by kthread */
	int nocb_p_count_lazy;		/*  (approximate). */
	wait_queue_head_t nocb_wq;	/* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
#ifdef CONFIG_RCU_CPU_STALL_INFO
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

	int cpu;
	struct rcu_state *rsp;
};

/* Values for fqs_state field in struct rcu_state. */
#define RCU_GP_IDLE		0	/* No grace period in progress. */
#define RCU_GP_INIT		1	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */
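/*
 * Editor's note (not part of the original file): the expression above
 * evaluates to 1 jiffy for HZ <= 250, 2 jiffies for 250 < HZ <= 500,
 * and 3 jiffies for HZ > 500 (for example, HZ=1000 gives 1 + 1 + 1 = 3).
 */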

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond) \
do { \
	for (;;) { \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (cond) \
			break; \
		schedule(); \
	} \
	__set_current_state(TASK_RUNNING); \
} while (0)
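
/*
 * Editor's illustrative sketch (not from the original file): a kthread
 * loop might block on a condition with the helper above roughly as
 *
 *	rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
 *
 * which sleeps in TASK_INTERRUPTIBLE until the condition becomes true.
 */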

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS];	/* Hierarchy levels. */
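	/*
	 * Editor's illustrative note (not part of the original file):
	 * continuing the hypothetical 4096-CPU, three-level example
	 * earlier in this file, ->level[0] would point at node[0] (the
	 * root), ->level[1] at node[1] (interior nodes node[1]..node[4]),
	 * and ->level[2] at node[5] (the 256 leaves node[5]..node[260]).
	 */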
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[RCU_NUM_LVLS];		/* kids/node in each level. */
	struct rcu_data __percpu *rda;		/* pointer of per-CPU rcu_data. */
	void (*call)(struct rcu_head *head,	/* call_rcu() flavor. */
		     void (*func)(struct rcu_head *head));

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	fqs_state ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	u8	boost;				/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	wait_queue_head_t gp_wq;		/* Where GP task waits. */
	int gp_flags;				/* Commands for GP task. */
	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
						/*  need a grace period. */
	struct rcu_head **orphan_nxttail;	/* Tail of above. */
	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
						/*  are ready to invoke. */
	struct rcu_head **orphan_donetail;	/* Tail of above. */
	long qlen_lazy;				/* Number of lazy callbacks. */
	long qlen;				/* Total number of callbacks. */
	/* End of fields guarded by orphan_lock. */

	struct mutex onoff_mutex;		/* Coordinate hotplug & GPs. */
	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long n_barrier_done;		/* ++ at start and end of */
						/*  _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */
	atomic_long_t expedited_start;		/* Starting ticket. */
	atomic_long_t expedited_done;		/* Done ticket. */
	atomic_long_t expedited_wrap;		/* # near-wrap incidents. */
	atomic_long_t expedited_tryfail;	/* # acquisition failures. */
	atomic_long_t expedited_workdone1;	/* # done by others #1. */
	atomic_long_t expedited_workdone2;	/* # done by others #2. */
	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
	atomic_long_t expedited_stoppedcpus;	/* # successful stop_cpus. */
	atomic_long_t expedited_done_tries;	/* # tries to update _done. */
	atomic_long_t expedited_done_lost;	/* # times beaten to _done. */
	atomic_long_t expedited_done_exit;	/* # times exited _done loop. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	char *name;				/* Name of structure. */
	char abbr;				/* Abbreviated name. */
	struct list_head flavors;		/* List of RCU flavors. */
	struct irq_work wakeup_work;		/* Postponed wakeups. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
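
/*
 * Editor's illustrative sketch (not from the original file): iterating
 * over every registered flavor might look roughly like
 *
 *	struct rcu_state *rsp;
 *
 *	for_each_rcu_flavor(rsp)
 *		pr_info("RCU flavor: %s\n", rsp->name);
 */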

/* Return values for rcu_preempt_offline_tasks(). */
#define RCU_OFL_TASKS_NORM_GP	0x1		/* Tasks blocking normal */
						/*  GP were moved to root. */
#define RCU_OFL_TASKS_EXP_GP	0x2		/* Tasks blocking expedited */
						/*  GP were moved to root. */

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;
DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

#ifdef CONFIG_TREE_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
				      unsigned long flags);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_check_callbacks(int cpu);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(int cpu);
static void rcu_prepare_for_idle(int cpu);
static void rcu_idle_count_callbacks_posted(void);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static int rcu_nocb_needs_gp(struct rcu_state *rsp);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
static void rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
#ifdef CONFIG_RCU_NOCB_CPU
/* Sum up queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
	*ql = atomic_long_read(&rdp->nocb_q_count) + rdp->nocb_p_count;
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy) + rdp->nocb_p_count_lazy;
}
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
	*ql = 0;
	*qll = 0;
}
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
#endif /* #ifdef CONFIG_RCU_TRACE */