kernel/rcutree.c

/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>

#include "rcutree.h"
#include <trace/events/rcu.h>

#include "rcu.h"

/* Data structures. */

static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];

#define RCU_STATE_INITIALIZER(structname) { \
	.level = { &structname##_state.node[0] }, \
	.levelcnt = { \
		NUM_RCU_LVL_0,  /* root of hierarchy. */ \
		NUM_RCU_LVL_1, \
		NUM_RCU_LVL_2, \
		NUM_RCU_LVL_3, \
		NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
	}, \
	.fqs_state = RCU_GP_IDLE, \
	.gpnum = -300, \
	.completed = -300, \
	.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
	.fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
	.n_force_qs = 0, \
	.n_force_qs_ngp = 0, \
	.name = #structname, \
}

struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);

struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);

static struct rcu_state *rcu_state;

/*
 * The rcu_scheduler_active variable transitions from zero to one just
 * before the first task is spawned.  So when this variable is zero, RCU
 * can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_sched() to a simple barrier().  When this variable
 * is one, RCU must actually do all the hard work required to detect real
 * grace periods.  This variable is also used to suppress boot-time false
 * positives from lockdep-RCU error checking.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
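
/*
 * Illustrative sketch (not part of this file, names hypothetical): a
 * grace-period primitive can exploit rcu_scheduler_active during early
 * boot, when the one and only task cannot race with any reader:
 *
 *	void example_synchronize(void)
 *	{
 *		if (!rcu_scheduler_active) {
 *			barrier();	// one task, no readers to wait for
 *			return;
 *		}
 *		// ... do the real grace-period wait ...
 *	}
 */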

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

#ifdef CONFIG_RCU_BOOST

/*
 * Control variables for per-CPU and per-rcu_node kthreads.  These
 * handle all flavors of RCU.
 */
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);

#endif /* #ifdef CONFIG_RCU_BOOST */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
}

/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just whether there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
 */
void rcu_sched_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	barrier();
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
}

void rcu_bh_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);

	rdp->passed_quiesce_gpnum = rdp->gpnum;
	barrier();
	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
}

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled preemption.
 */
void rcu_note_context_switch(int cpu)
{
	trace_rcu_utilization("Start context switch");
	rcu_sched_qs(cpu);
	rcu_preempt_note_context_switch(cpu);
	trace_rcu_utilization("End context switch");
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
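
/*
 * Illustrative sketch (not part of this file): the scheduler is expected
 * to report every context switch with preemption already disabled,
 * roughly (hypothetical call site):
 *
 *	// inside the scheduler's context-switch path
 *	rcu_note_context_switch(smp_processor_id());
 */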

DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = DYNTICK_TASK_NESTING,
	.dynticks = ATOMIC_INIT(1),
};

static int blimit = 10;		/* Maximum callbacks per rcu_do_batch. */
static int qhimark = 10000;	/* If this many pending, ignore blimit. */
static int qlowmark = 100;	/* Once only this many pending, use blimit. */

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);

int rcu_cpu_stall_suppress __read_mostly;
module_param(rcu_cpu_stall_suppress, int, 0644);
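
/*
 * Illustrative note (not part of this file): since this code is built in,
 * the module parameters above are normally set on the kernel command line
 * with this file's KBUILD_MODNAME as the prefix, e.g. (assumed prefix):
 *
 *	rcutree.blimit=20 rcutree.qhimark=20000 rcutree.rcu_cpu_stall_suppress=1
 */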

static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
static int rcu_pending(int cpu);

/*
 * Return the number of RCU-sched batches processed thus far for debug & stats.
 */
long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches processed thus far for debug & stats.
 */
long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_bh_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
	rcutorture_testseq++;
	rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
	rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Does the CPU have callbacks ready to be invoked?
 */
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
}

/*
 * Does the current CPU require an as-yet-unscheduled grace period?
 */
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
}

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}

#ifdef CONFIG_SMP

/*
 * If the specified CPU is offline, tell the caller that it is in
 * a quiescent state.  Otherwise, whack it with a reschedule IPI.
 * Grace periods can end up waiting on an offline CPU when that
 * CPU is in the process of coming online -- it will be added to the
 * rcu_node bitmasks before it actually makes it online.  The same thing
 * can happen while a CPU is in the process of going offline.  Because this
 * race is quite rare, we check for it after detecting that the grace
 * period has been delayed rather than checking each and every CPU
 * each and every time we start a new grace period.
 */
static int rcu_implicit_offline_qs(struct rcu_data *rdp)
{
	/*
	 * If the CPU is offline, it is in a quiescent state.  We can
	 * trust its state not to change because interrupts are disabled.
	 */
	if (cpu_is_offline(rdp->cpu)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
		rdp->offline_fqs++;
		return 1;
	}

	/*
	 * The CPU is online, so send it a reschedule IPI.  This forces
	 * it through the scheduler, and (inefficiently) also handles cases
	 * where idle loops fail to inform RCU about the CPU being idle.
	 */
	if (rdp->cpu != smp_processor_id())
		smp_send_reschedule(rdp->cpu);
	else
		set_need_resched();
	rdp->resched_ipi++;
	return 0;
}

#endif /* #ifdef CONFIG_SMP */
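
/*
 * Illustrative note (not part of this file): ->nxtlist and the four
 * ->nxttail[] pointers implement a segmented singly linked list; a
 * segment is empty when its tail pointer equals the preceding segment's
 * tail.  For example, with callbacks A and B both done:
 *
 *	nxtlist -> A -> B -> NULL
 *	nxttail[RCU_DONE_TAIL] == &B->next	(done: A, B)
 *	nxttail[RCU_WAIT_TAIL] == &B->next	(waiting: empty)
 *	nxttail[RCU_NEXT_READY_TAIL] == &B->next	(empty)
 *	nxttail[RCU_NEXT_TAIL] == &B->next	(empty)
 *
 * This is why "&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]" in
 * cpu_has_callbacks_ready_to_invoke() tests for ready callbacks.
 */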

/*
 * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle
 *
 * If the new value of the ->dynticks_nesting counter now is zero,
 * we really have entered idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
{
	trace_rcu_dyntick("Start", oldval, 0);
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_prepare_for_idle(smp_processor_id());
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic_inc();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * We crowbar the ->dynticks_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long oldval;
	struct rcu_dynticks *rdtp;

	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting = 0;
	rcu_idle_enter_common(rdtp, oldval);
	local_irq_restore(flags);
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture violates this assumption, RCU will give you what you
 * deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long oldval;
	struct rcu_dynticks *rdtp;

	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting--;
	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
	if (rdtp->dynticks_nesting)
		trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
	else
		rcu_idle_enter_common(rdtp, oldval);
	local_irq_restore(flags);
}

/*
 * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle
 *
 * If the new value of the ->dynticks_nesting counter was previously zero,
 * we really have exited idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
{
	smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
	atomic_inc(&rdtp->dynticks);
	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
	smp_mb__after_atomic_inc();  /* See above. */
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
	rcu_cleanup_after_idle(smp_processor_id());
	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		trace_rcu_dyntick("Error on exit: not idle task",
				  oldval, rdtp->dynticks_nesting);
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NESTING to
 * allow for the possibility of usermode upcalls messing up our count
 * of interrupt nesting level during the busy period that is just
 * now starting.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	struct rcu_dynticks *rdtp;
	long long oldval;

	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(oldval != 0);
	rdtp->dynticks_nesting = DYNTICK_TASK_NESTING;
	rcu_idle_exit_common(rdtp, oldval);
	local_irq_restore(flags);
}

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to
 * user mode!  This code assumes that the idle loop never does upcalls to
 * user mode.  If your architecture does do upcalls from the idle loop (or
 * does anything else that results in unbalanced calls to the irq_enter()
 * and irq_exit() functions), RCU will give you what you deserve, good
 * and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	struct rcu_dynticks *rdtp;
	long long oldval;

	local_irq_save(flags);
	rdtp = &__get_cpu_var(rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting++;
	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
	if (oldval)
		trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
	else
		rcu_idle_exit_common(rdtp, oldval);
	local_irq_restore(flags);
}
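
/*
 * Illustrative note (not part of this file): ->dynticks is even when the
 * CPU is in dyntick-idle mode and odd otherwise, because every transition
 * does one atomic_inc().  A sampled sequence might look like:
 *
 *	1 (non-idle) -> 2 (idle) -> 3 (irq from idle) -> 4 (idle again) ...
 *
 * so "atomic_read(&rdtp->dynticks) & 0x1" distinguishes the two states.
 */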

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is active.
 */
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (rdtp->dynticks_nmi_nesting == 0 &&
	    (atomic_read(&rdtp->dynticks) & 0x1))
		return;
	rdtp->dynticks_nmi_nesting++;
	smp_mb__before_atomic_inc();  /* Force delay from prior write. */
	atomic_inc(&rdtp->dynticks);
	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
	smp_mb__after_atomic_inc();  /* See above. */
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
}

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is no longer active.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

	if (rdtp->dynticks_nmi_nesting == 0 ||
	    --rdtp->dynticks_nmi_nesting != 0)
		return;
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic_inc();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic_inc();  /* Force delay to next write. */
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}

#ifdef CONFIG_PROVE_RCU

/**
 * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
 *
 * If the current CPU is in its idle loop and is neither in an interrupt
 * nor an NMI handler, return true.
 */
int rcu_is_cpu_idle(void)
{
	int ret;

	preempt_disable();
	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */
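
/*
 * Illustrative sketch (not part of this file): debug code can use the
 * above to catch RCU read-side critical sections entered from the idle
 * loop, where this CPU is invisible to RCU (hypothetical check):
 *
 *	WARN_ON_ONCE(rcu_is_cpu_idle());  // rcu_read_lock() in idle is a bug
 */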

/**
 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 *
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
	return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
}

#ifdef CONFIG_SMP

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
	return (rdp->dynticks_snap & 0x1) == 0;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
	unsigned int curr;
	unsigned int snap;

	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
	snap = (unsigned int)rdp->dynticks_snap;

	/*
	 * If the CPU passed through or entered a dynticks idle phase with
	 * no active irq/NMI handlers, then we can safely pretend that the CPU
	 * already acknowledged the request to pass through a quiescent
	 * state.  Either way, that CPU cannot possibly be in an RCU
	 * read-side critical section that started before the beginning
	 * of the current RCU grace period.
	 */
	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
		rdp->dynticks_fqs++;
		return 1;
	}

	/* Go check for the CPU being offline. */
	return rcu_implicit_offline_qs(rdp);
}

#endif /* #ifdef CONFIG_SMP */
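
/*
 * Illustrative worked example (not part of this file): suppose the
 * snapshot was snap == 5 (odd, so the CPU was not idle).  Later,
 * curr == 5 means no progress, curr == 6 means the CPU is idle right
 * now (even), and curr == 7 means it passed through idle and back
 * (curr >= snap + 2).  The last two cases both count as an implicit
 * quiescent state.
 */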

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
	rsp->gp_start = jiffies;
	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

static void print_other_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	long delta;
	unsigned long flags;
	int ndetected;
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Only let one CPU complain about others per time interval. */

	raw_spin_lock_irqsave(&rnp->lock, flags);
	delta = jiffies - rsp->jiffies_stall;
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;

	/*
	 * Now rat on any tasks that got kicked up to the root rcu_node
	 * due to CPU offlining.
	 */
	ndetected = rcu_print_task_stall(rnp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
	       rsp->name);
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		ndetected += rcu_print_task_stall(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		if (rnp->qsmask == 0)
			continue;
		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
			if (rnp->qsmask & (1UL << cpu)) {
				printk(" %d", rnp->grplo + cpu);
				ndetected++;
			}
	}
	printk("} (detected by %d, t=%ld jiffies)\n",
	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
	if (ndetected == 0)
		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
	else if (!trigger_all_cpu_backtrace())
		dump_stack();

	/* If so configured, complain about tasks blocking the grace period. */

	rcu_print_detail_task_stall(rsp);

	force_quiescent_state(rsp, 0);  /* Kick them all. */
}

static void print_cpu_stall(struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
	       rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
	if (!trigger_all_cpu_backtrace())
		dump_stack();

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
		rsp->jiffies_stall =
			jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	set_need_resched();  /* kick ourselves to get things going. */
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long j;
	unsigned long js;
	struct rcu_node *rnp;

	if (rcu_cpu_stall_suppress)
		return;
	j = ACCESS_ONCE(jiffies);
	js = ACCESS_ONCE(rsp->jiffies_stall);
	rnp = rdp->mynode;
	if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);

	} else if (rcu_gp_in_progress(rsp) &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(rsp);
	}
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
	rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
	rcu_preempt_stall_reset();
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static void __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
}
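
/*
 * Illustrative note (not part of this file): the ULONG_CMP_GE()/
 * ULONG_CMP_LT() comparisons in the stall checks above are
 * wraparound-safe because they compare the unsigned difference of the
 * two jiffies values rather than the values themselves, in the spirit of:
 *
 *	#define EXAMPLE_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
 */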

/*
 * Update CPU-local rcu_data state to record the newly noticed grace period.
 * This is used both when we started the grace period and when we notice
 * that someone else started the grace period.  The caller must hold the
 * ->lock of the leaf rcu_node structure corresponding to the current CPU,
 * and must have irqs disabled.
 */
static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
	if (rdp->gpnum != rnp->gpnum) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		rdp->gpnum = rnp->gpnum;
		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
		if (rnp->qsmask & rdp->grpmask) {
			rdp->qs_pending = 1;
			rdp->passed_quiesce = 0;
		} else
			rdp->qs_pending = 0;
	}
}

static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	__note_new_gpnum(rsp, rnp, rdp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Did someone else start a new RCU grace period since we last
 * checked?  Update local state appropriately if so.  Must be called
 * on the CPU corresponding to rdp.
 */
static int
check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	int ret = 0;

	local_irq_save(flags);
	if (rdp->gpnum != rsp->gpnum) {
		note_new_gpnum(rsp, rdp);
		ret = 1;
	}
	local_irq_restore(flags);
	return ret;
}

/*
 * Advance this CPU's callbacks, but only if the current grace period
 * has ended.  This may be called only from the CPU to whom the rdp
 * belongs.  In addition, the corresponding leaf rcu_node structure's
 * ->lock must be held by the caller, with irqs disabled.
 */
static void
__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
	/* Did another grace period end? */
	if (rdp->completed != rnp->completed) {

		/* Advance callbacks.  No harm if list empty. */
		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

		/* Remember that we saw this grace-period completion. */
		rdp->completed = rnp->completed;
		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");

		/*
		 * If we were in an extended quiescent state, we may have
		 * missed some grace periods that other CPUs handled on
		 * our behalf.  Catch up with this state to avoid noting
		 * spurious new grace periods.  If another grace period
		 * has started, then rnp->gpnum will have advanced, so
		 * we will detect this later on.
		 */
		if (ULONG_CMP_LT(rdp->gpnum, rdp->completed))
			rdp->gpnum = rdp->completed;

		/*
		 * If RCU does not need a quiescent state from this CPU,
		 * then make sure that this CPU doesn't go looking for one.
		 */
		if ((rnp->qsmask & rdp->grpmask) == 0)
			rdp->qs_pending = 0;
	}
}

/*
 * Advance this CPU's callbacks, but only if the current grace period
 * has ended.  This may be called only from the CPU to whom the rdp
 * belongs.
 */
static void
rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	__rcu_process_gp_end(rsp, rnp, rdp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Do per-CPU grace-period initialization for running CPU.  The caller
 * must hold the lock of the leaf rcu_node structure corresponding to
 * this CPU.
 */
static void
rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
	/* Prior grace period ended, so advance callbacks for current CPU. */
	__rcu_process_gp_end(rsp, rnp, rdp);

	/*
	 * Because this CPU just now started the new grace period, we know
	 * that all of its callbacks will be covered by this upcoming grace
	 * period, even the ones that were registered arbitrarily recently.
	 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
	 *
	 * Other CPUs cannot be sure exactly when the grace period started.
	 * Therefore, their recently registered callbacks must pass through
	 * an additional RCU_NEXT_READY stage, so that they will be handled
	 * by the next RCU grace period.
	 */
	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

	/* Set state so that this CPU will detect the next quiescent state. */
	__note_new_gpnum(rsp, rnp, rdp);
}
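
/*
 * Illustrative worked example (not part of this file): suppose
 * DONE = {A}, WAIT = {B}, NEXT_READY = {}, NEXT = {C} and the grace
 * period that B was waiting for completes.  The tail-pointer shifts in
 * __rcu_process_gp_end() yield:
 *
 *	DONE = {A, B}, WAIT = {}, NEXT_READY = {C}, NEXT = {}
 *
 * so B becomes invocable and C is queued for a future grace period.
 */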
/*
 * Start a new RCU grace period if warranted, re-initializing the hierarchy
 * in preparation for detecting the next grace period.  The caller must hold
 * the root node's ->lock, which is released before return.  Hard irqs must
 * be disabled.
 */
static void
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
	struct rcu_node *rnp = rcu_get_root(rsp);

	if (!rcu_scheduler_fully_active ||
	    !cpu_needs_another_gp(rsp, rdp)) {
		/*
		 * Either the scheduler hasn't yet spawned the first
		 * non-idle task or this CPU does not need another
		 * grace period.  Either way, don't start a new grace
		 * period.
		 */
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	if (rsp->fqs_active) {
		/*
		 * This CPU needs a grace period, but force_quiescent_state()
		 * is running.  Tell it to start one on this CPU's behalf.
		 */
		rsp->fqs_need_gp = 1;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	/* Advance to a new grace period and initialize state. */
	rsp->gpnum++;
	trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
	WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT);
	rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */
	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
	record_gp_stall_check_time(rsp);

	/* Special-case the common single-level case. */
	if (NUM_RCU_NODES == 1) {
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		rnp->gpnum = rsp->gpnum;
		rnp->completed = rsp->completed;
		rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state OK */
		rcu_start_gp_per_cpu(rsp, rnp, rdp);
		rcu_preempt_boost_start_gp(rnp);
		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
					    rnp->level, rnp->grplo,
					    rnp->grphi, rnp->qsmask);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}

	raw_spin_unlock(&rnp->lock);  /* leave irqs disabled. */

	/* Exclude any concurrent CPU-hotplug operations. */
	raw_spin_lock(&rsp->onofflock);  /* irqs already disabled. */

	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first
	 * order, starting from the root rcu_node structure.  This
	 * operation relies on the layout of the hierarchy within the
	 * rsp->node[] array.  Note that other CPUs will access only
	 * the leaves of the hierarchy, which still indicate that no
	 * grace period is in progress, at least until the corresponding
	 * leaf node has been initialized.  In addition, we have excluded
	 * CPU-hotplug operations.
	 *
	 * Note that the grace period cannot complete until we finish
	 * the initialization process, as there will be at least one
	 * qsmask bit set in the root node until that time, namely the
	 * one corresponding to this CPU, due to the fact that we have
	 * irqs disabled.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		rnp->gpnum = rsp->gpnum;
		rnp->completed = rsp->completed;
		if (rnp == rdp->mynode)
			rcu_start_gp_per_cpu(rsp, rnp, rdp);
		rcu_preempt_boost_start_gp(rnp);
		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
					    rnp->level, rnp->grplo,
					    rnp->grphi, rnp->qsmask);
		raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	}

	rnp = rcu_get_root(rsp);
	raw_spin_lock(&rnp->lock);		/* irqs already disabled. */
	rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
	raw_spin_unlock(&rnp->lock);		/* irqs remain disabled. */
	raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
}
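
/*
 * Illustrative note (not part of this file): the rsp->node[] array lays
 * out the tree level by level, so the breadth-first walk above is just a
 * linear scan.  For example (assumed fanout of 4, 6 CPUs):
 *
 *	node[0]: root, children node[1..2]
 *	node[1]: leaf for CPUs 0-3
 *	node[2]: leaf for CPUs 4-5
 */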

/*
 * Report a full set of quiescent states to the specified rcu_state
 * data structure.  This involves cleaning up after the prior grace
 * period and letting rcu_start_gp() start up the next grace period
 * if one is needed.  Note that the caller must hold rnp->lock, as
 * required by rcu_start_gp(), which will release it.
 */
static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	unsigned long gp_duration;
	struct rcu_node *rnp = rcu_get_root(rsp);
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);

	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));

	/*
	 * Ensure that all grace-period and pre-grace-period activity
	 * is seen before the assignment to rsp->completed.
	 */
	smp_mb(); /* See above block comment. */
	gp_duration = jiffies - rsp->gp_start;
	if (gp_duration > rsp->gp_max)
		rsp->gp_max = gp_duration;

	/*
	 * We know the grace period is complete, but to everyone else
	 * it appears to still be ongoing.  But it is also the case
	 * that to everyone else it looks like there is nothing that
	 * they can do to advance the grace period.  It is therefore
	 * safe for us to drop the lock in order to mark the grace
	 * period as completed in all of the rcu_node structures.
	 *
	 * But if this CPU needs another grace period, it will take
	 * care of this while initializing the next grace period.
	 * We use RCU_WAIT_TAIL instead of the usual RCU_DONE_TAIL
	 * because the callbacks have not yet been advanced: Those
	 * callbacks are waiting on the grace period that just now
	 * completed.
	 */
	if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) {
		raw_spin_unlock(&rnp->lock);	 /* irqs remain disabled. */

		/*
		 * Propagate new ->completed value to rcu_node structures
		 * so that other CPUs don't have to wait until the start
		 * of the next grace period to process their callbacks.
		 */
		rcu_for_each_node_breadth_first(rsp, rnp) {
			raw_spin_lock(&rnp->lock); /* irqs already disabled. */
			rnp->completed = rsp->gpnum;
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
		}
		rnp = rcu_get_root(rsp);
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
	}

	rsp->completed = rsp->gpnum;  /* Declare the grace period complete. */
	trace_rcu_grace_period(rsp->name, rsp->completed, "end");
	rsp->fqs_state = RCU_GP_IDLE;
	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
}

/*
 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
 * Allows quiescent states for a group of CPUs to be reported at one go
 * to the specified rcu_node structure, though all the CPUs in the group
 * must be represented by the same rcu_node structure (which need not be
 * a leaf rcu_node structure, though it often will be).  That structure's
 * lock must be held upon entry, and it is released before return.
 */
static void
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
		  struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	struct rcu_node *rnp_c;

	/* Walk up the rcu_node hierarchy. */
	for (;;) {
		if (!(rnp->qsmask & mask)) {

			/* Our bit has already been cleared, so done. */
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		rnp->qsmask &= ~mask;
		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
						 mask, rnp->qsmask, rnp->level,
						 rnp->grplo, rnp->grphi,
						 !!rnp->gp_tasks);
		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {

			/* Other bits still set at this level, so done. */
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		mask = rnp->grpmask;
		if (rnp->parent == NULL) {

			/* No more levels.  Exit loop holding root lock. */
			break;
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		rnp_c = rnp;
		rnp = rnp->parent;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		WARN_ON_ONCE(rnp_c->qsmask);
	}

	/*
	 * Get here if we are the last CPU to pass through a quiescent
	 * state for this grace period.  Invoke rcu_report_qs_rsp()
	 * to clean up and start the next grace period if one is needed.
	 */
	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
}
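
/*
 * Illustrative worked example (not part of this file): in a two-level
 * tree, when the last CPU of a leaf rcu_node reports its quiescent
 * state, the leaf's ->qsmask drops to zero and the loop above climbs to
 * the root to clear that leaf's bit there.  If the root's ->qsmask also
 * reaches zero, this was the final quiescent state and
 * rcu_report_qs_rsp() ends the grace period.
 */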
d3f6bad39 rcu: Rename "quie... |
1132 1133 1134 1135 1136 1137 1138 |
* Record a quiescent state for the specified CPU to that CPU's rcu_data * structure. This must be either called from the specified CPU, or * called when the specified CPU is known to be offline (and when it is * also known that no other CPU is concurrently trying to help the offline * CPU). The lastcomp argument is used to make sure we are still in the * grace period of interest. We don't want to end the current grace period * based on quiescent states detected in an earlier grace period! |
64db4cfff "Tree RCU": scala... |
1139 1140 |
*/ static void |
e4cc1f22b rcu: Simplify qui... |
1141 |
rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastgp) |
64db4cfff "Tree RCU": scala... |
1142 1143 1144 1145 1146 1147 |
{ unsigned long flags; unsigned long mask; struct rcu_node *rnp; rnp = rdp->mynode; |
1304afb22 rcu: Convert to r... |
1148 |
raw_spin_lock_irqsave(&rnp->lock, flags); |
e4cc1f22b rcu: Simplify qui... |
1149 |
if (lastgp != rnp->gpnum || rnp->completed == rnp->gpnum) { |
64db4cfff "Tree RCU": scala... |
1150 1151 |
/* |
e4cc1f22b rcu: Simplify qui... |
1152 1153 1154 1155 |
* The grace period in which this quiescent state was * recorded has ended, so don't report it upwards. * We will instead need a new quiescent state that lies * within the current grace period. |
64db4cfff "Tree RCU": scala... |
1156 |
*/ |
e4cc1f22b rcu: Simplify qui... |
1157 |
rdp->passed_quiesce = 0; /* need qs for new gp. */ |
1304afb22 rcu: Convert to r... |
1158 |
raw_spin_unlock_irqrestore(&rnp->lock, flags); |
64db4cfff "Tree RCU": scala... |
1159 1160 1161 1162 |
return; } mask = rdp->grpmask; if ((rnp->qsmask & mask) == 0) { |
1304afb22 rcu: Convert to r... |
1163 |
raw_spin_unlock_irqrestore(&rnp->lock, flags); |
64db4cfff "Tree RCU": scala... |
1164 1165 1166 1167 1168 1169 1170 |
} else { rdp->qs_pending = 0; /* * This GP can't end until this CPU checks in, so all of our * callbacks can be processed during the next GP. */ |
64db4cfff "Tree RCU": scala... |
1171 |
rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; |
d3f6bad39 rcu: Rename "quie... |
1172 |
rcu_report_qs_rnp(mask, rsp, rnp, flags); /* releases rnp->lock */ |
64db4cfff "Tree RCU": scala... |
1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 |
} } /* * Check to see if there is a new grace period of which this CPU * is not yet aware, and if so, set up local rcu_data state for it. * Otherwise, see if this CPU has just passed through its first * quiescent state for this grace period, and record that fact if so. */ static void rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) { /* If there is now a new grace period, record and return. */ if (check_for_new_grace_period(rsp, rdp)) return; /* * Does this CPU still need to do its part for current grace period? * If no, return and let the other CPUs do their part as well. */ if (!rdp->qs_pending) return; /* * Was there a quiescent state since the beginning of the grace * period? If no, then exit and wait for the next call. */ |
e4cc1f22b rcu: Simplify qui... |
1200 |
if (!rdp->passed_quiesce) |
64db4cfff "Tree RCU": scala... |
1201 |
return; |
d3f6bad39 rcu: Rename "quie... |
1202 1203 1204 1205 |
/* * Tell RCU we are done (but rcu_report_qs_rdp() will be the * judge of that). */ |
e4cc1f22b rcu: Simplify qui... |
1206 |
rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesce_gpnum); |
64db4cfff "Tree RCU": scala... |
1207 1208 1209 1210 1211 |
} #ifdef CONFIG_HOTPLUG_CPU /* |
29494be71 rcu,cleanup: simp... |
1212 1213 1214 |
* Move a dying CPU's RCU callbacks to an online CPU's callback list. * Synchronization is not required because this function executes * in stop_machine() context. |
e74f4c456 rcu: Make hot-unp... |
1215 |
*/ |
29494be71 rcu,cleanup: simp... |
1216 |
static void rcu_send_cbs_to_online(struct rcu_state *rsp) |
e74f4c456 rcu: Make hot-unp... |
1217 1218 |
{ int i; |
29494be71 rcu,cleanup: simp... |
1219 1220 |
/* The dying CPU has already been cleared from cpu_online_mask. */ int receive_cpu = cpumask_any(cpu_online_mask); |
394f99a90 rcu: simplify the... |
1221 |
struct rcu_data *rdp = this_cpu_ptr(rsp->rda); |
29494be71 rcu,cleanup: simp... |
1222 |
struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu); |
e74f4c456 rcu: Make hot-unp... |
1223 1224 1225 |
if (rdp->nxtlist == NULL) return; /* irqs disabled, so comparison is stable. */ |
29494be71 rcu,cleanup: simp... |
1226 1227 1228 1229 1230 1231 |
*receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist; receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; receive_rdp->qlen += rdp->qlen; receive_rdp->n_cbs_adopted += rdp->qlen; rdp->n_cbs_orphaned += rdp->qlen; |
e74f4c456 rcu: Make hot-unp... |
1232 1233 1234 |
rdp->nxtlist = NULL; for (i = 0; i < RCU_NEXT_SIZE; i++) rdp->nxttail[i] = &rdp->nxtlist; |
e74f4c456 rcu: Make hot-unp... |
1235 |
rdp->qlen = 0; |
e74f4c456 rcu: Make hot-unp... |
1236 1237 1238 |
} /* |
64db4cfff "Tree RCU": scala... |
1239 1240 |
* Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy * and move all callbacks from the outgoing CPU to the current one. |
a26ac2455 rcu: move TREE_RC... |
1241 1242 |
* There can only be one CPU hotplug operation at a time, so no other * CPU can be attempting to update rcu_cpu_kthread_task. |
64db4cfff "Tree RCU": scala... |
1243 1244 1245 |
*/ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) { |
64db4cfff "Tree RCU": scala... |
1246 |
unsigned long flags; |
64db4cfff "Tree RCU": scala... |
1247 |
unsigned long mask; |
d9a3da069 rcu: Add expedite... |
1248 |
int need_report = 0; |
394f99a90 rcu: simplify the... |
1249 |
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
64db4cfff "Tree RCU": scala... |
1250 |
struct rcu_node *rnp; |
a26ac2455 rcu: move TREE_RC... |
1251 |
|
f8b7fc6b5 rcu: Move RCU_BOO... |
1252 |
rcu_stop_cpu_kthread(cpu); |
64db4cfff "Tree RCU": scala... |
1253 1254 |
/* Exclude any attempts to start a new grace period. */ |
1304afb22 rcu: Convert to r... |
1255 |
raw_spin_lock_irqsave(&rsp->onofflock, flags); |
64db4cfff "Tree RCU": scala... |
1256 1257 |
/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ |
28ecd5802 rcu: Add WARN_ON_... |
1258 |
rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */ |
64db4cfff "Tree RCU": scala... |
1259 1260 |
mask = rdp->grpmask; /* rnp->grplo is constant. */ do { |
1304afb22 rcu: Convert to r... |
1261 |
raw_spin_lock(&rnp->lock); /* irqs already disabled. */ |
64db4cfff "Tree RCU": scala... |
1262 1263 |
rnp->qsmaskinit &= ~mask; if (rnp->qsmaskinit != 0) { |
b668c9cf3 rcu: Fix grace-pe... |
1264 |
if (rnp != rdp->mynode) |
1304afb22 rcu: Convert to r... |
1265 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
d4c08f2ac rcu: Add grace-pe... |
1266 1267 1268 1269 1270 |
else trace_rcu_grace_period(rsp->name, rnp->gpnum + 1 - !!(rnp->qsmask & mask), "cpuofl"); |
64db4cfff "Tree RCU": scala... |
1271 1272 |
break; } |
d4c08f2ac rcu: Add grace-pe... |
1273 1274 1275 1276 1277 |
if (rnp == rdp->mynode) { trace_rcu_grace_period(rsp->name, rnp->gpnum + 1 - !!(rnp->qsmask & mask), "cpuofl"); |
d9a3da069 rcu: Add expedite... |
1278 |
need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp); |
d4c08f2ac rcu: Add grace-pe... |
1279 |
} else |
1304afb22 rcu: Convert to r... |
1280 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
64db4cfff "Tree RCU": scala... |
1281 |
mask = rnp->grpmask; |
64db4cfff "Tree RCU": scala... |
1282 1283 |
rnp = rnp->parent; } while (rnp != NULL); |
64db4cfff "Tree RCU": scala... |
1284 |
|
b668c9cf3 rcu: Fix grace-pe... |
1285 1286 1287 |
/* * We still hold the leaf rcu_node structure lock here, and * irqs are still disabled. The reason for this subterfuge is |
d3f6bad39 rcu: Rename "quie... |
1288 1289 |
* that invoking rcu_report_unblock_qs_rnp() with ->onofflock * held leads to deadlock. |
b668c9cf3 rcu: Fix grace-pe... |
1290 |
*/ |
1304afb22 rcu: Convert to r... |
1291 |
raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ |
b668c9cf3 rcu: Fix grace-pe... |
1292 |
rnp = rdp->mynode; |
d9a3da069 rcu: Add expedite... |
1293 |
if (need_report & RCU_OFL_TASKS_NORM_GP) |
d3f6bad39 rcu: Rename "quie... |
1294 |
rcu_report_unblock_qs_rnp(rnp, flags); |
b668c9cf3 rcu: Fix grace-pe... |
1295 |
else |
1304afb22 rcu: Convert to r... |
1296 |
raw_spin_unlock_irqrestore(&rnp->lock, flags); |
d9a3da069 rcu: Add expedite... |
1297 |
if (need_report & RCU_OFL_TASKS_EXP_GP) |
b40d293eb rcu: Omit self-aw... |
1298 |
rcu_report_exp_rnp(rsp, rnp, true); |
1217ed1ba rcu: permit rcu_r... |
1299 |
rcu_node_kthread_setaffinity(rnp, -1); |
64db4cfff "Tree RCU": scala... |
1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 |
} /* * Remove the specified CPU from the RCU hierarchy and move any pending * callbacks that it might have to the current CPU. This code assumes * that at least one CPU in the system will remain running at all times. * Any attempt to offline -all- CPUs is likely to strand RCU callbacks. */ static void rcu_offline_cpu(int cpu) { |
d6714c22b rcu: Renamings to... |
1310 |
__rcu_offline_cpu(cpu, &rcu_sched_state); |
64db4cfff "Tree RCU": scala... |
1311 |
__rcu_offline_cpu(cpu, &rcu_bh_state); |
33f76148c rcu: Add CPU-offl... |
1312 |
rcu_preempt_offline_cpu(cpu); |
64db4cfff "Tree RCU": scala... |
1313 1314 1315 |
} #else /* #ifdef CONFIG_HOTPLUG_CPU */ |
29494be71 rcu,cleanup: simp... |
1316 |
static void rcu_send_cbs_to_online(struct rcu_state *rsp) |
e74f4c456 rcu: Make hot-unp... |
1317 1318 |
{ } |
64db4cfff "Tree RCU": scala... |
1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 |
static void rcu_offline_cpu(int cpu) { } #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ /* * Invoke any RCU callbacks that have made it to the end of their grace * period. Throttle as specified by rdp->blimit. */ |
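/*
 * [Illustrative sketch, not part of rcutree.c.]  Both the hot-unplug
 * migration above and rcu_do_batch() below exploit the same data
 * structure: ->nxtlist is a singly linked list of callbacks, and the
 * ->nxttail[] pointers mark segment boundaries, so whole sublists can
 * be spliced in O(1).  A minimal user-space model of the tail-pointer
 * idiom, with hypothetical names:
 */
#include <stdio.h>

struct cb {
	struct cb *next;
	int id;
};

struct cblist {
	struct cb *head;
	struct cb **tail;	/* -> final ->next pointer (&head if empty) */
};

static void cblist_init(struct cblist *l)
{
	l->head = NULL;
	l->tail = &l->head;
}

static void cblist_enqueue(struct cblist *l, struct cb *c)
{
	c->next = NULL;
	*l->tail = c;		/* link at the end... */
	l->tail = &c->next;	/* ...and advance the tail pointer. */
}

/* Splice all of @src onto @dst in O(1), as rcu_send_cbs_to_online()
 * does when handing a dying CPU's callbacks to an online CPU. */
static void cblist_splice(struct cblist *dst, struct cblist *src)
{
	if (src->head == NULL)
		return;
	*dst->tail = src->head;
	dst->tail = src->tail;
	cblist_init(src);
}

int main(void)
{
	struct cblist a, b;
	struct cb cbs[4];
	struct cb *c;
	int i;

	cblist_init(&a);
	cblist_init(&b);
	for (i = 0; i < 4; i++) {
		cbs[i].id = i;
		cblist_enqueue(i < 2 ? &a : &b, &cbs[i]);
	}
	cblist_splice(&a, &b);
	for (c = a.head; c != NULL; c = c->next)
		printf("%d ", c->id);	/* prints: 0 1 2 3 */
	printf("\n");
	return 0;
}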
37c72e56f rcu: Prevent RCU ... |
1329 |
static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) |
64db4cfff "Tree RCU": scala... |
1330 1331 1332 |
{ unsigned long flags; struct rcu_head *next, *list, **tail; |
29c00b4a1 rcu: Add event-tr... |
1333 |
int bl, count; |
64db4cfff "Tree RCU": scala... |
1334 1335 |
/* If no callbacks are ready, just return. */ |
29c00b4a1 rcu: Add event-tr... |
1336 |
if (!cpu_has_callbacks_ready_to_invoke(rdp)) { |
72fe701b7 rcu: Add RCU type... |
1337 |
trace_rcu_batch_start(rsp->name, 0, 0); |
4968c300e rcu: Augment rcu_... |
1338 1339 1340 |
trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist), need_resched(), is_idle_task(current), rcu_is_callbacks_kthread()); |
64db4cfff "Tree RCU": scala... |
1341 |
return; |
29c00b4a1 rcu: Add event-tr... |
1342 |
} |
64db4cfff "Tree RCU": scala... |
1343 1344 1345 1346 1347 1348 |
/* * Extract the list of ready callbacks, disabling to prevent * races with call_rcu() from interrupt handlers. */ local_irq_save(flags); |
29c00b4a1 rcu: Add event-tr... |
1349 |
bl = rdp->blimit; |
72fe701b7 rcu: Add RCU type... |
1350 |
trace_rcu_batch_start(rsp->name, rdp->qlen, bl); |
64db4cfff "Tree RCU": scala... |
1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 |
list = rdp->nxtlist; rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL]; *rdp->nxttail[RCU_DONE_TAIL] = NULL; tail = rdp->nxttail[RCU_DONE_TAIL]; for (count = RCU_NEXT_SIZE - 1; count >= 0; count--) if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL]) rdp->nxttail[count] = &rdp->nxtlist; local_irq_restore(flags); /* Invoke callbacks. */ count = 0; while (list) { next = list->next; prefetch(next); |
551d55a94 tree/tiny rcu: Ad... |
1365 |
debug_rcu_head_unqueue(list); |
d4c08f2ac rcu: Add grace-pe... |
1366 |
__rcu_reclaim(rsp->name, list); |
64db4cfff "Tree RCU": scala... |
1367 |
list = next; |
dff1672d9 rcu: Keep invokin... |
1368 1369 1370 1371 |
/* Stop only if limit reached and CPU has something to do. */ if (++count >= bl && (need_resched() || (!is_idle_task(current) && !rcu_is_callbacks_kthread()))) |
64db4cfff "Tree RCU": scala... |
1372 1373 1374 1375 |
break; } local_irq_save(flags); |
4968c300e rcu: Augment rcu_... |
1376 1377 1378 |
trace_rcu_batch_end(rsp->name, count, !!list, need_resched(), is_idle_task(current), rcu_is_callbacks_kthread()); |
64db4cfff "Tree RCU": scala... |
1379 1380 1381 |
/* Update count, and requeue any remaining callbacks. */ rdp->qlen -= count; |
269dcc1c2 rcu: Add tracing ... |
1382 |
rdp->n_cbs_invoked += count; |
64db4cfff "Tree RCU": scala... |
1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 |
if (list != NULL) { *tail = rdp->nxtlist; rdp->nxtlist = list; for (count = 0; count < RCU_NEXT_SIZE; count++) if (&rdp->nxtlist == rdp->nxttail[count]) rdp->nxttail[count] = tail; else break; } /* Reinstate batch limit if we have worked down the excess. */ if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) rdp->blimit = blimit; |
37c72e56f rcu: Prevent RCU ... |
1396 1397 1398 1399 1400 1401 |
/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) { rdp->qlen_last_fqs_check = 0; rdp->n_force_qs_snap = rsp->n_force_qs; } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) rdp->qlen_last_fqs_check = rdp->qlen; |
64db4cfff "Tree RCU": scala... |
1402 |
local_irq_restore(flags); |
e0f23060a rcu: Update comme... |
1403 |
/* Re-invoke RCU core processing if there are callbacks remaining. */ |
64db4cfff "Tree RCU": scala... |
1404 |
if (cpu_has_callbacks_ready_to_invoke(rdp)) |
a46e0899e rcu: use softirq ... |
1405 |
invoke_rcu_core(); |
64db4cfff "Tree RCU": scala... |
1406 1407 1408 1409 1410 |
} /* * Check to see if this CPU is in a non-context-switch quiescent state * (user mode or idle loop for rcu, non-softirq execution for rcu_bh). |
e0f23060a rcu: Update comme... |
1411 |
* Also schedule RCU core processing. |
64db4cfff "Tree RCU": scala... |
1412 |
* |
9b2e4f188 rcu: Track idlene... |
1413 |
* This function must be called from hardirq context. It is normally |
64db4cfff "Tree RCU": scala... |
1414 1415 1416 1417 1418 |
* invoked from the scheduling-clock interrupt. If rcu_pending returns * false, there is no point in invoking rcu_check_callbacks(). */ void rcu_check_callbacks(int cpu, int user) { |
300df91ca rcu: Event-trace ... |
1419 |
trace_rcu_utilization("Start scheduler-tick"); |
9b2e4f188 rcu: Track idlene... |
1420 |
if (user || rcu_is_cpu_rrupt_from_idle()) { |
64db4cfff "Tree RCU": scala... |
1421 1422 1423 1424 1425 |
/* * Get here if this CPU took its interrupt from user * mode or from the idle loop, and if this is not a * nested interrupt. In this case, the CPU is in |
d6714c22b rcu: Renamings to... |
1426 |
* a quiescent state, so note it. |
64db4cfff "Tree RCU": scala... |
1427 1428 |
* * No memory barrier is required here because both |
d6714c22b rcu: Renamings to... |
1429 1430 1431 |
* rcu_sched_qs() and rcu_bh_qs() reference only CPU-local * variables that other CPUs neither access nor modify, * at least not while the corresponding CPU is online. |
64db4cfff "Tree RCU": scala... |
1432 |
*/ |
d6714c22b rcu: Renamings to... |
1433 1434 |
rcu_sched_qs(cpu); rcu_bh_qs(cpu); |
64db4cfff "Tree RCU": scala... |
1435 1436 1437 1438 1439 1440 1441 |
} else if (!in_softirq()) { /* * Get here if this CPU did not take its interrupt from * softirq, in other words, if it is not interrupting * a rcu_bh read-side critical section. This is an _bh |
d6714c22b rcu: Renamings to... |
1442 |
* quiescent state, so note it. |
64db4cfff "Tree RCU": scala... |
1443 |
*/ |
d6714c22b rcu: Renamings to... |
1444 |
rcu_bh_qs(cpu); |
64db4cfff "Tree RCU": scala... |
1445 |
} |
f41d911f8 rcu: Merge preemp... |
1446 |
rcu_preempt_check_callbacks(cpu); |
d21670aca rcu: reduce the n... |
1447 |
if (rcu_pending(cpu)) |
a46e0899e rcu: use softirq ... |
1448 |
invoke_rcu_core(); |
300df91ca rcu: Event-trace ... |
1449 |
trace_rcu_utilization("End scheduler-tick"); |
64db4cfff "Tree RCU": scala... |
1450 1451 1452 1453 1454 1455 1456 |
} #ifdef CONFIG_SMP /* * Scan the leaf rcu_node structures, processing dyntick state for any that * have not yet encountered a quiescent state, using the function specified. |
27f4d2805 rcu: priority boo... |
1457 1458 |
* Also initiate boosting for any threads blocked on the root rcu_node. * |
ee47eb9f4 rcu: Remove leg o... |
1459 |
* The caller must have suppressed start of new grace periods. |
64db4cfff "Tree RCU": scala... |
1460 |
*/ |
45f014c52 rcu: Remove redun... |
1461 |
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) |
64db4cfff "Tree RCU": scala... |
1462 1463 1464 1465 1466 |
{ unsigned long bit; int cpu; unsigned long flags; unsigned long mask; |
a0b6c9a78 rcu: Clean up cod... |
1467 |
struct rcu_node *rnp; |
64db4cfff "Tree RCU": scala... |
1468 |
|
a0b6c9a78 rcu: Clean up cod... |
1469 |
rcu_for_each_leaf_node(rsp, rnp) { |
64db4cfff "Tree RCU": scala... |
1470 |
mask = 0; |
1304afb22 rcu: Convert to r... |
1471 |
raw_spin_lock_irqsave(&rnp->lock, flags); |
ee47eb9f4 rcu: Remove leg o... |
1472 |
if (!rcu_gp_in_progress(rsp)) { |
1304afb22 rcu: Convert to r... |
1473 |
raw_spin_unlock_irqrestore(&rnp->lock, flags); |
0f10dc826 rcu: Eliminate rc... |
1474 |
return; |
64db4cfff "Tree RCU": scala... |
1475 |
} |
a0b6c9a78 rcu: Clean up cod... |
1476 |
if (rnp->qsmask == 0) { |
1217ed1ba rcu: permit rcu_r... |
1477 |
rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ |
64db4cfff "Tree RCU": scala... |
1478 1479 |
continue; } |
a0b6c9a78 rcu: Clean up cod... |
1480 |
cpu = rnp->grplo; |
64db4cfff "Tree RCU": scala... |
1481 |
bit = 1; |
a0b6c9a78 rcu: Clean up cod... |
1482 |
for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { |
394f99a90 rcu: simplify the... |
1483 1484 |
if ((rnp->qsmask & bit) != 0 && f(per_cpu_ptr(rsp->rda, cpu))) |
64db4cfff "Tree RCU": scala... |
1485 1486 |
mask |= bit; } |
45f014c52 rcu: Remove redun... |
1487 |
if (mask != 0) { |
64db4cfff "Tree RCU": scala... |
1488 |
|
d3f6bad39 rcu: Rename "quie... |
1489 1490 |
/* rcu_report_qs_rnp() releases rnp->lock. */ rcu_report_qs_rnp(mask, rsp, rnp, flags); |
64db4cfff "Tree RCU": scala... |
1491 1492 |
continue; } |
1304afb22 rcu: Convert to r... |
1493 |
raw_spin_unlock_irqrestore(&rnp->lock, flags); |
64db4cfff "Tree RCU": scala... |
1494 |
} |
27f4d2805 rcu: priority boo... |
1495 |
rnp = rcu_get_root(rsp); |
1217ed1ba rcu: permit rcu_r... |
1496 1497 1498 1499 |
if (rnp->qsmask == 0) { raw_spin_lock_irqsave(&rnp->lock, flags); rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ } |
64db4cfff "Tree RCU": scala... |
1500 1501 1502 1503 1504 1505 1506 1507 1508 |
} /* * Force quiescent states on reluctant CPUs, and also detect which * CPUs are in dyntick-idle mode. */ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) { unsigned long flags; |
64db4cfff "Tree RCU": scala... |
1509 |
struct rcu_node *rnp = rcu_get_root(rsp); |
64db4cfff "Tree RCU": scala... |
1510 |
|
300df91ca rcu: Event-trace ... |
1511 1512 1513 |
trace_rcu_utilization("Start fqs"); if (!rcu_gp_in_progress(rsp)) { trace_rcu_utilization("End fqs"); |
64db4cfff "Tree RCU": scala... |
1514 |
return; /* No grace period in progress, nothing to force. */ |
300df91ca rcu: Event-trace ... |
1515 |
} |
1304afb22 rcu: Convert to r... |
1516 |
if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) { |
64db4cfff "Tree RCU": scala... |
1517 |
rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ |
300df91ca rcu: Event-trace ... |
1518 |
trace_rcu_utilization("End fqs"); |
64db4cfff "Tree RCU": scala... |
1519 1520 |
return; /* Someone else is already on the job. */ } |
20133cfce rcu: Stop overflo... |
1521 |
if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies)) |
f96e9232e rcu: Adjust force... |
1522 |
goto unlock_fqs_ret; /* no emergency and done recently. */ |
64db4cfff "Tree RCU": scala... |
1523 |
rsp->n_force_qs++; |
1304afb22 rcu: Convert to r... |
1524 |
raw_spin_lock(&rnp->lock); /* irqs already disabled */ |
64db4cfff "Tree RCU": scala... |
1525 |
rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
560d4bc0d rcu: Further clea... |
1526 |
if (!rcu_gp_in_progress(rsp)) { |
64db4cfff "Tree RCU": scala... |
1527 |
rsp->n_force_qs_ngp++; |
1304afb22 rcu: Convert to r... |
1528 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ |
f96e9232e rcu: Adjust force... |
1529 |
goto unlock_fqs_ret; /* no GP in progress, time updated. */ |
64db4cfff "Tree RCU": scala... |
1530 |
} |
07079d535 rcu: Prohibit sta... |
1531 |
rsp->fqs_active = 1; |
af446b702 rcu: ->signaled b... |
1532 |
switch (rsp->fqs_state) { |
83f5b01ff rcu: Fix long-gra... |
1533 |
case RCU_GP_IDLE: |
64db4cfff "Tree RCU": scala... |
1534 |
case RCU_GP_INIT: |
83f5b01ff rcu: Fix long-gra... |
1535 |
break; /* grace period idle or initializing, ignore. */ |
64db4cfff "Tree RCU": scala... |
1536 1537 |
case RCU_SAVE_DYNTICK: |
64db4cfff "Tree RCU": scala... |
1538 1539 |
if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK) break; /* So gcc recognizes the dead code. */ |
f261414f0 rcu: make dead co... |
1540 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ |
64db4cfff "Tree RCU": scala... |
1541 |
/* Record dyntick-idle state. */ |
45f014c52 rcu: Remove redun... |
1542 |
force_qs_rnp(rsp, dyntick_save_progress_counter); |
1304afb22 rcu: Convert to r... |
1543 |
raw_spin_lock(&rnp->lock); /* irqs already disabled */ |
ee47eb9f4 rcu: Remove leg o... |
1544 |
if (rcu_gp_in_progress(rsp)) |
af446b702 rcu: ->signaled b... |
1545 |
rsp->fqs_state = RCU_FORCE_QS; |
ee47eb9f4 rcu: Remove leg o... |
1546 |
break; |
64db4cfff "Tree RCU": scala... |
1547 1548 1549 1550 |
case RCU_FORCE_QS: /* Check dyntick-idle state, send IPI to laggarts. */ |
1304afb22 rcu: Convert to r... |
1551 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ |
45f014c52 rcu: Remove redun... |
1552 |
force_qs_rnp(rsp, rcu_implicit_dynticks_qs); |
64db4cfff "Tree RCU": scala... |
1553 1554 |
/* Leave state in case more forcing is required. */ |
1304afb22 rcu: Convert to r... |
1555 |
raw_spin_lock(&rnp->lock); /* irqs already disabled */ |
f96e9232e rcu: Adjust force... |
1556 |
break; |
64db4cfff "Tree RCU": scala... |
1557 |
} |
07079d535 rcu: Prohibit sta... |
1558 |
rsp->fqs_active = 0; |
46a1e34ed rcu: Make force_q... |
1559 |
if (rsp->fqs_need_gp) { |
1304afb22 rcu: Convert to r... |
1560 |
raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */ |
46a1e34ed rcu: Make force_q... |
1561 1562 |
rsp->fqs_need_gp = 0; rcu_start_gp(rsp, flags); /* releases rnp->lock */ |
300df91ca rcu: Event-trace ... |
1563 |
trace_rcu_utilization("End fqs"); |
46a1e34ed rcu: Make force_q... |
1564 1565 |
return; } |
1304afb22 rcu: Convert to r... |
1566 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ |
f96e9232e rcu: Adjust force... |
1567 |
unlock_fqs_ret: |
1304afb22 rcu: Convert to r... |
1568 |
raw_spin_unlock_irqrestore(&rsp->fqslock, flags); |
300df91ca rcu: Event-trace ... |
1569 |
trace_rcu_utilization("End fqs"); |
64db4cfff "Tree RCU": scala... |
1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 |
} #else /* #ifdef CONFIG_SMP */ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) { set_need_resched(); } #endif /* #else #ifdef CONFIG_SMP */ /* |
e0f23060a rcu: Update comme... |
1582 1583 1584 |
* This does the RCU core processing work for the specified rcu_state * and rcu_data structures. This may be called only from the CPU to * whom the rdp belongs. |
64db4cfff "Tree RCU": scala... |
1585 1586 1587 1588 1589 |
*/ static void __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) { unsigned long flags; |
2e5975580 rcu: Simplify RCU... |
1590 |
WARN_ON_ONCE(rdp->beenonline == 0); |
64db4cfff "Tree RCU": scala... |
1591 1592 1593 1594 |
/* * If an RCU GP has gone long enough, go check for dyntick * idle CPUs and, if needed, send resched IPIs. */ |
20133cfce rcu: Stop overflo... |
1595 |
if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) |
64db4cfff "Tree RCU": scala... |
1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 |
force_quiescent_state(rsp, 1); /* * Advance callbacks in response to end of earlier grace * period that some other CPU ended. */ rcu_process_gp_end(rsp, rdp); /* Update RCU state based on any recent quiescent states. */ rcu_check_quiescent_state(rsp, rdp); /* Does this CPU require a not-yet-started grace period? */ if (cpu_needs_another_gp(rsp, rdp)) { |
1304afb22 rcu: Convert to r... |
1609 |
raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags); |
64db4cfff "Tree RCU": scala... |
1610 1611 1612 1613 |
rcu_start_gp(rsp, flags); /* releases above lock */ } /* If there are callbacks ready, invoke them. */ |
09223371d rcu: Use softirq ... |
1614 |
if (cpu_has_callbacks_ready_to_invoke(rdp)) |
a46e0899e rcu: use softirq ... |
1615 |
invoke_rcu_callbacks(rsp, rdp); |
09223371d rcu: Use softirq ... |
1616 |
} |
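/*
 * [Illustrative sketch, not part of rcutree.c.]  The check above
 * compares ->jiffies_force_qs against jiffies with ULONG_CMP_LT()
 * rather than "<", so the test keeps working when the jiffies counter
 * wraps.  A minimal model of the wrap-safe comparison (the macro here
 * mimics, but is not, the kernel's ULONG_CMP_LT()):
 */
#include <limits.h>
#include <stdio.h>

/* "a is before b", computed modulo 2^BITS; correct as long as the two
 * values are within half the counter range of each other. */
#define CMP_LT(a, b) \
	(ULONG_MAX / 2 < (unsigned long)(a) - (unsigned long)(b))

int main(void)
{
	unsigned long deadline = ULONG_MAX - 1;	/* just before wraparound */
	unsigned long now = 2;			/* just after wraparound */

	printf("naive <  : %d\n", deadline < now);	  /* 0 -- wrong */
	printf("wrap-safe: %d\n", CMP_LT(deadline, now)); /* 1 -- right */
	return 0;
}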
64db4cfff "Tree RCU": scala... |
1617 |
/* |
e0f23060a rcu: Update comme... |
1618 |
* Do RCU core processing for the current CPU. |
64db4cfff "Tree RCU": scala... |
1619 |
*/ |
09223371d rcu: Use softirq ... |
1620 |
static void rcu_process_callbacks(struct softirq_action *unused) |
64db4cfff "Tree RCU": scala... |
1621 |
{ |
300df91ca rcu: Event-trace ... |
1622 |
trace_rcu_utilization("Start RCU core"); |
d6714c22b rcu: Renamings to... |
1623 1624 |
__rcu_process_callbacks(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); |
64db4cfff "Tree RCU": scala... |
1625 |
__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); |
f41d911f8 rcu: Merge preemp... |
1626 |
rcu_preempt_process_callbacks(); |
300df91ca rcu: Event-trace ... |
1627 |
trace_rcu_utilization("End RCU core"); |
64db4cfff "Tree RCU": scala... |
1628 |
} |
a26ac2455 rcu: move TREE_RC... |
1629 |
/* |
e0f23060a rcu: Update comme... |
1630 1631 1632 1633 1634 |
* Schedule RCU callback invocation. If the specified type of RCU * does not support RCU priority boosting, just do a direct call, * otherwise wake up the per-CPU kernel kthread. Note that because we * are running on the current CPU with interrupts disabled, the * rcu_cpu_kthread_task cannot disappear out from under us. |
a26ac2455 rcu: move TREE_RC... |
1635 |
*/ |
a46e0899e rcu: use softirq ... |
1636 |
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) |
a26ac2455 rcu: move TREE_RC... |
1637 |
{ |
b0d304172 rcu: Prevent RCU ... |
1638 1639 |
if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active))) return; |
a46e0899e rcu: use softirq ... |
1640 1641 |
if (likely(!rsp->boost)) { rcu_do_batch(rsp, rdp); |
a26ac2455 rcu: move TREE_RC... |
1642 1643 |
return; } |
a46e0899e rcu: use softirq ... |
1644 |
invoke_rcu_callbacks_kthread(); |
a26ac2455 rcu: move TREE_RC... |
1645 |
} |
a46e0899e rcu: use softirq ... |
1646 |
static void invoke_rcu_core(void) |
09223371d rcu: Use softirq ... |
1647 1648 1649 |
{ raise_softirq(RCU_SOFTIRQ); } |
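/*
 * [Illustrative sketch, not part of rcutree.c.]  invoke_rcu_core()
 * does no RCU work itself; it only marks RCU_SOFTIRQ pending so that
 * rcu_process_callbacks() runs later from softirq context.  A toy
 * single-threaded model of that raise-now/run-later split:
 */
#include <stdbool.h>
#include <stdio.h>

static volatile bool rcu_core_pending;

static void toy_rcu_core(void)		/* stands in for rcu_process_callbacks() */
{
	printf("running deferred RCU core work\n");
}

static void toy_invoke_rcu_core(void)	/* stands in for invoke_rcu_core() */
{
	rcu_core_pending = true;	/* cheap enough for interrupt time */
}

static void toy_run_pending(void)	/* stands in for the softirq loop */
{
	if (rcu_core_pending) {
		rcu_core_pending = false;
		toy_rcu_core();
	}
}

int main(void)
{
	toy_invoke_rcu_core();	/* "interrupt": just mark work pending */
	toy_run_pending();	/* later, in a safe context: do the work */
	return 0;
}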
64db4cfff "Tree RCU": scala... |
1650 1651 1652 1653 1654 1655 |
static void __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), struct rcu_state *rsp) { unsigned long flags; struct rcu_data *rdp; |
551d55a94 tree/tiny rcu: Ad... |
1656 |
debug_rcu_head_queue(head); |
64db4cfff "Tree RCU": scala... |
1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 |
head->func = func; head->next = NULL; smp_mb(); /* Ensure RCU update seen before callback registry. */ /* * Opportunistically note grace-period endings and beginnings. * Note that we might see a beginning right after we see an * end, but never vice versa, since this CPU has to pass through * a quiescent state betweentimes. */ local_irq_save(flags); |
394f99a90 rcu: simplify the... |
1669 |
rdp = this_cpu_ptr(rsp->rda); |
64db4cfff "Tree RCU": scala... |
1670 1671 1672 1673 |
/* Add the callback to our list. */ *rdp->nxttail[RCU_NEXT_TAIL] = head; rdp->nxttail[RCU_NEXT_TAIL] = &head->next; |
2655d57ef rcu: prevent call... |
1674 |
rdp->qlen++; |
d4c08f2ac rcu: Add grace-pe... |
1675 1676 1677 1678 1679 |
if (__is_kfree_rcu_offset((unsigned long)func)) trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func, rdp->qlen); else trace_rcu_callback(rsp->name, head, rdp->qlen); |
2655d57ef rcu: prevent call... |
1680 1681 1682 1683 1684 |
/* If interrupts were disabled, don't dive into RCU core. */ if (irqs_disabled_flags(flags)) { local_irq_restore(flags); return; } |
64db4cfff "Tree RCU": scala... |
1685 |
|
37c72e56f rcu: Prevent RCU ... |
1686 1687 1688 1689 1690 1691 1692 |
/* * Force the grace period if too many callbacks or too long waiting. * Enforce hysteresis, and don't invoke force_quiescent_state() * if some other CPU has recently done so. Also, don't bother * invoking force_quiescent_state() if the newly enqueued callback * is the only one waiting for a grace period to complete. */ |
2655d57ef rcu: prevent call... |
1693 |
if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { |
b52573d27 rcu: reduce __cal... |
1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 |
/* Are we ignoring a completed grace period? */ rcu_process_gp_end(rsp, rdp); check_for_new_grace_period(rsp, rdp); /* Start a new grace period if one not already started. */ if (!rcu_gp_in_progress(rsp)) { unsigned long nestflag; struct rcu_node *rnp_root = rcu_get_root(rsp); raw_spin_lock_irqsave(&rnp_root->lock, nestflag); rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock */ } else { /* Give the grace period a kick. */ rdp->blimit = LONG_MAX; if (rsp->n_force_qs == rdp->n_force_qs_snap && *rdp->nxttail[RCU_DONE_TAIL] != head) force_quiescent_state(rsp, 0); rdp->n_force_qs_snap = rsp->n_force_qs; rdp->qlen_last_fqs_check = rdp->qlen; } |
20133cfce rcu: Stop overflo... |
1715 |
} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) |
64db4cfff "Tree RCU": scala... |
1716 1717 1718 1719 1720 |
force_quiescent_state(rsp, 1); local_irq_restore(flags); } /* |
d6714c22b rcu: Renamings to... |
1721 |
* Queue an RCU-sched callback for invocation after a grace period. |
64db4cfff "Tree RCU": scala... |
1722 |
*/ |
d6714c22b rcu: Renamings to... |
1723 |
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) |
64db4cfff "Tree RCU": scala... |
1724 |
{ |
d6714c22b rcu: Renamings to... |
1725 |
__call_rcu(head, func, &rcu_sched_state); |
64db4cfff "Tree RCU": scala... |
1726 |
} |
d6714c22b rcu: Renamings to... |
1727 |
EXPORT_SYMBOL_GPL(call_rcu_sched); |
64db4cfff "Tree RCU": scala... |
1728 1729 1730 1731 1732 1733 1734 1735 1736 |
/* * Queue an RCU callback for invocation after a quicker grace period. */ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) { __call_rcu(head, func, &rcu_bh_state); } EXPORT_SYMBOL_GPL(call_rcu_bh); |
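/*
 * [Illustrative usage sketch, not part of rcutree.c; kernel context is
 * assumed, and "struct foo"/foo_release() are made-up names.]  A
 * typical caller of call_rcu_sched() embeds an rcu_head in its own
 * structure and lets the callback recover the enclosing object with
 * container_of():
 */
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_release(struct rcu_head *rcu)
{
	struct foo *fp = container_of(rcu, struct foo, rcu);

	kfree(fp);	/* runs only after a full rcu-sched grace period */
}

static void foo_retire(struct foo *fp)
{
	/* fp must already be unreachable by new readers here. */
	call_rcu_sched(&fp->rcu, foo_release);
}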
6ebb237be rcu: Re-arrange c... |
1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 |
/** * synchronize_sched - wait until an rcu-sched grace period has elapsed. * * Control will return to the caller some time after a full rcu-sched * grace period has elapsed, in other words after all currently executing * rcu-sched read-side critical sections have completed. These read-side * critical sections are delimited by rcu_read_lock_sched() and * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), * local_irq_disable(), and so on may be used in place of * rcu_read_lock_sched(). * * This means that all preempt_disable code sequences, including NMI and * hardware-interrupt handlers, in progress on entry will have completed * before this primitive returns. However, this does not guarantee that * softirq handlers will have completed, since in some kernels, these * handlers can run in process context, and can block. * * This primitive provides the guarantees made by the (now removed) * synchronize_kernel() API. In contrast, synchronize_rcu() only * guarantees that rcu_read_lock() sections will have completed. * In "classic RCU", these two guarantees happen to be one and * the same, but can differ in realtime RCU implementations. */ void synchronize_sched(void) { |
6ebb237be rcu: Re-arrange c... |
1762 1763 |
if (rcu_blocking_is_gp()) return; |
2c42818e9 rcu: Abstract com... |
1764 |
wait_rcu_gp(call_rcu_sched); |
6ebb237be rcu: Re-arrange c... |
1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 |
} EXPORT_SYMBOL_GPL(synchronize_sched); /** * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. * * Control will return to the caller some time after a full rcu_bh grace * period has elapsed, in other words after all currently executing rcu_bh * read-side critical sections have completed. RCU read-side critical * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), * and may be nested. */ void synchronize_rcu_bh(void) { |
6ebb237be rcu: Re-arrange c... |
1779 1780 |
if (rcu_blocking_is_gp()) return; |
2c42818e9 rcu: Abstract com... |
1781 |
wait_rcu_gp(call_rcu_bh); |
6ebb237be rcu: Re-arrange c... |
1782 1783 |
} EXPORT_SYMBOL_GPL(synchronize_rcu_bh); |
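/*
 * [Illustrative usage sketch, not part of rcutree.c; kernel context is
 * assumed, and gbl_foo/foo_lock are hypothetical names.]  The classic
 * update pattern built on synchronize_sched(): publish a new version,
 * wait out all pre-existing preempt-disabled readers, then free the
 * old version.
 */
static void foo_update(void)
{
	struct foo *newp, *oldp;

	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	if (newp == NULL)
		return;
	spin_lock(&foo_lock);
	oldp = rcu_dereference_protected(gbl_foo,
					 lockdep_is_held(&foo_lock));
	*newp = *oldp;
	newp->data++;
	rcu_assign_pointer(gbl_foo, newp);	/* publish the new version */
	spin_unlock(&foo_lock);
	synchronize_sched();	/* all pre-existing readers have finished... */
	kfree(oldp);		/* ...so the old version may be freed */
}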
64db4cfff "Tree RCU": scala... |
1784 1785 1786 1787 1788 1789 1790 1791 1792 |
/* * Check to see if there is any immediate RCU-related work to be done * by the current CPU, for the specified type of RCU, returning 1 if so. * The checks are in order of increasing expense: checks that can be * carried out against CPU-local state are performed first. However, * we must check for CPU stalls first, else we might not get a chance. */ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) { |
2f51f9884 rcu: Eliminate __... |
1793 |
struct rcu_node *rnp = rdp->mynode; |
64db4cfff "Tree RCU": scala... |
1794 1795 1796 1797 1798 1799 |
rdp->n_rcu_pending++; /* Check for CPU stalls, if enabled. */ check_cpu_stall(rsp, rdp); /* Is the RCU core waiting for a quiescent state from this CPU? */ |
5c51dd734 rcu: Prevent earl... |
1800 1801 |
if (rcu_scheduler_fully_active && rdp->qs_pending && !rdp->passed_quiesce) { |
d25eb9442 rcu: substitute s... |
1802 1803 1804 1805 1806 1807 |
/* * If force_quiescent_state() is coming soon and this CPU * needs a quiescent state, and this is either RCU-sched * or RCU-bh, force a local reschedule. */ |
d21670aca rcu: reduce the n... |
1808 |
rdp->n_rp_qs_pending++; |
6cc68793e rcu: fix spelling |
1809 |
if (!rdp->preemptible && |
d25eb9442 rcu: substitute s... |
1810 1811 1812 |
ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1, jiffies)) set_need_resched(); |
e4cc1f22b rcu: Simplify qui... |
1813 |
} else if (rdp->qs_pending && rdp->passed_quiesce) { |
d21670aca rcu: reduce the n... |
1814 |
rdp->n_rp_report_qs++; |
64db4cfff "Tree RCU": scala... |
1815 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1816 |
} |
64db4cfff "Tree RCU": scala... |
1817 1818 |
/* Does this CPU have callbacks ready to invoke? */ |
7ba5c840e rcu: Add __rcu_pe... |
1819 1820 |
if (cpu_has_callbacks_ready_to_invoke(rdp)) { rdp->n_rp_cb_ready++; |
64db4cfff "Tree RCU": scala... |
1821 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1822 |
} |
64db4cfff "Tree RCU": scala... |
1823 1824 |
/* Has RCU gone idle with this CPU needing another grace period? */ |
7ba5c840e rcu: Add __rcu_pe... |
1825 1826 |
if (cpu_needs_another_gp(rsp, rdp)) { rdp->n_rp_cpu_needs_gp++; |
64db4cfff "Tree RCU": scala... |
1827 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1828 |
} |
64db4cfff "Tree RCU": scala... |
1829 1830 |
/* Has another RCU grace period completed? */ |
2f51f9884 rcu: Eliminate __... |
1831 |
if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ |
7ba5c840e rcu: Add __rcu_pe... |
1832 |
rdp->n_rp_gp_completed++; |
64db4cfff "Tree RCU": scala... |
1833 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1834 |
} |
64db4cfff "Tree RCU": scala... |
1835 1836 |
/* Has a new RCU grace period started? */ |
2f51f9884 rcu: Eliminate __... |
1837 |
if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ |
7ba5c840e rcu: Add __rcu_pe... |
1838 |
rdp->n_rp_gp_started++; |
64db4cfff "Tree RCU": scala... |
1839 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1840 |
} |
64db4cfff "Tree RCU": scala... |
1841 1842 |
/* Has an RCU GP gone long enough to send resched IPIs &c? */ |
fc2219d49 rcu: Clean up cod... |
1843 |
if (rcu_gp_in_progress(rsp) && |
20133cfce rcu: Stop overflo... |
1844 |
ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) { |
7ba5c840e rcu: Add __rcu_pe... |
1845 |
rdp->n_rp_need_fqs++; |
64db4cfff "Tree RCU": scala... |
1846 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1847 |
} |
64db4cfff "Tree RCU": scala... |
1848 1849 |
/* nothing to do */ |
7ba5c840e rcu: Add __rcu_pe... |
1850 |
rdp->n_rp_need_nothing++; |
64db4cfff "Tree RCU": scala... |
1851 1852 1853 1854 1855 1856 1857 1858 |
return 0; } /* * Check to see if there is any immediate RCU-related work to be done * by the current CPU, returning 1 if so. This function is part of the * RCU implementation; it is -not- an exported member of the RCU API. */ |
a157229ca rcu: Simplify rcu... |
1859 |
static int rcu_pending(int cpu) |
64db4cfff "Tree RCU": scala... |
1860 |
{ |
d6714c22b rcu: Renamings to... |
1861 |
return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) || |
f41d911f8 rcu: Merge preemp... |
1862 1863 |
__rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) || rcu_preempt_pending(cpu); |
64db4cfff "Tree RCU": scala... |
1864 1865 1866 1867 1868 |
} /* * Check to see if any future RCU-related work will need to be done * by the current CPU, even if none need be done immediately, returning |
8bd93a2c5 rcu: Accelerate g... |
1869 |
* 1 if so. |
64db4cfff "Tree RCU": scala... |
1870 |
*/ |
aea1b35e2 rcu: Allow dyntic... |
1871 |
static int rcu_cpu_has_callbacks(int cpu) |
64db4cfff "Tree RCU": scala... |
1872 1873 |
{ /* RCU callbacks either ready or pending? */ |
d6714c22b rcu: Renamings to... |
1874 |
return per_cpu(rcu_sched_data, cpu).nxtlist || |
f41d911f8 rcu: Merge preemp... |
1875 1876 |
per_cpu(rcu_bh_data, cpu).nxtlist || rcu_preempt_needs_cpu(cpu); |
64db4cfff "Tree RCU": scala... |
1877 |
} |
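/*
 * [Illustrative sketch, not part of rcutree.c.]  __rcu_pending() above
 * is structured as a short-circuit chain: cheap CPU-local tests run
 * first, each hit bumps a per-reason counter, and the first positive
 * answer wins.  A toy model of that shape (the predicate names are
 * hypothetical):
 */
#include <stdbool.h>
#include <stdio.h>

struct pend_stats {
	unsigned long n_cb_ready;
	unsigned long n_gp_needed;
	unsigned long n_nothing;
};

static bool cb_ready(void)  { return false; }	/* cheapest test first */
static bool gp_needed(void) { return true; }	/* costlier test second */

static bool toy_pending(struct pend_stats *st)
{
	if (cb_ready()) {
		st->n_cb_ready++;
		return true;
	}
	if (gp_needed()) {
		st->n_gp_needed++;
		return true;
	}
	st->n_nothing++;	/* nothing to do */
	return false;
}

int main(void)
{
	struct pend_stats st = { 0, 0, 0 };

	printf("pending=%d gp_needed=%lu\n", toy_pending(&st), st.n_gp_needed);
	return 0;
}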
d0ec774cb rcu: Move rcu_bar... |
1878 1879 1880 1881 |
static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; static atomic_t rcu_barrier_cpu_count; static DEFINE_MUTEX(rcu_barrier_mutex); static struct completion rcu_barrier_completion; |
d0ec774cb rcu: Move rcu_bar... |
1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 |
static void rcu_barrier_callback(struct rcu_head *notused) { if (atomic_dec_and_test(&rcu_barrier_cpu_count)) complete(&rcu_barrier_completion); } /* * Called with preemption disabled, and from cross-cpu IRQ context. */ static void rcu_barrier_func(void *type) { int cpu = smp_processor_id(); struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); void (*call_rcu_func)(struct rcu_head *head, void (*func)(struct rcu_head *head)); atomic_inc(&rcu_barrier_cpu_count); call_rcu_func = type; call_rcu_func(head, rcu_barrier_callback); } |
d0ec774cb rcu: Move rcu_bar... |
1903 1904 1905 1906 |
/* * Orchestrate the specified type of RCU barrier, waiting for all * RCU callbacks of the specified type to complete. */ |
e74f4c456 rcu: Make hot-unp... |
1907 1908 |
static void _rcu_barrier(struct rcu_state *rsp, void (*call_rcu_func)(struct rcu_head *head, |
d0ec774cb rcu: Move rcu_bar... |
1909 1910 1911 |
void (*func)(struct rcu_head *head))) { BUG_ON(in_interrupt()); |
e74f4c456 rcu: Make hot-unp... |
1912 |
/* Take mutex to serialize concurrent rcu_barrier() requests. */ |
d0ec774cb rcu: Move rcu_bar... |
1913 1914 1915 1916 1917 1918 1919 1920 1921 |
mutex_lock(&rcu_barrier_mutex); init_completion(&rcu_barrier_completion); /* * Initialize rcu_barrier_cpu_count to 1, then invoke * rcu_barrier_func() on each CPU, so that each CPU also has * incremented rcu_barrier_cpu_count. Only then is it safe to * decrement rcu_barrier_cpu_count -- otherwise the first CPU * might complete its grace period before all of the other CPUs * did their increment, causing this function to return too |
2d999e03b rcu: update docum... |
1922 1923 1924 |
* early. Note that on_each_cpu() disables irqs, which prevents * any CPUs from coming online or going offline until each online * CPU has queued its RCU-barrier callback. |
d0ec774cb rcu: Move rcu_bar... |
1925 1926 1927 1928 1929 1930 1931 |
*/ atomic_set(&rcu_barrier_cpu_count, 1); on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1); if (atomic_dec_and_test(&rcu_barrier_cpu_count)) complete(&rcu_barrier_completion); wait_for_completion(&rcu_barrier_completion); mutex_unlock(&rcu_barrier_mutex); |
d0ec774cb rcu: Move rcu_bar... |
1932 |
} |
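/*
 * [Illustrative sketch, not part of rcutree.c.]  The comment above
 * explains why rcu_barrier_cpu_count starts at 1: the extra reference
 * keeps any early-finishing callback from driving the count to zero
 * before every CPU has enqueued its own.  A toy model using C11
 * atomics:
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int barrier_count;
static atomic_bool barrier_done;

static void toy_barrier_callback(void)	/* models rcu_barrier_callback() */
{
	if (atomic_fetch_sub(&barrier_count, 1) == 1)
		atomic_store(&barrier_done, true);	/* models complete() */
}

int main(void)
{
	int cpu, ncpus = 4;

	atomic_store(&barrier_count, 1);	/* orchestrator's extra ref */
	for (cpu = 0; cpu < ncpus; cpu++)
		atomic_fetch_add(&barrier_count, 1);	/* per-CPU enqueue */

	/* The per-CPU callbacks may fire in any order... */
	for (cpu = 0; cpu < ncpus; cpu++)
		toy_barrier_callback();

	/* ...but completion waits for the orchestrator's own decrement. */
	toy_barrier_callback();
	printf("done = %d\n", (int)atomic_load(&barrier_done));	/* done = 1 */
	return 0;
}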
d0ec774cb rcu: Move rcu_bar... |
1933 1934 1935 1936 1937 1938 |
/** * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. */ void rcu_barrier_bh(void) { |
e74f4c456 rcu: Make hot-unp... |
1939 |
_rcu_barrier(&rcu_bh_state, call_rcu_bh); |
d0ec774cb rcu: Move rcu_bar... |
1940 1941 1942 1943 1944 1945 1946 1947 |
} EXPORT_SYMBOL_GPL(rcu_barrier_bh); /** * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. */ void rcu_barrier_sched(void) { |
e74f4c456 rcu: Make hot-unp... |
1948 |
_rcu_barrier(&rcu_sched_state, call_rcu_sched); |
d0ec774cb rcu: Move rcu_bar... |
1949 1950 |
} EXPORT_SYMBOL_GPL(rcu_barrier_sched); |
64db4cfff "Tree RCU": scala... |
1951 |
/* |
27569620c rcu: Split hierar... |
1952 |
* Do boot-time initialization of a CPU's per-CPU RCU data. |
64db4cfff "Tree RCU": scala... |
1953 |
*/ |
27569620c rcu: Split hierar... |
1954 1955 |
static void __init rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) |
64db4cfff "Tree RCU": scala... |
1956 1957 1958 |
{ unsigned long flags; int i; |
394f99a90 rcu: simplify the... |
1959 |
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
27569620c rcu: Split hierar... |
1960 1961 1962 |
struct rcu_node *rnp = rcu_get_root(rsp); /* Set up local state, ensuring consistent view of global state. */ |
1304afb22 rcu: Convert to r... |
1963 |
raw_spin_lock_irqsave(&rnp->lock, flags); |
27569620c rcu: Split hierar... |
1964 1965 1966 1967 1968 |
rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); rdp->nxtlist = NULL; for (i = 0; i < RCU_NEXT_SIZE; i++) rdp->nxttail[i] = &rdp->nxtlist; rdp->qlen = 0; |
27569620c rcu: Split hierar... |
1969 |
rdp->dynticks = &per_cpu(rcu_dynticks, cpu); |
4145fa7fb rcu: Deconfuse dy... |
1970 |
WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING); |
9b2e4f188 rcu: Track idlene... |
1971 |
WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); |
27569620c rcu: Split hierar... |
1972 |
rdp->cpu = cpu; |
d4c08f2ac rcu: Add grace-pe... |
1973 |
rdp->rsp = rsp; |
1304afb22 rcu: Convert to r... |
1974 |
raw_spin_unlock_irqrestore(&rnp->lock, flags); |
27569620c rcu: Split hierar... |
1975 1976 1977 1978 1979 1980 1981 |
* Initialize a CPU's per-CPU RCU data. Note that only one online or * offline event can be happening at a given time. Note also that we * can accept some slop in the rsp->completed access because this CPU * cannot possibly have any RCU callbacks in flight yet. |
64db4cfff "Tree RCU": scala... |
1982 |
*/ |
e4fa4c970 rcu: add __cpuini... |
1983 |
static void __cpuinit |
6cc68793e rcu: fix spelling |
1984 |
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) |
64db4cfff "Tree RCU": scala... |
1985 1986 |
{ unsigned long flags; |
64db4cfff "Tree RCU": scala... |
1987 |
unsigned long mask; |
394f99a90 rcu: simplify the... |
1988 |
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
64db4cfff "Tree RCU": scala... |
1989 1990 1991 |
struct rcu_node *rnp = rcu_get_root(rsp); /* Set up local state, ensuring consistent view of global state. */ |
1304afb22 rcu: Convert to r... |
1992 |
raw_spin_lock_irqsave(&rnp->lock, flags); |
64db4cfff "Tree RCU": scala... |
1993 |
rdp->beenonline = 1; /* We have now been online. */ |
6cc68793e rcu: fix spelling |
1994 |
rdp->preemptible = preemptible; |
37c72e56f rcu: Prevent RCU ... |
1995 1996 |
rdp->qlen_last_fqs_check = 0; rdp->n_force_qs_snap = rsp->n_force_qs; |
64db4cfff "Tree RCU": scala... |
1997 |
rdp->blimit = blimit; |
c92b131bd rcu: Remove dynti... |
1998 1999 2000 |
rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING; atomic_set(&rdp->dynticks->dynticks, (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1); |
7cb924990 rcu: Permit dynti... |
2001 |
rcu_prepare_for_idle_init(cpu); |
1304afb22 rcu: Convert to r... |
2002 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
64db4cfff "Tree RCU": scala... |
2003 2004 2005 2006 2007 2008 2009 |
/* * A new grace period might start here. If so, we won't be part * of it, but that is OK, as we are currently in a quiescent state. */ /* Exclude any attempts to start a new GP on large systems. */ |
1304afb22 rcu: Convert to r... |
2010 |
raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */ |
64db4cfff "Tree RCU": scala... |
2011 2012 2013 2014 2015 2016 |
/* Add CPU to rcu_node bitmasks. */ rnp = rdp->mynode; mask = rdp->grpmask; do { /* Exclude any attempts to start a new GP on small systems. */ |
1304afb22 rcu: Convert to r... |
2017 |
raw_spin_lock(&rnp->lock); /* irqs already disabled. */ |
64db4cfff "Tree RCU": scala... |
2018 2019 |
rnp->qsmaskinit |= mask; mask = rnp->grpmask; |
d09b62dfa rcu: Fix synchron... |
2020 |
if (rnp == rdp->mynode) { |
06ae115a1 rcu: Avoid having... |
2021 2022 2023 2024 2025 2026 |
/* * If there is a grace period in progress, we will * set up to wait for it next time we run the * RCU core code. */ rdp->gpnum = rnp->completed; |
d09b62dfa rcu: Fix synchron... |
2027 |
rdp->completed = rnp->completed; |
06ae115a1 rcu: Avoid having... |
2028 2029 |
rdp->passed_quiesce = 0; rdp->qs_pending = 0; |
e4cc1f22b rcu: Simplify qui... |
2030 |
rdp->passed_quiesce_gpnum = rnp->gpnum - 1; |
d4c08f2ac rcu: Add grace-pe... |
2031 |
trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl"); |
d09b62dfa rcu: Fix synchron... |
2032 |
} |
1304afb22 rcu: Convert to r... |
2033 |
raw_spin_unlock(&rnp->lock); /* irqs already disabled. */ |
64db4cfff "Tree RCU": scala... |
2034 2035 |
rnp = rnp->parent; } while (rnp != NULL && !(rnp->qsmaskinit & mask)); |
1304afb22 rcu: Convert to r... |
2036 |
raw_spin_unlock_irqrestore(&rsp->onofflock, flags); |
64db4cfff "Tree RCU": scala... |
2037 |
} |
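/*
 * [Illustrative sketch, not part of rcutree.c.]  The atomic_set()
 * above forces the incoming CPU's ->dynticks counter to an odd value:
 * in this version of RCU the counter is incremented at every idle
 * transition, so odd means "CPU active" and even means "dyntick-idle".
 * A toy model of the parity convention:
 */
#include <stdio.h>

static unsigned long dynticks = 1;	/* online CPUs start active (odd) */

static void idle_enter(void) { dynticks++; }	/* odd  -> even */
static void idle_exit(void)  { dynticks++; }	/* even -> odd  */

static int is_idle(void)
{
	return (dynticks & 0x1) == 0;
}

int main(void)
{
	unsigned long snap = dynticks;	/* remote CPU samples the counter */

	idle_enter();
	printf("idle now: %d\n", is_idle());		/* idle now: 1 */
	idle_exit();
	/* A changed counter proves the CPU passed through a quiescent
	 * state some time after the snapshot was taken. */
	printf("qs since snap: %d\n", dynticks != snap);	/* 1 */
	return 0;
}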
d72bce0e6 rcu: Cure load woes |
2038 |
static void __cpuinit rcu_prepare_cpu(int cpu) |
64db4cfff "Tree RCU": scala... |
2039 |
{ |
f41d911f8 rcu: Merge preemp... |
2040 2041 2042 |
rcu_init_percpu_data(cpu, &rcu_sched_state, 0); rcu_init_percpu_data(cpu, &rcu_bh_state, 0); rcu_preempt_init_percpu_data(cpu); |
64db4cfff "Tree RCU": scala... |
2043 2044 2045 |
} /* |
f41d911f8 rcu: Merge preemp... |
2046 |
* Handle CPU online/offline notification events. |
64db4cfff "Tree RCU": scala... |
2047 |
*/ |
9f680ab41 rcu: Eliminate un... |
2048 2049 |
static int __cpuinit rcu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) |
64db4cfff "Tree RCU": scala... |
2050 2051 |
{ long cpu = (long)hcpu; |
27f4d2805 rcu: priority boo... |
2052 |
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); |
a26ac2455 rcu: move TREE_RC... |
2053 |
struct rcu_node *rnp = rdp->mynode; |
64db4cfff "Tree RCU": scala... |
2054 |
|
300df91ca rcu: Event-trace ... |
2055 |
trace_rcu_utilization("Start CPU hotplug"); |
64db4cfff "Tree RCU": scala... |
2056 2057 2058 |
switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: |
d72bce0e6 rcu: Cure load woes |
2059 2060 |
rcu_prepare_cpu(cpu); rcu_prepare_kthreads(cpu); |
a26ac2455 rcu: move TREE_RC... |
2061 2062 |
break; case CPU_ONLINE: |
0f962a5e7 rcu: Force per-rc... |
2063 2064 |
case CPU_DOWN_FAILED: rcu_node_kthread_setaffinity(rnp, -1); |
e3995a25f rcu: put per-CPU ... |
2065 |
rcu_cpu_kthread_setrt(cpu, 1); |
0f962a5e7 rcu: Force per-rc... |
2066 2067 2068 |
break; case CPU_DOWN_PREPARE: rcu_node_kthread_setaffinity(rnp, cpu); |
e3995a25f rcu: put per-CPU ... |
2069 |
rcu_cpu_kthread_setrt(cpu, 0); |
64db4cfff "Tree RCU": scala... |
2070 |
break; |
d0ec774cb rcu: Move rcu_bar... |
2071 2072 2073 |
case CPU_DYING: case CPU_DYING_FROZEN: /* |
2d999e03b rcu: update docum... |
2074 2075 2076 |
* The whole machine is "stopped" except this CPU, so we can * touch any data without introducing corruption. We send the * dying CPU's callbacks to an arbitrarily chosen online CPU. |
d0ec774cb rcu: Move rcu_bar... |
2077 |
*/ |
29494be71 rcu,cleanup: simp... |
2078 2079 2080 |
rcu_send_cbs_to_online(&rcu_bh_state); rcu_send_cbs_to_online(&rcu_sched_state); rcu_preempt_send_cbs_to_online(); |
7cb924990 rcu: Permit dynti... |
2081 |
rcu_cleanup_after_idle(cpu); |
d0ec774cb rcu: Move rcu_bar... |
2082 |
break; |
64db4cfff "Tree RCU": scala... |
2083 2084 2085 2086 2087 2088 2089 2090 2091 |
case CPU_DEAD: case CPU_DEAD_FROZEN: case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: rcu_offline_cpu(cpu); break; default: break; } |
300df91ca rcu: Event-trace ... |
2092 |
trace_rcu_utilization("End CPU hotplug"); |
64db4cfff "Tree RCU": scala... |
2093 2094 2095 2096 |
return NOTIFY_OK; } /* |
bbad93798 rcu: slim down rc... |
2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 |
* This function is invoked towards the end of the scheduler's initialization * process. Before this is called, the idle task might contain * RCU read-side critical sections (during which time, this idle * task is booting the system). After this function is called, the * idle tasks are prohibited from containing RCU read-side critical * sections. This function also enables RCU lockdep checking. */ void rcu_scheduler_starting(void) { WARN_ON(num_online_cpus() != 1); WARN_ON(nr_context_switches() > 0); rcu_scheduler_active = 1; } /* |
64db4cfff "Tree RCU": scala... |
2112 2113 2114 2115 2116 2117 2118 |
* Compute the per-level fanout, either using the exact fanout specified * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT. */ #ifdef CONFIG_RCU_FANOUT_EXACT static void __init rcu_init_levelspread(struct rcu_state *rsp) { int i; |
0209f6490 rcu: limit rcu_no... |
2119 |
for (i = NUM_RCU_LVLS - 1; i > 0; i--) |
64db4cfff "Tree RCU": scala... |
2120 |
rsp->levelspread[i] = CONFIG_RCU_FANOUT; |
0209f6490 rcu: limit rcu_no... |
2121 |
rsp->levelspread[0] = RCU_FANOUT_LEAF; |
64db4cfff "Tree RCU": scala... |
2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 |
} #else /* #ifdef CONFIG_RCU_FANOUT_EXACT */ static void __init rcu_init_levelspread(struct rcu_state *rsp) { int ccur; int cprv; int i; cprv = NR_CPUS; for (i = NUM_RCU_LVLS - 1; i >= 0; i--) { ccur = rsp->levelcnt[i]; rsp->levelspread[i] = (cprv + ccur - 1) / ccur; cprv = ccur; } } #endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */ /* * Helper function for rcu_init() that initializes one rcu_state structure. */ |
394f99a90 rcu: simplify the... |
2142 2143 |
static void __init rcu_init_one(struct rcu_state *rsp, struct rcu_data __percpu *rda) |
64db4cfff "Tree RCU": scala... |
2144 |
{ |
b6407e863 rcu: Give differe... |
2145 2146 2147 2148 |
static char *buf[] = { "rcu_node_level_0", "rcu_node_level_1", "rcu_node_level_2", "rcu_node_level_3" }; /* Match MAX_RCU_LVLS */ |
64db4cfff "Tree RCU": scala... |
2149 2150 2151 2152 |
int cpustride = 1; int i; int j; struct rcu_node *rnp; |
b6407e863 rcu: Give differe... |
2153 |
BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ |
64db4cfff "Tree RCU": scala... |
2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 |
/* Initialize the level-tracking arrays. */ for (i = 1; i < NUM_RCU_LVLS; i++) rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1]; rcu_init_levelspread(rsp); /* Initialize the elements themselves, starting from the leaves. */ for (i = NUM_RCU_LVLS - 1; i >= 0; i--) { cpustride *= rsp->levelspread[i]; rnp = rsp->level[i]; for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { |
1304afb22 rcu: Convert to r... |
2166 |
raw_spin_lock_init(&rnp->lock); |
b6407e863 rcu: Give differe... |
2167 2168 |
lockdep_set_class_and_name(&rnp->lock, &rcu_node_class[i], buf[i]); |
f41d911f8 rcu: Merge preemp... |
2169 |
rnp->gpnum = 0; |
64db4cfff "Tree RCU": scala... |
2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 |
rnp->qsmask = 0; rnp->qsmaskinit = 0; rnp->grplo = j * cpustride; rnp->grphi = (j + 1) * cpustride - 1; if (rnp->grphi >= NR_CPUS) rnp->grphi = NR_CPUS - 1; if (i == 0) { rnp->grpnum = 0; rnp->grpmask = 0; rnp->parent = NULL; } else { rnp->grpnum = j % rsp->levelspread[i - 1]; rnp->grpmask = 1UL << rnp->grpnum; rnp->parent = rsp->level[i - 1] + j / rsp->levelspread[i - 1]; } rnp->level = i; |
12f5f524c rcu: merge TREE_P... |
2187 |
INIT_LIST_HEAD(&rnp->blkd_tasks); |
64db4cfff "Tree RCU": scala... |
2188 2189 |
} } |
0c34029ab rcu: move some co... |
2190 |
|
394f99a90 rcu: simplify the... |
2191 |
rsp->rda = rda; |
0c34029ab rcu: move some co... |
2192 2193 |
rnp = rsp->level[NUM_RCU_LVLS - 1]; for_each_possible_cpu(i) { |
4a90a0681 rcu: permit disco... |
2194 |
while (i > rnp->grphi) |
0c34029ab rcu: move some co... |
2195 |
rnp++; |
394f99a90 rcu: simplify the... |
2196 |
per_cpu_ptr(rsp->rda, i)->mynode = rnp; |
0c34029ab rcu: move some co... |
2197 2198 |
rcu_boot_init_percpu_data(i, rsp); } |
64db4cfff "Tree RCU": scala... |
2199 |
} |
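/*
 * [Illustrative sketch, not part of rcutree.c.]  rcu_init_one() above
 * lays the tree out from the leaves up, and the balanced branch of
 * rcu_init_levelspread() picks each level's fanout by ceiling-dividing
 * the population of the level below.  A toy recomputation with made-up
 * numbers:
 */
#include <stdio.h>

#define NUM_LVLS	3
#define NCPUS		17

int main(void)
{
	int levelcnt[NUM_LVLS] = { 1, 3, 9 };	/* root ... leaves */
	int levelspread[NUM_LVLS];
	int cprv = NCPUS;
	int i;

	for (i = NUM_LVLS - 1; i >= 0; i--) {
		int ccur = levelcnt[i];

		levelspread[i] = (cprv + ccur - 1) / ccur;	/* ceil() */
		cprv = ccur;
	}
	for (i = 0; i < NUM_LVLS; i++)
		printf("level %d: spread %d\n", i, levelspread[i]);
	/* 17 CPUs over 9 leaves: each leaf covers ceil(17/9) = 2 CPUs,
	 * each interior node ceil(9/3) = 3 children, the root 3. */
	return 0;
}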
9f680ab41 rcu: Eliminate un... |
2200 |
void __init rcu_init(void) |
64db4cfff "Tree RCU": scala... |
2201 |
{ |
017c42613 rcu: Fix sparse w... |
2202 |
int cpu; |
9f680ab41 rcu: Eliminate un... |
2203 |
|
f41d911f8 rcu: Merge preemp... |
2204 |
rcu_bootup_announce(); |
394f99a90 rcu: simplify the... |
2205 2206 |
rcu_init_one(&rcu_sched_state, &rcu_sched_data); rcu_init_one(&rcu_bh_state, &rcu_bh_data); |
f41d911f8 rcu: Merge preemp... |
2207 |
__rcu_init_preempt(); |
09223371d rcu: Use softirq ... |
2208 |
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
9f680ab41 rcu: Eliminate un... |
2209 2210 2211 2212 2213 2214 2215 |
/* * We don't need protection against CPU-hotplug here because * this is called early in boot, before either interrupts * or the scheduler are operational. */ cpu_notifier(rcu_cpu_notify, 0); |
017c42613 rcu: Fix sparse w... |
2216 2217 |
for_each_online_cpu(cpu) rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); |
c68de2097 rcu: disable CPU ... |
2218 |
check_cpu_stall_init(); |
64db4cfff "Tree RCU": scala... |
2219 |
} |
1eba8f843 rcu: Clean up cod... |
2220 |
#include "rcutree_plugin.h" |