kernel/rcutree.c

/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *          Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *      Documentation/RCU
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>

#include "rcutree.h"

/* Data structures. */

static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];

#define RCU_STATE_INITIALIZER(structname) { \
        .level = { &structname.node[0] }, \
        .levelcnt = { \
                NUM_RCU_LVL_0,  /* root of hierarchy. */ \
                NUM_RCU_LVL_1, \
                NUM_RCU_LVL_2, \
                NUM_RCU_LVL_3, \
                NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
        }, \
        .signaled = RCU_GP_IDLE, \
        .gpnum = -300, \
        .completed = -300, \
        .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \
        .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \
        .n_force_qs = 0, \
        .n_force_qs_ngp = 0, \
        .name = #structname, \
}
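
/*
 * Geometry illustration (values depend on the rcutree.h configuration,
 * so treat these as an example only): with CONFIG_RCU_FANOUT == 64 and
 * NR_CPUS == 4096, rcutree.h computes NUM_RCU_LVLS == 2 and .levelcnt
 * above becomes { 1, 64, 4096, 0, 0 }: one root rcu_node fanning out to
 * 64 leaf rcu_nodes, each leaf covering up to 64 CPUs.
 */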

struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);

struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);

static struct rcu_state *rcu_state;

int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

#ifdef CONFIG_RCU_BOOST

/*
 * Control variables for per-CPU and per-rcu_node kthreads.  These
 * handle all flavors of RCU.
 */
static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DEFINE_PER_CPU(char, rcu_cpu_has_work);
static char rcu_kthreads_spawnable;

#endif /* #ifdef CONFIG_RCU_BOOST */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);

#define RCU_KTHREAD_PRIO 1      /* RT priority for per-CPU kthreads. */

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
        return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
}
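
/*
 * Numbering sketch (illustrative): both counters start at -300 above, so
 * the state is initially "idle".  When a grace period starts, rcu_start_gp()
 * advances ->gpnum to -299 while ->completed stays at -300; the two differ
 * exactly while a grace period is in progress.  When the grace period ends,
 * rcu_report_qs_rsp() copies ->gpnum into ->completed, making them equal
 * again.
 */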

/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 */
void rcu_sched_qs(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);

        rdp->passed_quiesc_completed = rdp->gpnum - 1;
        barrier();
        rdp->passed_quiesc = 1;
}

void rcu_bh_qs(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);

        rdp->passed_quiesc_completed = rdp->gpnum - 1;
        barrier();
        rdp->passed_quiesc = 1;
}
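
/*
 * Ordering sketch for the barrier() calls above (intent inferred from the
 * code): barrier() prevents the compiler from reordering the two stores,
 * so ->passed_quiesc_completed is always written before the
 * ->passed_quiesc flag.  Code on this CPU (such as
 * rcu_check_quiescent_state() below, possibly interleaved via interrupt)
 * that sees the flag set is therefore guaranteed to see a matching
 * grace-period number rather than a stale one.
 */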

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 */
void rcu_note_context_switch(int cpu)
{
        rcu_sched_qs(cpu);
        rcu_preempt_note_context_switch(cpu);
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

#ifdef CONFIG_NO_HZ
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
        .dynticks_nesting = 1,
        .dynticks = ATOMIC_INIT(1),
};
#endif /* #ifdef CONFIG_NO_HZ */

static int blimit = 10;         /* Maximum callbacks per softirq. */
static int qhimark = 10000;     /* If this many pending, ignore blimit. */
static int qlowmark = 100;      /* Once only this many pending, use blimit. */

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);

int rcu_cpu_stall_suppress __read_mostly;
module_param(rcu_cpu_stall_suppress, int, 0644);

static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
static int rcu_pending(int cpu);

/*
 * Return the number of RCU-sched batches processed thus far for debug & stats.
 */
long rcu_batches_completed_sched(void)
{
        return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches processed thus far for debug & stats.
 */
long rcu_batches_completed_bh(void)
{
        return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
/* * Return the number of RCU BH batches processed thus far for debug & stats. */ long rcu_batches_completed_bh(void) { return rcu_bh_state.completed; } EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); /* |
bf66f18e7 rcu: Add force_qu... |
196 197 198 199 200 201 202 203 204 |
* Force a quiescent state for RCU BH. */ void rcu_bh_force_quiescent_state(void) { force_quiescent_state(&rcu_bh_state, 0); } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); /* |
4a2986568 rcu: make rcutort... |
205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 |
* Record the number of times rcutorture tests have been initiated and * terminated. This information allows the debugfs tracing stats to be * correlated to the rcutorture messages, even when the rcutorture module * is being repeatedly loaded and unloaded. In other words, we cannot * store this state in rcutorture itself. */ void rcutorture_record_test_transition(void) { rcutorture_testseq++; rcutorture_vernum = 0; } EXPORT_SYMBOL_GPL(rcutorture_record_test_transition); /* * Record the number of writer passes through the current rcutorture test. * This is also used to correlate debugfs tracing stats with the rcutorture * messages. */ void rcutorture_record_progress(unsigned long vernum) { rcutorture_vernum++; } EXPORT_SYMBOL_GPL(rcutorture_record_progress); /* |
bf66f18e7 rcu: Add force_qu... |
230 231 232 233 234 235 236 237 238 |
* Force a quiescent state for RCU-sched. */ void rcu_sched_force_quiescent_state(void) { force_quiescent_state(&rcu_sched_state, 0); } EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state); /* |

/*
 * Does the CPU have callbacks ready to be invoked?
 */
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
        return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL];
}

/*
 * Does the current CPU require an as-yet-unscheduled grace period?
 */
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
        return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
}

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
        return &rsp->node[0];
}

#ifdef CONFIG_SMP

/*
 * If the specified CPU is offline, tell the caller that it is in
 * a quiescent state.  Otherwise, whack it with a reschedule IPI.
 * Grace periods can end up waiting on an offline CPU when that
 * CPU is in the process of coming online -- it will be added to the
 * rcu_node bitmasks before it actually makes it online.  The same thing
 * can happen while a CPU is in the process of going offline.  Because this
 * race is quite rare, we check for it after detecting that the grace
 * period has been delayed rather than checking each and every CPU
 * each and every time we start a new grace period.
 */
static int rcu_implicit_offline_qs(struct rcu_data *rdp)
{
        /*
         * If the CPU is offline, it is in a quiescent state.  We can
         * trust its state not to change because interrupts are disabled.
         */
        if (cpu_is_offline(rdp->cpu)) {
                rdp->offline_fqs++;
                return 1;
        }

        /* If preemptible RCU, no point in sending reschedule IPI. */
        if (rdp->preemptible)
                return 0;

        /* The CPU is online, so send it a reschedule IPI. */
        if (rdp->cpu != smp_processor_id())
                smp_send_reschedule(rdp->cpu);
        else
                set_need_resched();
        rdp->resched_ipi++;
        return 0;
}

#endif /* #ifdef CONFIG_SMP */

#ifdef CONFIG_NO_HZ

/**
 * rcu_enter_nohz - inform RCU that current CPU is entering nohz
 *
 * Enter nohz mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in nohz mode, a possibility
 * handled by rcu_irq_enter() and rcu_irq_exit()).
 */
void rcu_enter_nohz(void)
{
        unsigned long flags;
        struct rcu_dynticks *rdtp;

        local_irq_save(flags);
        rdtp = &__get_cpu_var(rcu_dynticks);
        if (--rdtp->dynticks_nesting) {
                local_irq_restore(flags);
                return;
        }
        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
        smp_mb__before_atomic_inc();  /* See above. */
        atomic_inc(&rdtp->dynticks);
        smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
        WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
        local_irq_restore(flags);

        /* If the interrupt queued a callback, get out of dyntick mode. */
        if (in_irq() &&
            (__get_cpu_var(rcu_sched_data).nxtlist ||
             __get_cpu_var(rcu_bh_data).nxtlist ||
             rcu_preempt_needs_cpu(smp_processor_id())))
                set_need_resched();
}

/**
 * rcu_exit_nohz - inform RCU that current CPU is leaving nohz
 *
 * Exit nohz mode, in other words, -enter- the mode in which RCU
 * read-side critical sections normally occur.
 */
void rcu_exit_nohz(void)
{
        unsigned long flags;
        struct rcu_dynticks *rdtp;

        local_irq_save(flags);
        rdtp = &__get_cpu_var(rcu_dynticks);
        if (rdtp->dynticks_nesting++) {
                local_irq_restore(flags);
                return;
        }
        smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
        atomic_inc(&rdtp->dynticks);
        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
        smp_mb__after_atomic_inc();  /* See above. */
        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
        local_irq_restore(flags);
}

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is active.
 */
void rcu_nmi_enter(void)
{
        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

        if (rdtp->dynticks_nmi_nesting == 0 &&
            (atomic_read(&rdtp->dynticks) & 0x1))
                return;
        rdtp->dynticks_nmi_nesting++;
        smp_mb__before_atomic_inc();  /* Force delay from prior write. */
        atomic_inc(&rdtp->dynticks);
        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
        smp_mb__after_atomic_inc();  /* See above. */
        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
}

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If the CPU was idle with dynamic ticks active, and there is no
 * irq handler running, this updates rdtp->dynticks_nmi to let the
 * RCU grace-period handling know that the CPU is no longer active.
 */
void rcu_nmi_exit(void)
{
        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);

        if (rdtp->dynticks_nmi_nesting == 0 ||
            --rdtp->dynticks_nmi_nesting != 0)
                return;
        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
        smp_mb__before_atomic_inc();  /* See above. */
        atomic_inc(&rdtp->dynticks);
        smp_mb__after_atomic_inc();  /* Force delay to next write. */
        WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}

/**
 * rcu_irq_enter - inform RCU of entry to hard irq context
 *
 * If the CPU was idle with dynamic ticks active, this updates the
 * rdtp->dynticks to let the RCU handling know that the CPU is active.
 */
void rcu_irq_enter(void)
{
        rcu_exit_nohz();
}

/**
 * rcu_irq_exit - inform RCU of exit from hard irq context
 *
 * If the CPU was idle with dynamic ticks active, update the rdtp->dynticks
 * to let the RCU handling know that the CPU is going back to idle
 * with no ticks.
 */
void rcu_irq_exit(void)
{
        rcu_enter_nohz();
}
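
/*
 * A worked example of the ->dynticks protocol above (counter values are
 * illustrative): the counter starts odd (1 == not idle).  rcu_enter_nohz()
 * increments it to 2 (even == dynticks idle), rcu_exit_nohz() to 3 (odd ==
 * active again).  An irq or NMI arriving during idle adds two more
 * increments via its enter/exit pair.  Hence an even value always means
 * "idle now", and growth of at least 2 since a snapshot means the CPU
 * passed through idle -- the fact exploited by rcu_implicit_dynticks_qs()
 * below.
 */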

#ifdef CONFIG_SMP

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
        rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
        return 0;
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
 * idle state since the last call to dyntick_save_progress_counter()
 * for this same CPU.
 */
static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
        unsigned long curr;
        unsigned long snap;

        curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
        snap = (unsigned long)rdp->dynticks_snap;

        /*
         * If the CPU passed through or entered a dynticks idle phase with
         * no active irq/NMI handlers, then we can safely pretend that the CPU
         * already acknowledged the request to pass through a quiescent
         * state.  Either way, that CPU cannot possibly be in an RCU
         * read-side critical section that started before the beginning
         * of the current RCU grace period.
         */
        if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
                rdp->dynticks_fqs++;
                return 1;
        }

        /* Go check for the CPU being offline. */
        return rcu_implicit_offline_qs(rdp);
}

#endif /* #ifdef CONFIG_SMP */

#else /* #ifdef CONFIG_NO_HZ */

#ifdef CONFIG_SMP

static int dyntick_save_progress_counter(struct rcu_data *rdp)
{
        return 0;
}

static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
{
        return rcu_implicit_offline_qs(rdp);
}

#endif /* #ifdef CONFIG_SMP */

#endif /* #else #ifdef CONFIG_NO_HZ */
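
/*
 * For instance (invented counter values): if curr is even, the CPU is in
 * dynticks idle right now, which is itself an extended quiescent state.
 * If the snapshot was 9 (odd, busy) and curr is now 11 or more, the
 * counter advanced by at least 2, so the CPU must have passed through
 * idle since the snapshot -- also a quiescent state, even though the CPU
 * is busy again by the time of the second sampling.
 */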

int rcu_cpu_stall_suppress __read_mostly;

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
        rsp->gp_start = jiffies;
        rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

static void print_other_cpu_stall(struct rcu_state *rsp)
{
        int cpu;
        long delta;
        unsigned long flags;
        struct rcu_node *rnp = rcu_get_root(rsp);

        /* Only let one CPU complain about others per time interval. */

        raw_spin_lock_irqsave(&rnp->lock, flags);
        delta = jiffies - rsp->jiffies_stall;
        if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
        rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;

        /*
         * Now rat on any tasks that got kicked up to the root rcu_node
         * due to CPU offlining.
         */
        rcu_print_task_stall(rnp);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);

        /*
         * OK, time to rat on our buddy...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
        printk(KERN_ERR "INFO: %s detected stalls on CPUs/tasks: {",
               rsp->name);
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave(&rnp->lock, flags);
                rcu_print_task_stall(rnp);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                if (rnp->qsmask == 0)
                        continue;
                for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
                        if (rnp->qsmask & (1UL << cpu))
                                printk(" %d", rnp->grplo + cpu);
        }
        printk("} (detected by %d, t=%ld jiffies)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start));
        trigger_all_cpu_backtrace();

        /* If so configured, complain about tasks blocking the grace period. */
        rcu_print_detail_task_stall(rsp);

        force_quiescent_state(rsp, 0);  /* Kick them all. */
}

static void print_cpu_stall(struct rcu_state *rsp)
{
        unsigned long flags;
        struct rcu_node *rnp = rcu_get_root(rsp);

        /*
         * OK, time to rat on ourselves...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
        printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
               rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
        trigger_all_cpu_backtrace();

        raw_spin_lock_irqsave(&rnp->lock, flags);
        if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
                rsp->jiffies_stall =
                        jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);

        set_need_resched();  /* kick ourselves to get things going. */
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long j;
        unsigned long js;
        struct rcu_node *rnp;

        if (rcu_cpu_stall_suppress)
                return;
        j = ACCESS_ONCE(jiffies);
        js = ACCESS_ONCE(rsp->jiffies_stall);
        rnp = rdp->mynode;
        if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {

                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rsp);

        } else if (rcu_gp_in_progress(rsp) &&
                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {

                /* They had a few time units to dump stack, so complain. */
                print_other_cpu_stall(rsp);
        }
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
        rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
        rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
        rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
        rcu_preempt_stall_reset();
}

static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
};

static void __init check_cpu_stall_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
}
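
/*
 * A note on the ULONG_CMP_GE() tests above: rcupdate.h defines
 * ULONG_CMP_GE(a, b) as "ULONG_MAX / 2 >= (a) - (b)", so "a" counts as
 * at-or-after "b" whenever their unsigned difference is at most half the
 * counter space.  This keeps the stall checks correct when jiffies wraps,
 * and is also why rcu_cpu_stall_reset() can park ->jiffies_stall
 * ULONG_MAX / 2 ticks into the future to suppress warnings.
 */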

/*
 * Update CPU-local rcu_data state to record the newly noticed grace period.
 * This is used both when we started the grace period and when we notice
 * that someone else started the grace period.  The caller must hold the
 * ->lock of the leaf rcu_node structure corresponding to the current CPU,
 * and must have irqs disabled.
 */
static void
__note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
        if (rdp->gpnum != rnp->gpnum) {
                /*
                 * If the current grace period is waiting for this CPU,
                 * set up to detect a quiescent state, otherwise don't
                 * go looking for one.
                 */
                rdp->gpnum = rnp->gpnum;
                if (rnp->qsmask & rdp->grpmask) {
                        rdp->qs_pending = 1;
                        rdp->passed_quiesc = 0;
                } else
                        rdp->qs_pending = 0;
        }
}

static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long flags;
        struct rcu_node *rnp;

        local_irq_save(flags);
        rnp = rdp->mynode;
        if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
            !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
                local_irq_restore(flags);
                return;
        }
        __note_new_gpnum(rsp, rnp, rdp);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Did someone else start a new RCU grace period since we last
 * checked?  Update local state appropriately if so.  Must be called
 * on the CPU corresponding to rdp.
 */
static int
check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long flags;
        int ret = 0;

        local_irq_save(flags);
        if (rdp->gpnum != rsp->gpnum) {
                note_new_gpnum(rsp, rdp);
                ret = 1;
        }
        local_irq_restore(flags);
        return ret;
}

/*
 * Advance this CPU's callbacks, but only if the current grace period
 * has ended.  This may be called only from the CPU to whom the rdp
 * belongs.  In addition, the corresponding leaf rcu_node structure's
 * ->lock must be held by the caller, with irqs disabled.
 */
static void
__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
        /* Did another grace period end? */
        if (rdp->completed != rnp->completed) {

                /* Advance callbacks.  No harm if list empty. */
                rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
                rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
                rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

                /* Remember that we saw this grace-period completion. */
                rdp->completed = rnp->completed;

                /*
                 * If we were in an extended quiescent state, we may have
                 * missed some grace periods that other CPUs handled on
                 * our behalf.  Catch up with this state to avoid noting
                 * spurious new grace periods.  If another grace period
                 * has started, then rnp->gpnum will have advanced, so
                 * we will detect this later on.
                 */
                if (ULONG_CMP_LT(rdp->gpnum, rdp->completed))
                        rdp->gpnum = rdp->completed;

                /*
                 * If RCU does not need a quiescent state from this CPU,
                 * then make sure that this CPU doesn't go looking for one.
                 */
                if ((rnp->qsmask & rdp->grpmask) == 0)
                        rdp->qs_pending = 0;
        }
}

/*
 * Advance this CPU's callbacks, but only if the current grace period
 * has ended.  This may be called only from the CPU to whom the rdp
 * belongs.
 */
static void
rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long flags;
        struct rcu_node *rnp;

        local_irq_save(flags);
        rnp = rdp->mynode;
        if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
            !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
                local_irq_restore(flags);
                return;
        }
        __rcu_process_gp_end(rsp, rnp, rdp);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Do per-CPU grace-period initialization for running CPU.  The caller
 * must hold the lock of the leaf rcu_node structure corresponding to
 * this CPU.
 */
static void
rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
{
        /* Prior grace period ended, so advance callbacks for current CPU. */
        __rcu_process_gp_end(rsp, rnp, rdp);

        /*
         * Because this CPU just now started the new grace period, we know
         * that all of its callbacks will be covered by this upcoming grace
         * period, even the ones that were registered arbitrarily recently.
         * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
         *
         * Other CPUs cannot be sure exactly when the grace period started.
         * Therefore, their recently registered callbacks must pass through
         * an additional RCU_NEXT_READY stage, so that they will be handled
         * by the next RCU grace period.
         */
        rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
        rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

        /* Set state so that this CPU will detect the next quiescent state. */
        __note_new_gpnum(rsp, rnp, rdp);
}
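
/*
 * Layout of the segmented callback list manipulated above (segment sizes
 * invented for illustration):
 *
 *   nxtlist -> [done] -> [waiting] -> [next-ready] -> [next]
 *              ^DONE_TAIL ^WAIT_TAIL  ^NEXT_READY_TAIL ^NEXT_TAIL
 *
 * Each nxttail[] entry points to the ->next pointer that ends its segment.
 * When rnp->completed advances, __rcu_process_gp_end() slides the earlier
 * tail pointers forward, promoting WAIT callbacks to DONE and NEXT_READY
 * callbacks to WAIT without touching a single list node.
 */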

/*
 * Start a new RCU grace period if warranted, re-initializing the hierarchy
 * in preparation for detecting the next grace period.  The caller must hold
 * the root node's ->lock, which is released before return.  Hard irqs must
 * be disabled.
 */
static void
rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
{
        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
        struct rcu_node *rnp = rcu_get_root(rsp);

        if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
                if (cpu_needs_another_gp(rsp, rdp))
                        rsp->fqs_need_gp = 1;
                if (rnp->completed == rsp->completed) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        return;
                }
                raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */

                /*
                 * Propagate new ->completed value to rcu_node structures
                 * so that other CPUs don't have to wait until the start
                 * of the next grace period to process their callbacks.
                 */
                rcu_for_each_node_breadth_first(rsp, rnp) {
                        raw_spin_lock(&rnp->lock); /* irqs already disabled. */
                        rnp->completed = rsp->completed;
                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                }
                local_irq_restore(flags);
                return;
        }

        /* Advance to a new grace period and initialize state. */
        rsp->gpnum++;
        WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
        rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
        rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
        record_gp_stall_check_time(rsp);

        /* Special-case the common single-level case. */
        if (NUM_RCU_NODES == 1) {
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
                rnp->gpnum = rsp->gpnum;
                rnp->completed = rsp->completed;
                rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
                rcu_start_gp_per_cpu(rsp, rnp, rdp);
                rcu_preempt_boost_start_gp(rnp);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }

        raw_spin_unlock(&rnp->lock);  /* leave irqs disabled. */

        /* Exclude any concurrent CPU-hotplug operations. */
        raw_spin_lock(&rsp->onofflock);  /* irqs already disabled. */

        /*
         * Set the quiescent-state-needed bits in all the rcu_node
         * structures for all currently online CPUs in breadth-first
         * order, starting from the root rcu_node structure.  This
         * operation relies on the layout of the hierarchy within the
         * rsp->node[] array.  Note that other CPUs will access only
         * the leaves of the hierarchy, which still indicate that no
         * grace period is in progress, at least until the corresponding
         * leaf node has been initialized.  In addition, we have excluded
         * CPU-hotplug operations.
         *
         * Note that the grace period cannot complete until we finish
         * the initialization process, as there will be at least one
         * qsmask bit set in the root node until that time, namely the
         * one corresponding to this CPU, due to the fact that we have
         * irqs disabled.
         */
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
                rnp->gpnum = rsp->gpnum;
                rnp->completed = rsp->completed;
                if (rnp == rdp->mynode)
                        rcu_start_gp_per_cpu(rsp, rnp, rdp);
                rcu_preempt_boost_start_gp(rnp);
                raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
        }

        rnp = rcu_get_root(rsp);
        raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
        rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
        raw_spin_unlock(&rnp->lock);  /* irqs remain disabled. */
        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
}
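
/*
 * A layout note on the breadth-first scans above (illustrative):
 * rcu_for_each_node_breadth_first() is a linear walk of the rsp->node[]
 * array, which stores the root at node[0] followed by each lower level
 * in order -- e.g. with 128 CPUs and fanout 64, node[0] is the root and
 * node[1..2] are the two leaves.  A linear scan is therefore also a
 * breadth-first scan, so every parent is initialized before its children.
 */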

/*
 * Report a full set of quiescent states to the specified rcu_state
 * data structure.  This involves cleaning up after the prior grace
 * period and letting rcu_start_gp() start up the next grace period
 * if one is needed.  Note that the caller must hold rnp->lock, as
 * required by rcu_start_gp(), which will release it.
 */
static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
{
        unsigned long gp_duration;

        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));

        /*
         * Ensure that all grace-period and pre-grace-period activity
         * is seen before the assignment to rsp->completed.
         */
        smp_mb(); /* See above block comment. */
        gp_duration = jiffies - rsp->gp_start;
        if (gp_duration > rsp->gp_max)
                rsp->gp_max = gp_duration;
        rsp->completed = rsp->gpnum;
        rsp->signaled = RCU_GP_IDLE;
        rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
}

/*
 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
 * Allows quiescent states for a group of CPUs to be reported at one go
 * to the specified rcu_node structure, though all the CPUs in the group
 * must be represented by the same rcu_node structure (which need not be
 * a leaf rcu_node structure, though it often will be).  That structure's
 * lock must be held upon entry, and it is released before return.
 */
static void
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                  struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
{
        struct rcu_node *rnp_c;

        /* Walk up the rcu_node hierarchy. */
        for (;;) {
                if (!(rnp->qsmask & mask)) {

                        /* Our bit has already been cleared, so done. */
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        return;
                }
                rnp->qsmask &= ~mask;
                if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {

                        /* Other bits still set at this level, so done. */
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        return;
                }
                mask = rnp->grpmask;
                if (rnp->parent == NULL) {

                        /* No more levels.  Exit loop holding root lock. */
                        break;
                }
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                rnp_c = rnp;
                rnp = rnp->parent;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                WARN_ON_ONCE(rnp_c->qsmask);
        }

        /*
         * Get here if we are the last CPU to pass through a quiescent
         * state for this grace period.  Invoke rcu_report_qs_rsp()
         * to clean up and start the next grace period if one is needed.
         */
        rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
}
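
/*
 * Propagation example (invented values): suppose CPU 5 is the last CPU in
 * its leaf to check in.  The first pass through the loop clears its bit in
 * the leaf's ->qsmask; the leaf's mask is now zero, so mask becomes the
 * leaf's own ->grpmask and the walk moves up, clearing the leaf's bit in
 * the parent.  The walk stops at the first level whose mask remains
 * nonzero; if even the root's mask empties, rcu_report_qs_rsp() above
 * ends the grace period.
 */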

/*
 * Record a quiescent state for the specified CPU to that CPU's rcu_data
 * structure.  This must be either called from the specified CPU, or
 * called when the specified CPU is known to be offline (and when it is
 * also known that no other CPU is concurrently trying to help the offline
 * CPU).  The lastcomp argument is used to make sure we are still in the
 * grace period of interest.  We don't want to end the current grace period
 * based on quiescent states detected in an earlier grace period!
 */
static void
rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
{
        unsigned long flags;
        unsigned long mask;
        struct rcu_node *rnp;

        rnp = rdp->mynode;
        raw_spin_lock_irqsave(&rnp->lock, flags);
        if (lastcomp != rnp->completed) {

                /*
                 * Someone beat us to it for this grace period, so leave.
                 * The race with GP start is resolved by the fact that we
                 * hold the leaf rcu_node lock, so that the per-CPU bits
                 * cannot yet be initialized -- so we would simply find our
                 * CPU's bit already cleared in rcu_report_qs_rnp() if this
                 * race occurred.
                 */
                rdp->passed_quiesc = 0; /* try again later! */
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
        mask = rdp->grpmask;
        if ((rnp->qsmask & mask) == 0) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        } else {
                rdp->qs_pending = 0;

                /*
                 * This GP can't end until cpu checks in, so all of our
                 * callbacks can be processed during the next GP.
                 */
                rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];

                rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
        }
}

/*
 * Check to see if there is a new grace period of which this CPU
 * is not yet aware, and if so, set up local rcu_data state for it.
 * Otherwise, see if this CPU has just passed through its first
 * quiescent state for this grace period, and record that fact if so.
 */
static void
rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
{
        /* If there is now a new grace period, record and return. */
        if (check_for_new_grace_period(rsp, rdp))
                return;

        /*
         * Does this CPU still need to do its part for current grace period?
         * If no, return and let the other CPUs do their part as well.
         */
        if (!rdp->qs_pending)
                return;

        /*
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
        if (!rdp->passed_quiesc)
                return;

        /*
         * Tell RCU we are done (but rcu_report_qs_rdp() will be the
         * judge of that).
         */
        rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Move a dying CPU's RCU callbacks to an online CPU's callback list.
 * Synchronization is not required because this function executes
 * in stop_machine() context.
 */
static void rcu_send_cbs_to_online(struct rcu_state *rsp)
{
        int i;
        /* current DYING CPU is cleared in the cpu_online_mask */
        int receive_cpu = cpumask_any(cpu_online_mask);
        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
        struct rcu_data *receive_rdp = per_cpu_ptr(rsp->rda, receive_cpu);

        if (rdp->nxtlist == NULL)
                return;  /* irqs disabled, so comparison is stable. */

        *receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
        receive_rdp->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
        receive_rdp->qlen += rdp->qlen;
        receive_rdp->n_cbs_adopted += rdp->qlen;
        rdp->n_cbs_orphaned += rdp->qlen;

        rdp->nxtlist = NULL;
        for (i = 0; i < RCU_NEXT_SIZE; i++)
                rdp->nxttail[i] = &rdp->nxtlist;
        rdp->qlen = 0;
}
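
/*
 * Splice illustration (callback names invented): if the dying CPU holds
 * callbacks A->B->C, the code above appends the whole chain to the
 * receiving CPU's RCU_NEXT_TAIL segment with a single pointer store, then
 * resets every tail pointer of the dying CPU to its now-empty list head.
 * The adopted callbacks wait for a full grace period on the receiving CPU.
 */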

/*
 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
 * and move all callbacks from the outgoing CPU to the current one.
 * There can only be one CPU hotplug operation at a time, so no other
 * CPU can be attempting to update rcu_cpu_kthread_task.
 */
static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
{
        unsigned long flags;
        unsigned long mask;
        int need_report = 0;
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp;

        rcu_stop_cpu_kthread(cpu);

        /* Exclude any attempts to start a new grace period. */
        raw_spin_lock_irqsave(&rsp->onofflock, flags);

        /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
        rnp = rdp->mynode;      /* this is the outgoing CPU's rnp. */
        mask = rdp->grpmask;    /* rnp->grplo is constant. */
        do {
                raw_spin_lock(&rnp->lock);      /* irqs already disabled. */
                rnp->qsmaskinit &= ~mask;
                if (rnp->qsmaskinit != 0) {
                        if (rnp != rdp->mynode)
                                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                        break;
                }
                if (rnp == rdp->mynode)
                        need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
                else
                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                mask = rnp->grpmask;
                rnp = rnp->parent;
        } while (rnp != NULL);

        /*
         * We still hold the leaf rcu_node structure lock here, and
         * irqs are still disabled.  The reason for this subterfuge is
         * that invoking rcu_report_unblock_qs_rnp() with ->onofflock
         * held leads to deadlock.
         */
        raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
        rnp = rdp->mynode;
        if (need_report & RCU_OFL_TASKS_NORM_GP)
                rcu_report_unblock_qs_rnp(rnp, flags);
        else
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        if (need_report & RCU_OFL_TASKS_EXP_GP)
                rcu_report_exp_rnp(rsp, rnp);
        rcu_node_kthread_setaffinity(rnp, -1);
}

/*
 * Remove the specified CPU from the RCU hierarchy and move any pending
 * callbacks that it might have to the current CPU.  This code assumes
 * that at least one CPU in the system will remain running at all times.
 * Any attempt to offline -all- CPUs is likely to strand RCU callbacks.
 */
static void rcu_offline_cpu(int cpu)
{
        __rcu_offline_cpu(cpu, &rcu_sched_state);
        __rcu_offline_cpu(cpu, &rcu_bh_state);
        rcu_preempt_offline_cpu(cpu);
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_send_cbs_to_online(struct rcu_state *rsp)
{
}

static void rcu_offline_cpu(int cpu)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period.  Throttle as specified by rdp->blimit.
 */
static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
        unsigned long flags;
        struct rcu_head *next, *list, **tail;
        int count;

        /* If no callbacks are ready, just return. */
        if (!cpu_has_callbacks_ready_to_invoke(rdp))
                return;

        /*
         * Extract the list of ready callbacks, disabling to prevent
         * races with call_rcu() from interrupt handlers.
         */
        local_irq_save(flags);
        list = rdp->nxtlist;
        rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
        *rdp->nxttail[RCU_DONE_TAIL] = NULL;
        tail = rdp->nxttail[RCU_DONE_TAIL];
        for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
                if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
                        rdp->nxttail[count] = &rdp->nxtlist;
        local_irq_restore(flags);

        /* Invoke callbacks. */
        count = 0;
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                __rcu_reclaim(list);
                list = next;
                if (++count >= rdp->blimit)
                        break;
        }

        local_irq_save(flags);

        /* Update count, and requeue any remaining callbacks. */
        rdp->qlen -= count;
        rdp->n_cbs_invoked += count;
        if (list != NULL) {
                *tail = rdp->nxtlist;
                rdp->nxtlist = list;
                for (count = 0; count < RCU_NEXT_SIZE; count++)
                        if (&rdp->nxtlist == rdp->nxttail[count])
                                rdp->nxttail[count] = tail;
                        else
                                break;
        }

        /* Reinstate batch limit if we have worked down the excess. */
        if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;

        /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
        if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
                rdp->qlen_last_fqs_check = 0;
                rdp->n_force_qs_snap = rsp->n_force_qs;
        } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
                rdp->qlen_last_fqs_check = rdp->qlen;

        local_irq_restore(flags);

        /* Re-raise the RCU softirq if there are callbacks remaining. */
        if (cpu_has_callbacks_ready_to_invoke(rdp))
                invoke_rcu_core();
}
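
/*
 * Batch-limit behavior in numbers (illustrative, using the module-parameter
 * defaults declared above): a CPU normally invokes at most blimit == 10
 * callbacks per pass.  The enqueue path raises rdp->blimit to LONG_MAX
 * once more than qhimark == 10000 callbacks are pending, letting the
 * backlog drain; when the queue falls to qlowmark == 100 or fewer, the
 * code above restores the normal limit.
 */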

/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
 * Also schedule the RCU softirq handler.
 *
 * This function must be called with hardirqs disabled.  It is normally
 * invoked from the scheduling-clock interrupt.  If rcu_pending returns
 * false, there is no point in invoking rcu_check_callbacks().
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) && rcu_scheduler_active &&
             !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {

                /*
                 * Get here if this CPU took its interrupt from user
                 * mode or from the idle loop, and if this is not a
                 * nested interrupt.  In this case, the CPU is in
                 * a quiescent state, so note it.
                 *
                 * No memory barrier is required here because both
                 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
                 * variables that other CPUs neither access nor modify,
                 * at least not while the corresponding CPU is online.
                 */

                rcu_sched_qs(cpu);
                rcu_bh_qs(cpu);

        } else if (!in_softirq()) {

                /*
                 * Get here if this CPU did not take its interrupt from
                 * softirq, in other words, if it is not interrupting
                 * a rcu_bh read-side critical section.  This is an _bh
                 * critical section, so note it.
                 */

                rcu_bh_qs(cpu);
        }
        rcu_preempt_check_callbacks(cpu);
        if (rcu_pending(cpu))
                invoke_rcu_core();
}
}

#ifdef CONFIG_SMP

/*
 * Scan the leaf rcu_node structures, processing dyntick state for any that
 * have not yet encountered a quiescent state, using the function specified.
27f4d2805 rcu: priority boo... |
1244 1245 |
 * Also initiate boosting for any threads blocked on the root rcu_node.
 *
ee47eb9f4 rcu: Remove leg o... |
1246 |
* The caller must have suppressed start of new grace periods. |
64db4cfff "Tree RCU": scala... |
1247 |
*/ |
45f014c52 rcu: Remove redun... |
1248 |
static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) |
64db4cfff "Tree RCU": scala... |
1249 1250 1251 1252 1253 |
{
	unsigned long bit;
	int cpu;
	unsigned long flags;
	unsigned long mask;
a0b6c9a78 rcu: Clean up cod... |
1254 |
struct rcu_node *rnp; |
64db4cfff "Tree RCU": scala... |
1255 |
|
a0b6c9a78 rcu: Clean up cod... |
1256 |
rcu_for_each_leaf_node(rsp, rnp) { |
64db4cfff "Tree RCU": scala... |
1257 |
mask = 0; |
1304afb22 rcu: Convert to r... |
1258 |
raw_spin_lock_irqsave(&rnp->lock, flags); |
ee47eb9f4 rcu: Remove leg o... |
1259 |
if (!rcu_gp_in_progress(rsp)) { |
1304afb22 rcu: Convert to r... |
1260 |
raw_spin_unlock_irqrestore(&rnp->lock, flags); |
0f10dc826 rcu: Eliminate rc... |
1261 |
return; |
64db4cfff "Tree RCU": scala... |
1262 |
} |
a0b6c9a78 rcu: Clean up cod... |
1263 |
if (rnp->qsmask == 0) { |
1217ed1ba rcu: permit rcu_r... |
1264 |
rcu_initiate_boost(rnp, flags); /* releases rnp->lock */ |
64db4cfff "Tree RCU": scala... |
1265 1266 |
			continue;
		}
a0b6c9a78 rcu: Clean up cod... |
1267 |
cpu = rnp->grplo; |
64db4cfff "Tree RCU": scala... |
1268 |
bit = 1; |
a0b6c9a78 rcu: Clean up cod... |
1269 |
for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { |
394f99a90 rcu: simplify the... |
1270 1271 |
if ((rnp->qsmask & bit) != 0 && f(per_cpu_ptr(rsp->rda, cpu))) |
64db4cfff "Tree RCU": scala... |
1272 1273 |
				mask |= bit;
		}
45f014c52 rcu: Remove redun... |
1274 |
if (mask != 0) { |
64db4cfff "Tree RCU": scala... |
1275 |
|
d3f6bad39 rcu: Rename "quie... |
1276 1277 |
			/* rcu_report_qs_rnp() releases rnp->lock. */
			rcu_report_qs_rnp(mask, rsp, rnp, flags);
64db4cfff "Tree RCU": scala... |
1278 1279 |
			continue;
		}
1304afb22 rcu: Convert to r... |
1280 |
raw_spin_unlock_irqrestore(&rnp->lock, flags); |
64db4cfff "Tree RCU": scala... |
1281 |
} |
27f4d2805 rcu: priority boo... |
1282 |
rnp = rcu_get_root(rsp); |
1217ed1ba rcu: permit rcu_r... |
1283 1284 1285 1286 |
	if (rnp->qsmask == 0) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
	}
64db4cfff "Tree RCU": scala... |
1287 1288 1289 1290 1291 1292 1293 1294 1295 |
}

/*
 * Force quiescent states on reluctant CPUs, and also detect which
 * CPUs are in dyntick-idle mode.
 */
static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
{
	unsigned long flags;
64db4cfff "Tree RCU": scala... |
1296 |
struct rcu_node *rnp = rcu_get_root(rsp); |
64db4cfff "Tree RCU": scala... |
1297 |
|
fc2219d49 rcu: Clean up cod... |
1298 |
if (!rcu_gp_in_progress(rsp)) |
64db4cfff "Tree RCU": scala... |
1299 |
return; /* No grace period in progress, nothing to force. */ |
1304afb22 rcu: Convert to r... |
1300 |
if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) { |
64db4cfff "Tree RCU": scala... |
1301 1302 1303 |
		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
		return;	/* Someone else is already on the job. */
	}
20133cfce rcu: Stop overflo... |
1304 |
if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies)) |
f96e9232e rcu: Adjust force... |
1305 |
goto unlock_fqs_ret; /* no emergency and done recently. */ |
64db4cfff "Tree RCU": scala... |
1306 |
rsp->n_force_qs++; |
1304afb22 rcu: Convert to r... |
1307 |
raw_spin_lock(&rnp->lock); /* irqs already disabled */ |
64db4cfff "Tree RCU": scala... |
1308 |
rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
560d4bc0d rcu: Further clea... |
1309 |
	if (!rcu_gp_in_progress(rsp)) {
64db4cfff "Tree RCU": scala... |
1310 |
rsp->n_force_qs_ngp++; |
1304afb22 rcu: Convert to r... |
1311 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ |
f96e9232e rcu: Adjust force... |
1312 |
goto unlock_fqs_ret; /* no GP in progress, time updated. */ |
64db4cfff "Tree RCU": scala... |
1313 |
} |
07079d535 rcu: Prohibit sta... |
1314 |
rsp->fqs_active = 1; |
f3a8b5c6a rcu: Eliminate lo... |
1315 |
switch (rsp->signaled) { |
83f5b01ff rcu: Fix long-gra... |
1316 |
case RCU_GP_IDLE: |
64db4cfff "Tree RCU": scala... |
1317 |
case RCU_GP_INIT: |
83f5b01ff rcu: Fix long-gra... |
1318 |
break; /* grace period idle or initializing, ignore. */ |
64db4cfff "Tree RCU": scala... |
1319 1320 |
case RCU_SAVE_DYNTICK: |
64db4cfff "Tree RCU": scala... |
1321 1322 |
		if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
			break; /* So gcc recognizes the dead code. */
f261414f0 rcu: make dead co... |
1323 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ |
64db4cfff "Tree RCU": scala... |
1324 |
/* Record dyntick-idle state. */ |
45f014c52 rcu: Remove redun... |
1325 |
force_qs_rnp(rsp, dyntick_save_progress_counter); |
1304afb22 rcu: Convert to r... |
1326 |
raw_spin_lock(&rnp->lock); /* irqs already disabled */ |
ee47eb9f4 rcu: Remove leg o... |
1327 |
if (rcu_gp_in_progress(rsp)) |
64db4cfff "Tree RCU": scala... |
1328 |
rsp->signaled = RCU_FORCE_QS; |
ee47eb9f4 rcu: Remove leg o... |
1329 |
break; |
64db4cfff "Tree RCU": scala... |
1330 1331 1332 1333 |
	case RCU_FORCE_QS:

		/* Check dyntick-idle state, send IPI to laggards. */
1304afb22 rcu: Convert to r... |
1334 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ |
45f014c52 rcu: Remove redun... |
1335 |
force_qs_rnp(rsp, rcu_implicit_dynticks_qs); |
64db4cfff "Tree RCU": scala... |
1336 1337 |
/* Leave state in case more forcing is required. */ |
1304afb22 rcu: Convert to r... |
1338 |
raw_spin_lock(&rnp->lock); /* irqs already disabled */ |
f96e9232e rcu: Adjust force... |
1339 |
break; |
64db4cfff "Tree RCU": scala... |
1340 |
} |
07079d535 rcu: Prohibit sta... |
1341 |
rsp->fqs_active = 0; |
46a1e34ed rcu: Make force_q... |
1342 |
if (rsp->fqs_need_gp) { |
1304afb22 rcu: Convert to r... |
1343 |
raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */ |
46a1e34ed rcu: Make force_q... |
1344 1345 1346 1347 |
		rsp->fqs_need_gp = 0;
		rcu_start_gp(rsp, flags); /* releases rnp->lock */
		return;
	}
1304afb22 rcu: Convert to r... |
1348 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ |
f96e9232e rcu: Adjust force... |
1349 |
unlock_fqs_ret: |
1304afb22 rcu: Convert to r... |
1350 |
raw_spin_unlock_irqrestore(&rsp->fqslock, flags); |
64db4cfff "Tree RCU": scala... |
1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 |
}

#else /* #ifdef CONFIG_SMP */

static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
{
	set_need_resched();
}

#endif /* #else #ifdef CONFIG_SMP */

/*
 * This does the RCU processing work from softirq context for the
 * specified rcu_state and rcu_data structures.  This may be called
 * only from the CPU to whom the rdp belongs.
 */
static void
__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
2e5975580 rcu: Simplify RCU... |
1371 |
WARN_ON_ONCE(rdp->beenonline == 0); |
64db4cfff "Tree RCU": scala... |
1372 1373 1374 1375 |

	/*
	 * If an RCU GP has gone long enough, go check for dyntick
	 * idle CPUs and, if needed, send resched IPIs.
	 */
20133cfce rcu: Stop overflo... |
1376 |
if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) |
64db4cfff "Tree RCU": scala... |
1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 |
		force_quiescent_state(rsp, 1);

	/*
	 * Advance callbacks in response to end of earlier grace
	 * period that some other CPU ended.
	 */
	rcu_process_gp_end(rsp, rdp);

	/* Update RCU state based on any recent quiescent states. */
	rcu_check_quiescent_state(rsp, rdp);

	/* Does this CPU require a not-yet-started grace period? */
	if (cpu_needs_another_gp(rsp, rdp)) {
1304afb22 rcu: Convert to r... |
1390 |
raw_spin_lock_irqsave(&rcu_get_root(rsp)->lock, flags); |
64db4cfff "Tree RCU": scala... |
1391 1392 1393 1394 |
		rcu_start_gp(rsp, flags);  /* releases above lock */
	}

	/* If there are callbacks ready, invoke them. */
09223371d rcu: Use softirq ... |
1395 |
if (cpu_has_callbacks_ready_to_invoke(rdp)) |
a46e0899e rcu: use softirq ... |
1396 |
invoke_rcu_callbacks(rsp, rdp); |
09223371d rcu: Use softirq ... |
1397 |
} |
64db4cfff "Tree RCU": scala... |
1398 1399 1400 |
/*
 * Do softirq processing for the current CPU.
 */
09223371d rcu: Use softirq ... |
1401 |
static void rcu_process_callbacks(struct softirq_action *unused) |
64db4cfff "Tree RCU": scala... |
1402 |
{ |
d6714c22b rcu: Renamings to... |
1403 1404 |
__rcu_process_callbacks(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); |
64db4cfff "Tree RCU": scala... |
1405 |
__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); |
f41d911f8 rcu: Merge preemp... |
1406 |
rcu_preempt_process_callbacks(); |
a47cd880b rcu: Fix accelera... |
1407 1408 1409 |

	/* If we are last CPU on way to dyntick-idle mode, accelerate it. */
	rcu_needs_cpu_flush();
64db4cfff "Tree RCU": scala... |
1410 |
} |
a26ac2455 rcu: move TREE_RC... |
1411 1412 1413 1414 1415 1416 |
/*
 * Wake up the current CPU's kthread.  This replaces raise_softirq()
 * in earlier versions of RCU.  Note that because we are running on
 * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
 * cannot disappear out from under us.
 */
a46e0899e rcu: use softirq ... |
1417 |
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) |
a26ac2455 rcu: move TREE_RC... |
1418 |
{ |
a46e0899e rcu: use softirq ... |
1419 1420 |
	if (likely(!rsp->boost)) {
		rcu_do_batch(rsp, rdp);
a26ac2455 rcu: move TREE_RC... |
1421 1422 |
		return;
	}
a46e0899e rcu: use softirq ... |
1423 |
invoke_rcu_callbacks_kthread(); |
a26ac2455 rcu: move TREE_RC... |
1424 |
} |
a46e0899e rcu: use softirq ... |
1425 |
static void invoke_rcu_core(void) |
09223371d rcu: Use softirq ... |
1426 1427 1428 |
{
	raise_softirq(RCU_SOFTIRQ);
}
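/*
 * A minimal sketch of how the pieces above fit together, using only what
 * this file itself sets up (see open_softirq() in rcu_init() below):
 *
 *	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 *
 *	invoke_rcu_core()		   marks RCU_SOFTIRQ pending here
 *	  -> rcu_process_callbacks()	   runs at the next softirq point
 *	    -> __rcu_process_callbacks()   once per RCU flavor
 *	      -> invoke_rcu_callbacks()	   rcu_do_batch() or boost kthread
 */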
64db4cfff "Tree RCU": scala... |
1429 1430 1431 1432 1433 1434 |
static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
	   struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp;
551d55a94 tree/tiny rcu: Ad... |
1435 |
debug_rcu_head_queue(head); |
64db4cfff "Tree RCU": scala... |
1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 |
	head->func = func;
	head->next = NULL;

	smp_mb(); /* Ensure RCU update seen before callback registry. */

	/*
	 * Opportunistically note grace-period endings and beginnings.
	 * Note that we might see a beginning right after we see an
	 * end, but never vice versa, since this CPU has to pass through
	 * a quiescent state betweentimes.
	 */
	local_irq_save(flags);
394f99a90 rcu: simplify the... |
1448 |
rdp = this_cpu_ptr(rsp->rda); |
64db4cfff "Tree RCU": scala... |
1449 1450 1451 1452 |

	/* Add the callback to our list. */
	*rdp->nxttail[RCU_NEXT_TAIL] = head;
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
2655d57ef rcu: prevent call... |
1453 1454 1455 1456 1457 1458 1459 |
	rdp->qlen++;

	/* If interrupts were disabled, don't dive into RCU core. */
	if (irqs_disabled_flags(flags)) {
		local_irq_restore(flags);
		return;
	}
64db4cfff "Tree RCU": scala... |
1460 |
|
37c72e56f rcu: Prevent RCU ... |
1461 1462 1463 1464 1465 1466 1467 |
	/*
	 * Force the grace period if too many callbacks or too long waiting.
	 * Enforce hysteresis, and don't invoke force_quiescent_state()
	 * if some other CPU has recently done so.  Also, don't bother
	 * invoking force_quiescent_state() if the newly enqueued callback
	 * is the only one waiting for a grace period to complete.
	 */
2655d57ef rcu: prevent call... |
1468 |
if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { |
b52573d27 rcu: reduce __cal... |
1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 |
		/* Are we ignoring a completed grace period? */
		rcu_process_gp_end(rsp, rdp);
		check_for_new_grace_period(rsp, rdp);

		/* Start a new grace period if one not already started. */
		if (!rcu_gp_in_progress(rsp)) {
			unsigned long nestflag;
			struct rcu_node *rnp_root = rcu_get_root(rsp);

			raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
			rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock */
		} else {
			/* Give the grace period a kick. */
			rdp->blimit = LONG_MAX;
			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
			    *rdp->nxttail[RCU_DONE_TAIL] != head)
				force_quiescent_state(rsp, 0);
			rdp->n_force_qs_snap = rsp->n_force_qs;
			rdp->qlen_last_fqs_check = rdp->qlen;
		}
20133cfce rcu: Stop overflo... |
1490 |
} else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) |
64db4cfff "Tree RCU": scala... |
1491 1492 1493 1494 1495 |
		force_quiescent_state(rsp, 1);
	local_irq_restore(flags);
}

/*
d6714c22b rcu: Renamings to... |
1496 |
* Queue an RCU-sched callback for invocation after a grace period. |
64db4cfff "Tree RCU": scala... |
1497 |
*/ |
d6714c22b rcu: Renamings to... |
1498 |
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) |
64db4cfff "Tree RCU": scala... |
1499 |
{ |
d6714c22b rcu: Renamings to... |
1500 |
__call_rcu(head, func, &rcu_sched_state); |
64db4cfff "Tree RCU": scala... |
1501 |
} |
d6714c22b rcu: Renamings to... |
1502 |
EXPORT_SYMBOL_GPL(call_rcu_sched); |
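/*
 * A hypothetical caller, for illustration only: struct foo, its list, and
 * foo_reclaim() are invented here, but the embedded rcu_head plus
 * container_of() pattern is the standard way to use call_rcu_sched():
 *
 *	struct foo {
 *		struct list_head list;
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	list_del_rcu(&fp->list);
 *	call_rcu_sched(&fp->rcu, foo_reclaim);
 *
 * The callback then runs after all pre-existing preempt_disable()-style
 * read-side critical sections have completed.
 */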
64db4cfff "Tree RCU": scala... |
1503 1504 1505 1506 1507 1508 1509 1510 1511 |
/*
 * Queue an RCU callback for invocation after a quicker grace period.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_state);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
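/*
 * call_rcu_bh() follows the same pattern as call_rcu_sched() above, but is
 * meant for updates whose readers run under rcu_read_lock_bh().  A sketch,
 * with an invented helper for illustration:
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p)
 *		do_something_with(p);	(hypothetical)
 *	rcu_read_unlock_bh();
 *
 * Because softirq-disabled regions bound the read side, the "quicker"
 * grace period only has to wait for those regions to drain.
 */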
6ebb237be rcu: Re-arrange c... |
1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 |
/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed.  These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
void synchronize_sched(void)
{
	struct rcu_synchronize rcu;

	if (rcu_blocking_is_gp())
		return;
72d5a9f7a rcu: remove all r... |
1541 |
init_rcu_head_on_stack(&rcu.head); |
6ebb237be rcu: Re-arrange c... |
1542 1543 1544 1545 1546 |
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_sched(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
72d5a9f7a rcu: remove all r... |
1547 |
destroy_rcu_head_on_stack(&rcu.head); |
6ebb237be rcu: Re-arrange c... |
1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 |
}
EXPORT_SYMBOL_GPL(synchronize_sched);

/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
 */
void synchronize_rcu_bh(void)
{
	struct rcu_synchronize rcu;

	if (rcu_blocking_is_gp())
		return;
72d5a9f7a rcu: remove all r... |
1566 |
init_rcu_head_on_stack(&rcu.head); |
6ebb237be rcu: Re-arrange c... |
1567 1568 1569 1570 1571 |
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu_bh(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
72d5a9f7a rcu: remove all r... |
1572 |
destroy_rcu_head_on_stack(&rcu.head); |
6ebb237be rcu: Re-arrange c... |
1573 1574 |
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
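/*
 * A common updater pattern for the two primitives above, sketched with a
 * hypothetical global pointer gp whose readers use preempt_disable()-style
 * critical sections:
 *
 *	old = gp;
 *	rcu_assign_pointer(gp, new);
 *	synchronize_sched();	all prior read-side sections have completed
 *	kfree(old);		now safe to reclaim
 *
 * synchronize_rcu_bh() is used the same way when the readers are
 * rcu_read_lock_bh() sections.
 */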
64db4cfff "Tree RCU": scala... |
1575 1576 1577 1578 1579 1580 1581 1582 1583 |
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first.  However,
 * we must check for CPU stalls first, else we might not get a chance.
 */
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{
2f51f9884 rcu: Eliminate __... |
1584 |
struct rcu_node *rnp = rdp->mynode; |
64db4cfff "Tree RCU": scala... |
1585 1586 1587 1588 1589 1590 |
	rdp->n_rcu_pending++;

	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rsp, rdp);

	/* Is the RCU core waiting for a quiescent state from this CPU? */
d21670aca rcu: reduce the n... |
1591 |
if (rdp->qs_pending && !rdp->passed_quiesc) { |
d25eb9442 rcu: substitute s... |
1592 1593 1594 1595 1596 1597 |

		/*
		 * If force_quiescent_state() is coming soon and this CPU
		 * needs a quiescent state, and this is either RCU-sched
		 * or RCU-bh, force a local reschedule.
		 */
d21670aca rcu: reduce the n... |
1598 |
rdp->n_rp_qs_pending++; |
6cc68793e rcu: fix spelling |
1599 |
if (!rdp->preemptible && |
d25eb9442 rcu: substitute s... |
1600 1601 1602 |
		    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
				 jiffies))
			set_need_resched();
d21670aca rcu: reduce the n... |
1603 1604 |
	} else if (rdp->qs_pending && rdp->passed_quiesc) {
		rdp->n_rp_report_qs++;
64db4cfff "Tree RCU": scala... |
1605 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1606 |
} |
64db4cfff "Tree RCU": scala... |
1607 1608 |
/* Does this CPU have callbacks ready to invoke? */ |
7ba5c840e rcu: Add __rcu_pe... |
1609 1610 |
	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
		rdp->n_rp_cb_ready++;
64db4cfff "Tree RCU": scala... |
1611 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1612 |
} |
64db4cfff "Tree RCU": scala... |
1613 1614 |
/* Has RCU gone idle with this CPU needing another grace period? */ |
7ba5c840e rcu: Add __rcu_pe... |
1615 1616 |
	if (cpu_needs_another_gp(rsp, rdp)) {
		rdp->n_rp_cpu_needs_gp++;
64db4cfff "Tree RCU": scala... |
1617 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1618 |
} |
64db4cfff "Tree RCU": scala... |
1619 1620 |
/* Has another RCU grace period completed? */ |
2f51f9884 rcu: Eliminate __... |
1621 |
if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ |
7ba5c840e rcu: Add __rcu_pe... |
1622 |
rdp->n_rp_gp_completed++; |
64db4cfff "Tree RCU": scala... |
1623 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1624 |
} |
64db4cfff "Tree RCU": scala... |
1625 1626 |
/* Has a new RCU grace period started? */ |
2f51f9884 rcu: Eliminate __... |
1627 |
if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ |
7ba5c840e rcu: Add __rcu_pe... |
1628 |
rdp->n_rp_gp_started++; |
64db4cfff "Tree RCU": scala... |
1629 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1630 |
} |
64db4cfff "Tree RCU": scala... |
1631 1632 |
/* Has an RCU GP gone long enough to send resched IPIs &c? */ |
fc2219d49 rcu: Clean up cod... |
1633 |
if (rcu_gp_in_progress(rsp) && |
20133cfce rcu: Stop overflo... |
1634 |
ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) { |
7ba5c840e rcu: Add __rcu_pe... |
1635 |
rdp->n_rp_need_fqs++; |
64db4cfff "Tree RCU": scala... |
1636 |
return 1; |
7ba5c840e rcu: Add __rcu_pe... |
1637 |
} |
64db4cfff "Tree RCU": scala... |
1638 1639 |
/* nothing to do */ |
7ba5c840e rcu: Add __rcu_pe... |
1640 |
rdp->n_rp_need_nothing++; |
64db4cfff "Tree RCU": scala... |
1641 1642 1643 1644 1645 1646 1647 1648 |
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
a157229ca rcu: Simplify rcu... |
1649 |
static int rcu_pending(int cpu) |
64db4cfff "Tree RCU": scala... |
1650 |
{ |
d6714c22b rcu: Renamings to... |
1651 |
return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) || |
f41d911f8 rcu: Merge preemp... |
1652 1653 |
__rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) || rcu_preempt_pending(cpu); |
64db4cfff "Tree RCU": scala... |
1654 1655 1656 1657 1658 |
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
8bd93a2c5 rcu: Accelerate g... |
1659 |
* 1 if so. |
64db4cfff "Tree RCU": scala... |
1660 |
*/ |
8bd93a2c5 rcu: Accelerate g... |
1661 |
static int rcu_needs_cpu_quick_check(int cpu) |
64db4cfff "Tree RCU": scala... |
1662 1663 |
{
	/* RCU callbacks either ready or pending? */
d6714c22b rcu: Renamings to... |
1664 |
return per_cpu(rcu_sched_data, cpu).nxtlist || |
f41d911f8 rcu: Merge preemp... |
1665 1666 |
per_cpu(rcu_bh_data, cpu).nxtlist || rcu_preempt_needs_cpu(cpu); |
64db4cfff "Tree RCU": scala... |
1667 |
} |
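/*
 * For example, a dyntick-idle entry path can use a check like this to
 * decide whether the scheduling-clock tick may be stopped.  A rough
 * sketch, with the calling convention assumed rather than quoted from
 * the idle code:
 *
 *	if (rcu_needs_cpu(cpu))
 *		return;		keep the tick; RCU still has work here
 *	...otherwise stop the tick and enter dyntick-idle mode...
 */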
d0ec774cb rcu: Move rcu_bar... |
1668 1669 1670 1671 |
static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
d0ec774cb rcu: Move rcu_bar... |
1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 |
static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
	int cpu = smp_processor_id();
	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
	void (*call_rcu_func)(struct rcu_head *head,
			      void (*func)(struct rcu_head *head));

	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu_func = type;
	call_rcu_func(head, rcu_barrier_callback);
}
d0ec774cb rcu: Move rcu_bar... |
1693 1694 1695 1696 |
/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
e74f4c456 rcu: Make hot-unp... |
1697 1698 |
static void _rcu_barrier(struct rcu_state *rsp, void (*call_rcu_func)(struct rcu_head *head, |
d0ec774cb rcu: Move rcu_bar... |
1699 1700 1701 |
			 void (*func)(struct rcu_head *head)))
{
	BUG_ON(in_interrupt());
e74f4c456 rcu: Make hot-unp... |
1702 |
/* Take mutex to serialize concurrent rcu_barrier() requests. */ |
d0ec774cb rcu: Move rcu_bar... |
1703 1704 1705 1706 1707 1708 1709 1710 1711 |
	mutex_lock(&rcu_barrier_mutex);
	init_completion(&rcu_barrier_completion);
	/*
	 * Initialize rcu_barrier_cpu_count to 1, then invoke
	 * rcu_barrier_func() on each CPU, so that each CPU also has
	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
	 * might complete its grace period before all of the other CPUs
	 * did their increment, causing this function to return too
2d999e03b rcu: update docum... |
1712 1713 1714 |
	 * early.  Note that on_each_cpu() disables irqs, which prevents
	 * any CPUs from coming online or going offline until each online
	 * CPU has queued its RCU-barrier callback.
d0ec774cb rcu: Move rcu_bar... |
1715 1716 1717 1718 1719 1720 1721 |
	 */
	atomic_set(&rcu_barrier_cpu_count, 1);
	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
	wait_for_completion(&rcu_barrier_completion);
	mutex_unlock(&rcu_barrier_mutex);
d0ec774cb rcu: Move rcu_bar... |
1722 |
} |
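/*
 * The count-starts-at-one trick above is worth restating in isolation.
 * A rough sketch of the same scheme, with the queueing details elided:
 *
 *	atomic_set(&count, 1);			the caller's self-reference
 *	on each CPU:
 *		atomic_inc(&count);
 *		queue a callback that ends with
 *			if (atomic_dec_and_test(&count))
 *				complete(&done);
 *	if (atomic_dec_and_test(&count))	drop the self-reference
 *		complete(&done);
 *	wait_for_completion(&done);
 *
 * The count cannot reach zero before the caller drops its own reference,
 * and by then every CPU has already done its atomic_inc().
 */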
d0ec774cb rcu: Move rcu_bar... |
1723 1724 1725 1726 1727 1728 |
/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
e74f4c456 rcu: Make hot-unp... |
1729 |
_rcu_barrier(&rcu_bh_state, call_rcu_bh); |
d0ec774cb rcu: Move rcu_bar... |
1730 1731 1732 1733 1734 1735 1736 1737 |
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
void rcu_barrier_sched(void)
{
e74f4c456 rcu: Make hot-unp... |
1738 |
_rcu_barrier(&rcu_sched_state, call_rcu_sched); |
d0ec774cb rcu: Move rcu_bar... |
1739 1740 |
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
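/*
 * The classic use case is module unload: a module whose callbacks might
 * still be queued must wait for them before its callback text and data
 * disappear.  A hypothetical exit handler, for illustration:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		...stop posting new call_rcu_sched() callbacks...
 *		rcu_barrier_sched();	wait for queued callbacks to finish
 *		...now safe to free module-private data...
 *	}
 */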
64db4cfff "Tree RCU": scala... |
1741 |
/* |
27569620c rcu: Split hierar... |
1742 |
* Do boot-time initialization of a CPU's per-CPU RCU data. |
64db4cfff "Tree RCU": scala... |
1743 |
*/ |
27569620c rcu: Split hierar... |
1744 1745 |
static void __init rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) |
64db4cfff "Tree RCU": scala... |
1746 1747 1748 |
{
	unsigned long flags;
	int i;
394f99a90 rcu: simplify the... |
1749 |
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
27569620c rcu: Split hierar... |
1750 1751 1752 |
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
1304afb22 rcu: Convert to r... |
1753 |
raw_spin_lock_irqsave(&rnp->lock, flags); |
27569620c rcu: Split hierar... |
1754 1755 1756 1757 1758 1759 1760 1761 1762 |
	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
	rdp->nxtlist = NULL;
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		rdp->nxttail[i] = &rdp->nxtlist;
	rdp->qlen = 0;
#ifdef CONFIG_NO_HZ
	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
#endif /* #ifdef CONFIG_NO_HZ */
	rdp->cpu = cpu;
1304afb22 rcu: Convert to r... |
1763 |
raw_spin_unlock_irqrestore(&rnp->lock, flags); |
27569620c rcu: Split hierar... |
1764 1765 1766 1767 1768 1769 1770 |
}

/*
 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
 * offline event can be happening at a given time.  Note also that we
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
64db4cfff "Tree RCU": scala... |
1771 |
*/ |
e4fa4c970 rcu: add __cpuini... |
1772 |
static void __cpuinit |
6cc68793e rcu: fix spelling |
1773 |
rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) |
64db4cfff "Tree RCU": scala... |
1774 1775 |
{
	unsigned long flags;
64db4cfff "Tree RCU": scala... |
1776 |
unsigned long mask; |
394f99a90 rcu: simplify the... |
1777 |
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); |
64db4cfff "Tree RCU": scala... |
1778 1779 1780 |
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
1304afb22 rcu: Convert to r... |
1781 |
raw_spin_lock_irqsave(&rnp->lock, flags); |
64db4cfff "Tree RCU": scala... |
1782 1783 1784 |
	rdp->passed_quiesc = 0;  /* We could be racing with new GP, */
	rdp->qs_pending = 1;	 /* so set up to respond to current GP. */
	rdp->beenonline = 1;	 /* We have now been online. */
6cc68793e rcu: fix spelling |
1785 |
rdp->preemptible = preemptible; |
37c72e56f rcu: Prevent RCU ... |
1786 1787 |
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rsp->n_force_qs;
64db4cfff "Tree RCU": scala... |
1788 |
rdp->blimit = blimit; |
1304afb22 rcu: Convert to r... |
1789 |
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
64db4cfff "Tree RCU": scala... |
1790 1791 1792 1793 1794 1795 1796 |

	/*
	 * A new grace period might start here.  If so, we won't be part
	 * of it, but that is OK, as we are currently in a quiescent state.
	 */

	/* Exclude any attempts to start a new GP on large systems. */
1304afb22 rcu: Convert to r... |
1797 |
raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */ |
64db4cfff "Tree RCU": scala... |
1798 1799 1800 1801 1802 1803 |

	/* Add CPU to rcu_node bitmasks. */
	rnp = rdp->mynode;
	mask = rdp->grpmask;
	do {
		/* Exclude any attempts to start a new GP on small systems. */
1304afb22 rcu: Convert to r... |
1804 |
raw_spin_lock(&rnp->lock); /* irqs already disabled. */ |
64db4cfff "Tree RCU": scala... |
1805 1806 |
		rnp->qsmaskinit |= mask;
		mask = rnp->grpmask;
d09b62dfa rcu: Fix synchron... |
1807 1808 1809 1810 1811 |
		if (rnp == rdp->mynode) {
			rdp->gpnum = rnp->completed; /* if GP in progress... */
			rdp->completed = rnp->completed;
			rdp->passed_quiesc_completed = rnp->completed - 1;
		}
1304afb22 rcu: Convert to r... |
1812 |
raw_spin_unlock(&rnp->lock); /* irqs already disabled. */ |
64db4cfff "Tree RCU": scala... |
1813 1814 |
		rnp = rnp->parent;
	} while (rnp != NULL && !(rnp->qsmaskinit & mask));
1304afb22 rcu: Convert to r... |
1815 |
raw_spin_unlock_irqrestore(&rsp->onofflock, flags); |
64db4cfff "Tree RCU": scala... |
1816 |
} |
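/*
 * A worked example of the bitmask propagation above, assuming a leaf
 * fanout of 16 (so CPUs 32-47 share one leaf rcu_node): CPU 37 has
 * rdp->grpmask = 1UL << (37 - 32) = 0x20, which is OR'ed into its leaf's
 * ->qsmaskinit; the do-while loop then sets the leaf's own ->grpmask bit
 * in the parent's ->qsmaskinit, stopping at the first level that already
 * had this subtree's bit set.
 */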
d72bce0e6 rcu: Cure load woes |
1817 |
static void __cpuinit rcu_prepare_cpu(int cpu) |
64db4cfff "Tree RCU": scala... |
1818 |
{ |
f41d911f8 rcu: Merge preemp... |
1819 1820 1821 |
	rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
	rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
	rcu_preempt_init_percpu_data(cpu);
64db4cfff "Tree RCU": scala... |
1822 1823 1824 |
}

/*
f41d911f8 rcu: Merge preemp... |
1825 |
* Handle CPU online/offline notification events. |
64db4cfff "Tree RCU": scala... |
1826 |
*/ |
9f680ab41 rcu: Eliminate un... |
1827 1828 |
static int __cpuinit rcu_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) |
64db4cfff "Tree RCU": scala... |
1829 1830 |
{
	long cpu = (long)hcpu;
27f4d2805 rcu: priority boo... |
1831 |
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); |
a26ac2455 rcu: move TREE_RC... |
1832 |
struct rcu_node *rnp = rdp->mynode; |
64db4cfff "Tree RCU": scala... |
1833 1834 1835 1836 |

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
d72bce0e6 rcu: Cure load woes |
1837 1838 |
		rcu_prepare_cpu(cpu);
		rcu_prepare_kthreads(cpu);
a26ac2455 rcu: move TREE_RC... |
1839 1840 |
		break;
	case CPU_ONLINE:
0f962a5e7 rcu: Force per-rc... |
1841 1842 |
	case CPU_DOWN_FAILED:
		rcu_node_kthread_setaffinity(rnp, -1);
e3995a25f rcu: put per-CPU ... |
1843 |
rcu_cpu_kthread_setrt(cpu, 1); |
0f962a5e7 rcu: Force per-rc... |
1844 1845 1846 |
		break;
	case CPU_DOWN_PREPARE:
		rcu_node_kthread_setaffinity(rnp, cpu);
e3995a25f rcu: put per-CPU ... |
1847 |
rcu_cpu_kthread_setrt(cpu, 0); |
64db4cfff "Tree RCU": scala... |
1848 |
break; |
d0ec774cb rcu: Move rcu_bar... |
1849 1850 1851 |
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/*
2d999e03b rcu: update docum... |
1852 1853 1854 |
* The whole machine is "stopped" except this CPU, so we can * touch any data without introducing corruption. We send the * dying CPU's callbacks to an arbitrarily chosen online CPU. |
d0ec774cb rcu: Move rcu_bar... |
1855 |
*/ |
29494be71 rcu,cleanup: simp... |
1856 1857 1858 |
		rcu_send_cbs_to_online(&rcu_bh_state);
		rcu_send_cbs_to_online(&rcu_sched_state);
		rcu_preempt_send_cbs_to_online();
d0ec774cb rcu: Move rcu_bar... |
1859 |
break; |
64db4cfff "Tree RCU": scala... |
1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 |
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

/*
bbad93798 rcu: slim down rc... |
1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 |
 * This function is invoked towards the end of the scheduler's initialization
 * process.  Before this is called, the idle task might contain
 * RCU read-side critical sections (during which time, this idle
 * task is booting the system).  After this function is called, the
 * idle tasks are prohibited from containing RCU read-side critical
 * sections.  This function also enables RCU lockdep checking.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

/*
64db4cfff "Tree RCU": scala... |
1888 1889 1890 1891 1892 1893 1894 |
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
 */
#ifdef CONFIG_RCU_FANOUT_EXACT
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
	int i;
0209f6490 rcu: limit rcu_no... |
1895 |
for (i = NUM_RCU_LVLS - 1; i > 0; i--) |
64db4cfff "Tree RCU": scala... |
1896 |
rsp->levelspread[i] = CONFIG_RCU_FANOUT; |
0209f6490 rcu: limit rcu_no... |
1897 |
rsp->levelspread[0] = RCU_FANOUT_LEAF; |
64db4cfff "Tree RCU": scala... |
1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 |
}
#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
	int ccur;
	int cprv;
	int i;

	cprv = NR_CPUS;
	for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
		ccur = rsp->levelcnt[i];
		rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
		cprv = ccur;
	}
}
#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */

/*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */
394f99a90 rcu: simplify the... |
1918 1919 |
static void __init rcu_init_one(struct rcu_state *rsp, struct rcu_data __percpu *rda) |
64db4cfff "Tree RCU": scala... |
1920 |
{ |
b6407e863 rcu: Give differe... |
1921 1922 1923 1924 |
static char *buf[] = { "rcu_node_level_0", "rcu_node_level_1", "rcu_node_level_2", "rcu_node_level_3" }; /* Match MAX_RCU_LVLS */ |
64db4cfff "Tree RCU": scala... |
1925 1926 1927 1928 |
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;
b6407e863 rcu: Give differe... |
1929 |
BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */ |
64db4cfff "Tree RCU": scala... |
1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 |

	/* Initialize the level-tracking arrays. */

	for (i = 1; i < NUM_RCU_LVLS; i++)
		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
	rcu_init_levelspread(rsp);

	/* Initialize the elements themselves, starting from the leaves. */

	for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
		cpustride *= rsp->levelspread[i];
		rnp = rsp->level[i];
		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
1304afb22 rcu: Convert to r... |
1942 |
raw_spin_lock_init(&rnp->lock); |
b6407e863 rcu: Give differe... |
1943 1944 |
lockdep_set_class_and_name(&rnp->lock, &rcu_node_class[i], buf[i]); |
f41d911f8 rcu: Merge preemp... |
1945 |
rnp->gpnum = 0; |
64db4cfff "Tree RCU": scala... |
1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 |
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= NR_CPUS)
				rnp->grphi = NR_CPUS - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % rsp->levelspread[i - 1];
				rnp->grpmask = 1UL << rnp->grpnum;
				rnp->parent = rsp->level[i - 1] +
					      j / rsp->levelspread[i - 1];
			}
			rnp->level = i;
12f5f524c rcu: merge TREE_P... |
1963 |
INIT_LIST_HEAD(&rnp->blkd_tasks); |
64db4cfff "Tree RCU": scala... |
1964 1965 |
		}
	}
0c34029ab rcu: move some co... |
1966 |
|
394f99a90 rcu: simplify the... |
1967 |
rsp->rda = rda; |
0c34029ab rcu: move some co... |
1968 1969 |
	rnp = rsp->level[NUM_RCU_LVLS - 1];
	for_each_possible_cpu(i) {
4a90a0681 rcu: permit disco... |
1970 |
while (i > rnp->grphi) |
0c34029ab rcu: move some co... |
1971 |
rnp++; |
394f99a90 rcu: simplify the... |
1972 |
per_cpu_ptr(rsp->rda, i)->mynode = rnp; |
0c34029ab rcu: move some co... |
1973 1974 |
		rcu_boot_init_percpu_data(i, rsp);
	}
64db4cfff "Tree RCU": scala... |
1975 |
} |
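/*
 * A worked example of the geometry set up above, for a hypothetical
 * NR_CPUS=96 build with a two-level tree (levelcnt = { 1, 6 }) and
 * CONFIG_RCU_FANOUT_EXACT=n, using the balanced rcu_init_levelspread():
 *
 *	leaf level:  levelspread[1] = (96 + 6 - 1) / 6 = 16 CPUs per leaf
 *	root level:  levelspread[0] = (6 + 1 - 1) / 1 = 6 leaves per root
 *
 * so each of the six leaf rcu_node structures covers a 16-CPU stride and
 * reports up to the single root.
 */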
9f680ab41 rcu: Eliminate un... |
1976 |
void __init rcu_init(void) |
64db4cfff "Tree RCU": scala... |
1977 |
{ |
017c42613 rcu: Fix sparse w... |
1978 |
int cpu; |
9f680ab41 rcu: Eliminate un... |
1979 |
|
f41d911f8 rcu: Merge preemp... |
1980 |
rcu_bootup_announce(); |
394f99a90 rcu: simplify the... |
1981 1982 |
	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
f41d911f8 rcu: Merge preemp... |
1983 |
__rcu_init_preempt(); |
09223371d rcu: Use softirq ... |
1984 |
open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); |
9f680ab41 rcu: Eliminate un... |
1985 1986 1987 1988 1989 1990 1991 |

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	cpu_notifier(rcu_cpu_notify, 0);
017c42613 rcu: Fix sparse w... |
1992 1993 |
	for_each_online_cpu(cpu)
		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
c68de2097 rcu: disable CPU ... |
1994 |
check_cpu_stall_init(); |
64db4cfff "Tree RCU": scala... |
1995 |
} |
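/*
 * For reference, a sketch of the expected boot ordering, assuming the
 * usual start_kernel() sequence rather than quoting init/main.c:
 *
 *	start_kernel()
 *	  rcu_init()			this function; notifier + softirq
 *	  ...interrupts enabled, scheduler initialized...
 *	  rcu_scheduler_starting()	read-side sections now checked
 *	  per-CPU kthreads spawned via rcu_cpu_notify(CPU_UP_PREPARE)
 */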
1eba8f843 rcu: Clean up cod... |
1996 |
#include "rcutree_plugin.h" |