kernel/rcuclassic.c
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *         Documentation/RCU
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif


/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .pending = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
        .cpumask = CPU_BITS_NONE,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .pending = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
        .cpumask = CPU_BITS_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;

#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        int cpu;
        unsigned long flags;

        set_need_resched();
        spin_lock_irqsave(&rcp->lock, flags);
        if (unlikely(!rcp->signaled)) {
                rcp->signaled = 1;
                /*
                 * Don't send IPI to itself. With irqs disabled,
                 * rdp->cpu is the current cpu.
                 *
                 * cpu_online_mask is updated by the _cpu_down()
                 * using __stop_machine(). Since we're in an irqs-disabled
                 * section, __stop_machine() is not executing, hence
                 * the cpu_online_mask is stable.
                 *
                 * However, a cpu might have been offlined _just_ before
                 * we disabled irqs while entering here.
                 * And rcu subsystem might not yet have handled the CPU_DEAD
                 * notification, leading to the offlined cpu's bit
                 * being set in the rcp->cpumask.
                 *
                 * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
                 * sending smp_send_reschedule() to an offlined CPU.
                 */
                for_each_cpu_and(cpu, to_cpumask(rcp->cpumask),
                                 cpu_online_mask) {
                        if (cpu != rdp->cpu)
                                smp_send_reschedule(cpu);
                }
        }
        spin_unlock_irqrestore(&rcp->lock, flags);
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        set_need_resched();
}
#endif

static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
                                struct rcu_data *rdp)
{
        long batch;

        head->next = NULL;
        smp_mb(); /* Read of rcu->cur must happen after any change by caller. */

        /*
         * Determine the batch number of this callback.
         *
         * Using ACCESS_ONCE to avoid the following error when gcc eliminates
         * the local variable "batch" and emits code like this:
         *   1) rdp->batch = rcp->cur + 1 # gets old value
         *   ......
         *   2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
         * then [*nxttail[0], *nxttail[1]) may contain callbacks whose
         * batch# == rdp->batch; see the comment for struct rcu_data.
         */
        batch = ACCESS_ONCE(rcp->cur) + 1;

        if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
                /* process callbacks */
                rdp->nxttail[0] = rdp->nxttail[1];
                rdp->nxttail[1] = rdp->nxttail[2];
                if (rcu_batch_after(batch - 1, rdp->batch))
                        rdp->nxttail[0] = rdp->nxttail[2];
        }

        rdp->batch = batch;
        *rdp->nxttail[2] = head;
        rdp->nxttail[2] = &head->next;

        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_ctrlblk);
        }
}
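
/*
 * Illustrative sketch (not part of the original file): the pending
 * callbacks form one linked list cut into three batches by the
 * rdp->nxttail[] pointers.  Assuming the invariants maintained above,
 * a hypothetical helper could count the entries in segment "seg"
 * (0, 1 or 2); it would have to run on rdp's CPU with irqs disabled:
 *
 *	static int rcu_seg_count(struct rcu_data *rdp, int seg)
 *	{
 *		struct rcu_head **p = seg ? rdp->nxttail[seg - 1]
 *					  : &rdp->nxtlist;
 *		int n = 0;
 *
 *		while (p != rdp->nxttail[seg]) {
 *			n++;
 *			p = &(*p)->next;
 *		}
 *		return n;
 *	}
 */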

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
{
        rcp->gp_start = jiffies;
        rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
{
        int cpu;
        long delta;
        unsigned long flags;

        /* Only let one CPU complain about others per time interval. */

        spin_lock_irqsave(&rcp->lock, flags);
        delta = jiffies - rcp->jiffies_stall;
        if (delta < 2 || rcp->cur != rcp->completed) {
                spin_unlock_irqrestore(&rcp->lock, flags);
                return;
        }
        rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
        spin_unlock_irqrestore(&rcp->lock, flags);

        /* OK, time to rat on our buddy... */

        printk(KERN_ERR "INFO: RCU detected CPU stalls:");
        for_each_possible_cpu(cpu) {
                if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
                        printk(" %d", cpu);
        }
        printk(" (detected by %d, t=%ld jiffies)\n",
               smp_processor_id(), (long)(jiffies - rcp->gp_start));
}

static void print_cpu_stall(struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
                        smp_processor_id(), jiffies,
                        jiffies - rcp->gp_start);
        dump_stack();
        spin_lock_irqsave(&rcp->lock, flags);
        if ((long)(jiffies - rcp->jiffies_stall) >= 0)
                rcp->jiffies_stall =
                        jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
        spin_unlock_irqrestore(&rcp->lock, flags);
        set_need_resched();  /* kick ourselves to get things going. */
}

static void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
        long delta;

        delta = jiffies - rcp->jiffies_stall;
        if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
                        delta >= 0) {

                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rcp);

        } else if (rcp->cur != rcp->completed && delta >= 2) {

                /* They had two seconds to dump stack, so complain. */
                print_other_cpu_stall(rcp);
        }
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
{
}

static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void call_rcu(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;

        head->func = func;
        local_irq_save(flags);
        __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
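
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * typically embed the rcu_head in their own structure and reclaim the
 * enclosing object from the callback.  "struct foo", "fp" and
 * foo_reclaim() below are hypothetical names:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	...
 *	(first unlink fp from every reader-visible structure, then:)
 *	call_rcu(&fp->rcu, foo_reclaim);
 */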

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh(), if in process context. These may be nested.
 */
void call_rcu_bh(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;

        head->func = func;
        local_irq_save(flags);
        __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
        return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU-bh batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
        return rcu_bh_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/* Raises the softirq for processing rcu_callbacks. */
static inline void raise_rcu_softirq(void)
{
        raise_softirq(RCU_SOFTIRQ);
}
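
/*
 * Illustrative sketch (not part of the original file): the batch
 * counters above can be polled to observe grace-period progress.
 * Waiting for the counter to advance by two guarantees a full grace
 * period has elapsed since the snapshot, because the batch current at
 * snapshot time may have started before the caller's update.  Debug
 * use only; real code should use synchronize_rcu():
 *
 *	do_update();			(hypothetical updater)
 *	snap = rcu_batches_completed();
 *	while (rcu_batches_completed() - snap < 2)
 *		schedule_timeout_uninterruptible(1);
 *	do_reclaim();			(now safe)
 */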

/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
        unsigned long flags;
        struct rcu_head *next, *list;
        int count = 0;

        list = rdp->donelist;
        while (list) {
                next = list->next;
                prefetch(next);
                list->func(list);
                list = next;
                if (++count >= rdp->blimit)
                        break;
        }
        rdp->donelist = list;

        local_irq_save(flags);
        rdp->qlen -= count;
        local_irq_restore(flags);

        if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;

        if (!rdp->donelist)
                rdp->donetail = &rdp->donelist;
        else
                raise_rcu_softirq();
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the
 *   beginning of the grace period. If so, it updates rcu_ctrlblk.cpumask.
 *   If the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch() to start the next
 *   grace period (if necessary).
 */
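
/*
 * Worked example (not part of the original file): with rcp->cur ==
 * rcp->completed == 5, queueing a callback assigns it batch 6 and
 * eventually sets rcp->pending = 6.  rcu_start_batch() then advances
 * rcp->cur to 6 and snapshots the online CPUs into rcp->cpumask.  Each
 * CPU notices rdp->quiescbatch != rcp->cur, arms rdp->qs_pending, and
 * once it passes a quiescent state clears its bit via cpu_quiet().
 * When the mask empties, rcp->completed becomes 6 and the batch-6
 * callbacks are moved to donelist and invoked by rcu_do_batch().
 */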

/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
        if (rcp->cur != rcp->pending &&
                        rcp->completed == rcp->cur) {
                rcp->cur++;
                record_gp_stall_check_time(rcp);

                /*
                 * Accessing nohz_cpu_mask before incrementing rcp->cur needs
                 * a barrier.  Otherwise it can cause tickless idle CPUs to be
                 * included in rcp->cpumask, which will extend grace periods
                 * unnecessarily.
                 */
                smp_mb();
                cpumask_andnot(to_cpumask(rcp->cpumask),
                               cpu_online_mask, nohz_cpu_mask);

                rcp->signaled = 0;
        }
}

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
        cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
        if (cpumask_empty(to_cpumask(rcp->cpumask))) {
                /* batch completed ! */
                rcp->completed = rcp->cur;
                rcu_start_batch(rcp);
        }
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        unsigned long flags;

        if (rdp->quiescbatch != rcp->cur) {
                /* start new grace period: */
                rdp->qs_pending = 1;
                rdp->passed_quiesc = 0;
                rdp->quiescbatch = rcp->cur;
                return;
        }

        /* Grace period already completed for this cpu?
         * qs_pending is checked instead of the actual bitmap to avoid
         * cacheline thrashing.
         */
        if (!rdp->qs_pending)
                return;

        /*
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
        if (!rdp->passed_quiesc)
                return;
        rdp->qs_pending = 0;

        spin_lock_irqsave(&rcp->lock, flags);
        /*
         * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
         * during cpu startup. Ignore the quiescent state.
         */
        if (likely(rdp->quiescbatch == rcp->cur))
                cpu_quiet(rdp->cpu, rcp);

        spin_unlock_irqrestore(&rcp->lock, flags);
}
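
/*
 * For reference (a sketch, assuming the declarations in
 * include/linux/rcuclassic.h of this era are unchanged): the
 * rdp->passed_quiesc flag consumed above is set from the scheduler
 * path by a helper along these lines:
 *
 *	static inline void rcu_qsctr_inc(int cpu)
 *	{
 *		struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
 *		rdp->passed_quiesc = 1;
 *	}
 */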

#ifdef CONFIG_HOTPLUG_CPU

/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
 * the locking requirements; the list it's pulling from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
                                struct rcu_head **tail, long batch)
{
        unsigned long flags;

        if (list) {
                local_irq_save(flags);
                this_rdp->batch = batch;
                *this_rdp->nxttail[2] = list;
                this_rdp->nxttail[2] = tail;
                local_irq_restore(flags);
        }
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
                                struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        unsigned long flags;

        /*
         * If the cpu going offline owns the grace period, we can block
         * indefinitely waiting for it, so flush it here.
         */
        spin_lock_irqsave(&rcp->lock, flags);
        if (rcp->cur != rcp->completed)
                cpu_quiet(rdp->cpu, rcp);
        rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
        rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
        spin_unlock(&rcp->lock);

        this_rdp->qlen += rdp->qlen;
        local_irq_restore(flags);
}

static void rcu_offline_cpu(int cpu)
{
        struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
        struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

        __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
                          &per_cpu(rcu_data, cpu));
        __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
                          &per_cpu(rcu_bh_data, cpu));
        put_cpu_var(rcu_data);
        put_cpu_var(rcu_bh_data);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

/*
 * This does the RCU processing work from softirq context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        unsigned long flags;
        long completed_snap;

        if (rdp->nxtlist) {
                local_irq_save(flags);
                completed_snap = ACCESS_ONCE(rcp->completed);

                /*
                 * move the other grace-period-completed entries to
                 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
                 */
                if (!rcu_batch_before(completed_snap, rdp->batch))
                        rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
                else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
                        rdp->nxttail[0] = rdp->nxttail[1];

                /*
                 * the grace period for entries in
                 * [rdp->nxtlist, *rdp->nxttail[0]) has completed and
                 * move these entries to donelist
                 */
                if (rdp->nxttail[0] != &rdp->nxtlist) {
                        *rdp->donetail = rdp->nxtlist;
                        rdp->donetail = rdp->nxttail[0];
                        rdp->nxtlist = *rdp->nxttail[0];
                        *rdp->donetail = NULL;

                        if (rdp->nxttail[1] == rdp->nxttail[0])
                                rdp->nxttail[1] = &rdp->nxtlist;
                        if (rdp->nxttail[2] == rdp->nxttail[0])
                                rdp->nxttail[2] = &rdp->nxtlist;
                        rdp->nxttail[0] = &rdp->nxtlist;
                }

                local_irq_restore(flags);

                if (rcu_batch_after(rdp->batch, rcp->pending)) {
                        unsigned long flags2;

                        /* and start it/schedule start if it's a new batch */
                        spin_lock_irqsave(&rcp->lock, flags2);
                        if (rcu_batch_after(rdp->batch, rcp->pending)) {
                                rcp->pending = rdp->batch;
                                rcu_start_batch(rcp);
                        }
                        spin_unlock_irqrestore(&rcp->lock, flags2);
                }
        }

        rcu_check_quiescent_state(rcp, rdp);
        if (rdp->donelist)
                rcu_do_batch(rdp);
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
        /*
         * Memory references from any prior RCU read-side critical sections
         * executed by the interrupted code must be seen before any RCU
         * grace-period manipulations below.
         */
        smp_mb(); /* See above block comment. */

        __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
        __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));

        /*
         * Memory references from any later RCU read-side critical sections
         * executed by the interrupted code must be seen after any RCU
         * grace-period manipulations above.
         */
        smp_mb(); /* See above block comment. */
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        /* Check for CPU stalls, if enabled. */
        check_cpu_stall(rcp);

        if (rdp->nxtlist) {
                long completed_snap = ACCESS_ONCE(rcp->completed);

                /*
                 * This cpu has pending rcu entries and the grace period
                 * for them has completed.
                 */
                if (!rcu_batch_before(completed_snap, rdp->batch))
                        return 1;
                if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
                                rdp->nxttail[0] != rdp->nxttail[1])
                        return 1;
                if (rdp->nxttail[0] != &rdp->nxtlist)
                        return 1;

                /*
                 * This cpu has pending rcu entries and the new batch
                 * for them hasn't been started, nor has its start been
                 * scheduled.
                 */
                if (rcu_batch_after(rdp->batch, rcp->pending))
                        return 1;
        }

        /* This cpu has finished callbacks to invoke */
        if (rdp->donelist)
                return 1;

        /* The rcu core waits for a quiescent state from the cpu */
        if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
                return 1;

        /* nothing to do */
        return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
        return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
                __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

        return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
}
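
/*
 * Illustrative caller (a sketch, assuming the dynticks code of this
 * era): before stopping the per-CPU tick, the nohz path asks whether
 * RCU still needs this CPU, roughly:
 *
 *	if (rcu_needs_cpu(cpu))
 *		... keep the tick running so RCU can make progress ...
 *	else
 *		... safe to enter tickless idle ...
 */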

/*
 * Top-level function driving RCU grace-period detection, normally
 * invoked from the scheduler-clock interrupt.  This function simply
 * increments counters that are read only from softirq by this same
 * CPU, so there are no memory barriers required.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) && rcu_scheduler_active &&
             !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {

                /*
                 * Get here if this CPU took its interrupt from user
                 * mode or from the idle loop, and if this is not a
                 * nested interrupt.  In this case, the CPU is in
                 * a quiescent state, so count it.
                 *
                 * Also do a memory barrier.  This is needed to handle
                 * the case where writes from a preempt-disable section
                 * of code get reordered into schedule() by this CPU's
                 * write buffer.  The memory barrier makes sure that
                 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
                 * by other CPUs to happen after any such write.
                 */

                smp_mb();  /* See above block comment. */
                rcu_qsctr_inc(cpu);
                rcu_bh_qsctr_inc(cpu);

        } else if (!in_softirq()) {

                /*
                 * Get here if this CPU did not take its interrupt from
                 * softirq, in other words, if it is not interrupting
                 * an rcu_bh read-side critical section.  This is an _bh
                 * quiescent state, so count it.  The memory barrier
                 * is needed for the same reason as the one above.
                 */

                smp_mb();  /* See above block comment. */
                rcu_bh_qsctr_inc(cpu);
        }
        raise_rcu_softirq();
}
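
/*
 * For context (a sketch, assuming the timer code of this era): the
 * scheduler-clock path reaches this function roughly as follows,
 * gated by rcu_pending():
 *
 *	void update_process_times(int user_tick)
 *	{
 *		int cpu = smp_processor_id();
 *
 *		...
 *		if (rcu_pending(cpu))
 *			rcu_check_callbacks(cpu, user_tick);
 *		...
 *	}
 */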

static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
                                                struct rcu_data *rdp)
{
        unsigned long flags;

        spin_lock_irqsave(&rcp->lock, flags);
        memset(rdp, 0, sizeof(*rdp));
        rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
        rdp->donetail = &rdp->donelist;
        rdp->quiescbatch = rcp->completed;
        rdp->qs_pending = 0;
        rdp->cpu = cpu;
        rdp->blimit = blimit;
        spin_unlock_irqrestore(&rcp->lock, flags);
}

static void __cpuinit rcu_online_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

        rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
        rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                rcu_online_cpu(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                rcu_offline_cpu(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
        .notifier_call  = rcu_cpu_notify,
};

/*
 * Initializes the rcu mechanism.  Assumed to be called early; that is,
 * before the local timer (SMP) or the jiffies timer (uniprocessor) is
 * set up.  Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init __rcu_init(void)
{
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
        printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
        rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        /* Register notifier for non-boot CPUs */
        register_cpu_notifier(&rcu_nb);
}

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
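
/*
 * Note (an assumption, not stated in this file): since these parameters
 * are declared with module_param() in built-in code, they would be set
 * from the kernel command line using the KBUILD_MODNAME prefix, e.g.
 * "rcuclassic.blimit=20".  The permission value 0 keeps them out of
 * sysfs.
 */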