kernel/rcu/update.c

/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */
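
/*
 * Usage note: because MODULE_PARAM_PREFIX is "rcupdate.", the parameters
 * above are normally set on the kernel command line or via sysfs, for
 * example (values are illustrative only):
 *
 *      rcupdate.rcu_expedited=1
 *      rcupdate.rcu_normal=1
 *      rcupdate.rcu_normal_after_boot=1
 *
 * The parameter names follow directly from the module_param() calls above.
 */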

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (ie: that we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes.  This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode.  This way we can
 * notice an extended quiescent state to other CPUs that started a grace
 * period.  Otherwise we would delay any grace period as long as we run in
 * the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
        int lockdep_opinion = 0;

        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (!rcu_is_watching())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        if (debug_locks)
                lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
        return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
#endif
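
/*
 * Illustrative sketch: a subsystem that publishes pointers protected by
 * RCU-sched can use the helper above in a lockdep assertion.  The function,
 * structure, and pointer names below are hypothetical, not part of this file:
 *
 *      struct foo *foo_get_locked(void)
 *      {
 *              RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *                               "foo_get_locked() needs rcu_read_lock_sched()");
 *              return rcu_dereference_sched(global_foo);
 *      }
 */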

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the time period during boot from
 * when the first task is spawned until the rcu_exp_runtime_mode()
 * core_initcall() is invoked, at which point everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
        return READ_ONCE(rcu_normal) &&
               rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting =
        ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
        return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
               rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
        atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
        atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
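
/*
 * Illustrative sketch: code that temporarily needs fast grace periods
 * brackets the region with the pair above, much as the boot sequence does
 * (see rcu_end_inkernel_boot() below).  The function names here are
 * hypothetical:
 *
 *      void foo_fast_reconfigure(void)
 *      {
 *              rcu_expedite_gp();
 *              foo_do_reconfiguration();
 *              rcu_unexpedite_gp();
 *      }
 *
 * While the nesting count is nonzero, synchronize_rcu() and friends behave
 * like their _expedited() counterparts.
 */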

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
        if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
                rcu_unexpedite_gp();
        if (rcu_normal_after_boot)
                WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        current->rcu_read_lock_nesting++;
        barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting != 1) {
                --t->rcu_read_lock_nesting;
        } else {
                barrier();  /* critical section before exit code. */
                t->rcu_read_lock_nesting = INT_MIN;
                barrier();  /* assign before ->rcu_read_unlock_special load */
                if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
                        rcu_read_unlock_special(t);
                barrier();  /* ->rcu_read_unlock_special load before assign */
                t->rcu_read_lock_nesting = 0;
        }
#ifdef CONFIG_PROVE_LOCKING
        {
                int rrln = READ_ONCE(t->rcu_read_lock_nesting);

                WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
        }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */
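
/*
 * Illustrative sketch: the primitives above underlie rcu_read_lock() and
 * rcu_read_unlock() on preemptible-RCU kernels.  A typical reader looks
 * roughly like the following, with hypothetical names:
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(gbl_foo);
 *      if (p)
 *              do_something_with(p->field);
 *      rcu_read_unlock();
 *
 * The matching updater publishes a new version with rcu_assign_pointer()
 * and defers freeing of the old one past a grace period, for example via
 * call_rcu() or synchronize_rcu().
 */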

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
        return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
               current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (!rcu_is_watching())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (!rcu_is_watching())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
                   struct rcu_synchronize *rs_array)
{
        int i;

        /* Initialize and register callbacks for each flavor specified. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu ||
                     crcu_array[i] == call_rcu_bh)) {
                        might_sleep();
                        continue;
                }
                init_rcu_head_on_stack(&rs_array[i].head);
                init_completion(&rs_array[i].completion);
                (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
        }

        /* Wait for all callbacks to be invoked. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu ||
                     crcu_array[i] == call_rcu_bh))
                        continue;
                wait_for_completion(&rs_array[i].completion);
                destroy_rcu_head_on_stack(&rs_array[i].head);
        }
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
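
/*
 * Usage sketch: callers normally do not invoke __wait_rcu_gp() directly but
 * go through the wait_rcu_gp() wrapper macro declared next to this
 * function's prototype, which builds the call_rcu-style function array and
 * the matching rcu_synchronize array from its arguments.  A synchronous
 * wait for a vanilla RCU grace period is then roughly:
 *
 *      wait_rcu_gp(call_rcu);
 *
 * with one completion used per grace-period flavor passed in.
 */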

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
        debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}

static bool rcuhead_is_static_object(void *addr)
{
        return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
        .name = "rcu_head",
        .is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old, unsigned long c)
{
        trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA   (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA   0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
        int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 3);
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
                WRITE_ONCE(rcu_cpu_stall_timeout, 300);
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}

void rcu_sysrq_start(void)
{
        if (!rcu_cpu_stall_suppress)
                rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
        if (rcu_cpu_stall_suppress == 2)
                rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
        rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
        return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
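
/*
 * Usage note: with MODULE_PARAM_PREFIX set to "rcupdate.", the stall-warning
 * knobs above are typically adjusted from the boot line or sysfs, for
 * example (values are illustrative only):
 *
 *      rcupdate.rcu_cpu_stall_suppress=1
 *      rcupdate.rcu_cpu_stall_timeout=60
 *
 * rcu_jiffies_till_stall_check() clamps the timeout to the 3..300 second
 * range and converts it to jiffies, adding RCU_STALL_DELAY_DELTA under
 * CONFIG_PROVE_RCU.
 */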

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle.  As such, grace periods can take one good
 * long time.  There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like.  Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);
static struct task_struct *rcu_tasks_kthread_ptr;

/*
 * Post an RCU-tasks callback.  First call must be from process context
 * after the scheduler is fully operational.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
        unsigned long flags;
        bool needwake;
        bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);

        rhp->next = NULL;
        rhp->func = func;
        raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
        needwake = !rcu_tasks_cbs_head;
        *rcu_tasks_cbs_tail = rhp;
        rcu_tasks_cbs_tail = &rhp->next;
        raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
        /* We can't create the thread unless interrupts are enabled. */
        if ((needwake && havetask) ||
            (!havetask && !irqs_disabled_flags(flags))) {
                rcu_spawn_tasks_kthread();
                wake_up(&rcu_tasks_cbs_wq);
        }
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
        /* Complain if the scheduler has not started.  */
        RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
                         "synchronize_rcu_tasks called too soon");

        /* Wait for the grace period. */
        wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
        /* There is only one callback queue, so this is easy.  ;-) */
        synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
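
/*
 * Illustrative sketch: the intended consumer of this API is code such as
 * tracing that must keep a dynamically allocated trampoline around until no
 * task can still be executing within it.  The structure and function names
 * below are hypothetical:
 *
 *      static void tramp_free_cb(struct rcu_head *rhp)
 *      {
 *              struct tramp *tp = container_of(rhp, struct tramp, rh);
 *
 *              tramp_free(tp);
 *      }
 *
 *      ...
 *      call_rcu_tasks(&old_tp->rh, tramp_free_cb);
 *
 * Alternatively, synchronize_rcu_tasks() can be used to wait in place
 * before freeing.
 */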

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
                               bool needreport, bool *firstreport)
{
        int cpu;

        if (!READ_ONCE(t->rcu_tasks_holdout) ||
            t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
            !READ_ONCE(t->on_rq) ||
            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
                WRITE_ONCE(t->rcu_tasks_holdout, false);
                list_del_init(&t->rcu_tasks_holdout_list);
                put_task_struct(t);
                return;
        }
        if (!needreport)
                return;
        if (*firstreport) {
                pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
                *firstreport = false;
        }
        cpu = task_cpu(t);
        pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
                 t, ".I"[is_idle_task(t)],
                 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
                 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
                 t->rcu_tasks_idle_cpu, cpu);
        sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
        unsigned long flags;
        struct task_struct *g, *t;
        unsigned long lastreport;
        struct rcu_head *list;
        struct rcu_head *next;
        LIST_HEAD(rcu_tasks_holdouts);

        /* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
        housekeeping_affine(current);

        /*
         * Each pass through the following loop makes one check for
         * newly arrived callbacks, and, if there are some, waits for
         * one RCU-tasks grace period and then invokes the callbacks.
         * This loop is terminated by the system going down.  ;-)
         */
        for (;;) {

                /* Pick up any new callbacks. */
                raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
                list = rcu_tasks_cbs_head;
                rcu_tasks_cbs_head = NULL;
                rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
                raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

                /* If there were none, wait a bit and start over. */
                if (!list) {
                        wait_event_interruptible(rcu_tasks_cbs_wq,
                                                 rcu_tasks_cbs_head);
                        if (!rcu_tasks_cbs_head) {
                                WARN_ON(signal_pending(current));
                                schedule_timeout_interruptible(HZ/10);
                        }
                        continue;
                }

                /*
                 * Wait for all pre-existing t->on_rq and t->nvcsw
                 * transitions to complete.  Invoking synchronize_sched()
                 * suffices because all these transitions occur with
                 * interrupts disabled.  Without this synchronize_sched(),
                 * a read-side critical section that started before the
                 * grace period might be incorrectly seen as having started
                 * after the grace period.
                 *
                 * This synchronize_sched() also dispenses with the
                 * need for a memory barrier on the first store to
                 * ->rcu_tasks_holdout, as it forces the store to happen
                 * after the beginning of the grace period.
                 */
                synchronize_sched();

                /*
                 * There were callbacks, so we need to wait for an
                 * RCU-tasks grace period.  Start off by scanning
                 * the task list for tasks that are not already
                 * voluntarily blocked.  Mark these tasks and make
                 * a list of them in rcu_tasks_holdouts.
                 */
                rcu_read_lock();
                for_each_process_thread(g, t) {
                        if (t != current && READ_ONCE(t->on_rq) &&
                            !is_idle_task(t)) {
                                get_task_struct(t);
                                t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
                                WRITE_ONCE(t->rcu_tasks_holdout, true);
                                list_add(&t->rcu_tasks_holdout_list,
                                         &rcu_tasks_holdouts);
                        }
                }
                rcu_read_unlock();

                /*
                 * Wait for tasks that are in the process of exiting.
                 * This does only part of the job, ensuring that all
                 * tasks that were previously exiting reach the point
                 * where they have disabled preemption, allowing the
                 * later synchronize_sched() to finish the job.
                 */
                synchronize_srcu(&tasks_rcu_exit_srcu);

                /*
                 * Each pass through the following loop scans the list
                 * of holdout tasks, removing any that are no longer
                 * holdouts.  When the list is empty, we are done.
                 */
                lastreport = jiffies;
                while (!list_empty(&rcu_tasks_holdouts)) {
                        bool firstreport;
                        bool needreport;
                        int rtst;
                        struct task_struct *t1;

                        schedule_timeout_interruptible(HZ);
                        rtst = READ_ONCE(rcu_task_stall_timeout);
                        needreport = rtst > 0 &&
                                     time_after(jiffies, lastreport + rtst);
                        if (needreport)
                                lastreport = jiffies;
                        firstreport = true;
                        WARN_ON(signal_pending(current));
                        list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
                                                 rcu_tasks_holdout_list) {
                                check_holdout_task(t, needreport, &firstreport);
                                cond_resched();
                        }
                }

                /*
                 * Because ->on_rq and ->nvcsw are not guaranteed
                 * to have full memory barriers prior to them in the
                 * schedule() path, memory reordering on other CPUs could
                 * cause their RCU-tasks read-side critical sections to
                 * extend past the end of the grace period.  However,
                 * because these ->nvcsw updates are carried out with
                 * interrupts disabled, we can use synchronize_sched()
                 * to force the needed ordering on all such CPUs.
                 *
                 * This synchronize_sched() also confines all
                 * ->rcu_tasks_holdout accesses to be within the grace
                 * period, avoiding the need for memory barriers for
                 * ->rcu_tasks_holdout accesses.
                 *
                 * In addition, this synchronize_sched() waits for exiting
                 * tasks to complete their final preempt_disable() region
                 * of execution, cleaning up after the synchronize_srcu()
                 * above.
                 */
                synchronize_sched();

                /* Invoke the callbacks. */
                while (list) {
                        next = list->next;
                        local_bh_disable();
                        list->func(list);
                        local_bh_enable();
                        list = next;
                        cond_resched();
                }
                schedule_timeout_uninterruptible(HZ/10);
        }
}

/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
        static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
        struct task_struct *t;

        if (READ_ONCE(rcu_tasks_kthread_ptr)) {
                smp_mb(); /* Ensure caller sees full kthread. */
                return;
        }
        mutex_lock(&rcu_tasks_kthread_mutex);
        if (rcu_tasks_kthread_ptr) {
                mutex_unlock(&rcu_tasks_kthread_mutex);
                return;
        }
        t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
        BUG_ON(IS_ERR(t));
        smp_mb(); /* Ensure others see full kthread. */
        WRITE_ONCE(rcu_tasks_kthread_ptr, t);
        mutex_unlock(&rcu_tasks_kthread_mutex);
}

#endif /* #ifdef CONFIG_TASKS_RCU */
|
/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
        if (!IS_ENABLED(CONFIG_PROVE_RCU))
                return;
        synchronize_rcu();
        synchronize_rcu_bh();
        synchronize_sched();
        synchronize_rcu_expedited();
        synchronize_rcu_bh_expedited();
        synchronize_sched_expedited();
}

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;
module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
        rcu_self_test_counter++;
        pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
        static struct rcu_head head;

        call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
        static struct rcu_head head;

        call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
        static struct rcu_head head;

        call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
        pr_info("Running RCU self tests\n");

        if (rcu_self_test)
                early_boot_test_call_rcu();
        if (rcu_self_test_bh)
                early_boot_test_call_rcu_bh();
        if (rcu_self_test_sched)
                early_boot_test_call_rcu_sched();
        rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
        int ret = 0;
        int early_boot_test_counter = 0;

        if (rcu_self_test) {
                early_boot_test_counter++;
                rcu_barrier();
        }
        if (rcu_self_test_bh) {
                early_boot_test_counter++;
                rcu_barrier_bh();
        }
        if (rcu_self_test_sched) {
                early_boot_test_counter++;
                rcu_barrier_sched();
        }

        if (rcu_self_test_counter != early_boot_test_counter) {
                WARN_ON(1);
                ret = -1;
        }

        return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */
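
/*
 * Usage note: the early-boot self tests above are typically requested on the
 * kernel command line, for example "rcupdate.rcu_self_test=1
 * rcupdate.rcu_self_test_bh=1"; rcu_verify_early_boot_tests() then checks
 * from a late_initcall() that every requested callback actually ran and
 * warns otherwise.
 */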