kernel/rcupdate.c

/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rcu.h>

#include "rcu.h"
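
/*
 * Note on the trace header above: defining CREATE_TRACE_POINTS before
 * including <trace/events/rcu.h> emits the actual tracepoint
 * definitions (not just their declarations) into this translation
 * unit; exactly one .c file in the kernel may do this for a given
 * event header.
 */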

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
#ifdef CONFIG_PROVE_RCU_DELAY
		udelay(10); /* Make preemption more probable. */
#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
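
/*
 * Added explanatory sketch (not part of the original file): parking
 * ->rcu_read_lock_nesting at INT_MIN above keeps the counter negative
 * while the outermost rcu_read_unlock() is mid-flight, so an irq
 * handler that itself does rcu_read_lock()/rcu_read_unlock() sees a
 * nesting value other than 1 and takes the fast decrement path rather
 * than re-entering rcu_read_unlock_special():
 *
 *	nesting == 1:        outermost unlock starts
 *	nesting = INT_MIN:   nested unlocks cannot see 1
 *	  irq: ++ then --:   still hugely negative, fast path only
 *	nesting = 0:         fully unlocked
 *
 * The WARN_ON_ONCE() above checks exactly this invariant: the counter
 * is either non-negative or still close to INT_MIN.
 */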

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (likely(list_empty(&current->rcu_node_entry)))
		return;
	t->rcu_read_lock_nesting = 1;
	barrier();
	t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
#endif
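
/*
 * Added illustration (assumed usage, not from this file): these
 * lockdep maps are what let CONFIG_PROVE_RCU validate read-side
 * context, for example:
 *
 *	p = rcu_dereference_check(gp,
 *				  rcu_read_lock_held() ||
 *				  lockdep_is_held(&my_lock));
 *
 * where "gp" and "my_lock" are hypothetical; the rcu_read_lock_held()
 * family of predicates consults the maps defined above.
 */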
#ifdef CONFIG_DEBUG_LOCK_ALLOC

int debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (rcu_is_cpu_idle())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
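
/*
 * Added usage sketch (illustrative, not from this file): a function
 * that requires RCU-bh protection can check its caller with
 *
 *	WARN_ON_ONCE(!rcu_read_lock_bh_held());
 *
 * and rcu_dereference_bh() performs the same check automatically
 * under CONFIG_PROVE_RCU.
 */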

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};
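
/*
 * Added note: the structure above pairs the rcu_head handed to a
 * call_rcu()-style function with a completion, so that a task can
 * sleep in wait_for_completion() until the grace-period callback
 * (wakeme_after_rcu() below) fires.  See wait_rcu_gp().
 */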
/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	crf(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);
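
/*
 * Added sketch (hypothetical wrapper, not defined in this file): the
 * synchronous grace-period primitives can be expressed in terms of
 * wait_rcu_gp() by passing the matching asynchronous call_rcu flavor:
 *
 *	void my_synchronize_sched(void)
 *	{
 *		wait_rcu_gp(call_rcu_sched);
 *	}
 *
 * my_synchronize_sched() then blocks until a full sched-RCU grace
 * period has elapsed.
 */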

#ifdef CONFIG_PROVE_RCU
/*
 * wrapper function to avoid #include problems.
 */
int rcu_my_thread_group_empty(void)
{
	return thread_group_empty(current);
}
EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static inline void debug_init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

static inline void debug_rcu_head_free(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in a RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_init(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in a RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_activate(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in a RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_free(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.fixup_init = rcuhead_fixup_init,
	.fixup_activate = rcuhead_fixup_activate,
	.fixup_free = rcuhead_fixup_free,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);

#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
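
/*
 * Added lifecycle sketch (illustrative; wait_rcu_gp() above is a real
 * in-tree user of this pattern):
 *
 *	struct rcu_head head;			an on-stack rcu_head
 *	init_rcu_head_on_stack(&head);		tell debugobjects about it
 *	call_rcu(&head, my_callback);		my_callback is hypothetical
 *	... wait until my_callback has run ...
 *	destroy_rcu_head_on_stack(&head);	before it goes out of scope
 */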

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp)
{
	trace_rcu_torture_read(rcutorturename, rhp);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif