/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <asm/barrier.h>

extern int rcu_expedited; /* for sysctl */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_expedited(void)  /* Internal RCU use. */
{
	return false;
}

static inline void rcu_expedite_gp(void)
{
}

static inline void rcu_unexpedite_gp(void)
{
}
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags,
					  unsigned long *gpnum,
					  unsigned long *completed)
{
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a)		(*(long *)(&(a)))

/* Exported common interfaces */

#ifdef CONFIG_PREEMPT_RCU

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing RCU read-side critical sections.  On systems with more
 * than one CPU, this means that when "func()" is invoked, each CPU is
 * guaranteed to have executed a full memory barrier since the end of its
 * last RCU read-side critical section whose beginning preceded the call
 * to call_rcu().  It also means that each CPU executing an RCU read-side
 * critical section that continues beyond the start of "func()" must have
 * executed a memory barrier after the call_rcu() but before the beginning
 * of that RCU read-side critical section.  Note that these guarantees
 * include CPUs that are offline, idle, or executing in user mode, as
 * well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
 * resulting RCU callback function "func()", then both CPU A and CPU B are
 * guaranteed to execute a full memory barrier during the time interval
 * between the call to call_rcu() and the invocation of "func()" -- even
 * if CPU A and CPU B are the same CPU (but again only if the system has
 * more than one CPU).
 */
void call_rcu(struct rcu_head *head,
	      rcu_callback_t func);
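
/*
 * For example (an illustrative sketch only -- struct foo, foo_reclaim(),
 * foo_mutex, and the global pointer gp are hypothetical names), a typical
 * update might look like this:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	static void foo_replace(struct foo *newp)
 *	{
 *		struct foo *oldp;
 *
 *		oldp = rcu_dereference_protected(gp,
 *					lockdep_is_held(&foo_mutex));
 *		rcu_assign_pointer(gp, newp);
 *		call_rcu(&oldp->rcu, foo_reclaim);
 *	}
 */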

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define	call_rcu	call_rcu_sched

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler.  This means that read-side critical sections in process
 * context must not be interrupted by softirqs.  This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_bh(struct rcu_head *head,
		 rcu_callback_t func);

/**
 * call_rcu_sched() - Queue an RCU callback for invocation after sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by:
 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
 * - anything that disables preemption.
 * These may be nested.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_sched(struct rcu_head *head,
		    rcu_callback_t func);

void synchronize_sched(void);

/*
 * Structure allowing asynchronous waiting on RCU.
 */
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};
void wakeme_after_rcu(struct rcu_head *head);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array);

#define _wait_rcu_gp(checktiny, ...) \
do {									\
	call_rcu_func_t __crcu_array[] = { __VA_ARGS__ };		\
	struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)];	\
	__wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array),		\
			__crcu_array, __rs_array);			\
} while (0)

#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)

/**
 * synchronize_rcu_mult - Wait concurrently for multiple grace periods
 * @...: List of call_rcu() functions for the flavors to wait on.
 *
 * This macro waits concurrently for multiple flavors of RCU grace periods.
 * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
 * on concurrent RCU and RCU-bh grace periods.  Waiting on a given SRCU
 * domain requires you to write a wrapper function for that SRCU domain's
 * call_srcu() function, supplying the corresponding srcu_struct.
 *
 * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
 * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
 * is automatically a grace period.
 */
#define synchronize_rcu_mult(...) \
	_wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
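
/*
 * For example (an illustrative sketch -- my_srcu is a hypothetical
 * srcu_struct), such a wrapper might look like this:
 *
 *	static void call_my_srcu(struct rcu_head *head,
 *				 rcu_callback_t func)
 *	{
 *		call_srcu(&my_srcu, head, func);
 *	}
 *
 * after which synchronize_rcu_mult(call_rcu, call_my_srcu) waits on
 * both an RCU grace period and a my_srcu SRCU grace period.
 */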

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), entry into idle, or transition to usermode
 * execution.  As such, there are no read-side primitives analogous to
 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
 * to determine that all tasks have passed through a safe state, not so
 * much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);

#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void);
void __rcu_read_unlock(void);
void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
		preempt_enable();
}

static inline void synchronize_rcu(void)
{
	synchronize_sched();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
void rcu_init(void);
void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
int rcu_cpu_notify(struct notifier_block *self,
		   unsigned long action, void *hcpu);

#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void)
{
}
static inline void rcu_sysrq_end(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
					 struct task_struct *next) { }
#endif /* CONFIG_NO_HZ_FULL */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void)
{
}
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
 * in the inner idle loop, that is, between the rcu_idle_enter() and
 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
 * critical sections.  However, things like powertop need tracepoints
 * in the inner idle loop.
 *
 * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, a call to the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU.  It is permissible
 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
 * quite limited.  If deeper nesting is required, it will be necessary
 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
 */
#define RCU_NONIDLE(a) \
	do { \
		rcu_irq_enter(); \
		do { a; } while (0); \
		rcu_irq_exit(); \
	} while (0)
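
/*
 * For example (an illustrative sketch -- trace_cpu_idle_hypothetical()
 * is a made-up tracepoint name), idle-loop code might wrap a tracepoint
 * as follows:
 *
 *	RCU_NONIDLE(trace_cpu_idle_hypothetical(smp_processor_id()));
 */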

/*
 * Note a voluntary context switch for RCU-tasks benefit.  This is a
 * macro rather than an inline function to avoid #include hell.
 */
#ifdef CONFIG_TASKS_RCU
#define TASKS_RCU(x) x
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
	do { \
		rcu_all_qs(); \
		if (READ_ONCE((t)->rcu_tasks_holdout)) \
			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
	} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch(t)	rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */

/**
 * cond_resched_rcu_qs - Report potential quiescent states to RCU
 *
 * This macro resembles cond_resched(), except that it is defined to
 * report potential quiescent states to RCU-tasks even if the
 * cond_resched() machinery were to be shut off, as some advocate for
 * PREEMPT kernels.
 */
#define cond_resched_rcu_qs() \
do { \
	if (!cond_resched()) \
		rcu_note_voluntary_context_switch(current); \
} while (0)
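
/*
 * For example (an illustrative sketch -- process_item() and nr_items are
 * hypothetical names), a long-running in-kernel loop can avoid stalling
 * grace periods by reporting quiescent states in its body:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched_rcu_qs();
 *	}
 */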

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
 * initialization and destruction of rcu_head on the stack. rcu_head structures
 * allocated dynamically in the heap or defined statically don't need any
 * initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head)
{
}

static inline void destroy_rcu_head(struct rcu_head *head)
{
}

static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}

static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
#endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
	return true;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);

int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.
 */
#ifdef CONFIG_PREEMPT_COUNT
int rcu_read_lock_sched_held(void);
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)		do { } while (0)
# define rcu_lock_release(a)		do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
	return preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
	return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
 * @c: condition to check
 * @s: informative message
 */
#define RCU_LOCKDEP_WARN(c, s)						\
	do {								\
		static bool __section(.data.unlikely) __warned;		\
		if (debug_lockdep_rcu_enabled() && !__warned && (c)) {	\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
{
}
#endif /* #else #ifdef CONFIG_PROVE_RCU */

#define rcu_sleep_check()						\
	do {								\
		rcu_preempt_sleep_check();				\
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),	\
				 "Illegal context switch in RCU-bh read-side critical section"); \
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),	\
				 "Illegal context switch in RCU-sched read-side critical section"); \
	} while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple flavors of pointers to match the multiple flavors of RCU
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
 * the future.
 */

#ifdef __CHECKER__
#define rcu_dereference_sparse(p, space) \
	((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_dereference_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
({ \
	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
	rcu_dereference_sparse(p, space); \
	((typeof(*p) __force __kernel *)(p)); \
})

/**
 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
 * @v: The value to statically initialize with.
 */
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 *
 * Note that rcu_assign_pointer() evaluates each of its arguments only
 * once, appearances notwithstanding.  One of the "extra" evaluations
 * is in typeof() and the other visible only to sparse (__CHECKER__),
 * neither of which actually execute the argument.  As with most cpp
 * macros, this execute-arguments-only-once property is important, so
 * please be careful when making changes to rcu_assign_pointer() and the
 * other macros that it invokes.
 */
#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
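
/*
 * For example (an illustrative sketch -- struct foo, foo_init(), and the
 * global pointer gp are hypothetical names), the updater fully initializes
 * the new structure and only then publishes it to readers:
 *
 *	struct foo __rcu *gp;
 *
 *	static void foo_publish(struct foo *newp)
 *	{
 *		foo_init(newp);
 *		rcu_assign_pointer(gp, newp);
 *	}
 */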

/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * smp_read_barrier_depends() and keep the READ_ONCE().  This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL.  Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns.  This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
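
/*
 * For example (an illustrative sketch -- gp is a hypothetical
 * RCU-protected global pointer), testing against NULL without
 * dereferencing:
 *
 *	if (!rcu_access_pointer(gp))
 *		return -ENOENT;
 */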

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *					      atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
				__rcu)

#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/

/*
 * The tracing infrastructure traces RCU (we want that), but unfortunately
 * some of the RCU checks cause tracing to lock up the system.
 *
 * The tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the READ_ONCE().  This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing.  Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), (c), __rcu)
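
/*
 * For example (an illustrative sketch -- gp and foo_mutex are hypothetical
 * names), an updater already holding the lock that guards gp might do:
 *
 *	struct foo *p;
 *
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&foo_mutex));
 */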

/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
 * @p: The pointer to hand off
 *
 * This is simply an identity function, but it documents where a pointer
 * is handed off from RCU to some other synchronization mechanism, for
 * example, reference counting or locking.  In C11, it would map to
 * kill_dependency().  It could be used as follows:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	long_lived = is_long_lived(p);
 *	if (long_lived) {
 *		if (!atomic_inc_not_zero(&p->refcnt))
 *			long_lived = false;
 *		else
 *			p = rcu_pointer_handoff(p);
 *	}
 *	rcu_read_unlock();
 */
#define rcu_pointer_handoff(p) (p)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
 * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal.  Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
 * read-side critical sections may be preempted and they may also block, but
 * only when acquiring spinlocks that are subject to priority inheritance.
 */
static inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock() used illegally while idle");
}
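
/*
 * For example (an illustrative sketch -- gp and do_something_with() are
 * hypothetical names), a typical reader looks like this:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 */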

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * In most situations, rcu_read_unlock() is immune from deadlock.
 * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
 * is responsible for deboosting, which it does via rt_mutex_unlock().
 * Unfortunately, this function acquires the scheduler's runqueue and
 * priority-inheritance spinlocks.  This means that deadlock could result
 * if the caller of rcu_read_unlock() already holds one of these locks or
 * any lock that is ever acquired while holding them; or any lock which
 * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
 * does not disable irqs while taking ->wait_lock.
 *
 * That said, RCU readers are never priority boosted unless they were
 * preempted.  Therefore, one way to avoid deadlock is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with one of
 * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
 * a number of ways, for example, by invoking preempt_disable() before
 * the critical section's outermost rcu_read_lock().
 *
 * Given that the set of locks acquired by rt_mutex_unlock() might change
 * at any time, a somewhat more future-proofed approach is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with irqs disabled.
 * This approach relies on the fact that rt_mutex_unlock() currently only
 * acquires irq-disabled locks.
 *
 * The second of these two approaches is best in most situations,
 * however, the first approach can also be useful, at least to those
 * developers willing to keep abreast of the set of locks acquired by
 * rt_mutex_unlock().
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock() used illegally while idle");
	__release(RCU);
	__rcu_read_unlock();
	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh() or synchronize_rcu_bh().  Since
 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
 * softirq handler to be a quiescent state, a process in an RCU read-side
 * critical section must be protected by disabling softirqs.  Read-side
 * critical sections in interrupt context can use just rcu_read_lock(),
 * though this should at least be commented to avoid confusing people
 * reading the code.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_bh() used illegally while idle");
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}
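
/*
 * For example (an illustrative sketch -- gp and do_something_with() are
 * hypothetical names), a reader paired with call_rcu_bh() updates looks
 * like this:
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock_bh();
 */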

/**
 * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_sched() or synchronize_rcu_sched().
 * Read-side critical sections can also be introduced by anything that
 * disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_sched() used illegally while idle");
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer -or-
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() -and-
 *	a.	You have not made -any- reader-visible changes to
 *		this structure since then -or-
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure.  (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  That is, the
 * structures will look OK in crash dumps, but any concurrent RCU readers
 * might see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer -after- you have completely initialized
 * the reader-accessible portions of the linked structure.
 *
 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
 * ordering guarantees for either the CPU or the compiler.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		rcu_dereference_sparse(p, __rcu); \
		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
	} while (0)
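
/*
 * For example (an illustrative sketch -- gp is a hypothetical
 * RCU-protected global pointer), NULLing out a pointer requires no
 * ordering and so may safely use RCU_INIT_POINTER():
 *
 *	RCU_INIT_POINTER(gp, NULL);
 */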

/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
		.p = RCU_INITIALIZER(v)
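
/*
 * For example (an illustrative sketch -- struct foo_head and default_foo
 * are hypothetical names):
 *
 *	struct foo_head {
 *		struct foo __rcu *fp;
 *	};
 *
 *	static struct foo_head head = {
 *		RCU_POINTER_INITIALIZER(fp, &default_foo),
 *	};
 */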

/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kfree_rcu()?
 */
#define __is_kfree_rcu_offset(offset) ((offset) < 4096)

/*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
#define __kfree_rcu(head, offset) \
	do { \
		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
		kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
	} while (0)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr:	pointer to kfree
 * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
 *
 * Many rcu callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rcu_head) \
	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
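
/*
 * For example (an illustrative sketch -- struct foo embeds a struct
 * rcu_head named rcu, and old_fp is a hypothetical pointer already
 * removed from reader visibility by the caller):
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	...
 *
 *	kfree_rcu(old_fp, rcu);
 *
 * This replaces a trivial foo_reclaim() callback passed to call_rcu().
 */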

#ifdef CONFIG_TINY_RCU
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return 0;
}
#endif /* #ifdef CONFIG_TINY_RCU */

#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
#elif defined(CONFIG_RCU_NOCB_CPU)
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif

/* Only for use by adaptive-ticks code. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
bool rcu_sys_is_idle(void);
void rcu_sysidle_force_exit(void);
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

static inline bool rcu_sys_is_idle(void)
{
	return false;
}

static inline void rcu_sysidle_force_exit(void)
{
}

#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */

#endif /* __LINUX_RCUPDATE_H */