Commit 9b06e818985d139fd9e82c28297f7744e1b484e1
Committed by
Linus Torvalds
1 parent
512345be25
Exists in
master
and in
20 other branches
[PATCH] Deprecate synchronize_kernel, GPL replacement
The synchronize_kernel() primitive is used for quite a few different purposes: waiting for RCU readers, waiting for NMIs, waiting for interrupts, and so on. This makes RCU code harder to read, since synchronize_kernel() might or might not have matching rcu_read_lock()s. This patch creates a new synchronize_rcu() that is to be used for RCU readers and a new synchronize_sched() that is used for the rest. These two new primitives currently have the same implementation, but this might well change with additional real-time support. Both new primitives are GPL-only; the old primitive is deprecated. Signed-off-by: Paul E. McKenney <paulmck@us.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 2 changed files with 34 additions and 5 deletions Side-by-side Diff
include/linux/rcupdate.h
... | ... | @@ -157,9 +157,9 @@ |
157 | 157 | /** |
158 | 158 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. |
159 | 159 | * |
160 | - * When synchronize_kernel() is invoked on one CPU while other CPUs | |
160 | + * When synchronize_rcu() is invoked on one CPU while other CPUs | |
161 | 161 | * are within RCU read-side critical sections, then the |
162 | - * synchronize_kernel() is guaranteed to block until after all the other | |
162 | + * synchronize_rcu() is guaranteed to block until after all the other | |
163 | 163 | * CPUs exit their critical sections. Similarly, if call_rcu() is invoked |
164 | 164 | * on one CPU while other CPUs are within RCU read-side critical |
165 | 165 | * sections, invocation of the corresponding RCU callback is deferred |
... | ... | @@ -256,6 +256,21 @@ |
256 | 256 | (p) = (v); \ |
257 | 257 | }) |
258 | 258 | |
259 | +/** | |
260 | + * synchronize_sched - block until all CPUs have exited any non-preemptive | |
261 | + * kernel code sequences. | |
262 | + * | |
263 | + * This means that all preempt_disable code sequences, including NMI and | |
264 | + * hardware-interrupt handlers, in progress on entry will have completed | |
265 | + * before this primitive returns. However, this does not guarantee that | |
266 | + * softirq handlers will have completed, since in some kernels, these | 
267 | + * handlers can run in process context, and can block. | 
268 | + * This primitive provides the guarantees made by the (deprecated) | |
269 | + * synchronize_kernel() API. In contrast, synchronize_rcu() only | |
270 | + * guarantees that rcu_read_lock() sections will have completed. | |
271 | + */ | |
272 | +#define synchronize_sched() synchronize_rcu() | |
273 | + | |
259 | 274 | extern void rcu_init(void); |
260 | 275 | extern void rcu_check_callbacks(int cpu, int user); |
261 | 276 | extern void rcu_restart_cpu(int cpu); |
... | ... | @@ -265,7 +280,9 @@ |
265 | 280 | void (*func)(struct rcu_head *head))); |
266 | 281 | extern void FASTCALL(call_rcu_bh(struct rcu_head *head, |
267 | 282 | void (*func)(struct rcu_head *head))); |
268 | -extern void synchronize_kernel(void); | |
283 | +extern __deprecated_for_modules void synchronize_kernel(void); | |
284 | +extern void synchronize_rcu(void); | |
285 | +void synchronize_idle(void); | |
269 | 286 | |
270 | 287 | #endif /* __KERNEL__ */ |
271 | 288 | #endif /* __LINUX_RCUPDATE_H */ |
kernel/rcupdate.c
... | ... | @@ -444,15 +444,18 @@ |
444 | 444 | } |
445 | 445 | |
446 | 446 | /** |
447 | - * synchronize_kernel - wait until a grace period has elapsed. | |
447 | + * synchronize_rcu - wait until a grace period has elapsed. | |
448 | 448 | * |
449 | 449 | * Control will return to the caller some time after a full grace |
450 | 450 | * period has elapsed, in other words after all currently executing RCU |
451 | 451 | * read-side critical sections have completed. RCU read-side critical |
452 | 452 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
453 | 453 | * and may be nested. |
454 | + * | |
455 | + * If your read-side code is not protected by rcu_read_lock(), do -not- | |
456 | + * use synchronize_rcu(). | |
454 | 457 | */ |
455 | -void synchronize_kernel(void) | |
458 | +void synchronize_rcu(void) | |
456 | 459 | { |
457 | 460 | struct rcu_synchronize rcu; |
458 | 461 | |
459 | 462 | |
... | ... | @@ -464,8 +467,17 @@ |
464 | 467 | wait_for_completion(&rcu.completion); |
465 | 468 | } |
466 | 469 | |
470 | +/* | |
471 | + * Deprecated, use synchronize_rcu() or synchronize_sched() instead. | |
472 | + */ | |
473 | +void synchronize_kernel(void) | |
474 | +{ | |
475 | + synchronize_rcu(); | |
476 | +} | |
477 | + | |
467 | 478 | module_param(maxbatch, int, 0); |
468 | 479 | EXPORT_SYMBOL(call_rcu); /* WARNING: GPL-only in April 2006. */ |
469 | 480 | EXPORT_SYMBOL(call_rcu_bh); /* WARNING: GPL-only in April 2006. */ |
481 | +EXPORT_SYMBOL_GPL(synchronize_rcu); | |
470 | 482 | EXPORT_SYMBOL(synchronize_kernel); /* WARNING: GPL-only in April 2006. */ |