Commit ab4720ec76b756e1f8705e207a7b392b0453afd6
Committed by: Linus Torvalds
1 parent: 85b8724249
Exists in: master and in 7 other branches
[PATCH] add rcu_barrier() synchronization point
This introduces a new interface - rcu_barrier() - which waits until all RCU callbacks queued before this call have completed.

Reiser4 needs this because its RCU callback does more than just free a memory object: it also removes the object from a list hanging off the super-block. This means that, before freeing the reiser4-specific portion of the super-block (during umount), we have to wait until all pending RCU callbacks have executed.

The only change reiser4 made to the original patch is the export of rcu_barrier().

Cc: Hans Reiser <reiser@namesys.com>
Cc: Vladimir V. Saveliev <vs@namesys.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
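Roughly, the umount-time use the changelog describes looks like the sketch below. The sketch is hypothetical and not part of this patch or of reiser4; names such as example_sb_info and example_put_super are invented for illustration.

/*
 * Hypothetical sketch - not part of this patch.  A filesystem whose
 * RCU callbacks also unlink objects from a list hanging off its
 * super-block-private data must flush those callbacks before that
 * data is freed at umount time.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct example_sb_info {                /* hypothetical ->s_fs_info payload */
        struct list_head objects;       /* entries retired via call_rcu() */
        spinlock_t lock;
};

/* Wired up as ->put_super in the (omitted) super_operations. */
static void example_put_super(struct super_block *sb)
{
        struct example_sb_info *info = sb->s_fs_info;

        /*
         * Pending RCU callbacks still dereference 'info' to unlink
         * their objects; wait for every callback queued so far to
         * finish before 'info' goes away.
         */
        rcu_barrier();

        kfree(info);
        sb->s_fs_info = NULL;
}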
Showing 2 changed files with 43 additions and 0 deletions
include/linux/rcupdate.h
@@ -100,6 +100,7 @@
         struct rcu_head *donelist;
         struct rcu_head **donetail;
         int cpu;
+        struct rcu_head barrier;
 };
 
 DECLARE_PER_CPU(struct rcu_data, rcu_data);
@@ -285,6 +286,7 @@
 extern __deprecated_for_modules void synchronize_kernel(void);
 extern void synchronize_rcu(void);
 void synchronize_idle(void);
+extern void rcu_barrier(void);
 
 #endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPDATE_H */
kernel/rcupdate.c
@@ -116,6 +116,10 @@
         local_irq_restore(flags);
 }
 
+static atomic_t rcu_barrier_cpu_count;
+static struct semaphore rcu_barrier_sema;
+static struct completion rcu_barrier_completion;
+
 /**
  * call_rcu_bh - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
@@ -162,7 +166,43 @@
         return rcu_ctrlblk.completed;
 }
 
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+                complete(&rcu_barrier_completion);
+}
+
 /*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *notused)
+{
+        int cpu = smp_processor_id();
+        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+        struct rcu_head *head;
+
+        head = &rdp->barrier;
+        atomic_inc(&rcu_barrier_cpu_count);
+        call_rcu(head, rcu_barrier_callback);
+}
+
+/**
+ * rcu_barrier - Wait until all the in-flight RCUs are complete.
+ */
+void rcu_barrier(void)
+{
+        BUG_ON(in_interrupt());
+        /* Take cpucontrol semaphore to protect against CPU hotplug */
+        down(&rcu_barrier_sema);
+        init_completion(&rcu_barrier_completion);
+        atomic_set(&rcu_barrier_cpu_count, 0);
+        on_each_cpu(rcu_barrier_func, NULL, 0, 1);
+        wait_for_completion(&rcu_barrier_completion);
+        up(&rcu_barrier_sema);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
+/*
  * Invoke the completed RCU callbacks. They are expected to be in
  * a per-cpu list.
  */
@@ -457,6 +497,7 @@
  */
 void __init rcu_init(void)
 {
+        sema_init(&rcu_barrier_sema, 1);
         rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
         /* Register notifier for non-boot CPUs */
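The mechanism relies on a per-CPU ordering property: rcu_barrier() queues one rcu_barrier_callback() on every CPU via on_each_cpu() and then sleeps on the completion. Because call_rcu() callbacks on a given CPU are invoked in the order they were queued, each CPU's barrier callback can only run after every callback queued there earlier, so when the completion fires all previously queued callbacks on all CPUs have finished. A classic consumer of that guarantee, beyond the reiser4 umount case above, is module unload: if a module's RCU callback is module code, the exit path must flush pending callbacks before the module text is freed. The sketch below is hypothetical and not part of this patch; example_item and friends are invented names.

/*
 * Hypothetical sketch only - not part of this patch.  A module whose
 * RCU callback lives in module text flushes pending callbacks on exit,
 * otherwise a callback could run after the module has been unloaded.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct example_item {
        struct rcu_head rcu;
        int payload;
};

static void example_free_rcu(struct rcu_head *head)
{
        /* Runs after a grace period; the item is no longer reachable. */
        kfree(container_of(head, struct example_item, rcu));
}

static int __init example_init(void)
{
        struct example_item *item = kmalloc(sizeof(*item), GFP_KERNEL);

        if (!item)
                return -ENOMEM;
        item->payload = 42;
        /* Retire the item; example_free_rcu() will run later. */
        call_rcu(&item->rcu, example_free_rcu);
        return 0;
}

static void __exit example_exit(void)
{
        /*
         * Wait for every callback queued with call_rcu() above to run
         * before the module (and example_free_rcu itself) goes away.
         */
        rcu_barrier();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");  /* rcu_barrier() is EXPORT_SYMBOL_GPL */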