Commit 02d211688727ad02bb4555b1aa8ae2de16b21b39
Committed by: Linus Torvalds
Parent:       71c5576fbd
Exists in:    master and 4 other branches

revert "percpu_counter: new function percpu_counter_sum_and_set"
Revert

    commit e8ced39d5e8911c662d4d69a342b9d053eaaac4e
    Author: Mingming Cao <cmm@us.ibm.com>
    Date:   Fri Jul 11 19:27:31 2008 -0400

        percpu_counter: new function percpu_counter_sum_and_set

As described in revert "percpu counter: clean up
percpu_counter_sum_and_set()", the new percpu_counter_sum_and_set() is
racy against updates to the cpu-local accumulators on other CPUs.  Revert
that change.

This means that ext4 will be slow again.  But correct.

Reported-by: Eric Dumazet <dada1@cosmosbay.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mingming Cao <cmm@us.ibm.com>
Cc: <linux-ext4@vger.kernel.org>
Cc: <stable@kernel.org>    [2.6.27.x]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
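The race is easy to reproduce outside the kernel. Below is a minimal
userspace sketch (a hypothetical analog, not kernel code: slot[], adder()
and sum_and_set() are illustrative stand-ins for the per-cpu counters,
for __percpu_counter_add()'s lockless fast path, and for the reverted
primitive). One thread bumps its "cpu-local" slot with a plain, unlocked
read-modify-write while another sums and zeroes all slots under a lock,
as percpu_counter_sum_and_set() did; the "= 0" write-back can land between
the updater's load and store, so increments are silently lost or counted
twice and the final total drifts from the expected value.

/* Build with:  cc -pthread race_demo.c   (hypothetical file name) */
#include <pthread.h>
#include <stdio.h>

#define NR_CPUS 2

static volatile long slot[NR_CPUS];   /* "per-cpu" accumulators; volatile so
                                          each ++ stays a real load + store */
static long count;                     /* central counter */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Analog of __percpu_counter_add()'s fast path: no lock is taken. */
static void *adder(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++)
		slot[0]++;             /* races with the "= 0" below */
	return NULL;
}

/* Analog of the reverted percpu_counter_sum_and_set(): the lock only
 * serializes summers against each other, not against adder() above. */
static void *sum_and_set(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++) {
		pthread_mutex_lock(&lock);
		long ret = count;
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			ret += slot[cpu];
			slot[cpu] = 0; /* clobbers a concurrent slot[0]++ */
		}
		count = ret;
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, s;

	pthread_create(&a, NULL, adder, NULL);
	pthread_create(&s, NULL, sum_and_set, NULL);
	pthread_join(a, NULL);
	pthread_join(s, NULL);

	/* With no race this would always print 1000000. */
	printf("expected 1000000, got %ld\n", count + slot[0] + slot[1]);
	return 0;
}

A read-only summer, like the __percpu_counter_sum() this commit reverts
to, may still return a slightly stale total, but it never corrupts the
per-cpu accumulators; that is exactly the trade the revert makes.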
Showing 3 changed files with 6 additions and 17 deletions
fs/ext4/balloc.c
@@ -609,8 +609,8 @@
 
 	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
 					EXT4_FREEBLOCKS_WATERMARK) {
-		free_blocks = percpu_counter_sum_and_set(fbc);
-		dirty_blocks = percpu_counter_sum_and_set(dbc);
+		free_blocks = percpu_counter_sum_positive(fbc);
+		dirty_blocks = percpu_counter_sum_positive(dbc);
 		if (dirty_blocks < 0) {
 			printk(KERN_CRIT "Dirty block accounting "
 				"went wrong %lld\n",
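To see why ext4 only pays for the slow sum near the limit, here is a
self-contained sketch of the pattern this hunk restores (a hypothetical
userspace analog: struct counter, approx_read(), exact_sum(),
has_free_blocks() and WATERMARK are illustrative stand-ins, not ext4 or
percpu_counter code). The approximate read is a single load and may be
off by the unfolded per-slot deltas; only when the request lands within
the watermark of the limit is the exact sum recomputed, and that sum is
read-only, so unlike the reverted sum_and_set variant it cannot corrupt
the accumulators.

#include <stdio.h>

#define NR_CPUS   4
#define WATERMARK 64           /* stand-in for EXT4_FREEBLOCKS_WATERMARK */

struct counter {
	long count;            /* central, approximate value */
	long slot[NR_CPUS];    /* per-cpu deltas not yet folded in */
};

static long approx_read(struct counter *c)   /* cheap, possibly stale */
{
	return c->count;
}

static long exact_sum(struct counter *c)     /* slow: walks every slot */
{
	long ret = c->count;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		ret += c->slot[cpu];
	return ret < 0 ? 0 : ret;  /* clamp, like percpu_counter_sum_positive() */
}

static int has_free_blocks(struct counter *free, long nblocks)
{
	long free_blocks = approx_read(free);

	/* Near the limit the per-slot error could flip the answer, so
	 * recompute exactly -- mirroring the balloc.c hunk above. */
	if (free_blocks - nblocks < WATERMARK)
		free_blocks = exact_sum(free);

	return free_blocks >= nblocks;
}

int main(void)
{
	struct counter free = { .count = 100, .slot = { 20, 0, -5, 3 } };

	/* The approximate read says 100; the exact sum says 118. */
	printf("alloc 110 blocks: %s\n",
	       has_free_blocks(&free, 110) ? "ok" : "ENOSPC");
	return 0;
}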
include/linux/percpu_counter.h
@@ -35,7 +35,7 @@
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
+s64 __percpu_counter_sum(struct percpu_counter *fbc);
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -44,19 +44,13 @@
 
 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
-	s64 ret = __percpu_counter_sum(fbc, 0);
+	s64 ret = __percpu_counter_sum(fbc);
 	return ret < 0 ? 0 : ret;
 }
 
-static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
-{
-	return __percpu_counter_sum(fbc, 1);
-}
-
-
 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 {
-	return __percpu_counter_sum(fbc, 0);
+	return __percpu_counter_sum(fbc);
 }
 
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
lib/percpu_counter.c
@@ -52,7 +52,7 @@
  * Add up all the per-cpu counts, return the result.  This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
 
@@ -62,12 +62,7 @@
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		if (set)
-			*pcount = 0;
 	}
-	if (set)
-		fbc->count = ret;
-
 	spin_unlock(&fbc->lock);
 	return ret;
 }