Commit b4ef0296f214a1e0e65f161f88663b0ca1acca31
Committed by Linus Torvalds
1 parent c67ad917cb
Exists in master and in 39 other branches
percpu_counters: use for_each_online_cpu()
Now that we have implemented hotunplug-time counter spilling, percpu_counter_sum() only needs to look at online CPUs.

Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 1 changed file with 1 addition and 1 deletion
lib/percpu_counter.c
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);
#endif

void percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
	long count;
	s32 *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(percpu_counter_mod);

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
-	for_each_possible_cpu(cpu) {
+	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	spin_unlock(&fbc->lock);
	return ret < 0 ? 0 : ret;
}
EXPORT_SYMBOL(percpu_counter_sum);

void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	spin_lock_init(&fbc->lock);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	free_percpu(fbc->counters);
#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
}
EXPORT_SYMBOL(percpu_counter_destroy);

#ifdef CONFIG_HOTPLUG_CPU
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu;
	struct percpu_counter *fbc;

	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	}
	mutex_unlock(&percpu_counters_lock);
	return NOTIFY_OK;
}

static int __init percpu_counter_startup(void)
{
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);
#endif