Commit fd3d664fef97cf01f8e28fe0b024ad52f3bbc1bc
Committed by
Linus Torvalds
1 parent
fe102c71a6
Exists in
master
and in
39 other branches
percpu_counter: fix CPU unplug race in percpu_counter_destroy()
We should first delete the counter from percpu_counters list before freeing memory, or a percpu_counter_hotcpu_callback() could dereference a NULL pointer. Signed-off-by: Eric Dumazet <dada1@cosmosbay.com> Acked-by: David S. Miller <davem@davemloft.net> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Mingming Cao <cmm@us.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 1 changed file with 2 additions and 2 deletions (inline diff).
lib/percpu_counter.c
1 | /* | 1 | /* |
2 | * Fast batching percpu counters. | 2 | * Fast batching percpu counters. |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/percpu_counter.h> | 5 | #include <linux/percpu_counter.h> |
6 | #include <linux/notifier.h> | 6 | #include <linux/notifier.h> |
7 | #include <linux/mutex.h> | 7 | #include <linux/mutex.h> |
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/cpu.h> | 9 | #include <linux/cpu.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | 11 | ||
12 | #ifdef CONFIG_HOTPLUG_CPU | 12 | #ifdef CONFIG_HOTPLUG_CPU |
13 | static LIST_HEAD(percpu_counters); | 13 | static LIST_HEAD(percpu_counters); |
14 | static DEFINE_MUTEX(percpu_counters_lock); | 14 | static DEFINE_MUTEX(percpu_counters_lock); |
15 | #endif | 15 | #endif |
16 | 16 | ||
17 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount) | 17 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount) |
18 | { | 18 | { |
19 | int cpu; | 19 | int cpu; |
20 | 20 | ||
21 | spin_lock(&fbc->lock); | 21 | spin_lock(&fbc->lock); |
22 | for_each_possible_cpu(cpu) { | 22 | for_each_possible_cpu(cpu) { |
23 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); | 23 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
24 | *pcount = 0; | 24 | *pcount = 0; |
25 | } | 25 | } |
26 | fbc->count = amount; | 26 | fbc->count = amount; |
27 | spin_unlock(&fbc->lock); | 27 | spin_unlock(&fbc->lock); |
28 | } | 28 | } |
29 | EXPORT_SYMBOL(percpu_counter_set); | 29 | EXPORT_SYMBOL(percpu_counter_set); |
30 | 30 | ||
/*
 * Add @amount to the counter.  The delta is accumulated in this CPU's
 * private s32 slot; only when the slot's magnitude reaches @batch is it
 * folded into the shared fbc->count under the lock.  get_cpu() disables
 * preemption so the per-cpu slot cannot change owner mid-update.
 */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	s32 *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= batch || count <= -batch) {
		/* Local delta grew past the batch threshold: flush it to
		 * the central count and reset the per-cpu slot. */
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		/* Still within the batch window: stay lock-free. */
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);
50 | 50 | ||
/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive().
 *
 * NOTE: this also CONSOLIDATES the counter as a side effect — each online
 * CPU's delta slot is folded into fbc->count and zeroed, so subsequent
 * reads of fbc->count reflect the summed value.  Offline CPUs' residue is
 * handled by the hotplug callback, not here.
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;

	spin_lock(&fbc->lock);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
		*pcount = 0;
	}
	/* Persist the consolidated total in the central count. */
	fbc->count = ret;

	spin_unlock(&fbc->lock);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
73 | 73 | ||
/* Separate lockdep class for counters whose lock is taken in IRQ context
 * (see percpu_counter_init_irq), so lockdep does not conflate them with
 * ordinary percpu counters. */
static struct lock_class_key percpu_counter_irqsafe;

/*
 * Initialise @fbc with starting value @amount and allocate its per-cpu
 * delta slots.  Returns 0 on success or -ENOMEM if the per-cpu allocation
 * fails.  With CPU hotplug enabled, the counter is also linked onto the
 * global percpu_counters list so the hotplug callback can fold in the
 * residue of CPUs that go offline.
 */
int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	spin_lock_init(&fbc->lock);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(percpu_counter_init);
91 | 91 | ||
92 | int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount) | 92 | int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount) |
93 | { | 93 | { |
94 | int err; | 94 | int err; |
95 | 95 | ||
96 | err = percpu_counter_init(fbc, amount); | 96 | err = percpu_counter_init(fbc, amount); |
97 | if (!err) | 97 | if (!err) |
98 | lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe); | 98 | lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe); |
99 | return err; | 99 | return err; |
100 | } | 100 | } |
101 | 101 | ||
/*
 * Tear down @fbc: unlink it from the hotplug list and free its per-cpu
 * storage.  Safe to call on a counter whose init failed (counters == NULL).
 *
 * Ordering is deliberate and is the fix this commit makes: the counter
 * MUST be removed from percpu_counters BEFORE its per-cpu memory is
 * freed, otherwise percpu_counter_hotcpu_callback() racing with us could
 * walk the list and dereference the freed (NULLed) counters pointer.
 */
void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	mutex_unlock(&percpu_counters_lock);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
116 | 116 | ||
#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU hotplug notifier: when a CPU dies, fold its leftover per-cpu delta
 * into fbc->count for every registered counter, so no counts are lost.
 * Walks the global list under percpu_counters_lock; each counter's own
 * spinlock is taken irq-safe since some counters are used from IRQ context.
 */
static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu;
	struct percpu_counter *fbc;

	/* Only interested in the post-mortem notification. */
	if (action != CPU_DEAD)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	mutex_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		spin_lock_irqsave(&fbc->lock, flags);
		/* The dead CPU can no longer touch its slot; drain it. */
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		spin_unlock_irqrestore(&fbc->lock, flags);
	}
	mutex_unlock(&percpu_counters_lock);
	return NOTIFY_OK;
}
142 | 142 | ||
/* Register the hotplug notifier at boot (priority 0). */
static int __init percpu_counter_startup(void)
{
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);