Commit 098faf5805c80f951ce5e8b4a6842382ad793c38
Committed by
Jens Axboe
1 parent
71fe07d040
Exists in
master
and in
16 other branches
percpu_counter: make APIs irq safe
In my usage, the percpu APIs are sometimes called with irqs disabled and sometimes not; lockdep complains that there is a potential deadlock. Let's always take the percpu_counter lock in an irq-safe way. There should be no performance penalty, as all of these are slow code paths. Cc: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Shaohua Li <shli@fusionio.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Showing 1 changed file with 9 additions and 6 deletions Side-by-side Diff
lib/percpu_counter.c
... | ... | @@ -60,14 +60,15 @@ |
60 | 60 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount) |
61 | 61 | { |
62 | 62 | int cpu; |
63 | + unsigned long flags; | |
63 | 64 | |
64 | - raw_spin_lock(&fbc->lock); | |
65 | + raw_spin_lock_irqsave(&fbc->lock, flags); | |
65 | 66 | for_each_possible_cpu(cpu) { |
66 | 67 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
67 | 68 | *pcount = 0; |
68 | 69 | } |
69 | 70 | fbc->count = amount; |
70 | - raw_spin_unlock(&fbc->lock); | |
71 | + raw_spin_unlock_irqrestore(&fbc->lock, flags); | |
71 | 72 | } |
72 | 73 | EXPORT_SYMBOL(percpu_counter_set); |
73 | 74 | |
74 | 75 | |
... | ... | @@ -78,9 +79,10 @@ |
78 | 79 | preempt_disable(); |
79 | 80 | count = __this_cpu_read(*fbc->counters) + amount; |
80 | 81 | if (count >= batch || count <= -batch) { |
81 | - raw_spin_lock(&fbc->lock); | |
82 | + unsigned long flags; | |
83 | + raw_spin_lock_irqsave(&fbc->lock, flags); | |
82 | 84 | fbc->count += count; |
83 | - raw_spin_unlock(&fbc->lock); | |
85 | + raw_spin_unlock_irqrestore(&fbc->lock, flags); | |
84 | 86 | __this_cpu_write(*fbc->counters, 0); |
85 | 87 | } else { |
86 | 88 | __this_cpu_write(*fbc->counters, count); |
87 | 89 | |
88 | 90 | |
... | ... | @@ -97,14 +99,15 @@ |
97 | 99 | { |
98 | 100 | s64 ret; |
99 | 101 | int cpu; |
102 | + unsigned long flags; | |
100 | 103 | |
101 | - raw_spin_lock(&fbc->lock); | |
104 | + raw_spin_lock_irqsave(&fbc->lock, flags); | |
102 | 105 | ret = fbc->count; |
103 | 106 | for_each_online_cpu(cpu) { |
104 | 107 | s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
105 | 108 | ret += *pcount; |
106 | 109 | } |
107 | - raw_spin_unlock(&fbc->lock); | |
110 | + raw_spin_unlock_irqrestore(&fbc->lock, flags); | |
108 | 111 | return ret; |
109 | 112 | } |
110 | 113 | EXPORT_SYMBOL(__percpu_counter_sum); |