Blame view
lib/percpu_counter.c
3.17 KB
3cbc56402 [PATCH] percpu_co... |
1 2 3 4 5 |
/* * Fast batching percpu counters. */ #include <linux/percpu_counter.h> |
c67ad917c percpu_counters()... |
6 7 8 9 |
#include <linux/notifier.h> #include <linux/mutex.h> #include <linux/init.h> #include <linux/cpu.h> |
3cbc56402 [PATCH] percpu_co... |
10 |
#include <linux/module.h> |
c67ad917c percpu_counters()... |
11 12 13 14 |
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Every live percpu_counter is linked here so the CPU-hotplug callback
 * below can walk all counters and fold a dead CPU's residual per-cpu
 * deltas back into each counter's global count.
 */
static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);	/* protects percpu_counters */
#endif
3a587f47b lib: percpu_count... |
15 16 17 18 19 20 21 22 23 24 25 26 27 |
/*
 * Install @amount as the counter's aggregate value.
 *
 * Zeroes every CPU's local slot and stores @amount in fbc->count, all
 * under fbc->lock so concurrent slow-path folds see a consistent pair.
 */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;

	spin_lock(&fbc->lock);
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(fbc->counters, cpu) = 0;
	fbc->count = amount;
	spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);
20e897670 lib: make percpu_... |
28 |
/*
 * Add @amount to the counter.
 *
 * The common case only updates this CPU's private slot and takes no
 * lock: get_cpu() pins us to the CPU so the slot cannot change under
 * us.  Once the local delta reaches +/-@batch it is folded into the
 * global fbc->count under fbc->lock and the slot is reset.
 *
 * NOTE(review): because the fastpath writes *pcount without fbc->lock,
 * any code that reads or modifies other CPUs' slots (even under
 * fbc->lock) races with it — see __percpu_counter_sum().
 */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	s32 *pcount;
	int cpu = get_cpu();

	pcount = per_cpu_ptr(fbc->counters, cpu);
	count = *pcount + amount;
	if (count >= batch || count <= -batch) {
		/* Local delta hit the batch size: fold it into the
		 * global count and restart the slot from zero. */
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		spin_unlock(&fbc->lock);
	} else {
		/* Fast path: remember the delta locally, no lock. */
		*pcount = count;
	}
	put_cpu();
}
EXPORT_SYMBOL(__percpu_counter_add);
3cbc56402 [PATCH] percpu_co... |
47 48 49 50 51 |
/* * Add up all the per-cpu counts, return the result. This is a more accurate * but much slower version of percpu_counter_read_positive() */ |
1f7c14c62 percpu counter: c... |
52 |
s64 __percpu_counter_sum(struct percpu_counter *fbc) |
3cbc56402 [PATCH] percpu_co... |
53 |
{ |
0216bfcff [PATCH] percpu co... |
54 |
s64 ret; |
3cbc56402 [PATCH] percpu_co... |
55 56 57 58 |
int cpu; spin_lock(&fbc->lock); ret = fbc->count; |
b4ef0296f percpu_counters: ... |
59 |
for_each_online_cpu(cpu) { |
0216bfcff [PATCH] percpu co... |
60 |
s32 *pcount = per_cpu_ptr(fbc->counters, cpu); |
3cbc56402 [PATCH] percpu_co... |
61 |
ret += *pcount; |
1f7c14c62 percpu counter: c... |
62 |
*pcount = 0; |
3cbc56402 [PATCH] percpu_co... |
63 |
} |
1f7c14c62 percpu counter: c... |
64 |
fbc->count = ret; |
e8ced39d5 percpu_counter: n... |
65 |
|
3cbc56402 [PATCH] percpu_co... |
66 |
spin_unlock(&fbc->lock); |
bf1d89c81 lib: percpu_count... |
67 |
return ret; |
3cbc56402 [PATCH] percpu_co... |
68 |
} |
bf1d89c81 lib: percpu_count... |
69 |
EXPORT_SYMBOL(__percpu_counter_sum); |
c67ad917c percpu_counters()... |
70 |
|
dc62a30e2 lib: percpu_count... |
71 |
/*
 * Distinct lockdep class installed on fbc->lock by
 * percpu_counter_init_irq() — presumably so lockdep distinguishes
 * counters whose lock is taken from irq context from the plain ones;
 * confirm against lockdep documentation.
 */
static struct lock_class_key percpu_counter_irqsafe;
833f4077b lib: percpu_count... |
72 |
/*
 * Initialise @fbc with aggregate value @amount and freshly allocated
 * per-cpu slots.
 *
 * Returns 0 on success, -ENOMEM if the per-cpu allocation fails (in
 * which case the counter must not be used).
 */
int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	spin_lock_init(&fbc->lock);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;
#ifdef CONFIG_HOTPLUG_CPU
	/* Publish the counter so the CPU_DEAD callback can find it. */
	mutex_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	mutex_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(percpu_counter_init);
dc62a30e2 lib: percpu_count... |
87 88 89 90 91 92 93 94 95 96 |
/*
 * Like percpu_counter_init(), but additionally puts fbc->lock into the
 * irq-safe lockdep class.  Returns 0 on success or the error from
 * percpu_counter_init().
 */
int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount)
{
	int err = percpu_counter_init(fbc, amount);

	if (err)
		return err;
	lockdep_set_class(&fbc->lock, &percpu_counter_irqsafe);
	return 0;
}
c67ad917c percpu_counters()... |
97 98 99 |
void percpu_counter_destroy(struct percpu_counter *fbc) { |
833f4077b lib: percpu_count... |
100 101 |
if (!fbc->counters) return; |
c67ad917c percpu_counters()... |
102 |
free_percpu(fbc->counters); |
cf0ca9fe5 mm: bdi: export B... |
103 |
fbc->counters = NULL; |
c67ad917c percpu_counters()... |
104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 |
#ifdef CONFIG_HOTPLUG_CPU mutex_lock(&percpu_counters_lock); list_del(&fbc->list); mutex_unlock(&percpu_counters_lock); #endif } EXPORT_SYMBOL(percpu_counter_destroy); #ifdef CONFIG_HOTPLUG_CPU static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) { unsigned int cpu; struct percpu_counter *fbc; if (action != CPU_DEAD) return NOTIFY_OK; cpu = (unsigned long)hcpu; mutex_lock(&percpu_counters_lock); list_for_each_entry(fbc, &percpu_counters, list) { s32 *pcount; |
d2b20b115 Add irq protectio... |
126 |
unsigned long flags; |
c67ad917c percpu_counters()... |
127 |
|
d2b20b115 Add irq protectio... |
128 |
spin_lock_irqsave(&fbc->lock, flags); |
c67ad917c percpu_counters()... |
129 130 131 |
pcount = per_cpu_ptr(fbc->counters, cpu); fbc->count += *pcount; *pcount = 0; |
d2b20b115 Add irq protectio... |
132 |
spin_unlock_irqrestore(&fbc->lock, flags); |
c67ad917c percpu_counters()... |
133 134 135 136 137 138 139 140 141 142 143 144 |
} mutex_unlock(&percpu_counters_lock); return NOTIFY_OK; } static int __init percpu_counter_startup(void) { hotcpu_notifier(percpu_counter_hotcpu_callback, 0); return 0; } module_init(percpu_counter_startup); #endif |