Blame view
include/linux/percpu_counter.h
3.52 KB
1da177e4c
|
1 2 3 4 5 6 7 |
#ifndef _LINUX_PERCPU_COUNTER_H #define _LINUX_PERCPU_COUNTER_H /* * A simple "approximate counter" for use in ext2 and ext3 superblocks. * * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4. */ |
1da177e4c
|
8 9 |
#include <linux/spinlock.h> #include <linux/smp.h> |
c67ad917c
|
10 |
#include <linux/list.h> |
1da177e4c
|
11 12 |
#include <linux/threads.h> #include <linux/percpu.h> |
0216bfcff
|
13 |
#include <linux/types.h> |
1da177e4c
|
14 15 16 17 18 |
#ifdef CONFIG_SMP

/*
 * SMP flavour of the approximate counter: a global s64 plus per-cpu
 * s32 deltas.  The update/fold logic lives in lib/percpu_counter.c.
 */
struct percpu_counter {
	spinlock_t lock;	/* NOTE(review): presumably serializes folds
				 * into ->count — confirm in lib/percpu_counter.c */
	s64 count;		/* global (approximate) value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-cpu deltas not yet folded into ->count */
};

/*
 * Default batch threshold used by percpu_counter_add(); defined in
 * lib/percpu_counter.c.
 */
extern int percpu_counter_batch;
1da177e4c
|
26 |
|
ea319518b
|
27 28 29 30 31 32 33 34 35 |
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key);

/*
 * Initialize @fbc to @value.  The per-callsite static lock_class_key
 * gives each initialization site its own lockdep class.  Returns the
 * result of __percpu_counter_init() (0 on success).
 */
#define percpu_counter_init(fbc, value)					\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
/* Full sum over all CPUs — see lib/percpu_counter.c for semantics. */
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
1da177e4c
|
41 |
|
20e897670
|
42 |
/*
 * Add @amount to @fbc using the default batch size
 * (percpu_counter_batch); implemented in lib/percpu_counter.c.
 */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
bf1d89c81
|
46 47 |
/*
 * Full sum of the counter, clamped so that a negative total is
 * reported as 0.
 */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 sum = __percpu_counter_sum(fbc);

	if (sum < 0)
		return 0;
	return sum;
}

/* Full (signed) sum of the counter. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 sum = __percpu_counter_sum(fbc);

	return sum;
}
0216bfcff
|
56 |
/*
 * Cheap, approximate read: returns the global ->count only, without
 * folding in whatever is still buffered in the per-cpu counters.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for the percpu_counter_read() to return a small negative
 * number for some counter which should never be negative.
 */
0216bfcff
|
66 |
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	/*
	 * NOTE(review): a negative transient is reported as 1, not 0 —
	 * presumably so callers never see a zero value; confirm before
	 * relying on this (the UP variant below does no clamping at all).
	 */
	return 1;
}

#else /* !CONFIG_SMP */

/* UP flavour: a plain s64; no lock or per-cpu machinery needed. */
struct percpu_counter {
	s64 count;
};
833f4077b
|
81 |
/*
 * UP stub: nothing to allocate, so initialization cannot fail.
 * Returns 0 to keep the same calling convention as the SMP version.
 */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}

/* UP stub: no per-cpu storage to release. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
3a587f47b
|
90 91 92 93 |
/* Overwrite the counter with @amount. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

/*
 * Three-way comparison of the counter against @rhs: returns -1, 0 or 1
 * for less-than, equal and greater-than respectively.
 */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	s64 count = fbc->count;

	if (count < rhs)
		return -1;
	return count > rhs ? 1 : 0;
}
1da177e4c
|
103 |
/*
 * UP add: plain read-modify-write of ->count with preemption disabled,
 * so the update is atomic with respect to other tasks on this CPU.
 * NOTE(review): not safe against interrupt context — confirm callers.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

/* UP stub: @batch is meaningless without per-cpu buffering. */
static inline void __percpu_counter_add(struct percpu_counter *fbc,
					s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
0216bfcff
|
115 |
/* UP read: the single s64 is the exact value. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * NOTE(review): unlike the SMP variant, no clamping is done here —
 * presumably because the UP value is exact and such counters are never
 * expected to actually go negative; confirm before relying on it.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

/* UP: sum == read, there is nothing per-cpu to fold in. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
1da177e4c
|
131 132 133 134 |
#endif /* CONFIG_SMP */

/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

/*
 * Subtract @amount from the counter.
 * NOTE(review): negating the most negative s64 would overflow; callers
 * presumably never pass it.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */