/*
 * include/linux/percpu_counter.h
 *
 * NOTE(review): this file was recovered from a git "blame view" scrape;
 * commit-hash markers and blame line numbers have been stripped.
 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

#ifdef CONFIG_SMP

struct percpu_counter {
	spinlock_t lock;	/* protects 'count' during fold/sum */
	s64 count;		/* approximate global value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 *counters;		/* per-cpu deltas, folded into 'count' in batches */
};

/*
 * Per-cpu deltas are folded into the global count once they exceed
 * FBC_BATCH in magnitude; scaled with NR_CPUS to bound total drift.
 */
#if NR_CPUS >= 16
#define FBC_BATCH	(NR_CPUS*2)
#else
#define FBC_BATCH	(NR_CPUS*4)
#endif

void percpu_counter_init(struct percpu_counter *fbc, s64 amount);
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_mod(struct percpu_counter *fbc, s32 amount);
s64 percpu_counter_sum(struct percpu_counter *fbc);

/*
 * Cheap, approximate read: returns the global count without folding in
 * the per-cpu deltas, so the result may lag the true value.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for the percpu_counter_read() to return a small negative
 * number for some counter which should never be negative.
 *
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	/*
	 * NOTE(review): returns 1 rather than 0 when the approximate count
	 * has drifted negative — presumably so callers that divide by the
	 * result never see zero; confirm against callers before changing.
	 */
	return 1;
}

#else	/* !CONFIG_SMP */

/* UP fallback: a plain s64, no lock and no per-cpu deltas needed. */
struct percpu_counter {
	s64 count;
};

static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void
percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
{
	/* disable preemption so the read-modify-write is not interleaved */
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	/* UP count is exact, so the cheap read is already the sum */
	return percpu_counter_read_positive(fbc);
}

#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_mod(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_mod(fbc, -1);
}

#endif /* _LINUX_PERCPU_COUNTER_H */