Commit 5e97b9309baa76b476ec7e0d6e9c097edeb4142c
Committed by: Linus Torvalds
1 parent: 2856f5e31c
Exists in: master and in 39 other branches
local_t: architecture independent extension
This series extends and standardises local_t operations on each architecture, allowing a rich set of atomic operations to be done on per-cpu data with minimal performance impact. On architectures where there seems to be no difference between the SMP and UP operation (same memory barriers, same LOCKing), local.h simply includes asm-generic/local.h, which removes duplicated code from the current kernel tree.

This patch: local_t: architecture independent extension

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
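For illustration, a minimal sketch of the intended usage, assuming a hypothetical per-cpu event counter named 'hits' (this example is not from the patch):

#include <linux/percpu.h>
#include <asm/local.h>

/* Hypothetical per-cpu counter; local_t updates are atomic only with
 * respect to the CPU that owns the variable. */
static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);

static void count_hit(void)
{
	/* get_cpu_var() disables preemption, so the update stays on this
	 * CPU and needs no LOCK-prefixed instruction. */
	local_inc(&get_cpu_var(hits));
	put_cpu_var(hits);
}

static long total_hits(void)
{
	long sum = 0;
	int cpu;

	/* Cross-CPU reads are allowed; only modifications must be local. */
	for_each_possible_cpu(cpu)
		sum += local_read(&per_cpu(hits, cpu));
	return sum;
}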
Showing 1 changed file with 23 additions and 10 deletions
include/asm-generic/local.h
@@ -33,6 +33,19 @@
 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
 
+#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
+#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
+#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
+#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
+#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
+#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
+#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
+
+#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
+#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
+#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
+
 /* Non-atomic variants, ie. preemption disabled and won't be touched
  * in interrupt, etc. Some archs can optimize this case well. */
 #define __local_inc(l) local_set((l), local_read(l) + 1)
 
@@ -44,20 +57,20 @@
  * much more efficient than these naive implementations. Note they take
  * a variable (eg. mystruct.foo), not an address.
  */
-#define cpu_local_read(v) local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
+#define cpu_local_read(l) local_read(&__get_cpu_var(l))
+#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
+#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
+#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
+#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
+#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
 
 /* Non-atomic increments, ie. preemption disabled and won't be touched
  * in interrupt, etc. Some archs can optimize this case well.
  */
-#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
-#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
-#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
-#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
+#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
+#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
+#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
+#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
 
 #endif /* _ASM_GENERIC_LOCAL_H */