Commit 4a6dae6d382e9edf3ff440b819e554ed706359bc
Committed by: Linus Torvalds
Parent: 53e86b91b7
Exists in: master and 7 other branches

[PATCH] atomic: cmpxchg

Introduce an atomic_cmpxchg operation.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 23 changed files with 169 additions and 5 deletions
- Documentation/atomic_ops.txt
- arch/sparc/lib/atomic32.c
- include/asm-alpha/atomic.h
- include/asm-arm/atomic.h
- include/asm-arm26/atomic.h
- include/asm-cris/atomic.h
- include/asm-frv/atomic.h
- include/asm-h8300/atomic.h
- include/asm-i386/atomic.h
- include/asm-ia64/atomic.h
- include/asm-m68k/atomic.h
- include/asm-m68knommu/atomic.h
- include/asm-mips/atomic.h
- include/asm-parisc/atomic.h
- include/asm-powerpc/atomic.h
- include/asm-s390/atomic.h
- include/asm-sh/atomic.h
- include/asm-sh64/atomic.h
- include/asm-sparc/atomic.h
- include/asm-sparc64/atomic.h
- include/asm-v850/atomic.h
- include/asm-x86_64/atomic.h
- include/asm-xtensa/atomic.h
Documentation/atomic_ops.txt
@@ -115,6 +115,21 @@
 is negative. It requires explicit memory barrier semantics around the
 operation.
 
+Finally:
+
+	int atomic_cmpxchg(atomic_t *v, int old, int new);
+
+This performs an atomic compare exchange operation on the atomic value v,
+with the given old and new values. Like all atomic_xxx operations,
+atomic_cmpxchg will only satisfy its atomicity semantics as long as all
+other accesses of *v are performed through atomic_xxx operations.
+
+atomic_cmpxchg requires explicit memory barriers around the operation.
+
+The semantics for atomic_cmpxchg are the same as those defined for 'cas'
+below.
+
+
 If a caller requires memory barrier semantics around an atomic_t
 operation which does not return a value, a set of interfaces are
 defined which accomplish this:
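The documentation hunk above describes the interface only in prose; as a quick illustration (not part of this commit), the primitive is typically consumed in a compare-and-swap retry loop. Below is a minimal sketch in kernel-style C, assuming only atomic_read() and the atomic_cmpxchg() introduced here; the helper name example_atomic_add_unless() is hypothetical.

	/*
	 * Illustrative sketch only -- not part of this patch.  Adds 'a' to
	 * *v unless *v == u, and returns non-zero if the add was performed.
	 * Assumes <asm/atomic.h> for atomic_t, atomic_read() and the new
	 * atomic_cmpxchg(); the function name is hypothetical.
	 */
	#include <asm/atomic.h>

	static inline int example_atomic_add_unless(atomic_t *v, int a, int u)
	{
		int c, old;

		c = atomic_read(v);
		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
			c = old;	/* *v changed under us; retry with the fresh value */
		return c != u;
	}

Every access to v in the loop goes through the atomic_* API, which is exactly the condition the new documentation text places on atomic_cmpxchg's atomicity guarantee.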
arch/sparc/lib/atomic32.c
@@ -37,17 +37,29 @@
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 	return ret;
 }
+EXPORT_SYMBOL(__atomic_add_return);
 
-void atomic_set(atomic_t *v, int i)
+int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
+	int ret;
 	unsigned long flags;
+
 	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
 
-	v->counter = i;
-
 	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+	return ret;
 }
 
-EXPORT_SYMBOL(__atomic_add_return);
+void atomic_set(atomic_t *v, int i)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);
+	v->counter = i;
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+}
 EXPORT_SYMBOL(atomic_set);
include/asm-alpha/atomic.h
include/asm-arm/atomic.h
@@ -80,6 +80,23 @@
 	return result;
 }
 
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+	u32 oldval, res;
+
+	do {
+		__asm__ __volatile__("@ atomic_cmpxchg\n"
+		"ldrex %1, [%2]\n"
+		"teq %1, %3\n"
+		"strexeq %0, %4, [%2]\n"
+		    : "=&r" (res), "=&r" (oldval)
+		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
+		    : "cc");
+	} while (res);
+
+	return oldval;
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 	unsigned long tmp, tmp2;
@@ -129,6 +146,20 @@
 	local_irq_restore(flags);
 
 	return val;
+}
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
 }
 
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
include/asm-arm26/atomic.h
@@ -62,6 +62,20 @@
 	return val;
 }
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 	unsigned long flags;
include/asm-cris/atomic.h
@@ -123,6 +123,19 @@
 	return retval;
 }
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	cris_atomic_save(v, flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	cris_atomic_restore(v, flags);
+	return ret;
+}
+
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec() barrier()
 #define smp_mb__after_atomic_dec() barrier()
include/asm-frv/atomic.h
include/asm-h8300/atomic.h
@@ -82,6 +82,19 @@
 	return ret == 0;
 }
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+	return ret;
+}
+
 static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
 {
 	__asm__ __volatile__("stc ccr,r1l\n\t"
include/asm-i386/atomic.h
@@ -215,6 +215,8 @@
 	return atomic_add_return(-i,v);
 }
 
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
 #define atomic_inc_return(v) (atomic_add_return(1,v))
 #define atomic_dec_return(v) (atomic_sub_return(1,v))
 
include/asm-ia64/atomic.h
include/asm-m68k/atomic.h
@@ -139,6 +139,8 @@
 	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
 }
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec() barrier()
 #define smp_mb__after_atomic_dec() barrier()
include/asm-m68knommu/atomic.h
include/asm-mips/atomic.h
include/asm-parisc/atomic.h
@@ -164,6 +164,7 @@
 }
 
 /* exported interface */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 
 #define atomic_add(i,v) ((void)(__atomic_add_return( ((int)i),(v))))
 #define atomic_sub(i,v) ((void)(__atomic_add_return(-((int)i),(v))))
include/asm-powerpc/atomic.h
@@ -164,6 +164,8 @@
 	return t;
 }
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
 #define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
 #define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
 
include/asm-s390/atomic.h
@@ -198,6 +198,8 @@
 	return retval;
 }
 
+#define atomic_cmpxchg(v, o, n) (atomic_compare_and_swap((o), (n), &((v)->counter)))
+
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()
 #define smp_mb__before_atomic_inc() smp_mb()
include/asm-sh/atomic.h
@@ -87,6 +87,20 @@
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned long flags;
include/asm-sh64/atomic.h
@@ -99,6 +99,20 @@
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
 	unsigned long flags;
include/asm-sparc/atomic.h
include/asm-sparc64/atomic.h
@@ -70,6 +70,8 @@
 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
 
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
 /* Atomic operations are already serializing */
 #ifdef CONFIG_SMP
 #define smp_mb__before_atomic_dec() membar_storeload_loadload();
include/asm-v850/atomic.h
@@ -90,6 +90,20 @@
 #define atomic_dec_and_test(v) (atomic_sub_return (1, (v)) == 0)
 #define atomic_add_negative(i,v) (atomic_add_return ((i), (v)) < 0)
 
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 /* Atomic operations are already serializing on ARM */
 #define smp_mb__before_atomic_dec() barrier()
 #define smp_mb__after_atomic_dec() barrier()
include/asm-x86_64/atomic.h
@@ -360,6 +360,8 @@
 	return atomic_add_return(-i,v);
 }
 
+#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+
 #define atomic_inc_return(v) (atomic_add_return(1,v))
 #define atomic_dec_return(v) (atomic_sub_return(1,v))
 
include/asm-xtensa/atomic.h