arch/ia64/include/asm/atomic.h
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/system.h> |

#define ATOMIC_INIT(i)		((atomic_t) { (i) })
#define ATOMIC64_INIT(i)	((atomic64_t) { (i) })

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}
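
/*
 * ia64_atomic_add() above is the classic compare-and-exchange retry
 * loop: read the current value, compute the new one, and publish it
 * with cmpxchg; if another CPU raced in between, cmpxchg returns
 * something other than "old" and the loop retries.  A minimal
 * user-space sketch of the same idiom using the GCC __sync builtins
 * (illustrative only, not part of this header):
 *
 *	int fetch_add_sketch(int *p, int i)
 *	{
 *		int old, new;
 *		do {
 *			old = *(volatile int *)p;	// current value
 *			new = old + i;			// desired value
 *		} while (!__sync_bool_compare_and_swap(p, old, new));
 *		return new;
 *	}
 */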

static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
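
/*
 * atomic_cmpxchg()/atomic_xchg() expose the raw compare-and-exchange
 * and exchange primitives on the counter.  A typical use is an update
 * that must not cross a bound; a sketch only, where saturate_inc() is
 * a hypothetical helper and not part of this header:
 *
 *	static inline int saturate_inc(atomic_t *v, int max)
 *	{
 *		int c = atomic_read(v);
 *		while (c < max) {
 *			int old = atomic_cmpxchg(v, c, c + 1);
 *			if (old == c)
 *				return 1;	// incremented below max
 *			c = old;		// lost the race; re-check
 *		}
 *		return 0;			// already at max
 *	}
 */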

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
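
/*
 * Note that __atomic_add_unless() returns the value the counter held
 * before the attempt.  The generic wrapper in <linux/atomic.h> derives
 * the boolean form from it, roughly (sketch of the generic code, not
 * defined here):
 *
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */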

static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
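
/*
 * atomic64_inc_not_zero() is the usual building block for taking a
 * reference only while an object is still live.  Illustrative sketch
 * (struct obj and get_obj() are hypothetical, not part of this header):
 *
 *	struct obj { atomic64_t refcnt; };
 *
 *	static inline int get_obj(struct obj *o)
 *	{
 *		// non-zero iff the count was raised; 0 if already dead
 *		return atomic64_inc_not_zero(&o->refcnt);
 *	}
 */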

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}
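
/*
 * The constants 1, 4, 8, 16 and their negatives above are not
 * arbitrary: they are the only increments the ia64 fetchadd
 * instruction can encode, so only those compile-time-constant deltas
 * can take the single-instruction ia64_fetch_and_add() fast path;
 * every other delta falls back to the cmpxchg loop in
 * ia64_atomic_add()/ia64_atomic64_add().
 */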

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* _ASM_IA64_ATOMIC_H */
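
/*
 * Typical release-side usage of the operations above, for reference
 * (illustrative sketch only; obj and free_obj() are hypothetical):
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 *
 * The smp_mb__{before,after}_atomic_* macros reduce to compiler
 * barriers here because, as the comment above notes, ia64's atomic
 * operations are already serializing.
 */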