Blame view
include/asm-x86/cmpxchg_64.h
4.47 KB
77ef50a52
|
1 2 |
#ifndef ASM_X86__CMPXCHG_64_H #define ASM_X86__CMPXCHG_64_H |
a436ed9c5
|
3 4 |
#include <asm/alternative.h> /* Provides LOCK_PREFIX */ |
e52da357a
|
5 6 |
#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \ (ptr), sizeof(*(ptr)))) |
a436ed9c5
|
7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 |
#define __xg(x) ((volatile long *)(x)) static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) { *ptr = val; } #define _set_64bit set_64bit /* * Note: no "lock" prefix even on SMP: xchg always implies lock anyway * Note 2: xchg has side effect, so that attribute volatile is necessary, * but generally the primitive is invalid, *ptr is output argument. --ANK */ |
e52da357a
|
22 23 |
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) |
a436ed9c5
|
24 25 |
{ switch (size) { |
e52da357a
|
26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 |
case 1: asm volatile("xchgb %b0,%1" : "=q" (x) : "m" (*__xg(ptr)), "0" (x) : "memory"); break; case 2: asm volatile("xchgw %w0,%1" : "=r" (x) : "m" (*__xg(ptr)), "0" (x) : "memory"); break; case 4: asm volatile("xchgl %k0,%1" : "=r" (x) : "m" (*__xg(ptr)), "0" (x) : "memory"); break; case 8: asm volatile("xchgq %0,%1" : "=r" (x) : "m" (*__xg(ptr)), "0" (x) : "memory"); break; |
a436ed9c5
|
50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 |
} return x; } /* * Atomic compare and exchange. Compare OLD with MEM, if identical, * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ #define __HAVE_ARCH_CMPXCHG 1 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: |
e52da357a
|
68 69 70 71 |
asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); |
a436ed9c5
|
72 73 |
return prev; case 2: |
e52da357a
|
74 75 76 77 |
asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); |
a436ed9c5
|
78 79 |
return prev; case 4: |
e52da357a
|
80 81 82 83 |
asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); |
a436ed9c5
|
84 85 |
return prev; case 8: |
e52da357a
|
86 87 88 89 |
asm volatile(LOCK_PREFIX "cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); |
a436ed9c5
|
90 91 92 93 |
return prev; } return old; } |
15878c0b2
|
94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 |
/* * Always use locked operations when touching memory shared with a * hypervisor, since the system may be SMP even if the guest kernel * isn't. */ static inline unsigned long __sync_cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { unsigned long prev; switch (size) { case 1: asm volatile("lock; cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 2: asm volatile("lock; cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; case 4: asm volatile("lock; cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); return prev; } return old; } |
a436ed9c5
|
126 |
static inline unsigned long __cmpxchg_local(volatile void *ptr, |
e52da357a
|
127 128 |
unsigned long old, unsigned long new, int size) |
a436ed9c5
|
129 130 131 132 |
{ unsigned long prev; switch (size) { case 1: |
e52da357a
|
133 134 135 136 |
asm volatile("cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); |
a436ed9c5
|
137 138 |
return prev; case 2: |
e52da357a
|
139 140 141 142 |
asm volatile("cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); |
a436ed9c5
|
143 144 |
return prev; case 4: |
e52da357a
|
145 146 147 148 |
asm volatile("cmpxchgl %k1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); |
a436ed9c5
|
149 150 |
return prev; case 8: |
e52da357a
|
151 152 153 154 |
asm volatile("cmpxchgq %1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory"); |
a436ed9c5
|
155 156 157 158 |
return prev; } return old; } |
32f49eab5
|
159 160 |
#define cmpxchg(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ |
e52da357a
|
161 |
(unsigned long)(n), sizeof(*(ptr)))) |
32f49eab5
|
162 |
#define cmpxchg64(ptr, o, n) \ |
e52da357a
|
163 |
({ \ |
32f49eab5
|
164 165 |
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ cmpxchg((ptr), (o), (n)); \ |
e52da357a
|
166 |
}) |
32f49eab5
|
167 168 |
#define cmpxchg_local(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ |
e52da357a
|
169 170 |
(unsigned long)(n), \ sizeof(*(ptr)))) |
15878c0b2
|
171 172 173 174 |
#define sync_cmpxchg(ptr, o, n) \ ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \ (unsigned long)(n), \ sizeof(*(ptr)))) |
32f49eab5
|
175 |
#define cmpxchg64_local(ptr, o, n) \ |
e52da357a
|
176 |
({ \ |
32f49eab5
|
177 178 |
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ cmpxchg_local((ptr), (o), (n)); \ |
e52da357a
|
179 |
}) |
a436ed9c5
|
180 |
|
77ef50a52
|
181 |
#endif /* ASM_X86__CMPXCHG_64_H */ |