Commit b4e1ded3cf6e13c122f019532cb60347d6c88c8c
Committed by: Linus Torvalds
1 parent: 8eafafb54b
Exists in: master and 7 other branches
m32r: __xchg() should be always_inline
__xchg() depends on the elimination of unreachable branches in its switch (on object size), so we must declare it always_inline.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Acked-by: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
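As a rough illustration (not part of this commit), the idiom at stake is the link-error trick below: the `default:` branch calls a function that is declared but never defined, and the kernel only links because the switch on `sizeof(*(ptr))` collapses to a single live case once the function is inlined. The names `__xchg_sketch` and `xchg_sketch` are hypothetical; only `__xchg_called_with_bad_pointer` mirrors the header being patched.

```c
/* Fallback so the sketch compiles outside the kernel tree; in-kernel,
 * <linux/compiler.h> (added by this commit) provides __always_inline. */
#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

extern void __xchg_called_with_bad_pointer(void);  /* intentionally never defined */

static __always_inline unsigned long
__xchg_sketch(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		/* a real implementation would atomically swap *ptr and x here */
		return x;
	default:
		/*
		 * Only reachable for unsupported sizes.  When this function is
		 * inlined, `size` is the compile-time constant sizeof(*(ptr)),
		 * the branch is provably dead, and the undefined reference is
		 * discarded.  If the compiler instead emits an out-of-line
		 * copy (which plain `inline` permits, e.g. when optimizing for
		 * size), the branch survives and the build fails at link time.
		 */
		__xchg_called_with_bad_pointer();
		return x;
	}
}

#define xchg_sketch(ptr, x) \
	((__typeof__(*(ptr)))__xchg_sketch((unsigned long)(x), (ptr), sizeof(*(ptr))))
```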
Showing 1 changed file with 2 additions and 1 deletion
Inline Diff
include/asm-m32r/system.h
1 | #ifndef _ASM_M32R_SYSTEM_H | 1 | #ifndef _ASM_M32R_SYSTEM_H |
2 | #define _ASM_M32R_SYSTEM_H | 2 | #define _ASM_M32R_SYSTEM_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This file is subject to the terms and conditions of the GNU General Public | 5 | * This file is subject to the terms and conditions of the GNU General Public |
6 | * License. See the file "COPYING" in the main directory of this archive | 6 | * License. See the file "COPYING" in the main directory of this archive |
7 | * for more details. | 7 | * for more details. |
8 | * | 8 | * |
9 | * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto | 9 | * Copyright (C) 2001 Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto |
10 | * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> | 10 | * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org> |
11 | */ | 11 | */ |
12 | 12 | ||
| | 13 | #include <linux/compiler.h> |
13 | #include <asm/assembler.h> | 14 | #include <asm/assembler.h> |
14 | 15 | ||
15 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
16 | 17 | ||
17 | /* | 18 | /* |
18 | * switch_to(prev, next) should switch from task `prev' to `next' | 19 | * switch_to(prev, next) should switch from task `prev' to `next' |
19 | * `prev' will never be the same as `next'. | 20 | * `prev' will never be the same as `next'. |
20 | * | 21 | * |
21 | * `next' and `prev' should be struct task_struct, but it isn't always defined | 22 | * `next' and `prev' should be struct task_struct, but it isn't always defined |
22 | */ | 23 | */ |
23 | 24 | ||
24 | #if defined(CONFIG_FRAME_POINTER) || \ | 25 | #if defined(CONFIG_FRAME_POINTER) || \ |
25 | !defined(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER) | 26 | !defined(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER) |
26 | #define M32R_PUSH_FP " push fp\n" | 27 | #define M32R_PUSH_FP " push fp\n" |
27 | #define M32R_POP_FP " pop fp\n" | 28 | #define M32R_POP_FP " pop fp\n" |
28 | #else | 29 | #else |
29 | #define M32R_PUSH_FP "" | 30 | #define M32R_PUSH_FP "" |
30 | #define M32R_POP_FP "" | 31 | #define M32R_POP_FP "" |
31 | #endif | 32 | #endif |
32 | 33 | ||
33 | #define switch_to(prev, next, last) do { \ | 34 | #define switch_to(prev, next, last) do { \ |
34 | __asm__ __volatile__ ( \ | 35 | __asm__ __volatile__ ( \ |
35 | " seth lr, #high(1f) \n" \ | 36 | " seth lr, #high(1f) \n" \ |
36 | " or3 lr, lr, #low(1f) \n" \ | 37 | " or3 lr, lr, #low(1f) \n" \ |
37 | " st lr, @%4 ; store old LR \n" \ | 38 | " st lr, @%4 ; store old LR \n" \ |
38 | " ld lr, @%5 ; load new LR \n" \ | 39 | " ld lr, @%5 ; load new LR \n" \ |
39 | M32R_PUSH_FP \ | 40 | M32R_PUSH_FP \ |
40 | " st sp, @%2 ; store old SP \n" \ | 41 | " st sp, @%2 ; store old SP \n" \ |
41 | " ld sp, @%3 ; load new SP \n" \ | 42 | " ld sp, @%3 ; load new SP \n" \ |
42 | " push %1 ; store `prev' on new stack \n" \ | 43 | " push %1 ; store `prev' on new stack \n" \ |
43 | " jmp lr \n" \ | 44 | " jmp lr \n" \ |
44 | " .fillinsn \n" \ | 45 | " .fillinsn \n" \ |
45 | "1: \n" \ | 46 | "1: \n" \ |
46 | " pop %0 ; restore `__last' from new stack \n" \ | 47 | " pop %0 ; restore `__last' from new stack \n" \ |
47 | M32R_POP_FP \ | 48 | M32R_POP_FP \ |
48 | : "=r" (last) \ | 49 | : "=r" (last) \ |
49 | : "0" (prev), \ | 50 | : "0" (prev), \ |
50 | "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \ | 51 | "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \ |
51 | "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \ | 52 | "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \ |
52 | : "memory", "lr" \ | 53 | : "memory", "lr" \ |
53 | ); \ | 54 | ); \ |
54 | } while(0) | 55 | } while(0) |
55 | 56 | ||
56 | /* | 57 | /* |
57 | * On SMP systems, when the scheduler does migration-cost autodetection, | 58 | * On SMP systems, when the scheduler does migration-cost autodetection, |
58 | * it needs a way to flush as much of the CPU's caches as possible. | 59 | * it needs a way to flush as much of the CPU's caches as possible. |
59 | * | 60 | * |
60 | * TODO: fill this in! | 61 | * TODO: fill this in! |
61 | */ | 62 | */ |
62 | static inline void sched_cacheflush(void) | 63 | static inline void sched_cacheflush(void) |
63 | { | 64 | { |
64 | } | 65 | } |
65 | 66 | ||
66 | /* Interrupt Control */ | 67 | /* Interrupt Control */ |
67 | #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) | 68 | #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104) |
68 | #define local_irq_enable() \ | 69 | #define local_irq_enable() \ |
69 | __asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory") | 70 | __asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory") |
70 | #define local_irq_disable() \ | 71 | #define local_irq_disable() \ |
71 | __asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory") | 72 | __asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory") |
72 | #else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ | 73 | #else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ |
73 | static inline void local_irq_enable(void) | 74 | static inline void local_irq_enable(void) |
74 | { | 75 | { |
75 | unsigned long tmpreg; | 76 | unsigned long tmpreg; |
76 | __asm__ __volatile__( | 77 | __asm__ __volatile__( |
77 | "mvfc %0, psw; \n\t" | 78 | "mvfc %0, psw; \n\t" |
78 | "or3 %0, %0, #0x0040; \n\t" | 79 | "or3 %0, %0, #0x0040; \n\t" |
79 | "mvtc %0, psw; \n\t" | 80 | "mvtc %0, psw; \n\t" |
80 | : "=&r" (tmpreg) : : "cbit", "memory"); | 81 | : "=&r" (tmpreg) : : "cbit", "memory"); |
81 | } | 82 | } |
82 | 83 | ||
83 | static inline void local_irq_disable(void) | 84 | static inline void local_irq_disable(void) |
84 | { | 85 | { |
85 | unsigned long tmpreg0, tmpreg1; | 86 | unsigned long tmpreg0, tmpreg1; |
86 | __asm__ __volatile__( | 87 | __asm__ __volatile__( |
87 | "ld24 %0, #0 ; Use 32-bit insn. \n\t" | 88 | "ld24 %0, #0 ; Use 32-bit insn. \n\t" |
88 | "mvfc %1, psw ; No interrupt can be accepted here. \n\t" | 89 | "mvfc %1, psw ; No interrupt can be accepted here. \n\t" |
89 | "mvtc %0, psw \n\t" | 90 | "mvtc %0, psw \n\t" |
90 | "and3 %0, %1, #0xffbf \n\t" | 91 | "and3 %0, %1, #0xffbf \n\t" |
91 | "mvtc %0, psw \n\t" | 92 | "mvtc %0, psw \n\t" |
92 | : "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory"); | 93 | : "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory"); |
93 | } | 94 | } |
94 | #endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ | 95 | #endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ |
95 | 96 | ||
96 | #define local_save_flags(x) \ | 97 | #define local_save_flags(x) \ |
97 | __asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */) | 98 | __asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */) |
98 | 99 | ||
99 | #define local_irq_restore(x) \ | 100 | #define local_irq_restore(x) \ |
100 | __asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \ | 101 | __asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \ |
101 | : "r" (x) : "cbit", "memory") | 102 | : "r" (x) : "cbit", "memory") |
102 | 103 | ||
103 | #if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104)) | 104 | #if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104)) |
104 | #define local_irq_save(x) \ | 105 | #define local_irq_save(x) \ |
105 | __asm__ __volatile__( \ | 106 | __asm__ __volatile__( \ |
106 | "mvfc %0, psw; \n\t" \ | 107 | "mvfc %0, psw; \n\t" \ |
107 | "clrpsw #0x40 -> nop; \n\t" \ | 108 | "clrpsw #0x40 -> nop; \n\t" \ |
108 | : "=r" (x) : /* no input */ : "memory") | 109 | : "=r" (x) : /* no input */ : "memory") |
109 | #else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ | 110 | #else /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ |
110 | #define local_irq_save(x) \ | 111 | #define local_irq_save(x) \ |
111 | ({ \ | 112 | ({ \ |
112 | unsigned long tmpreg; \ | 113 | unsigned long tmpreg; \ |
113 | __asm__ __volatile__( \ | 114 | __asm__ __volatile__( \ |
114 | "ld24 %1, #0 \n\t" \ | 115 | "ld24 %1, #0 \n\t" \ |
115 | "mvfc %0, psw \n\t" \ | 116 | "mvfc %0, psw \n\t" \ |
116 | "mvtc %1, psw \n\t" \ | 117 | "mvtc %1, psw \n\t" \ |
117 | "and3 %1, %0, #0xffbf \n\t" \ | 118 | "and3 %1, %0, #0xffbf \n\t" \ |
118 | "mvtc %1, psw \n\t" \ | 119 | "mvtc %1, psw \n\t" \ |
119 | : "=r" (x), "=&r" (tmpreg) \ | 120 | : "=r" (x), "=&r" (tmpreg) \ |
120 | : : "cbit", "memory"); \ | 121 | : : "cbit", "memory"); \ |
121 | }) | 122 | }) |
122 | #endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ | 123 | #endif /* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */ |
123 | 124 | ||
124 | #define irqs_disabled() \ | 125 | #define irqs_disabled() \ |
125 | ({ \ | 126 | ({ \ |
126 | unsigned long flags; \ | 127 | unsigned long flags; \ |
127 | local_save_flags(flags); \ | 128 | local_save_flags(flags); \ |
128 | !(flags & 0x40); \ | 129 | !(flags & 0x40); \ |
129 | }) | 130 | }) |
130 | 131 | ||
131 | #define nop() __asm__ __volatile__ ("nop" : : ) | 132 | #define nop() __asm__ __volatile__ ("nop" : : ) |
132 | 133 | ||
133 | #define xchg(ptr,x) \ | 134 | #define xchg(ptr,x) \ |
134 | ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) | 135 | ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) |
135 | 136 | ||
136 | #ifdef CONFIG_SMP | 137 | #ifdef CONFIG_SMP |
137 | extern void __xchg_called_with_bad_pointer(void); | 138 | extern void __xchg_called_with_bad_pointer(void); |
138 | #endif | 139 | #endif |
139 | 140 | ||
140 | #ifdef CONFIG_CHIP_M32700_TS1 | 141 | #ifdef CONFIG_CHIP_M32700_TS1 |
141 | #define DCACHE_CLEAR(reg0, reg1, addr) \ | 142 | #define DCACHE_CLEAR(reg0, reg1, addr) \ |
142 | "seth "reg1", #high(dcache_dummy); \n\t" \ | 143 | "seth "reg1", #high(dcache_dummy); \n\t" \ |
143 | "or3 "reg1", "reg1", #low(dcache_dummy); \n\t" \ | 144 | "or3 "reg1", "reg1", #low(dcache_dummy); \n\t" \ |
144 | "lock "reg0", @"reg1"; \n\t" \ | 145 | "lock "reg0", @"reg1"; \n\t" \ |
145 | "add3 "reg0", "addr", #0x1000; \n\t" \ | 146 | "add3 "reg0", "addr", #0x1000; \n\t" \ |
146 | "ld "reg0", @"reg0"; \n\t" \ | 147 | "ld "reg0", @"reg0"; \n\t" \ |
147 | "add3 "reg0", "addr", #0x2000; \n\t" \ | 148 | "add3 "reg0", "addr", #0x2000; \n\t" \ |
148 | "ld "reg0", @"reg0"; \n\t" \ | 149 | "ld "reg0", @"reg0"; \n\t" \ |
149 | "unlock "reg0", @"reg1"; \n\t" | 150 | "unlock "reg0", @"reg1"; \n\t" |
150 | /* FIXME: This workaround code cannot handle kernel modules | 151 | /* FIXME: This workaround code cannot handle kernel modules |
151 | * correctly under SMP environment. | 152 | * correctly under SMP environment. |
152 | */ | 153 | */ |
153 | #else /* CONFIG_CHIP_M32700_TS1 */ | 154 | #else /* CONFIG_CHIP_M32700_TS1 */ |
154 | #define DCACHE_CLEAR(reg0, reg1, addr) | 155 | #define DCACHE_CLEAR(reg0, reg1, addr) |
155 | #endif /* CONFIG_CHIP_M32700_TS1 */ | 156 | #endif /* CONFIG_CHIP_M32700_TS1 */ |
156 | 157 | ||
157 | static inline unsigned long | 158 | static __always_inline unsigned long |
158 | __xchg(unsigned long x, volatile void * ptr, int size) | 159 | __xchg(unsigned long x, volatile void * ptr, int size) |
159 | { | 160 | { |
160 | unsigned long flags; | 161 | unsigned long flags; |
161 | unsigned long tmp = 0; | 162 | unsigned long tmp = 0; |
162 | 163 | ||
163 | local_irq_save(flags); | 164 | local_irq_save(flags); |
164 | 165 | ||
165 | switch (size) { | 166 | switch (size) { |
166 | #ifndef CONFIG_SMP | 167 | #ifndef CONFIG_SMP |
167 | case 1: | 168 | case 1: |
168 | __asm__ __volatile__ ( | 169 | __asm__ __volatile__ ( |
169 | "ldb %0, @%2 \n\t" | 170 | "ldb %0, @%2 \n\t" |
170 | "stb %1, @%2 \n\t" | 171 | "stb %1, @%2 \n\t" |
171 | : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); | 172 | : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); |
172 | break; | 173 | break; |
173 | case 2: | 174 | case 2: |
174 | __asm__ __volatile__ ( | 175 | __asm__ __volatile__ ( |
175 | "ldh %0, @%2 \n\t" | 176 | "ldh %0, @%2 \n\t" |
176 | "sth %1, @%2 \n\t" | 177 | "sth %1, @%2 \n\t" |
177 | : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); | 178 | : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); |
178 | break; | 179 | break; |
179 | case 4: | 180 | case 4: |
180 | __asm__ __volatile__ ( | 181 | __asm__ __volatile__ ( |
181 | "ld %0, @%2 \n\t" | 182 | "ld %0, @%2 \n\t" |
182 | "st %1, @%2 \n\t" | 183 | "st %1, @%2 \n\t" |
183 | : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); | 184 | : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory"); |
184 | break; | 185 | break; |
185 | #else /* CONFIG_SMP */ | 186 | #else /* CONFIG_SMP */ |
186 | case 4: | 187 | case 4: |
187 | __asm__ __volatile__ ( | 188 | __asm__ __volatile__ ( |
188 | DCACHE_CLEAR("%0", "r4", "%2") | 189 | DCACHE_CLEAR("%0", "r4", "%2") |
189 | "lock %0, @%2; \n\t" | 190 | "lock %0, @%2; \n\t" |
190 | "unlock %1, @%2; \n\t" | 191 | "unlock %1, @%2; \n\t" |
191 | : "=&r" (tmp) : "r" (x), "r" (ptr) | 192 | : "=&r" (tmp) : "r" (x), "r" (ptr) |
192 | : "memory" | 193 | : "memory" |
193 | #ifdef CONFIG_CHIP_M32700_TS1 | 194 | #ifdef CONFIG_CHIP_M32700_TS1 |
194 | , "r4" | 195 | , "r4" |
195 | #endif /* CONFIG_CHIP_M32700_TS1 */ | 196 | #endif /* CONFIG_CHIP_M32700_TS1 */ |
196 | ); | 197 | ); |
197 | break; | 198 | break; |
198 | default: | 199 | default: |
199 | __xchg_called_with_bad_pointer(); | 200 | __xchg_called_with_bad_pointer(); |
200 | #endif /* CONFIG_SMP */ | 201 | #endif /* CONFIG_SMP */ |
201 | } | 202 | } |
202 | 203 | ||
203 | local_irq_restore(flags); | 204 | local_irq_restore(flags); |
204 | 205 | ||
205 | return (tmp); | 206 | return (tmp); |
206 | } | 207 | } |
207 | 208 | ||
208 | #define __HAVE_ARCH_CMPXCHG 1 | 209 | #define __HAVE_ARCH_CMPXCHG 1 |
209 | 210 | ||
210 | static inline unsigned long | 211 | static inline unsigned long |
211 | __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new) | 212 | __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new) |
212 | { | 213 | { |
213 | unsigned long flags; | 214 | unsigned long flags; |
214 | unsigned int retval; | 215 | unsigned int retval; |
215 | 216 | ||
216 | local_irq_save(flags); | 217 | local_irq_save(flags); |
217 | __asm__ __volatile__ ( | 218 | __asm__ __volatile__ ( |
218 | DCACHE_CLEAR("%0", "r4", "%1") | 219 | DCACHE_CLEAR("%0", "r4", "%1") |
219 | M32R_LOCK" %0, @%1; \n" | 220 | M32R_LOCK" %0, @%1; \n" |
220 | " bne %0, %2, 1f; \n" | 221 | " bne %0, %2, 1f; \n" |
221 | M32R_UNLOCK" %3, @%1; \n" | 222 | M32R_UNLOCK" %3, @%1; \n" |
222 | " bra 2f; \n" | 223 | " bra 2f; \n" |
223 | " .fillinsn \n" | 224 | " .fillinsn \n" |
224 | "1:" | 225 | "1:" |
225 | M32R_UNLOCK" %0, @%1; \n" | 226 | M32R_UNLOCK" %0, @%1; \n" |
226 | " .fillinsn \n" | 227 | " .fillinsn \n" |
227 | "2:" | 228 | "2:" |
228 | : "=&r" (retval) | 229 | : "=&r" (retval) |
229 | : "r" (p), "r" (old), "r" (new) | 230 | : "r" (p), "r" (old), "r" (new) |
230 | : "cbit", "memory" | 231 | : "cbit", "memory" |
231 | #ifdef CONFIG_CHIP_M32700_TS1 | 232 | #ifdef CONFIG_CHIP_M32700_TS1 |
232 | , "r4" | 233 | , "r4" |
233 | #endif /* CONFIG_CHIP_M32700_TS1 */ | 234 | #endif /* CONFIG_CHIP_M32700_TS1 */ |
234 | ); | 235 | ); |
235 | local_irq_restore(flags); | 236 | local_irq_restore(flags); |
236 | 237 | ||
237 | return retval; | 238 | return retval; |
238 | } | 239 | } |
239 | 240 | ||
240 | /* This function doesn't exist, so you'll get a linker error | 241 | /* This function doesn't exist, so you'll get a linker error |
241 | if something tries to do an invalid cmpxchg(). */ | 242 | if something tries to do an invalid cmpxchg(). */ |
242 | extern void __cmpxchg_called_with_bad_pointer(void); | 243 | extern void __cmpxchg_called_with_bad_pointer(void); |
243 | 244 | ||
244 | static inline unsigned long | 245 | static inline unsigned long |
245 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | 246 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) |
246 | { | 247 | { |
247 | switch (size) { | 248 | switch (size) { |
248 | case 4: | 249 | case 4: |
249 | return __cmpxchg_u32(ptr, old, new); | 250 | return __cmpxchg_u32(ptr, old, new); |
250 | #if 0 /* we don't have __cmpxchg_u64 */ | 251 | #if 0 /* we don't have __cmpxchg_u64 */ |
251 | case 8: | 252 | case 8: |
252 | return __cmpxchg_u64(ptr, old, new); | 253 | return __cmpxchg_u64(ptr, old, new); |
253 | #endif /* 0 */ | 254 | #endif /* 0 */ |
254 | } | 255 | } |
255 | __cmpxchg_called_with_bad_pointer(); | 256 | __cmpxchg_called_with_bad_pointer(); |
256 | return old; | 257 | return old; |
257 | } | 258 | } |
258 | 259 | ||
259 | #define cmpxchg(ptr,o,n) \ | 260 | #define cmpxchg(ptr,o,n) \ |
260 | ({ \ | 261 | ({ \ |
261 | __typeof__(*(ptr)) _o_ = (o); \ | 262 | __typeof__(*(ptr)) _o_ = (o); \ |
262 | __typeof__(*(ptr)) _n_ = (n); \ | 263 | __typeof__(*(ptr)) _n_ = (n); \ |
263 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ | 264 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ |
264 | (unsigned long)_n_, sizeof(*(ptr))); \ | 265 | (unsigned long)_n_, sizeof(*(ptr))); \ |
265 | }) | 266 | }) |
266 | 267 | ||
267 | #endif /* __KERNEL__ */ | 268 | #endif /* __KERNEL__ */ |
268 | 269 | ||
269 | /* | 270 | /* |
270 | * Memory barrier. | 271 | * Memory barrier. |
271 | * | 272 | * |
272 | * mb() prevents loads and stores being reordered across this point. | 273 | * mb() prevents loads and stores being reordered across this point. |
273 | * rmb() prevents loads being reordered across this point. | 274 | * rmb() prevents loads being reordered across this point. |
274 | * wmb() prevents stores being reordered across this point. | 275 | * wmb() prevents stores being reordered across this point. |
275 | */ | 276 | */ |
276 | #define mb() barrier() | 277 | #define mb() barrier() |
277 | #define rmb() mb() | 278 | #define rmb() mb() |
278 | #define wmb() mb() | 279 | #define wmb() mb() |
279 | 280 | ||
280 | /** | 281 | /** |
281 | * read_barrier_depends - Flush all pending reads that subsequents reads | 282 | * read_barrier_depends - Flush all pending reads that subsequents reads |
282 | * depend on. | 283 | * depend on. |
283 | * | 284 | * |
284 | * No data-dependent reads from memory-like regions are ever reordered | 285 | * No data-dependent reads from memory-like regions are ever reordered |
285 | * over this barrier. All reads preceding this primitive are guaranteed | 286 | * over this barrier. All reads preceding this primitive are guaranteed |
286 | * to access memory (but not necessarily other CPUs' caches) before any | 287 | * to access memory (but not necessarily other CPUs' caches) before any |
287 | * reads following this primitive that depend on the data return by | 288 | * reads following this primitive that depend on the data return by |
288 | * any of the preceding reads. This primitive is much lighter weight than | 289 | * any of the preceding reads. This primitive is much lighter weight than |
289 | * rmb() on most CPUs, and is never heavier weight than is | 290 | * rmb() on most CPUs, and is never heavier weight than is |
290 | * rmb(). | 291 | * rmb(). |
291 | * | 292 | * |
292 | * These ordering constraints are respected by both the local CPU | 293 | * These ordering constraints are respected by both the local CPU |
293 | * and the compiler. | 294 | * and the compiler. |
294 | * | 295 | * |
295 | * Ordering is not guaranteed by anything other than these primitives, | 296 | * Ordering is not guaranteed by anything other than these primitives, |
296 | * not even by data dependencies. See the documentation for | 297 | * not even by data dependencies. See the documentation for |
297 | * memory_barrier() for examples and URLs to more information. | 298 | * memory_barrier() for examples and URLs to more information. |
298 | * | 299 | * |
299 | * For example, the following code would force ordering (the initial | 300 | * For example, the following code would force ordering (the initial |
300 | * value of "a" is zero, "b" is one, and "p" is "&a"): | 301 | * value of "a" is zero, "b" is one, and "p" is "&a"): |
301 | * | 302 | * |
302 | * <programlisting> | 303 | * <programlisting> |
303 | * CPU 0 CPU 1 | 304 | * CPU 0 CPU 1 |
304 | * | 305 | * |
305 | * b = 2; | 306 | * b = 2; |
306 | * memory_barrier(); | 307 | * memory_barrier(); |
307 | * p = &b; q = p; | 308 | * p = &b; q = p; |
308 | * read_barrier_depends(); | 309 | * read_barrier_depends(); |
309 | * d = *q; | 310 | * d = *q; |
310 | * </programlisting> | 311 | * </programlisting> |
311 | * | 312 | * |
312 | * | 313 | * |
313 | * because the read of "*q" depends on the read of "p" and these | 314 | * because the read of "*q" depends on the read of "p" and these |
314 | * two reads are separated by a read_barrier_depends(). However, | 315 | * two reads are separated by a read_barrier_depends(). However, |
315 | * the following code, with the same initial values for "a" and "b": | 316 | * the following code, with the same initial values for "a" and "b": |
316 | * | 317 | * |
317 | * <programlisting> | 318 | * <programlisting> |
318 | * CPU 0 CPU 1 | 319 | * CPU 0 CPU 1 |
319 | * | 320 | * |
320 | * a = 2; | 321 | * a = 2; |
321 | * memory_barrier(); | 322 | * memory_barrier(); |
322 | * b = 3; y = b; | 323 | * b = 3; y = b; |
323 | * read_barrier_depends(); | 324 | * read_barrier_depends(); |
324 | * x = a; | 325 | * x = a; |
325 | * </programlisting> | 326 | * </programlisting> |
326 | * | 327 | * |
327 | * does not enforce ordering, since there is no data dependency between | 328 | * does not enforce ordering, since there is no data dependency between |
328 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | 329 | * the read of "a" and the read of "b". Therefore, on some CPUs, such |
329 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | 330 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() |
330 | * in cases like this where there are no data dependencies. | 331 | * in cases like this where there are no data dependencies. |
331 | **/ | 332 | **/ |
332 | 333 | ||
333 | #define read_barrier_depends() do { } while (0) | 334 | #define read_barrier_depends() do { } while (0) |
334 | 335 | ||
335 | #ifdef CONFIG_SMP | 336 | #ifdef CONFIG_SMP |
336 | #define smp_mb() mb() | 337 | #define smp_mb() mb() |
337 | #define smp_rmb() rmb() | 338 | #define smp_rmb() rmb() |
338 | #define smp_wmb() wmb() | 339 | #define smp_wmb() wmb() |
339 | #define smp_read_barrier_depends() read_barrier_depends() | 340 | #define smp_read_barrier_depends() read_barrier_depends() |
340 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) | 341 | #define set_mb(var, value) do { (void) xchg(&var, value); } while (0) |
341 | #else | 342 | #else |
342 | #define smp_mb() barrier() | 343 | #define smp_mb() barrier() |
343 | #define smp_rmb() barrier() | 344 | #define smp_rmb() barrier() |
344 | #define smp_wmb() barrier() | 345 | #define smp_wmb() barrier() |
345 | #define smp_read_barrier_depends() do { } while (0) | 346 | #define smp_read_barrier_depends() do { } while (0) |
346 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | 347 | #define set_mb(var, value) do { var = value; barrier(); } while (0) |
347 | #endif | 348 | #endif |
348 | 349 | ||
349 | #define arch_align_stack(x) (x) | 350 | #define arch_align_stack(x) (x) |
350 | 351 | ||
351 | #endif /* _ASM_M32R_SYSTEM_H */ | 352 | #endif /* _ASM_M32R_SYSTEM_H */ |
352 | 353 |