Commit 7224c0d1045327d637dab2c90777b6d5ec6d6804

Authored by Greg Ungerer
Committed by Geert Uytterhoeven
1 parent dd775ae254

m68k: include asm/cmpxchg.h in our m68k atomic.h

After commit 9ffc93f203c18a70623f21950f1dd473c9ec48cd ("Remove all
#inclusions of asm/system.h") the m68k build fails with:

  CC      init/main.o
In file included from include/linux/mm.h:15:0,
                 from include/linux/ring_buffer.h:5,
                 from include/linux/ftrace_event.h:4,
                 from include/trace/syscall.h:6,
                 from include/linux/syscalls.h:78,
                 from init/main.c:16:
include/linux/debug_locks.h: In function ‘__debug_locks_off’:
include/linux/debug_locks.h:16:2: error: implicit declaration of function ‘xchg’
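
For context, the failing call site is the xchg() use in __debug_locks_off(),
quoted here from include/linux/debug_locks.h as it stood around this kernel
version (surrounding declarations elided):

  static inline int __debug_locks_off(void)
  {
          return xchg(&debug_locks, 0);
  }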

There is no indirect inclusion of the new asm/cmpxchg.h for m68k here.
Looking at most other architectures, they include asm/cmpxchg.h in their
asm/atomic.h; m68k currently does not. Including it in atomic.h fixes
all m68k build problems.
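
For comparison, a minimal sketch of the pattern most other architectures
already follow (illustrative only, not any single arch's header verbatim):

  /* arch/<arch>/include/asm/atomic.h */
  #include <linux/types.h>
  #include <asm/cmpxchg.h>  /* brings xchg()/cmpxchg() in with the atomic ops */

The one-line change below adopts the same pattern for m68k.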

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Acked-by: David Howells <dhowells@redhat.com>
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>

Showing 1 changed file with 1 addition and 0 deletions

arch/m68k/include/asm/atomic.h
@@ -1,219 +1,220 @@
 #ifndef __ARCH_M68K_ATOMIC__
 #define __ARCH_M68K_ATOMIC__
 
 #include <linux/types.h>
 #include <linux/irqflags.h>
+#include <asm/cmpxchg.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
  * resource counting etc..
  */
 
 /*
  * We do not have SMP m68k systems, so we don't have to deal with that.
  */
 
 #define ATOMIC_INIT(i)	{ (i) }
 
 #define atomic_read(v)		(*(volatile int *)&(v)->counter)
 #define atomic_set(v, i)	(((v)->counter) = i)
 
 /*
  * The ColdFire parts cannot do some immediate to memory operations,
  * so for them we do not specify the "i" asm constraint.
  */
 #ifdef CONFIG_COLDFIRE
 #define ASM_DI	"d"
 #else
 #define ASM_DI	"di"
 #endif
 
 static inline void atomic_add(int i, atomic_t *v)
 {
 	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 }
 
 static inline void atomic_sub(int i, atomic_t *v)
 {
 	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
 }
 
 static inline void atomic_inc(atomic_t *v)
 {
 	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
 }
 
 static inline void atomic_dec(atomic_t *v)
 {
 	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
 }
 
 static inline int atomic_dec_and_test(atomic_t *v)
 {
 	char c;
 	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
 	return c != 0;
 }
 
 static inline int atomic_dec_and_test_lt(atomic_t *v)
 {
 	char c;
 	__asm__ __volatile__(
 		"subql #1,%1; slt %0"
 		: "=d" (c), "=m" (*v)
 		: "m" (*v));
 	return c != 0;
 }
 
 static inline int atomic_inc_and_test(atomic_t *v)
 {
 	char c;
 	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
 	return c != 0;
 }
 
 #ifdef CONFIG_RMW_INSNS
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	int t, tmp;
 
 	__asm__ __volatile__(
 			"1:	movel %2,%1\n"
 			"	addl %3,%1\n"
 			"	casl %2,%1,%0\n"
 			"	jne 1b"
 			: "+m" (*v), "=&d" (t), "=&d" (tmp)
 			: "g" (i), "2" (atomic_read(v)));
 	return t;
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	int t, tmp;
 
 	__asm__ __volatile__(
 			"1:	movel %2,%1\n"
 			"	subl %3,%1\n"
 			"	casl %2,%1,%0\n"
 			"	jne 1b"
 			: "+m" (*v), "=&d" (t), "=&d" (tmp)
 			: "g" (i), "2" (atomic_read(v)));
 	return t;
 }
 
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 #else /* !CONFIG_RMW_INSNS */
 
 static inline int atomic_add_return(int i, atomic_t * v)
 {
 	unsigned long flags;
 	int t;
 
 	local_irq_save(flags);
 	t = atomic_read(v);
 	t += i;
 	atomic_set(v, t);
 	local_irq_restore(flags);
 
 	return t;
 }
 
 static inline int atomic_sub_return(int i, atomic_t * v)
 {
 	unsigned long flags;
 	int t;
 
 	local_irq_save(flags);
 	t = atomic_read(v);
 	t -= i;
 	atomic_set(v, t);
 	local_irq_restore(flags);
 
 	return t;
 }
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	unsigned long flags;
 	int prev;
 
 	local_irq_save(flags);
 	prev = atomic_read(v);
 	if (prev == old)
 		atomic_set(v, new);
 	local_irq_restore(flags);
 	return prev;
 }
 
 static inline int atomic_xchg(atomic_t *v, int new)
 {
 	unsigned long flags;
 	int prev;
 
 	local_irq_save(flags);
 	prev = atomic_read(v);
 	atomic_set(v, new);
 	local_irq_restore(flags);
 	return prev;
 }
 
 #endif /* !CONFIG_RMW_INSNS */
 
 #define atomic_dec_return(v)	atomic_sub_return(1, (v))
 #define atomic_inc_return(v)	atomic_add_return(1, (v))
 
 static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
 	char c;
 	__asm__ __volatile__("subl %2,%1; seq %0"
 			     : "=d" (c), "+m" (*v)
 			     : ASM_DI (i));
 	return c != 0;
 }
 
 static inline int atomic_add_negative(int i, atomic_t *v)
 {
 	char c;
 	__asm__ __volatile__("addl %2,%1; smi %0"
 			     : "=d" (c), "+m" (*v)
 			     : ASM_DI (i));
 	return c != 0;
 }
 
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
 {
 	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
 }
 
 static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 {
 	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
 }
 
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
 	for (;;) {
 		if (unlikely(c == (u)))
 			break;
 		old = atomic_cmpxchg((v), c, c + (a));
 		if (likely(old == c))
 			break;
 		c = old;
 	}
 	return c;
 }
 
 
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
 #define smp_mb__before_atomic_inc()	barrier()
 #define smp_mb__after_atomic_inc()	barrier()
 
 #endif /* __ARCH_M68K_ATOMIC __ */
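
As an aside on why asm/atomic.h needs cmpxchg() in scope at all: the
CONFIG_RMW_INSNS variants above are compare-and-swap retry loops. Below is
a minimal userspace model of the __atomic_add_unless() loop, with GCC's
__sync_val_compare_and_swap() builtin standing in for the kernel's
arch-specific cmpxchg(); the names my_cmpxchg() and add_unless() are
illustrative, not kernel API:

  #include <stdio.h>

  /* Model of cmpxchg(): if *p == old, store new; always return the prior value. */
  static int my_cmpxchg(int *p, int old, int new)
  {
          return __sync_val_compare_and_swap(p, old, new);
  }

  /* Model of __atomic_add_unless(): add a to *v unless *v == u; return the old value. */
  static int add_unless(int *v, int a, int u)
  {
          int c = *v, old;

          for (;;) {
                  if (c == u)             /* forbidden value reached: do nothing */
                          break;
                  old = my_cmpxchg(v, c, c + a);
                  if (old == c)           /* CAS succeeded */
                          break;
                  c = old;                /* lost a race: retry with the fresh value */
          }
          return c;
  }

  int main(void)
  {
          int v = 3;
          int before = add_unless(&v, 2, 7);

          printf("%d -> %d\n", before, v);        /* prints "3 -> 5" */
          return 0;
  }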