Commit 2501cf768e4009a06287a5ee842fd93dd4fd690e
1 parent: c9034c3a1d
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
m68k: Fix xchg/cmpxchg to fail to link if given an inappropriate pointer
Fix the m68k versions of xchg() and cmpxchg() to fail to link if given an
inappropriately sized pointer rather than BUG()'ing at runtime.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Greg Ungerer <gerg@uclinux.org>
cc: linux-m68k@lists.linux-m68k.org
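For readers unfamiliar with the idiom this patch relies on, here is a minimal, standalone sketch of the same link-time trap outside the kernel. The names (__bad_xchg_size(), my_xchg()) are hypothetical and not part of this patch; the point is that a call to a function that is declared but never defined only survives to link time when the compiler cannot prove the branch dead, which is exactly what happens for an unsupported operand size.

    /* sketch.c -- standalone illustration; build with: gcc -O2 sketch.c */
    #include <stdio.h>

    /* Declared but deliberately never defined: any call that survives
     * optimization leaves an unresolved symbol and the link fails. */
    extern unsigned long __bad_xchg_size(volatile void *ptr, int size);

    static inline unsigned long my_xchg(volatile void *ptr, unsigned long x, int size)
    {
            unsigned long old;

            switch (size) {
            case 4:
                    old = *(volatile unsigned int *)ptr;
                    *(volatile unsigned int *)ptr = x;
                    return old;
            default:
                    /* Dead code when size is a compile-time constant 4, so the
                     * undefined call is optimized away; otherwise the linker,
                     * not a runtime BUG(), reports the misuse. */
                    return __bad_xchg_size(ptr, size);
            }
    }

    int main(void)
    {
            unsigned int ok = 1;
            /* unsigned long long bad = 1; */

            printf("%lu\n", my_xchg(&ok, 2, sizeof(ok)));   /* links fine */
            /* my_xchg(&bad, 2, sizeof(bad)); */            /* would not link */
            return 0;
    }

Uncommenting the two "bad" lines makes the linker report "undefined reference to `__bad_xchg_size'", which is the behaviour __invalid_xchg_size() and __invalid_cmpxchg_size() give the kernel. The idiom depends on the optimizer removing the dead default branch, which a normal kernel build (always compiled with optimization) provides.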
Showing 1 changed file with 16 additions and 4 deletions.
arch/m68k/include/asm/system.h
@@ -1,194 +1,206 @@
 #ifndef _M68K_SYSTEM_H
 #define _M68K_SYSTEM_H
 
 #include <linux/linkage.h>
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
 #include <asm/segment.h>
 #include <asm/entry.h>
 
 #ifdef __KERNEL__
 
 /*
  * switch_to(n) should switch tasks to task ptr, first checking that
  * ptr isn't the current task, in which case it does nothing. This
  * also clears the TS-flag if the task we switched to has used the
  * math co-processor latest.
  */
 /*
  * switch_to() saves the extra registers, that are not saved
  * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
  * a0-a1. Some of these are used by schedule() and its predecessors
  * and so we might get see unexpected behaviors when a task returns
  * with unexpected register values.
  *
  * syscall stores these registers itself and none of them are used
  * by syscall after the function in the syscall has been called.
  *
  * Beware that resume now expects *next to be in d1 and the offset of
  * tss to be in a1. This saves a few instructions as we no longer have
  * to push them onto the stack and read them back right after.
  *
  * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
  *
  * Changed 96/09/19 by Andreas Schwab
  * pass prev in a0, next in a1
  */
 asmlinkage void resume(void);
 #define switch_to(prev,next,last) do { \
   register void *_prev __asm__ ("a0") = (prev); \
   register void *_next __asm__ ("a1") = (next); \
   register void *_last __asm__ ("d1"); \
   __asm__ __volatile__("jbsr resume" \
                        : "=a" (_prev), "=a" (_next), "=d" (_last) \
                        : "0" (_prev), "1" (_next) \
                        : "d0", "d2", "d3", "d4", "d5"); \
   (last) = _last; \
 } while (0)
 
 
 /*
  * Force strict CPU ordering.
  * Not really required on m68k...
  */
 #define nop()           do { asm volatile ("nop"); barrier(); } while (0)
 #define mb()            barrier()
 #define rmb()           barrier()
 #define wmb()           barrier()
 #define read_barrier_depends()  ((void)0)
 #define set_mb(var, value)      ({ (var) = (value); wmb(); })
 
 #define smp_mb()        barrier()
 #define smp_rmb()       barrier()
 #define smp_wmb()       barrier()
 #define smp_read_barrier_depends()      ((void)0)
 
 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((volatile struct __xchg_dummy *)(x))
 
+extern unsigned long __invalid_xchg_size(unsigned long, volatile void *, int);
+
 #ifndef CONFIG_RMW_INSNS
 static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 {
         unsigned long flags, tmp;
 
         local_irq_save(flags);
 
         switch (size) {
         case 1:
                 tmp = *(u8 *)ptr;
                 *(u8 *)ptr = x;
                 x = tmp;
                 break;
         case 2:
                 tmp = *(u16 *)ptr;
                 *(u16 *)ptr = x;
                 x = tmp;
                 break;
         case 4:
                 tmp = *(u32 *)ptr;
                 *(u32 *)ptr = x;
                 x = tmp;
                 break;
         default:
-                BUG();
+                tmp = __invalid_xchg_size(x, ptr, size);
+                break;
         }
 
         local_irq_restore(flags);
         return x;
 }
 #else
 static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 {
         switch (size) {
         case 1:
                 __asm__ __volatile__
                         ("moveb %2,%0\n\t"
                          "1:\n\t"
                          "casb %0,%1,%2\n\t"
                          "jne 1b"
                          : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
                 break;
         case 2:
                 __asm__ __volatile__
                         ("movew %2,%0\n\t"
                          "1:\n\t"
                          "casw %0,%1,%2\n\t"
                          "jne 1b"
                          : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
                 break;
         case 4:
                 __asm__ __volatile__
                         ("movel %2,%0\n\t"
                          "1:\n\t"
                          "casl %0,%1,%2\n\t"
                          "jne 1b"
                          : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
                 break;
+        default:
+                x = __invalid_xchg_size(x, ptr, size);
+                break;
         }
         return x;
 }
 #endif
 
 #include <asm-generic/cmpxchg-local.h>
 
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
+extern unsigned long __invalid_cmpxchg_size(volatile void *,
+                                            unsigned long, unsigned long, int);
+
 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,
  * store NEW in MEM. Return the initial value in MEM. Success is
  * indicated by comparing RETURN with OLD.
  */
 #ifdef CONFIG_RMW_INSNS
 #define __HAVE_ARCH_CMPXCHG     1
 
 static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
                                       unsigned long new, int size)
 {
         switch (size) {
         case 1:
                 __asm__ __volatile__ ("casb %0,%2,%1"
                                       : "=d" (old), "=m" (*(char *)p)
                                       : "d" (new), "0" (old), "m" (*(char *)p));
                 break;
         case 2:
                 __asm__ __volatile__ ("casw %0,%2,%1"
                                       : "=d" (old), "=m" (*(short *)p)
                                       : "d" (new), "0" (old), "m" (*(short *)p));
                 break;
         case 4:
                 __asm__ __volatile__ ("casl %0,%2,%1"
                                       : "=d" (old), "=m" (*(int *)p)
                                       : "d" (new), "0" (old), "m" (*(int *)p));
+                break;
+        default:
+                old = __invalid_cmpxchg_size(p, old, new, size);
                 break;
         }
         return old;
 }
 
 #define cmpxchg(ptr, o, n) \
         ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
                         (unsigned long)(n), sizeof(*(ptr))))
 #define cmpxchg_local(ptr, o, n) \
         ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
                         (unsigned long)(n), sizeof(*(ptr))))
 #else
 
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
  */
 #define cmpxchg_local(ptr, o, n) \
         ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
                         (unsigned long)(n), sizeof(*(ptr))))
 
 #include <asm-generic/cmpxchg.h>
 
 #endif
 
 #define arch_align_stack(x) (x)
 
 #endif /* __KERNEL__ */
 
 #endif /* _M68K_SYSTEM_H */
 
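The comment block in the file above spells out the cmpxchg() convention: the value that was in memory is returned, and success is detected by comparing that return value with the expected old value. A minimal user-space sketch of the same convention, using GCC's __sync_val_compare_and_swap() builtin as a stand-in for the kernel's cmpxchg():

    /* cmpxchg_demo.c -- "success == (ret == old)" convention;
     * build with: gcc -O2 cmpxchg_demo.c */
    #include <stdio.h>

    int main(void)
    {
            unsigned int mem = 5;
            unsigned int old = 5, new = 9;

            /* Returns the value that was in mem before the operation. */
            unsigned int ret = __sync_val_compare_and_swap(&mem, old, new);

            if (ret == old)
                    printf("swap succeeded, mem is now %u\n", mem);  /* prints 9 */
            else
                    printf("swap failed, mem still holds %u\n", mem);
            return 0;
    }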