Commit 9987c19ed9a71693bf3efce2957666b2513a5252

Authored by Jesper Nilsson
1 parent 83f1588e9f

CRIS: Whitespace cleanup

No functional change, just clean up the most obvious.

Signed-off-by: Jesper Nilsson <jesper.nilsson@axis.com>

Showing 1 changed file with 52 additions and 31 deletions Inline Diff

arch/cris/include/asm/uaccess.h
1 /* 1 /*
2 * Authors: Bjorn Wesen (bjornw@axis.com) 2 * Authors: Bjorn Wesen (bjornw@axis.com)
3 * Hans-Peter Nilsson (hp@axis.com) 3 * Hans-Peter Nilsson (hp@axis.com)
4 */ 4 */
5 5
6 /* Asm:s have been tweaked (within the domain of correctness) to give 6 /* Asm:s have been tweaked (within the domain of correctness) to give
7 satisfactory results for "gcc version 2.96 20000427 (experimental)". 7 satisfactory results for "gcc version 2.96 20000427 (experimental)".
8 8
9 Check regularly... 9 Check regularly...
10 10
11 Register $r9 is chosen for temporaries, being a call-clobbered register 11 Register $r9 is chosen for temporaries, being a call-clobbered register
12 first in line to be used (notably for local blocks), not colliding with 12 first in line to be used (notably for local blocks), not colliding with
13 parameter registers. */ 13 parameter registers. */
14 14
15 #ifndef _CRIS_UACCESS_H 15 #ifndef _CRIS_UACCESS_H
16 #define _CRIS_UACCESS_H 16 #define _CRIS_UACCESS_H
17 17
18 #ifndef __ASSEMBLY__ 18 #ifndef __ASSEMBLY__
19 #include <linux/sched.h> 19 #include <linux/sched.h>
20 #include <linux/errno.h> 20 #include <linux/errno.h>
21 #include <asm/processor.h> 21 #include <asm/processor.h>
22 #include <asm/page.h> 22 #include <asm/page.h>
23 23
#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

/* addr_limit is the maximum accessible address for the task. we misuse
 * the KERNEL_DS and USER_DS values to both assign and compare the
 * addr_limit values through the equally misnamed get/set_fs macros.
 * (see above)
 */

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/* A user range is valid if its size fits below TASK_SIZE and the whole
   range [addr, addr+size) lies below TASK_SIZE; kernel-mode (KERNEL_DS)
   accesses bypass the check entirely. */
#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr, size) \
	(((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
57 57
58 #include <arch/uaccess.h> 58 #include <arch/uaccess.h>
59 59
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};
77 76
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * CRIS, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern long __put_user_bad(void);
107 106
/* Dispatch a user-space store on the access size; an unsupported size
   resolves to the (deliberately undefined) __put_user_bad(), producing a
   link-time error. */
#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1:							\
		__put_user_asm(x, ptr, retval, "move.b");	\
		break;						\
	case 2:							\
		__put_user_asm(x, ptr, retval, "move.w");	\
		break;						\
	case 4:							\
		__put_user_asm(x, ptr, retval, "move.d");	\
		break;						\
	case 8:							\
		__put_user_asm_64(x, ptr, retval);		\
		break;						\
	default:						\
		__put_user_bad();				\
	}							\
} while (0)
119 127
/* Dispatch a user-space load on the access size; an unsupported size
   resolves to the (deliberately undefined) __get_user_bad(), producing a
   link-time error. */
#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1:							\
		__get_user_asm(x, ptr, retval, "move.b");	\
		break;						\
	case 2:							\
		__get_user_asm(x, ptr, retval, "move.w");	\
		break;						\
	case 4:							\
		__get_user_asm(x, ptr, retval, "move.d");	\
		break;						\
	case 8:							\
		__get_user_asm_64(x, ptr, retval);		\
		break;						\
	default:						\
		(x) = __get_user_bad();				\
	}							\
} while (0)
131 148
/* Store without an address-space check; caller must have validated ptr. */
#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

/* Store with an access_ok() check; yields -EFAULT if the check fails. */
#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
150 167
151 168
152 169
/* Load without an address-space check; caller must have validated ptr. */
#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

/* Load with an access_ok() check; yields -EFAULT (and a zero value)
   if the check fails. */
#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (access_ok(VERIFY_READ, __gu_addr, size))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

extern long __get_user_bad(void);
172 189
173 /* More complex functions. Most are inline, but some call functions that 190 /* More complex functions. Most are inline, but some call functions that
174 live in lib/usercopy.c */ 191 live in lib/usercopy.c */
175 192
176 extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n); 193 extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n);
177 extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n); 194 extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
178 extern unsigned long __do_clear_user(void __user *to, unsigned long n); 195 extern unsigned long __do_clear_user(void __user *to, unsigned long n);
179 196
180 static inline unsigned long 197 static inline unsigned long
181 __generic_copy_to_user(void __user *to, const void *from, unsigned long n) 198 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
182 { 199 {
183 if (access_ok(VERIFY_WRITE, to, n)) 200 if (access_ok(VERIFY_WRITE, to, n))
184 return __copy_user(to, from, n); 201 return __copy_user(to, from, n);
185 return n; 202 return n;
186 } 203 }
187 204
188 static inline unsigned long 205 static inline unsigned long
189 __generic_copy_from_user(void *to, const void __user *from, unsigned long n) 206 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
190 { 207 {
191 if (access_ok(VERIFY_READ, from, n)) 208 if (access_ok(VERIFY_READ, from, n))
192 return __copy_user_zeroing(to, from, n); 209 return __copy_user_zeroing(to, from, n);
193 return n; 210 return n;
194 } 211 }
195 212
196 static inline unsigned long 213 static inline unsigned long
197 __generic_clear_user(void __user *to, unsigned long n) 214 __generic_clear_user(void __user *to, unsigned long n)
198 { 215 {
199 if (access_ok(VERIFY_WRITE, to, n)) 216 if (access_ok(VERIFY_WRITE, to, n))
200 return __do_clear_user(to, n); 217 return __do_clear_user(to, n);
201 return n; 218 return n;
202 } 219 }
203 220
204 static inline long 221 static inline long
205 __strncpy_from_user(char *dst, const char __user *src, long count) 222 __strncpy_from_user(char *dst, const char __user *src, long count)
206 { 223 {
207 return __do_strncpy_from_user(dst, src, count); 224 return __do_strncpy_from_user(dst, src, count);
208 } 225 }
209 226
210 static inline long 227 static inline long
211 strncpy_from_user(char *dst, const char __user *src, long count) 228 strncpy_from_user(char *dst, const char __user *src, long count)
212 { 229 {
213 long res = -EFAULT; 230 long res = -EFAULT;
231
214 if (access_ok(VERIFY_READ, src, 1)) 232 if (access_ok(VERIFY_READ, src, 1))
215 res = __do_strncpy_from_user(dst, src, count); 233 res = __do_strncpy_from_user(dst, src, count);
216 return res; 234 return res;
217 } 235 }
218 236
219 237
220 /* Note that these expand awfully if made into switch constructs, so 238 /* Note that these expand awfully if made into switch constructs, so
221 don't do that. */ 239 don't do that. */
222 240
223 static inline unsigned long 241 static inline unsigned long
224 __constant_copy_from_user(void *to, const void __user *from, unsigned long n) 242 __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
225 { 243 {
226 unsigned long ret = 0; 244 unsigned long ret = 0;
245
227 if (n == 0) 246 if (n == 0)
228 ; 247 ;
229 else if (n == 1) 248 else if (n == 1)
230 __asm_copy_from_user_1(to, from, ret); 249 __asm_copy_from_user_1(to, from, ret);
231 else if (n == 2) 250 else if (n == 2)
232 __asm_copy_from_user_2(to, from, ret); 251 __asm_copy_from_user_2(to, from, ret);
233 else if (n == 3) 252 else if (n == 3)
234 __asm_copy_from_user_3(to, from, ret); 253 __asm_copy_from_user_3(to, from, ret);
235 else if (n == 4) 254 else if (n == 4)
236 __asm_copy_from_user_4(to, from, ret); 255 __asm_copy_from_user_4(to, from, ret);
237 else if (n == 5) 256 else if (n == 5)
238 __asm_copy_from_user_5(to, from, ret); 257 __asm_copy_from_user_5(to, from, ret);
239 else if (n == 6) 258 else if (n == 6)
240 __asm_copy_from_user_6(to, from, ret); 259 __asm_copy_from_user_6(to, from, ret);
241 else if (n == 7) 260 else if (n == 7)
242 __asm_copy_from_user_7(to, from, ret); 261 __asm_copy_from_user_7(to, from, ret);
243 else if (n == 8) 262 else if (n == 8)
244 __asm_copy_from_user_8(to, from, ret); 263 __asm_copy_from_user_8(to, from, ret);
245 else if (n == 9) 264 else if (n == 9)
246 __asm_copy_from_user_9(to, from, ret); 265 __asm_copy_from_user_9(to, from, ret);
247 else if (n == 10) 266 else if (n == 10)
248 __asm_copy_from_user_10(to, from, ret); 267 __asm_copy_from_user_10(to, from, ret);
249 else if (n == 11) 268 else if (n == 11)
250 __asm_copy_from_user_11(to, from, ret); 269 __asm_copy_from_user_11(to, from, ret);
251 else if (n == 12) 270 else if (n == 12)
252 __asm_copy_from_user_12(to, from, ret); 271 __asm_copy_from_user_12(to, from, ret);
253 else if (n == 13) 272 else if (n == 13)
254 __asm_copy_from_user_13(to, from, ret); 273 __asm_copy_from_user_13(to, from, ret);
255 else if (n == 14) 274 else if (n == 14)
256 __asm_copy_from_user_14(to, from, ret); 275 __asm_copy_from_user_14(to, from, ret);
257 else if (n == 15) 276 else if (n == 15)
258 __asm_copy_from_user_15(to, from, ret); 277 __asm_copy_from_user_15(to, from, ret);
259 else if (n == 16) 278 else if (n == 16)
260 __asm_copy_from_user_16(to, from, ret); 279 __asm_copy_from_user_16(to, from, ret);
261 else if (n == 20) 280 else if (n == 20)
262 __asm_copy_from_user_20(to, from, ret); 281 __asm_copy_from_user_20(to, from, ret);
263 else if (n == 24) 282 else if (n == 24)
264 __asm_copy_from_user_24(to, from, ret); 283 __asm_copy_from_user_24(to, from, ret);
265 else 284 else
266 ret = __generic_copy_from_user(to, from, n); 285 ret = __generic_copy_from_user(to, from, n);
267 286
268 return ret; 287 return ret;
269 } 288 }
270 289
271 /* Ditto, don't make a switch out of this. */ 290 /* Ditto, don't make a switch out of this. */
272 291
273 static inline unsigned long 292 static inline unsigned long
274 __constant_copy_to_user(void __user *to, const void *from, unsigned long n) 293 __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
275 { 294 {
276 unsigned long ret = 0; 295 unsigned long ret = 0;
296
277 if (n == 0) 297 if (n == 0)
278 ; 298 ;
279 else if (n == 1) 299 else if (n == 1)
280 __asm_copy_to_user_1(to, from, ret); 300 __asm_copy_to_user_1(to, from, ret);
281 else if (n == 2) 301 else if (n == 2)
282 __asm_copy_to_user_2(to, from, ret); 302 __asm_copy_to_user_2(to, from, ret);
283 else if (n == 3) 303 else if (n == 3)
284 __asm_copy_to_user_3(to, from, ret); 304 __asm_copy_to_user_3(to, from, ret);
285 else if (n == 4) 305 else if (n == 4)
286 __asm_copy_to_user_4(to, from, ret); 306 __asm_copy_to_user_4(to, from, ret);
287 else if (n == 5) 307 else if (n == 5)
288 __asm_copy_to_user_5(to, from, ret); 308 __asm_copy_to_user_5(to, from, ret);
289 else if (n == 6) 309 else if (n == 6)
290 __asm_copy_to_user_6(to, from, ret); 310 __asm_copy_to_user_6(to, from, ret);
291 else if (n == 7) 311 else if (n == 7)
292 __asm_copy_to_user_7(to, from, ret); 312 __asm_copy_to_user_7(to, from, ret);
293 else if (n == 8) 313 else if (n == 8)
294 __asm_copy_to_user_8(to, from, ret); 314 __asm_copy_to_user_8(to, from, ret);
295 else if (n == 9) 315 else if (n == 9)
296 __asm_copy_to_user_9(to, from, ret); 316 __asm_copy_to_user_9(to, from, ret);
297 else if (n == 10) 317 else if (n == 10)
298 __asm_copy_to_user_10(to, from, ret); 318 __asm_copy_to_user_10(to, from, ret);
299 else if (n == 11) 319 else if (n == 11)
300 __asm_copy_to_user_11(to, from, ret); 320 __asm_copy_to_user_11(to, from, ret);
301 else if (n == 12) 321 else if (n == 12)
302 __asm_copy_to_user_12(to, from, ret); 322 __asm_copy_to_user_12(to, from, ret);
303 else if (n == 13) 323 else if (n == 13)
304 __asm_copy_to_user_13(to, from, ret); 324 __asm_copy_to_user_13(to, from, ret);
305 else if (n == 14) 325 else if (n == 14)
306 __asm_copy_to_user_14(to, from, ret); 326 __asm_copy_to_user_14(to, from, ret);
307 else if (n == 15) 327 else if (n == 15)
308 __asm_copy_to_user_15(to, from, ret); 328 __asm_copy_to_user_15(to, from, ret);
309 else if (n == 16) 329 else if (n == 16)
310 __asm_copy_to_user_16(to, from, ret); 330 __asm_copy_to_user_16(to, from, ret);
311 else if (n == 20) 331 else if (n == 20)
312 __asm_copy_to_user_20(to, from, ret); 332 __asm_copy_to_user_20(to, from, ret);
313 else if (n == 24) 333 else if (n == 24)
314 __asm_copy_to_user_24(to, from, ret); 334 __asm_copy_to_user_24(to, from, ret);
315 else 335 else
316 ret = __generic_copy_to_user(to, from, n); 336 ret = __generic_copy_to_user(to, from, n);
317 337
318 return ret; 338 return ret;
319 } 339 }
320 340
321 /* No switch, please. */ 341 /* No switch, please. */
322 342
323 static inline unsigned long 343 static inline unsigned long
324 __constant_clear_user(void __user *to, unsigned long n) 344 __constant_clear_user(void __user *to, unsigned long n)
325 { 345 {
326 unsigned long ret = 0; 346 unsigned long ret = 0;
347
327 if (n == 0) 348 if (n == 0)
328 ; 349 ;
329 else if (n == 1) 350 else if (n == 1)
330 __asm_clear_1(to, ret); 351 __asm_clear_1(to, ret);
331 else if (n == 2) 352 else if (n == 2)
332 __asm_clear_2(to, ret); 353 __asm_clear_2(to, ret);
333 else if (n == 3) 354 else if (n == 3)
334 __asm_clear_3(to, ret); 355 __asm_clear_3(to, ret);
335 else if (n == 4) 356 else if (n == 4)
336 __asm_clear_4(to, ret); 357 __asm_clear_4(to, ret);
337 else if (n == 8) 358 else if (n == 8)
338 __asm_clear_8(to, ret); 359 __asm_clear_8(to, ret);
339 else if (n == 12) 360 else if (n == 12)
340 __asm_clear_12(to, ret); 361 __asm_clear_12(to, ret);
341 else if (n == 16) 362 else if (n == 16)
342 __asm_clear_16(to, ret); 363 __asm_clear_16(to, ret);
343 else if (n == 20) 364 else if (n == 20)
344 __asm_clear_20(to, ret); 365 __asm_clear_20(to, ret);
345 else if (n == 24) 366 else if (n == 24)
346 __asm_clear_24(to, ret); 367 __asm_clear_24(to, ret);
347 else 368 else
348 ret = __generic_clear_user(to, n); 369 ret = __generic_clear_user(to, n);
349 370
350 return ret; 371 return ret;
351 } 372 }
352 373
353 374
/* Pick the unrolled constant-size variant when the compiler can prove n
   is a compile-time constant, otherwise fall back to the generic path. */
#define clear_user(to, n)			\
	(__builtin_constant_p(n) ?		\
	 __constant_clear_user(to, n) :		\
	 __generic_clear_user(to, n))

#define copy_from_user(to, from, n)			\
	(__builtin_constant_p(n) ?			\
	 __constant_copy_from_user(to, from, n) :	\
	 __generic_copy_from_user(to, from, n))

#define copy_to_user(to, from, n)			\
	(__builtin_constant_p(n) ?			\
	 __constant_copy_to_user(to, from, n) :		\
	 __generic_copy_to_user(to, from, n))
368 389
369 /* We let the __ versions of copy_from/to_user inline, because they're often 390 /* We let the __ versions of copy_from/to_user inline, because they're often
370 * used in fast paths and have only a small space overhead. 391 * used in fast paths and have only a small space overhead.
371 */ 392 */
372 393
373 static inline unsigned long 394 static inline unsigned long
374 __generic_copy_from_user_nocheck(void *to, const void __user *from, 395 __generic_copy_from_user_nocheck(void *to, const void __user *from,
375 unsigned long n) 396 unsigned long n)
376 { 397 {
377 return __copy_user_zeroing(to, from, n); 398 return __copy_user_zeroing(to, from, n);
378 } 399 }
379 400
380 static inline unsigned long 401 static inline unsigned long
381 __generic_copy_to_user_nocheck(void __user *to, const void *from, 402 __generic_copy_to_user_nocheck(void __user *to, const void *from,
382 unsigned long n) 403 unsigned long n)
383 { 404 {
384 return __copy_user(to, from, n); 405 return __copy_user(to, from, n);
385 } 406 }
386 407
387 static inline unsigned long 408 static inline unsigned long
388 __generic_clear_user_nocheck(void __user *to, unsigned long n) 409 __generic_clear_user_nocheck(void __user *to, unsigned long n)
389 { 410 {
390 return __do_clear_user(to, n); 411 return __do_clear_user(to, n);
391 } 412 }
392 413
393 /* without checking */ 414 /* without checking */
394 415
395 #define __copy_to_user(to, from, n) \ 416 #define __copy_to_user(to, from, n) \
396 __generic_copy_to_user_nocheck((to), (from), (n)) 417 __generic_copy_to_user_nocheck((to), (from), (n))
397 #define __copy_from_user(to, from, n) \ 418 #define __copy_from_user(to, from, n) \
398 __generic_copy_from_user_nocheck((to), (from), (n)) 419 __generic_copy_from_user_nocheck((to), (from), (n))
399 #define __copy_to_user_inatomic __copy_to_user 420 #define __copy_to_user_inatomic __copy_to_user
400 #define __copy_from_user_inatomic __copy_from_user 421 #define __copy_from_user_inatomic __copy_from_user
401 #define __clear_user(to, n) __generic_clear_user_nocheck((to), (n)) 422 #define __clear_user(to, n) __generic_clear_user_nocheck((to), (n))
402 423
403 #define strlen_user(str) strnlen_user((str), 0x7ffffffe) 424 #define strlen_user(str) strnlen_user((str), 0x7ffffffe)
404 425
405 #endif /* __ASSEMBLY__ */ 426 #endif /* __ASSEMBLY__ */
406 427
407 #endif /* _CRIS_UACCESS_H */ 428 #endif /* _CRIS_UACCESS_H */