Commit e08f457c7c0cc7720f28349f8780ea752c063441

Authored by Paul Mundt
Committed by Paul Mundt
1 parent 7a302a9674

sh: __user annotations for __get/__put_user().

This adds some more __user annotations. These weren't being
handled properly in some of the __get_user and __put_user paths,
so this tidies those up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Showing 8 changed files with 49 additions and 34 deletions (side-by-side diff)

arch/sh/kernel/process.c
... ... @@ -17,6 +17,7 @@
17 17 #include <linux/kexec.h>
18 18 #include <linux/kdebug.h>
19 19 #include <linux/tick.h>
  20 +#include <linux/reboot.h>
20 21 #include <asm/uaccess.h>
21 22 #include <asm/mmu_context.h>
22 23 #include <asm/pgalloc.h>
23 24  
24 25  
... ... @@ -449,23 +450,20 @@
449 450 /*
450 451 * sys_execve() executes a new program.
451 452 */
452   -asmlinkage int sys_execve(char *ufilename, char **uargv,
453   - char **uenvp, unsigned long r7,
  453 +asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
  454 + char __user * __user *uenvp, unsigned long r7,
454 455 struct pt_regs __regs)
455 456 {
456 457 struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
457 458 int error;
458 459 char *filename;
459 460  
460   - filename = getname((char __user *)ufilename);
  461 + filename = getname(ufilename);
461 462 error = PTR_ERR(filename);
462 463 if (IS_ERR(filename))
463 464 goto out;
464 465  
465   - error = do_execve(filename,
466   - (char __user * __user *)uargv,
467   - (char __user * __user *)uenvp,
468   - regs);
  466 + error = do_execve(filename, uargv, uenvp, regs);
469 467 if (error == 0) {
470 468 task_lock(current);
471 469 current->ptrace &= ~PT_DTRACE;
arch/sh/kernel/ptrace.c
... ... @@ -99,7 +99,7 @@
99 99 ret = -EIO;
100 100 if (copied != sizeof(tmp))
101 101 break;
102   - ret = put_user(tmp,(unsigned long *) data);
  102 + ret = put_user(tmp,(unsigned long __user *) data);
103 103 break;
104 104 }
105 105  
... ... @@ -128,7 +128,7 @@
128 128 tmp = !!tsk_used_math(child);
129 129 else
130 130 tmp = 0;
131   - ret = put_user(tmp, (unsigned long *)data);
  131 + ret = put_user(tmp, (unsigned long __user *)data);
132 132 break;
133 133 }
134 134  
... ... @@ -196,7 +196,7 @@
196 196  
197 197 case PTRACE_SINGLESTEP: { /* set the trap flag. */
198 198 long pc;
199   - struct pt_regs *dummy = NULL;
  199 + struct pt_regs *regs = NULL;
200 200  
201 201 ret = -EIO;
202 202 if (!valid_signal(data))
... ... @@ -207,7 +207,7 @@
207 207 child->ptrace |= PT_DTRACE;
208 208 }
209 209  
210   - pc = get_stack_long(child, (long)&dummy->pc);
  210 + pc = get_stack_long(child, (long)&regs->pc);
211 211  
212 212 /* Next scheduling will set up UBC */
213 213 if (child->thread.ubc_pc == 0)
arch/sh/kernel/signal.c
... ... @@ -261,14 +261,14 @@
261 261 goto badframe;
262 262 /* It is more difficult to avoid calling this function than to
263 263 call it and ignore errors. */
264   - do_sigaltstack(&st, NULL, regs->regs[15]);
  264 + do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame);
265 265  
266 266 return r0;
267 267  
268 268 badframe:
269 269 force_sig(SIGSEGV, current);
270 270 return 0;
271   -}
  271 +}
272 272  
273 273 /*
274 274 * Set up a signal frame.
arch/sh/kernel/traps.c
... ... @@ -581,7 +581,7 @@
581 581 info.si_signo = SIGBUS;
582 582 info.si_errno = 0;
583 583 info.si_code = si_code;
584   - info.si_addr = (void *) address;
  584 + info.si_addr = (void __user *)address;
585 585 force_sig_info(SIGBUS, &info, current);
586 586 } else {
587 587 if (regs->pc & 1)
include/asm-sh/page.h
... ... @@ -60,6 +60,7 @@
60 60  
61 61 extern unsigned long shm_align_mask;
62 62 extern unsigned long max_low_pfn, min_low_pfn;
  63 +extern unsigned long memory_start, memory_end;
63 64  
64 65 #ifdef CONFIG_MMU
65 66 extern void clear_page_slow(void *to);
include/asm-sh/sections.h
... ... @@ -3,7 +3,5 @@
3 3  
4 4 #include <asm-generic/sections.h>
5 5  
6   -extern char _end[];
7   -
8 6 #endif /* __ASM_SH_SECTIONS_H */
include/asm-sh/system.h
... ... @@ -8,9 +8,13 @@
8 8  
9 9 #include <linux/irqflags.h>
10 10 #include <linux/compiler.h>
  11 +#include <linux/linkage.h>
11 12 #include <asm/types.h>
12 13 #include <asm/ptrace.h>
13 14  
  15 +struct task_struct *__switch_to(struct task_struct *prev,
  16 + struct task_struct *next);
  17 +
14 18 /*
15 19 * switch_to() should switch tasks to task nr n, first
16 20 */
... ... @@ -270,6 +274,16 @@
270 274 #define HAVE_DISABLE_HLT
271 275 void disable_hlt(void);
272 276 void enable_hlt(void);
  277 +
  278 +void default_idle(void);
  279 +
  280 +asmlinkage void break_point_trap(void);
  281 +asmlinkage void debug_trap_handler(unsigned long r4, unsigned long r5,
  282 + unsigned long r6, unsigned long r7,
  283 + struct pt_regs __regs);
  284 +asmlinkage void bug_trap_handler(unsigned long r4, unsigned long r5,
  285 + unsigned long r6, unsigned long r7,
  286 + struct pt_regs __regs);
273 287  
274 288 #define arch_align_stack(x) (x)
275 289  
include/asm-sh/uaccess.h
... ... @@ -61,8 +61,6 @@
61 61 */
62 62 static inline int __access_ok(unsigned long addr, unsigned long size)
63 63 {
64   - extern unsigned long memory_start, memory_end;
65   -
66 64 return ((addr >= memory_start) && ((addr + size) < memory_end));
67 65 }
68 66 #else /* CONFIG_MMU */
... ... @@ -76,7 +74,7 @@
76 74 * __access_ok: Check if address with size is OK or not.
77 75 *
78 76 * We do three checks:
79   - * (1) is it user space?
  77 + * (1) is it user space?
80 78 * (2) addr + size --> carry?
81 79 * (3) addr + size >= 0x80000000 (PAGE_OFFSET)
82 80 *
83 81  
... ... @@ -142,11 +140,12 @@
142 140 __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
143 141  
144 142 struct __large_struct { unsigned long buf[100]; };
145   -#define __m(x) (*(struct __large_struct *)(x))
  143 +#define __m(x) (*(struct __large_struct __user *)(x))
146 144  
147 145 #define __get_user_size(x,ptr,size,retval) \
148 146 do { \
149 147 retval = 0; \
  148 + __chk_user_ptr(ptr); \
150 149 switch (size) { \
151 150 case 1: \
152 151 __get_user_asm(x, ptr, retval, "b"); \
... ... @@ -175,6 +174,7 @@
175 174 #define __get_user_check(x,ptr,size) \
176 175 ({ \
177 176 long __gu_err, __gu_val; \
  177 + __chk_user_ptr(ptr); \
178 178 switch (size) { \
179 179 case 1: \
180 180 __get_user_1(__gu_val, (ptr), __gu_err); \
... ... @@ -300,6 +300,7 @@
300 300 #define __put_user_size(x,ptr,size,retval) \
301 301 do { \
302 302 retval = 0; \
  303 + __chk_user_ptr(ptr); \
303 304 switch (size) { \
304 305 case 1: \
305 306 __put_user_asm(x, ptr, retval, "b"); \
... ... @@ -328,7 +329,7 @@
328 329 #define __put_user_check(x,ptr,size) \
329 330 ({ \
330 331 long __pu_err = -EFAULT; \
331   - __typeof__(*(ptr)) *__pu_addr = (ptr); \
  332 + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
332 333 \
333 334 if (__access_ok((unsigned long)__pu_addr,size)) \
334 335 __put_user_size((x),__pu_addr,(size),__pu_err); \
335 336  
... ... @@ -406,10 +407,10 @@
406 407 #endif
407 408  
408 409 extern void __put_user_unknown(void);
409   -
  410 +
410 411 /* Generic arbitrary sized copy. */
411 412 /* Return the number of bytes NOT copied */
412   -extern __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
  413 +__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
413 414  
414 415 #define copy_to_user(to,from,n) ({ \
415 416 void *__copy_to = (void *) (to); \
... ... @@ -420,14 +421,6 @@
420 421 } else __copy_res = __copy_size; \
421 422 __copy_res; })
422 423  
423   -#define __copy_to_user(to,from,n) \
424   - __copy_user((void *)(to), \
425   - (void *)(from), n)
426   -
427   -#define __copy_to_user_inatomic __copy_to_user
428   -#define __copy_from_user_inatomic __copy_from_user
429   -
430   -
431 424 #define copy_from_user(to,from,n) ({ \
432 425 void *__copy_to = (void *) (to); \
433 426 void *__copy_from = (void *) (from); \
... ... @@ -438,9 +431,20 @@
438 431 } else __copy_res = __copy_size; \
439 432 __copy_res; })
440 433  
441   -#define __copy_from_user(to,from,n) \
442   - __copy_user((void *)(to), \
443   - (void *)(from), n)
  434 +static __always_inline unsigned long
  435 +__copy_from_user(void *to, const void __user *from, unsigned long n)
  436 +{
  437 + return __copy_user(to, (__force void *)from, n);
  438 +}
  439 +
  440 +static __always_inline unsigned long __must_check
  441 +__copy_to_user(void __user *to, const void *from, unsigned long n)
  442 +{
  443 + return __copy_user((__force void *)to, from, n);
  444 +}
  445 +
  446 +#define __copy_to_user_inatomic __copy_to_user
  447 +#define __copy_from_user_inatomic __copy_from_user
444 448  
445 449 /*
446 450 * Clear the area and return remaining number of bytes