Commit 8d7718aa082aaf30a0b4989e1f04858952f941bc
futex: Sanitize futex ops argument types
Change the futex_atomic_op_inuser and futex_atomic_cmpxchg_inatomic prototypes to use u32 types for the futex value, as this is the data type the futex core code uses all over the place.

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Darren Hart <darren@dvhart.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: David Howells <dhowells@redhat.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <20110311025058.GD26122@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Showing 20 changed files, with 116 additions and 110 deletions. (Side-by-side diff)
- arch/alpha/include/asm/futex.h
- arch/arm/include/asm/futex.h
- arch/frv/include/asm/futex.h
- arch/frv/kernel/futex.c
- arch/ia64/include/asm/futex.h
- arch/microblaze/include/asm/futex.h
- arch/mips/include/asm/futex.h
- arch/parisc/include/asm/futex.h
- arch/powerpc/include/asm/futex.h
- arch/s390/include/asm/futex.h
- arch/s390/include/asm/uaccess.h
- arch/s390/lib/uaccess.h
- arch/s390/lib/uaccess_pt.c
- arch/s390/lib/uaccess_std.c
- arch/sh/include/asm/futex-irq.h
- arch/sh/include/asm/futex.h
- arch/sparc/include/asm/futex_64.h
- arch/tile/include/asm/futex.h
- arch/x86/include/asm/futex.h
- include/asm-generic/futex.h
... | ... | @@ -29,7 +29,7 @@ |
29 | 29 | : "r" (uaddr), "r"(oparg) \ |
30 | 30 | : "memory") |
31 | 31 | |
32 | -static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |
32 | +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | |
33 | 33 | { |
34 | 34 | int op = (encoded_op >> 28) & 7; |
35 | 35 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -39,7 +39,7 @@ |
39 | 39 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
40 | 40 | oparg = 1 << oparg; |
41 | 41 | |
42 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
42 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
43 | 43 | return -EFAULT; |
44 | 44 | |
45 | 45 | pagefault_disable(); |
46 | 46 | |
47 | 47 | |
... | ... | @@ -81,12 +81,13 @@ |
81 | 81 | } |
82 | 82 | |
83 | 83 | static inline int |
84 | -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
85 | - int oldval, int newval) | |
84 | +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
85 | + u32 oldval, u32 newval) | |
86 | 86 | { |
87 | - int ret = 0, prev, cmp; | |
87 | + int ret = 0, cmp; | |
88 | + u32 prev; | |
88 | 89 | |
89 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
90 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
90 | 91 | return -EFAULT; |
91 | 92 | |
92 | 93 | __asm__ __volatile__ ( |
... | ... | @@ -35,7 +35,7 @@ |
35 | 35 | : "cc", "memory") |
36 | 36 | |
37 | 37 | static inline int |
38 | -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |
38 | +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | |
39 | 39 | { |
40 | 40 | int op = (encoded_op >> 28) & 7; |
41 | 41 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -46,7 +46,7 @@ |
46 | 46 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
47 | 47 | oparg = 1 << oparg; |
48 | 48 | |
49 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
49 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
50 | 50 | return -EFAULT; |
51 | 51 | |
52 | 52 | pagefault_disable(); /* implies preempt_disable() */ |
53 | 53 | |
54 | 54 | |
... | ... | @@ -88,12 +88,13 @@ |
88 | 88 | } |
89 | 89 | |
90 | 90 | static inline int |
91 | -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
92 | - int oldval, int newval) | |
91 | +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
92 | + u32 oldval, u32 newval) | |
93 | 93 | { |
94 | - int ret = 0, val; | |
94 | + int ret = 0; | |
95 | + u32 val; | |
95 | 96 | |
96 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
97 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
97 | 98 | return -EFAULT; |
98 | 99 | |
99 | 100 | /* Note that preemption is disabled by futex_atomic_cmpxchg_inatomic |
... | ... | @@ -7,11 +7,11 @@ |
7 | 7 | #include <asm/errno.h> |
8 | 8 | #include <asm/uaccess.h> |
9 | 9 | |
10 | -extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); | |
10 | +extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr); | |
11 | 11 | |
12 | 12 | static inline int |
13 | -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
14 | - int oldval, int newval) | |
13 | +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
14 | + u32 oldval, u32 newval) | |
15 | 15 | { |
16 | 16 | return -ENOSYS; |
17 | 17 | } |
... | ... | @@ -18,7 +18,7 @@ |
18 | 18 | * the various futex operations; MMU fault checking is ignored under no-MMU |
19 | 19 | * conditions |
20 | 20 | */ |
21 | -static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval) | |
21 | +static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval) | |
22 | 22 | { |
23 | 23 | int oldval, ret; |
24 | 24 | |
... | ... | @@ -50,7 +50,7 @@ |
50 | 50 | return ret; |
51 | 51 | } |
52 | 52 | |
53 | -static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval) | |
53 | +static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval) | |
54 | 54 | { |
55 | 55 | int oldval, ret; |
56 | 56 | |
... | ... | @@ -83,7 +83,7 @@ |
83 | 83 | return ret; |
84 | 84 | } |
85 | 85 | |
86 | -static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval) | |
86 | +static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval) | |
87 | 87 | { |
88 | 88 | int oldval, ret; |
89 | 89 | |
... | ... | @@ -116,7 +116,7 @@ |
116 | 116 | return ret; |
117 | 117 | } |
118 | 118 | |
119 | -static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval) | |
119 | +static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval) | |
120 | 120 | { |
121 | 121 | int oldval, ret; |
122 | 122 | |
... | ... | @@ -149,7 +149,7 @@ |
149 | 149 | return ret; |
150 | 150 | } |
151 | 151 | |
152 | -static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval) | |
152 | +static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval) | |
153 | 153 | { |
154 | 154 | int oldval, ret; |
155 | 155 | |
... | ... | @@ -186,7 +186,7 @@ |
186 | 186 | /* |
187 | 187 | * do the futex operations |
188 | 188 | */ |
189 | -int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |
189 | +int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) | |
190 | 190 | { |
191 | 191 | int op = (encoded_op >> 28) & 7; |
192 | 192 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -197,7 +197,7 @@ |
197 | 197 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
198 | 198 | oparg = 1 << oparg; |
199 | 199 | |
200 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
200 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
201 | 201 | return -EFAULT; |
202 | 202 | |
203 | 203 | pagefault_disable(); |
... | ... | @@ -46,7 +46,7 @@ |
46 | 46 | } while (0) |
47 | 47 | |
48 | 48 | static inline int |
49 | -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |
49 | +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | |
50 | 50 | { |
51 | 51 | int op = (encoded_op >> 28) & 7; |
52 | 52 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -56,7 +56,7 @@ |
56 | 56 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
57 | 57 | oparg = 1 << oparg; |
58 | 58 | |
59 | - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | |
59 | + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) | |
60 | 60 | return -EFAULT; |
61 | 61 | |
62 | 62 | pagefault_disable(); |
63 | 63 | |
... | ... | @@ -100,10 +100,10 @@ |
100 | 100 | } |
101 | 101 | |
102 | 102 | static inline int |
103 | -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
104 | - int oldval, int newval) | |
103 | +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
104 | + u32 oldval, u32 newval) | |
105 | 105 | { |
106 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
106 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
107 | 107 | return -EFAULT; |
108 | 108 | |
109 | 109 | { |
... | ... | @@ -29,7 +29,7 @@ |
29 | 29 | }) |
30 | 30 | |
31 | 31 | static inline int |
32 | -futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |
32 | +futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) | |
33 | 33 | { |
34 | 34 | int op = (encoded_op >> 28) & 7; |
35 | 35 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -39,7 +39,7 @@ |
39 | 39 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
40 | 40 | oparg = 1 << oparg; |
41 | 41 | |
42 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
42 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
43 | 43 | return -EFAULT; |
44 | 44 | |
45 | 45 | pagefault_disable(); |
46 | 46 | |
47 | 47 | |
... | ... | @@ -94,12 +94,13 @@ |
94 | 94 | } |
95 | 95 | |
96 | 96 | static inline int |
97 | -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
98 | - int oldval, int newval) | |
97 | +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
98 | + u32 oldval, u32 newval) | |
99 | 99 | { |
100 | - int ret = 0, prev, cmp; | |
100 | + int ret = 0, cmp; | |
101 | + u32 prev; | |
101 | 102 | |
102 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
103 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
103 | 104 | return -EFAULT; |
104 | 105 | |
105 | 106 | __asm__ __volatile__ ("1: lwx %1, %3, r0; \ |
... | ... | @@ -75,7 +75,7 @@ |
75 | 75 | } |
76 | 76 | |
77 | 77 | static inline int |
78 | -futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |
78 | +futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) | |
79 | 79 | { |
80 | 80 | int op = (encoded_op >> 28) & 7; |
81 | 81 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -85,7 +85,7 @@ |
85 | 85 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
86 | 86 | oparg = 1 << oparg; |
87 | 87 | |
88 | - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | |
88 | + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) | |
89 | 89 | return -EFAULT; |
90 | 90 | |
91 | 91 | pagefault_disable(); |
92 | 92 | |
93 | 93 | |
... | ... | @@ -132,12 +132,13 @@ |
132 | 132 | } |
133 | 133 | |
134 | 134 | static inline int |
135 | -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
136 | - int oldval, int newval) | |
135 | +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
136 | + u32 oldval, u32 newval) | |
137 | 137 | { |
138 | - int ret = 0, val; | |
138 | + int ret = 0; | |
139 | + u32 val; | |
139 | 140 | |
140 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
141 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
141 | 142 | return -EFAULT; |
142 | 143 | |
143 | 144 | if (cpu_has_llsc && R10000_LLSC_WAR) { |
... | ... | @@ -8,7 +8,7 @@ |
8 | 8 | #include <asm/errno.h> |
9 | 9 | |
10 | 10 | static inline int |
11 | -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |
11 | +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | |
12 | 12 | { |
13 | 13 | int op = (encoded_op >> 28) & 7; |
14 | 14 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -18,7 +18,7 @@ |
18 | 18 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
19 | 19 | oparg = 1 << oparg; |
20 | 20 | |
21 | - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | |
21 | + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) | |
22 | 22 | return -EFAULT; |
23 | 23 | |
24 | 24 | pagefault_disable(); |
25 | 25 | |
... | ... | @@ -51,10 +51,10 @@ |
51 | 51 | |
52 | 52 | /* Non-atomic version */ |
53 | 53 | static inline int |
54 | -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
55 | - int oldval, int newval) | |
54 | +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
55 | + u32 oldval, u32 newval) | |
56 | 56 | { |
57 | - int val; | |
57 | + u32 val; | |
58 | 58 | |
59 | 59 | /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is |
60 | 60 | * our gateway page, and causes no end of trouble... |
... | ... | @@ -62,7 +62,7 @@ |
62 | 62 | if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) |
63 | 63 | return -EFAULT; |
64 | 64 | |
65 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
65 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
66 | 66 | return -EFAULT; |
67 | 67 | |
68 | 68 | if (get_user(val, uaddr)) |
... | ... | @@ -30,7 +30,7 @@ |
30 | 30 | : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ |
31 | 31 | : "cr0", "memory") |
32 | 32 | |
33 | -static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |
33 | +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | |
34 | 34 | { |
35 | 35 | int op = (encoded_op >> 28) & 7; |
36 | 36 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -40,7 +40,7 @@ |
40 | 40 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
41 | 41 | oparg = 1 << oparg; |
42 | 42 | |
43 | - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | |
43 | + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) | |
44 | 44 | return -EFAULT; |
45 | 45 | |
46 | 46 | pagefault_disable(); |
47 | 47 | |
48 | 48 | |
... | ... | @@ -82,12 +82,13 @@ |
82 | 82 | } |
83 | 83 | |
84 | 84 | static inline int |
85 | -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
86 | - int oldval, int newval) | |
85 | +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
86 | + u32 oldval, u32 newval) | |
87 | 87 | { |
88 | - int ret = 0, prev; | |
88 | + int ret = 0; | |
89 | + u32 prev; | |
89 | 90 | |
90 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
91 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
91 | 92 | return -EFAULT; |
92 | 93 | |
93 | 94 | __asm__ __volatile__ ( |
... | ... | @@ -7,7 +7,7 @@ |
7 | 7 | #include <linux/uaccess.h> |
8 | 8 | #include <asm/errno.h> |
9 | 9 | |
10 | -static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |
10 | +static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | |
11 | 11 | { |
12 | 12 | int op = (encoded_op >> 28) & 7; |
13 | 13 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -18,7 +18,7 @@ |
18 | 18 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
19 | 19 | oparg = 1 << oparg; |
20 | 20 | |
21 | - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | |
21 | + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) | |
22 | 22 | return -EFAULT; |
23 | 23 | |
24 | 24 | pagefault_disable(); |
25 | 25 | |
... | ... | @@ -39,10 +39,10 @@ |
39 | 39 | return ret; |
40 | 40 | } |
41 | 41 | |
42 | -static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
43 | - int oldval, int newval) | |
42 | +static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
43 | + u32 oldval, u32 newval) | |
44 | 44 | { |
45 | - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | |
45 | + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) | |
46 | 46 | return -EFAULT; |
47 | 47 | |
48 | 48 | return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval); |
... | ... | @@ -83,8 +83,8 @@ |
83 | 83 | size_t (*clear_user)(size_t, void __user *); |
84 | 84 | size_t (*strnlen_user)(size_t, const char __user *); |
85 | 85 | size_t (*strncpy_from_user)(size_t, const char __user *, char *); |
86 | - int (*futex_atomic_op)(int op, int __user *, int oparg, int *old); | |
87 | - int (*futex_atomic_cmpxchg)(int *, int __user *, int old, int new); | |
86 | + int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old); | |
87 | + int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new); | |
88 | 88 | }; |
89 | 89 | |
90 | 90 | extern struct uaccess_ops uaccess; |
... | ... | @@ -12,13 +12,13 @@ |
12 | 12 | extern size_t copy_to_user_std(size_t, void __user *, const void *); |
13 | 13 | extern size_t strnlen_user_std(size_t, const char __user *); |
14 | 14 | extern size_t strncpy_from_user_std(size_t, const char __user *, char *); |
15 | -extern int futex_atomic_cmpxchg_std(int *, int __user *, int, int); | |
16 | -extern int futex_atomic_op_std(int, int __user *, int, int *); | |
15 | +extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32); | |
16 | +extern int futex_atomic_op_std(int, u32 __user *, int, int *); | |
17 | 17 | |
18 | 18 | extern size_t copy_from_user_pt(size_t, const void __user *, void *); |
19 | 19 | extern size_t copy_to_user_pt(size_t, void __user *, const void *); |
20 | -extern int futex_atomic_op_pt(int, int __user *, int, int *); | |
21 | -extern int futex_atomic_cmpxchg_pt(int *, int __user *, int, int); | |
20 | +extern int futex_atomic_op_pt(int, u32 __user *, int, int *); | |
21 | +extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32); | |
22 | 22 | |
23 | 23 | #endif /* __ARCH_S390_LIB_UACCESS_H */ |
... | ... | @@ -302,7 +302,7 @@ |
302 | 302 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ |
303 | 303 | "m" (*uaddr) : "cc" ); |
304 | 304 | |
305 | -static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | |
305 | +static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) | |
306 | 306 | { |
307 | 307 | int oldval = 0, newval, ret; |
308 | 308 | |
... | ... | @@ -335,7 +335,7 @@ |
335 | 335 | return ret; |
336 | 336 | } |
337 | 337 | |
338 | -int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) | |
338 | +int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) | |
339 | 339 | { |
340 | 340 | int ret; |
341 | 341 | |
... | ... | @@ -354,8 +354,8 @@ |
354 | 354 | return ret; |
355 | 355 | } |
356 | 356 | |
357 | -static int __futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr, | |
358 | - int oldval, int newval) | |
357 | +static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, | |
358 | + u32 oldval, u32 newval) | |
359 | 359 | { |
360 | 360 | int ret; |
361 | 361 | |
... | ... | @@ -370,8 +370,8 @@ |
370 | 370 | return ret; |
371 | 371 | } |
372 | 372 | |
373 | -int futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr, | |
374 | - int oldval, int newval) | |
373 | +int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, | |
374 | + u32 oldval, u32 newval) | |
375 | 375 | { |
376 | 376 | int ret; |
377 | 377 |
... | ... | @@ -255,7 +255,7 @@ |
255 | 255 | : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ |
256 | 256 | "m" (*uaddr) : "cc"); |
257 | 257 | |
258 | -int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) | |
258 | +int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old) | |
259 | 259 | { |
260 | 260 | int oldval = 0, newval, ret; |
261 | 261 | |
... | ... | @@ -287,8 +287,8 @@ |
287 | 287 | return ret; |
288 | 288 | } |
289 | 289 | |
290 | -int futex_atomic_cmpxchg_std(int *uval, int __user *uaddr, | |
291 | - int oldval, int newval) | |
290 | +int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr, | |
291 | + u32 oldval, u32 newval) | |
292 | 292 | { |
293 | 293 | int ret; |
294 | 294 |
... | ... | @@ -3,7 +3,7 @@ |
3 | 3 | |
4 | 4 | #include <asm/system.h> |
5 | 5 | |
6 | -static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, | |
6 | +static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, | |
7 | 7 | int *oldval) |
8 | 8 | { |
9 | 9 | unsigned long flags; |
... | ... | @@ -20,7 +20,7 @@ |
20 | 20 | return ret; |
21 | 21 | } |
22 | 22 | |
23 | -static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, | |
23 | +static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, | |
24 | 24 | int *oldval) |
25 | 25 | { |
26 | 26 | unsigned long flags; |
... | ... | @@ -37,7 +37,7 @@ |
37 | 37 | return ret; |
38 | 38 | } |
39 | 39 | |
40 | -static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, | |
40 | +static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, | |
41 | 41 | int *oldval) |
42 | 42 | { |
43 | 43 | unsigned long flags; |
... | ... | @@ -54,7 +54,7 @@ |
54 | 54 | return ret; |
55 | 55 | } |
56 | 56 | |
57 | -static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, | |
57 | +static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, | |
58 | 58 | int *oldval) |
59 | 59 | { |
60 | 60 | unsigned long flags; |
... | ... | @@ -71,7 +71,7 @@ |
71 | 71 | return ret; |
72 | 72 | } |
73 | 73 | |
74 | -static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, | |
74 | +static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, | |
75 | 75 | int *oldval) |
76 | 76 | { |
77 | 77 | unsigned long flags; |
78 | 78 | |
... | ... | @@ -88,12 +88,13 @@ |
88 | 88 | return ret; |
89 | 89 | } |
90 | 90 | |
91 | -static inline int atomic_futex_op_cmpxchg_inatomic(int *uval, | |
92 | - int __user *uaddr, | |
93 | - int oldval, int newval) | |
91 | +static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval, | |
92 | + u32 __user *uaddr, | |
93 | + u32 oldval, u32 newval) | |
94 | 94 | { |
95 | 95 | unsigned long flags; |
96 | - int ret, prev = 0; | |
96 | + int ret; | |
97 | + u32 prev = 0; | |
97 | 98 | |
98 | 99 | local_irq_save(flags); |
99 | 100 |
... | ... | @@ -10,7 +10,7 @@ |
10 | 10 | /* XXX: UP variants, fix for SH-4A and SMP.. */ |
11 | 11 | #include <asm/futex-irq.h> |
12 | 12 | |
13 | -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |
13 | +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) | |
14 | 14 | { |
15 | 15 | int op = (encoded_op >> 28) & 7; |
16 | 16 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -21,7 +21,7 @@ |
21 | 21 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
22 | 22 | oparg = 1 << oparg; |
23 | 23 | |
24 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
24 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
25 | 25 | return -EFAULT; |
26 | 26 | |
27 | 27 | pagefault_disable(); |
28 | 28 | |
... | ... | @@ -65,10 +65,10 @@ |
65 | 65 | } |
66 | 66 | |
67 | 67 | static inline int |
68 | -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
69 | - int oldval, int newval) | |
68 | +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
69 | + u32 oldval, u32 newval) | |
70 | 70 | { |
71 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
71 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
72 | 72 | return -EFAULT; |
73 | 73 | |
74 | 74 | return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval); |
... | ... | @@ -30,7 +30,7 @@ |
30 | 30 | : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ |
31 | 31 | : "memory") |
32 | 32 | |
33 | -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |
33 | +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) | |
34 | 34 | { |
35 | 35 | int op = (encoded_op >> 28) & 7; |
36 | 36 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -38,7 +38,7 @@ |
38 | 38 | int cmparg = (encoded_op << 20) >> 20; |
39 | 39 | int oldval = 0, ret, tem; |
40 | 40 | |
41 | - if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))) | |
41 | + if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) | |
42 | 42 | return -EFAULT; |
43 | 43 | if (unlikely((((unsigned long) uaddr) & 0x3UL))) |
44 | 44 | return -EINVAL; |
... | ... | @@ -85,8 +85,8 @@ |
85 | 85 | } |
86 | 86 | |
87 | 87 | static inline int |
88 | -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
89 | - int oldval, int newval) | |
88 | +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
89 | + u32 oldval, u32 newval) | |
90 | 90 | { |
91 | 91 | int ret = 0; |
92 | 92 |
... | ... | @@ -29,16 +29,16 @@ |
29 | 29 | #include <linux/uaccess.h> |
30 | 30 | #include <linux/errno.h> |
31 | 31 | |
32 | -extern struct __get_user futex_set(int __user *v, int i); | |
33 | -extern struct __get_user futex_add(int __user *v, int n); | |
34 | -extern struct __get_user futex_or(int __user *v, int n); | |
35 | -extern struct __get_user futex_andn(int __user *v, int n); | |
36 | -extern struct __get_user futex_cmpxchg(int __user *v, int o, int n); | |
32 | +extern struct __get_user futex_set(u32 __user *v, int i); | |
33 | +extern struct __get_user futex_add(u32 __user *v, int n); | |
34 | +extern struct __get_user futex_or(u32 __user *v, int n); | |
35 | +extern struct __get_user futex_andn(u32 __user *v, int n); | |
36 | +extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n); | |
37 | 37 | |
38 | 38 | #ifndef __tilegx__ |
39 | -extern struct __get_user futex_xor(int __user *v, int n); | |
39 | +extern struct __get_user futex_xor(u32 __user *v, int n); | |
40 | 40 | #else |
41 | -static inline struct __get_user futex_xor(int __user *uaddr, int n) | |
41 | +static inline struct __get_user futex_xor(u32 __user *uaddr, int n) | |
42 | 42 | { |
43 | 43 | struct __get_user asm_ret = __get_user_4(uaddr); |
44 | 44 | if (!asm_ret.err) { |
... | ... | @@ -53,7 +53,7 @@ |
53 | 53 | } |
54 | 54 | #endif |
55 | 55 | |
56 | -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |
56 | +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) | |
57 | 57 | { |
58 | 58 | int op = (encoded_op >> 28) & 7; |
59 | 59 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -65,7 +65,7 @@ |
65 | 65 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
66 | 66 | oparg = 1 << oparg; |
67 | 67 | |
68 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
68 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
69 | 69 | return -EFAULT; |
70 | 70 | |
71 | 71 | pagefault_disable(); |
72 | 72 | |
... | ... | @@ -119,12 +119,12 @@ |
119 | 119 | return ret; |
120 | 120 | } |
121 | 121 | |
122 | -static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
123 | - int oldval, int newval) | |
122 | +static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
123 | + u32 oldval, u32 newval) | |
124 | 124 | { |
125 | 125 | struct __get_user asm_ret; |
126 | 126 | |
127 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
127 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
128 | 128 | return -EFAULT; |
129 | 129 | |
130 | 130 | asm_ret = futex_cmpxchg(uaddr, oldval, newval); |
... | ... | @@ -37,7 +37,7 @@ |
37 | 37 | "+m" (*uaddr), "=&r" (tem) \ |
38 | 38 | : "r" (oparg), "i" (-EFAULT), "1" (0)) |
39 | 39 | |
40 | -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |
40 | +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) | |
41 | 41 | { |
42 | 42 | int op = (encoded_op >> 28) & 7; |
43 | 43 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -48,7 +48,7 @@ |
48 | 48 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
49 | 49 | oparg = 1 << oparg; |
50 | 50 | |
51 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
51 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
52 | 52 | return -EFAULT; |
53 | 53 | |
54 | 54 | #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) |
... | ... | @@ -109,8 +109,8 @@ |
109 | 109 | return ret; |
110 | 110 | } |
111 | 111 | |
112 | -static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
113 | - int oldval, int newval) | |
112 | +static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
113 | + u32 oldval, u32 newval) | |
114 | 114 | { |
115 | 115 | int ret = 0; |
116 | 116 | |
... | ... | @@ -120,7 +120,7 @@ |
120 | 120 | return -ENOSYS; |
121 | 121 | #endif |
122 | 122 | |
123 | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | |
123 | + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | |
124 | 124 | return -EFAULT; |
125 | 125 | |
126 | 126 | asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" |
... | ... | @@ -6,7 +6,7 @@ |
6 | 6 | #include <asm/errno.h> |
7 | 7 | |
8 | 8 | static inline int |
9 | -futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |
9 | +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | |
10 | 10 | { |
11 | 11 | int op = (encoded_op >> 28) & 7; |
12 | 12 | int cmp = (encoded_op >> 24) & 15; |
... | ... | @@ -16,7 +16,7 @@ |
16 | 16 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
17 | 17 | oparg = 1 << oparg; |
18 | 18 | |
19 | - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | |
19 | + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) | |
20 | 20 | return -EFAULT; |
21 | 21 | |
22 | 22 | pagefault_disable(); |
... | ... | @@ -48,8 +48,8 @@ |
48 | 48 | } |
49 | 49 | |
50 | 50 | static inline int |
51 | -futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr, | |
52 | - int oldval, int newval) | |
51 | +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |
52 | + u32 oldval, u32 newval) | |
53 | 53 | { |
54 | 54 | return -ENOSYS; |
55 | 55 | } |
-
mentioned in commit f148e8
-
mentioned in commit 62aca4