Commit 77c728c2240a1eb45f7d355f5d87ecc319cd55ce

Authored by Ralf Baechle
1 parent 5eaf7a21be

Gcc 4.0 fixes.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Showing 3 changed files with 11 additions and 9 deletions
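[Editorial note] The one-line commit message is terse, so a word of context: GCC 4.0 removed the old GNU C "cast as lvalue" extension, so a call like __get_user((u32)(u64)new_ka.sa.sa_handler, &act->sa_handler), which expands to an assignment whose target is a cast expression, stopped compiling. The hunks below switch to reading the 32-bit handler into a local s32 and converting it afterwards. A minimal standalone sketch of the before/after pattern (the names and values here are illustrative, not from the kernel tree):

/* sketch.c: illustrative only; s32/s64 stand in for the kernel's types */
#include <stdint.h>
#include <stdio.h>

typedef int32_t s32;
typedef int64_t s64;

int main(void)
{
        s32 handler = -4;       /* pretend this was read from a 32-bit user field */

        /*
         * Pre-4.0 GNU C accepted a cast as the target of an assignment:
         *
         *      (s64)handler = 0;       // gcc 3.x: OK; gcc >= 4.0: "lvalue required"
         *
         * which is why __get_user() could no longer store through
         * (u32)(u64)new_ka.sa.sa_handler.
         */

        /*
         * The commit's replacement: a plain variable as the store target,
         * then an explicit widening conversion. The (void *)(s64) cast
         * sign-extends, matching how a 64-bit MIPS kernel represents
         * 32-bit user addresses.
         */
        void *p = (void *)(s64)handler;

        printf("%p\n", p);      /* prints 0xfffffffffffffffc on an LP64 host */
        return 0;
}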

arch/mips/kernel/signal32.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/compiler.h>

#include <asm/asm.h>
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/sim.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/system.h>
#include <asm/fpu.h>

#define SI_PAD_SIZE32   ((SI_MAX_SIZE/sizeof(int)) - 3)

typedef struct compat_siginfo {
        int si_signo;
        int si_code;
        int si_errno;

        union {
                int _pad[SI_PAD_SIZE32];

                /* kill() */
                struct {
                        compat_pid_t _pid;      /* sender's pid */
                        compat_uid_t _uid;      /* sender's uid */
                } _kill;

                /* SIGCHLD */
                struct {
                        compat_pid_t _pid;      /* which child */
                        compat_uid_t _uid;      /* sender's uid */
                        int _status;            /* exit code */
                        compat_clock_t _utime;
                        compat_clock_t _stime;
                } _sigchld;

                /* IRIX SIGCHLD */
                struct {
                        compat_pid_t _pid;      /* which child */
                        compat_clock_t _utime;
                        int _status;            /* exit code */
                        compat_clock_t _stime;
                } _irix_sigchld;

                /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
                struct {
                        s32 _addr;      /* faulting insn/memory ref. */
                } _sigfault;

                /* SIGPOLL, SIGXFSZ (To do ...) */
                struct {
                        int _band;      /* POLL_IN, POLL_OUT, POLL_MSG */
                        int _fd;
                } _sigpoll;

                /* POSIX.1b timers */
                struct {
                        timer_t _tid;           /* timer id */
                        int _overrun;           /* overrun count */
                        sigval_t32 _sigval;     /* same as below */
                        int _sys_private;       /* not to be passed to user */
                } _timer;

                /* POSIX.1b signals */
                struct {
                        compat_pid_t _pid;      /* sender's pid */
                        compat_uid_t _uid;      /* sender's uid */
                        compat_sigval_t _sigval;
                } _rt;

        } _sifields;
} compat_siginfo_t;

/*
 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
 */
#define __NR_O32_sigreturn              4119
#define __NR_O32_rt_sigreturn           4193
#define __NR_O32_restart_syscall        4253

#define DEBUG_SIG 0

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);

/* 32-bit compatibility types */

#define _NSIG_BPW32     32
#define _NSIG_WORDS32   (_NSIG / _NSIG_BPW32)

typedef struct {
        unsigned int sig[_NSIG_WORDS32];
} sigset_t32;

typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);

struct sigaction32 {
        unsigned int            sa_flags;
        __sighandler32_t        sa_handler;
        compat_sigset_t         sa_mask;
};

/* IRIX compatible stack_t */
typedef struct sigaltstack32 {
        s32 ss_sp;
        compat_size_t ss_size;
        int ss_flags;
} stack32_t;

struct ucontext32 {
        u32 uc_flags;
        s32 uc_link;
        stack32_t uc_stack;
        struct sigcontext32 uc_mcontext;
        sigset_t32 uc_sigmask;  /* mask last for extensibility */
};

extern void __put_sigset_unknown_nsig(void);
extern void __get_sigset_unknown_nsig(void);

static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t *ubuf)
{
        int err = 0;

        if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
                return -EFAULT;

        switch (_NSIG_WORDS) {
        default:
                __put_sigset_unknown_nsig();
        case 2:
                err |= __put_user (kbuf->sig[1] >> 32, &ubuf->sig[3]);
                err |= __put_user (kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
        case 1:
                err |= __put_user (kbuf->sig[0] >> 32, &ubuf->sig[1]);
                err |= __put_user (kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
        }

        return err;
}

static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t *ubuf)
{
        int err = 0;
        unsigned long sig[4];

        if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf)))
                return -EFAULT;

        switch (_NSIG_WORDS) {
        default:
                __get_sigset_unknown_nsig();
        case 2:
                err |= __get_user (sig[3], &ubuf->sig[3]);
                err |= __get_user (sig[2], &ubuf->sig[2]);
                kbuf->sig[1] = sig[2] | (sig[3] << 32);
        case 1:
                err |= __get_user (sig[1], &ubuf->sig[1]);
                err |= __get_user (sig[0], &ubuf->sig[0]);
                kbuf->sig[0] = sig[0] | (sig[1] << 32);
        }

        return err;
}

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */

save_static_function(sys32_sigsuspend);
__attribute_used__ noinline static int
_sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
        compat_sigset_t *uset;
        sigset_t newset, saveset;

        uset = (compat_sigset_t *) regs.regs[4];
        if (get_sigset(&newset, uset))
                return -EFAULT;
        sigdelsetmask(&newset, ~_BLOCKABLE);

        spin_lock_irq(&current->sighand->siglock);
        saveset = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        regs.regs[2] = EINTR;
        regs.regs[7] = 1;
        while (1) {
                current->state = TASK_INTERRUPTIBLE;
                schedule();
                if (do_signal32(&saveset, &regs))
                        return -EINTR;
        }
}

save_static_function(sys32_rt_sigsuspend);
__attribute_used__ noinline static int
_sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
        compat_sigset_t *uset;
        sigset_t newset, saveset;
        size_t sigsetsize;

        /* XXX Don't preclude handling different sized sigset_t's. */
        sigsetsize = regs.regs[5];
        if (sigsetsize != sizeof(compat_sigset_t))
                return -EINVAL;

        uset = (compat_sigset_t *) regs.regs[4];
        if (get_sigset(&newset, uset))
                return -EFAULT;
        sigdelsetmask(&newset, ~_BLOCKABLE);

        spin_lock_irq(&current->sighand->siglock);
        saveset = current->blocked;
        current->blocked = newset;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        regs.regs[2] = EINTR;
        regs.regs[7] = 1;
        while (1) {
                current->state = TASK_INTERRUPTIBLE;
                schedule();
                if (do_signal32(&saveset, &regs))
                        return -EINTR;
        }
}

asmlinkage int sys32_sigaction(int sig, const struct sigaction32 *act,
                               struct sigaction32 *oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;
        int err = 0;

        if (act) {
                old_sigset_t mask;
+               s32 handler;

                if (!access_ok(VERIFY_READ, act, sizeof(*act)))
                        return -EFAULT;
-               err |= __get_user((u32)(u64)new_ka.sa.sa_handler,
-                                 &act->sa_handler);
+               err |= __get_user(handler, &act->sa_handler);
+               new_ka.sa.sa_handler = (void*)(s64)handler;
                err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
                err |= __get_user(mask, &act->sa_mask.sig[0]);
                if (err)
                        return -EFAULT;

                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
                        return -EFAULT;
                err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
                err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
                                  &oact->sa_handler);
                err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
                err |= __put_user(0, &oact->sa_mask.sig[1]);
                err |= __put_user(0, &oact->sa_mask.sig[2]);
                err |= __put_user(0, &oact->sa_mask.sig[3]);
                if (err)
                        return -EFAULT;
        }

        return ret;
}

asmlinkage int sys32_sigaltstack(nabi_no_regargs struct pt_regs regs)
{
        const stack32_t *uss = (const stack32_t *) regs.regs[4];
        stack32_t *uoss = (stack32_t *) regs.regs[5];
        unsigned long usp = regs.regs[29];
        stack_t kss, koss;
        int ret, err = 0;
        mm_segment_t old_fs = get_fs();
        s32 sp;

        if (uss) {
                if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
                        return -EFAULT;
                err |= __get_user(sp, &uss->ss_sp);
                kss.ss_sp = (void *) (long) sp;
                err |= __get_user(kss.ss_size, &uss->ss_size);
                err |= __get_user(kss.ss_flags, &uss->ss_flags);
                if (err)
                        return -EFAULT;
        }

        set_fs (KERNEL_DS);
        ret = do_sigaltstack(uss ? &kss : NULL , uoss ? &koss : NULL, usp);
        set_fs (old_fs);

        if (!ret && uoss) {
                if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
                        return -EFAULT;
                sp = (int) (long) koss.ss_sp;
                err |= __put_user(sp, &uoss->ss_sp);
                err |= __put_user(koss.ss_size, &uoss->ss_size);
                err |= __put_user(koss.ss_flags, &uoss->ss_flags);
                if (err)
                        return -EFAULT;
        }
        return ret;
}

static int restore_sigcontext32(struct pt_regs *regs, struct sigcontext32 *sc)
{
        int err = 0;
        __u32 used_math;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        err |= __get_user(regs->cp0_epc, &sc->sc_pc);
        err |= __get_user(regs->hi, &sc->sc_mdhi);
        err |= __get_user(regs->lo, &sc->sc_mdlo);

#define restore_gp_reg(i) do {                                          \
        err |= __get_user(regs->regs[i], &sc->sc_regs[i]);              \
} while(0)
        restore_gp_reg( 1); restore_gp_reg( 2); restore_gp_reg( 3);
        restore_gp_reg( 4); restore_gp_reg( 5); restore_gp_reg( 6);
        restore_gp_reg( 7); restore_gp_reg( 8); restore_gp_reg( 9);
        restore_gp_reg(10); restore_gp_reg(11); restore_gp_reg(12);
        restore_gp_reg(13); restore_gp_reg(14); restore_gp_reg(15);
        restore_gp_reg(16); restore_gp_reg(17); restore_gp_reg(18);
        restore_gp_reg(19); restore_gp_reg(20); restore_gp_reg(21);
        restore_gp_reg(22); restore_gp_reg(23); restore_gp_reg(24);
        restore_gp_reg(25); restore_gp_reg(26); restore_gp_reg(27);
        restore_gp_reg(28); restore_gp_reg(29); restore_gp_reg(30);
        restore_gp_reg(31);
#undef restore_gp_reg

        err |= __get_user(used_math, &sc->sc_used_math);
        conditional_used_math(used_math);

        preempt_disable();

        if (used_math()) {
                /* restore fpu context if we have used it before */
                own_fpu();
                err |= restore_fp_context32(sc);
        } else {
                /* signal handler may have used FPU. Give it up. */
                lose_fpu();
        }

        preempt_enable();

        return err;
}

struct sigframe {
        u32 sf_ass[4];          /* argument save space for o32 */
        u32 sf_code[2];         /* signal trampoline */
        struct sigcontext32 sf_sc;
        sigset_t sf_mask;
};

struct rt_sigframe32 {
        u32 rs_ass[4];          /* argument save space for o32 */
        u32 rs_code[2];         /* signal trampoline */
        compat_siginfo_t rs_info;
        struct ucontext32 rs_uc;
};

int copy_siginfo_to_user32(compat_siginfo_t *to, siginfo_t *from)
{
        int err;

        if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
                return -EFAULT;

        /* If you change siginfo_t structure, please be sure
           this code is fixed accordingly.
           It should never copy any pad contained in the structure
           to avoid security leaks, but must copy the generic
           3 ints plus the relevant union member.
           This routine must convert siginfo from 64bit to 32bit as well
           at the same time. */
        err = __put_user(from->si_signo, &to->si_signo);
        err |= __put_user(from->si_errno, &to->si_errno);
        err |= __put_user((short)from->si_code, &to->si_code);
        if (from->si_code < 0)
                err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
        else {
                switch (from->si_code >> 16) {
                case __SI_TIMER >> 16:
                        err |= __put_user(from->si_tid, &to->si_tid);
                        err |= __put_user(from->si_overrun, &to->si_overrun);
                        err |= __put_user(from->si_int, &to->si_int);
                        break;
                case __SI_CHLD >> 16:
                        err |= __put_user(from->si_utime, &to->si_utime);
                        err |= __put_user(from->si_stime, &to->si_stime);
                        err |= __put_user(from->si_status, &to->si_status);
                default:
                        err |= __put_user(from->si_pid, &to->si_pid);
                        err |= __put_user(from->si_uid, &to->si_uid);
                        break;
                case __SI_FAULT >> 16:
                        err |= __put_user((long)from->si_addr, &to->si_addr);
                        break;
                case __SI_POLL >> 16:
                        err |= __put_user(from->si_band, &to->si_band);
                        err |= __put_user(from->si_fd, &to->si_fd);
                        break;
                case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
                case __SI_MESGQ >> 16:
                        err |= __put_user(from->si_pid, &to->si_pid);
                        err |= __put_user(from->si_uid, &to->si_uid);
                        err |= __put_user(from->si_int, &to->si_int);
                        break;
                }
        }
        return err;
}

save_static_function(sys32_sigreturn);
__attribute_used__ noinline static void
_sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
{
        struct sigframe *frame;
        sigset_t blocked;

        frame = (struct sigframe *) regs.regs[29];
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
                goto badframe;

        sigdelsetmask(&blocked, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = blocked;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        if (restore_sigcontext32(&regs, &frame->sf_sc))
                goto badframe;

        /*
         * Don't let your children do this ...
         */
        if (current_thread_info()->flags & TIF_SYSCALL_TRACE)
                do_syscall_trace(&regs, 1);
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
                :/* no outputs */
                :"r" (&regs));
        /* Unreached */

badframe:
        force_sig(SIGSEGV, current);
}

save_static_function(sys32_rt_sigreturn);
__attribute_used__ noinline static void
_sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
{
        struct rt_sigframe32 *frame;
        sigset_t set;
        stack_t st;
        s32 sp;

        frame = (struct rt_sigframe32 *) regs.regs[29];
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        if (restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext))
                goto badframe;

        /* The ucontext contains a stack32_t, so we must convert! */
        if (__get_user(sp, &frame->rs_uc.uc_stack.ss_sp))
                goto badframe;
        st.ss_sp = (void *) (long) sp;
        if (__get_user(st.ss_size, &frame->rs_uc.uc_stack.ss_size))
                goto badframe;
        if (__get_user(st.ss_flags, &frame->rs_uc.uc_stack.ss_flags))
                goto badframe;

        /* It is more difficult to avoid calling this function than to
           call it and ignore errors. */
        do_sigaltstack(&st, NULL, regs.regs[29]);

        /*
         * Don't let your children do this ...
         */
        __asm__ __volatile__(
                "move\t$29, %0\n\t"
                "j\tsyscall_exit"
                :/* no outputs */
                :"r" (&regs));
        /* Unreached */

badframe:
        force_sig(SIGSEGV, current);
}

static inline int setup_sigcontext32(struct pt_regs *regs,
                                     struct sigcontext32 *sc)
{
        int err = 0;

        err |= __put_user(regs->cp0_epc, &sc->sc_pc);
        err |= __put_user(regs->cp0_status, &sc->sc_status);

#define save_gp_reg(i) do {                                             \
        err |= __put_user(regs->regs[i], &sc->sc_regs[i]);              \
} while(0)
        __put_user(0, &sc->sc_regs[0]); save_gp_reg(1); save_gp_reg(2);
        save_gp_reg(3); save_gp_reg(4); save_gp_reg(5); save_gp_reg(6);
        save_gp_reg(7); save_gp_reg(8); save_gp_reg(9); save_gp_reg(10);
        save_gp_reg(11); save_gp_reg(12); save_gp_reg(13); save_gp_reg(14);
        save_gp_reg(15); save_gp_reg(16); save_gp_reg(17); save_gp_reg(18);
        save_gp_reg(19); save_gp_reg(20); save_gp_reg(21); save_gp_reg(22);
        save_gp_reg(23); save_gp_reg(24); save_gp_reg(25); save_gp_reg(26);
        save_gp_reg(27); save_gp_reg(28); save_gp_reg(29); save_gp_reg(30);
        save_gp_reg(31);
#undef save_gp_reg

        err |= __put_user(regs->hi, &sc->sc_mdhi);
        err |= __put_user(regs->lo, &sc->sc_mdlo);
        err |= __put_user(regs->cp0_cause, &sc->sc_cause);
        err |= __put_user(regs->cp0_badvaddr, &sc->sc_badvaddr);

        err |= __put_user(!!used_math(), &sc->sc_used_math);

        if (!used_math())
                goto out;

        /*
         * Save FPU state to signal context. Signal handler will "inherit"
         * current FPU state.
         */
        preempt_disable();

        if (!is_fpu_owner()) {
                own_fpu();
                restore_fp(current);
        }
        err |= save_fp_context32(sc);

        preempt_enable();

out:
        return err;
}

/*
 * Determine which stack to use..
 */
static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
                                 size_t frame_size)
{
        unsigned long sp;

        /* Default to using normal stack */
        sp = regs->regs[29];

        /*
         * FPU emulator may have it's own trampoline active just
         * above the user stack, 16-bytes before the next lowest
         * 16 byte boundary. Try to avoid trashing it.
         */
        sp -= 32;

        /* This is the X/Open sanctioned signal stack switching. */
        if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags (sp) == 0))
                sp = current->sas_ss_sp + current->sas_ss_size;

        return (void *)((sp - frame_size) & ALMASK);
}

static inline void setup_frame(struct k_sigaction * ka, struct pt_regs *regs,
                               int signr, sigset_t *set)
{
        struct sigframe *frame;
        int err = 0;

        frame = get_sigframe(ka, regs, sizeof(*frame));
        if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
                goto give_sigsegv;

        /*
         * Set up the return code ...
         *
         *         li      v0, __NR_O32_sigreturn
         *         syscall
         */
        err |= __put_user(0x24020000 + __NR_O32_sigreturn, frame->sf_code + 0);
        err |= __put_user(0x0000000c                     , frame->sf_code + 1);
        flush_cache_sigtramp((unsigned long) frame->sf_code);

        err |= setup_sigcontext32(regs, &frame->sf_sc);
        err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
        if (err)
                goto give_sigsegv;

        /*
         * Arguments to signal handler:
         *
         *   a0 = signal number
         *   a1 = 0 (should be cause)
         *   a2 = pointer to struct sigcontext
         *
         * $25 and c0_epc point to the signal handler, $29 points to the
         * struct sigframe.
         */
        regs->regs[ 4] = signr;
        regs->regs[ 5] = 0;
        regs->regs[ 6] = (unsigned long) &frame->sf_sc;
        regs->regs[29] = (unsigned long) frame;
        regs->regs[31] = (unsigned long) frame->sf_code;
        regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

#if DEBUG_SIG
        printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
               current->comm, current->pid,
               frame, regs->cp0_epc, frame->sf_code);
#endif
        return;

give_sigsegv:
        force_sigsegv(signr, current);
}

static inline void setup_rt_frame(struct k_sigaction * ka,
                                  struct pt_regs *regs, int signr,
                                  sigset_t *set, siginfo_t *info)
{
        struct rt_sigframe32 *frame;
        int err = 0;
        s32 sp;

        frame = get_sigframe(ka, regs, sizeof(*frame));
        if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
                goto give_sigsegv;

        /* Set up to return from userspace. If provided, use a stub already
           in userspace. */
        /*
         * Set up the return code ...
         *
         *         li      v0, __NR_O32_rt_sigreturn
         *         syscall
         */
        err |= __put_user(0x24020000 + __NR_O32_rt_sigreturn, frame->rs_code + 0);
        err |= __put_user(0x0000000c                         , frame->rs_code + 1);
        flush_cache_sigtramp((unsigned long) frame->rs_code);

        /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
        err |= copy_siginfo_to_user32(&frame->rs_info, info);

        /* Create the ucontext. */
        err |= __put_user(0, &frame->rs_uc.uc_flags);
        err |= __put_user(0, &frame->rs_uc.uc_link);
        sp = (int) (long) current->sas_ss_sp;
        err |= __put_user(sp,
                          &frame->rs_uc.uc_stack.ss_sp);
        err |= __put_user(sas_ss_flags(regs->regs[29]),
                          &frame->rs_uc.uc_stack.ss_flags);
        err |= __put_user(current->sas_ss_size,
                          &frame->rs_uc.uc_stack.ss_size);
        err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
        err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));

        if (err)
                goto give_sigsegv;

        /*
         * Arguments to signal handler:
         *
         *   a0 = signal number
         *   a1 = 0 (should be cause)
         *   a2 = pointer to ucontext
         *
         * $25 and c0_epc point to the signal handler, $29 points to
         * the struct rt_sigframe32.
         */
        regs->regs[ 4] = signr;
        regs->regs[ 5] = (unsigned long) &frame->rs_info;
        regs->regs[ 6] = (unsigned long) &frame->rs_uc;
        regs->regs[29] = (unsigned long) frame;
        regs->regs[31] = (unsigned long) frame->rs_code;
        regs->cp0_epc = regs->regs[25] = (unsigned long) ka->sa.sa_handler;

#if DEBUG_SIG
        printk("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%p\n",
               current->comm, current->pid,
               frame, regs->cp0_epc, frame->rs_code);
#endif
        return;

give_sigsegv:
        force_sigsegv(signr, current);
}

static inline void handle_signal(unsigned long sig, siginfo_t *info,
        struct k_sigaction *ka, sigset_t *oldset, struct pt_regs * regs)
{
        switch (regs->regs[0]) {
        case ERESTART_RESTARTBLOCK:
        case ERESTARTNOHAND:
                regs->regs[2] = EINTR;
                break;
        case ERESTARTSYS:
                if(!(ka->sa.sa_flags & SA_RESTART)) {
                        regs->regs[2] = EINTR;
                        break;
                }
        /* fallthrough */
        case ERESTARTNOINTR:            /* Userland will reload $v0. */
                regs->regs[7] = regs->regs[26];
                regs->cp0_epc -= 8;
        }

        regs->regs[0] = 0;              /* Don't deal with this again. */

        if (ka->sa.sa_flags & SA_SIGINFO)
                setup_rt_frame(ka, regs, sig, oldset, info);
        else
                setup_frame(ka, regs, sig, oldset);

        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NODEFER))
                sigaddset(&current->blocked,sig);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
}

int do_signal32(sigset_t *oldset, struct pt_regs *regs)
{
        struct k_sigaction ka;
        siginfo_t info;
        int signr;

        /*
         * We want the common case to go fast, which is why we may in certain
         * cases get here from kernel mode. Just return without doing anything
         * if so.
         */
        if (!user_mode(regs))
                return 1;

        if (try_to_freeze())
                goto no_signal;

        if (!oldset)
                oldset = &current->blocked;

        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                handle_signal(signr, &info, &ka, oldset, regs);
                return 1;
        }

no_signal:
        /*
         * Who's code doesn't conform to the restartable syscall convention
         * dies here!!! The li instruction, a single machine instruction,
         * must directly be followed by the syscall instruction.
         */
        if (regs->regs[0]) {
                if (regs->regs[2] == ERESTARTNOHAND ||
                    regs->regs[2] == ERESTARTSYS ||
                    regs->regs[2] == ERESTARTNOINTR) {
                        regs->regs[7] = regs->regs[26];
                        regs->cp0_epc -= 8;
                }
                if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
                        regs->regs[2] = __NR_O32_restart_syscall;
                        regs->regs[7] = regs->regs[26];
                        regs->cp0_epc -= 4;
                }
        }
        return 0;
}

asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 *act,
                                  struct sigaction32 *oact,
                                  unsigned int sigsetsize)
{
        struct k_sigaction new_sa, old_sa;
        int ret = -EINVAL;

        /* XXX: Don't preclude handling different sized sigset_t's. */
        if (sigsetsize != sizeof(sigset_t))
                goto out;

        if (act) {
+               s32 handler;
                int err = 0;

                if (!access_ok(VERIFY_READ, act, sizeof(*act)))
                        return -EFAULT;
-               err |= __get_user((u32)(u64)new_sa.sa.sa_handler,
-                                 &act->sa_handler);
+               err |= __get_user(handler, &act->sa_handler);
+               new_sa.sa.sa_handler = (void*)(s64)handler;
                err |= __get_user(new_sa.sa.sa_flags, &act->sa_flags);
                err |= get_sigset(&new_sa.sa.sa_mask, &act->sa_mask);
                if (err)
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

        if (!ret && oact) {
                int err = 0;

                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
                        return -EFAULT;

                err |= __put_user((u32)(u64)old_sa.sa.sa_handler,
                                  &oact->sa_handler);
                err |= __put_user(old_sa.sa.sa_flags, &oact->sa_flags);
                err |= put_sigset(&old_sa.sa.sa_mask, &oact->sa_mask);
                if (err)
                        return -EFAULT;
        }
out:
        return ret;
}

asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t *set,
        compat_sigset_t *oset, unsigned int sigsetsize)
{
        sigset_t old_set, new_set;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (set && get_sigset(&new_set, set))
                return -EFAULT;

        set_fs (KERNEL_DS);
        ret = sys_rt_sigprocmask(how, set ? &new_set : NULL,
                                 oset ? &old_set : NULL, sigsetsize);
        set_fs (old_fs);

        if (!ret && oset && put_sigset(&old_set, oset))
                return -EFAULT;

        return ret;
}

asmlinkage int sys32_rt_sigpending(compat_sigset_t *uset,
        unsigned int sigsetsize)
{
        int ret;
        sigset_t set;
        mm_segment_t old_fs = get_fs();

        set_fs (KERNEL_DS);
        ret = sys_rt_sigpending(&set, sigsetsize);
        set_fs (old_fs);

        if (!ret && put_sigset(&set, uset))
                return -EFAULT;

        return ret;
}

asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t *uinfo)
{
        siginfo_t info;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (copy_from_user (&info, uinfo, 3*sizeof(int)) ||
            copy_from_user (info._sifields._pad, uinfo->_sifields._pad, SI_PAD_SIZE))
                return -EFAULT;
        set_fs (KERNEL_DS);
        ret = sys_rt_sigqueueinfo(pid, sig, &info);
        set_fs (old_fs);
        return ret;
}

asmlinkage long
sys32_waitid(int which, compat_pid_t pid,
             compat_siginfo_t __user *uinfo, int options,
             struct compat_rusage __user *uru)
{
        siginfo_t info;
        struct rusage ru;
        long ret;
        mm_segment_t old_fs = get_fs();

        info.si_signo = 0;
        set_fs (KERNEL_DS);
        ret = sys_waitid(which, pid, (siginfo_t __user *) &info, options,
                         uru ? (struct rusage __user *) &ru : NULL);
        set_fs (old_fs);

        if (ret < 0 || info.si_signo == 0)
                return ret;

        if (uru && (ret = put_compat_rusage(&ru, uru)))
                return ret;

        BUG_ON(info.si_code & __SI_MASK);
        info.si_code |= __SI_CHLD;
        return copy_siginfo_to_user32(uinfo, &info);
}

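[Editorial note] The two marked hunks are the whole of the signal32.c change: sys32_sigaction() and sys32_rt_sigaction() used the same cast-as-lvalue idiom in their __get_user() calls and receive the same two-line replacement. The (void*)(s64) double cast deliberately sign-extends the 32-bit o32 handler address, which is how a 64-bit MIPS kernel canonically represents 32-bit user addresses.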
arch/mips/mm/c-sb1.c
1 /* 1 /*
2 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) 2 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
3 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org) 3 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
4 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation 4 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
5 * Copyright (C) 2004 Maciej W. Rozycki 5 * Copyright (C) 2004 Maciej W. Rozycki
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2 9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version. 10 * of the License, or (at your option) any later version.
11 * 11 *
12 * This program is distributed in the hope that it will be useful, 12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */ 20 */
21 #include <linux/config.h> 21 #include <linux/config.h>
22 #include <linux/init.h> 22 #include <linux/init.h>
23 23
24 #include <asm/asm.h> 24 #include <asm/asm.h>
25 #include <asm/bootinfo.h> 25 #include <asm/bootinfo.h>
26 #include <asm/cacheops.h> 26 #include <asm/cacheops.h>
27 #include <asm/cpu.h> 27 #include <asm/cpu.h>
28 #include <asm/mipsregs.h> 28 #include <asm/mipsregs.h>
29 #include <asm/mmu_context.h> 29 #include <asm/mmu_context.h>
30 #include <asm/uaccess.h> 30 #include <asm/uaccess.h>
31 31
32 extern void sb1_dma_init(void); 32 extern void sb1_dma_init(void);
33 33
34 /* These are probed at ld_mmu time */ 34 /* These are probed at ld_mmu time */
35 static unsigned long icache_size; 35 static unsigned long icache_size;
36 static unsigned long dcache_size; 36 static unsigned long dcache_size;
37 37
38 static unsigned short icache_line_size; 38 static unsigned short icache_line_size;
39 static unsigned short dcache_line_size; 39 static unsigned short dcache_line_size;
40 40
41 static unsigned int icache_index_mask; 41 static unsigned int icache_index_mask;
42 static unsigned int dcache_index_mask; 42 static unsigned int dcache_index_mask;
43 43
44 static unsigned short icache_assoc; 44 static unsigned short icache_assoc;
45 static unsigned short dcache_assoc; 45 static unsigned short dcache_assoc;
46 46
47 static unsigned short icache_sets; 47 static unsigned short icache_sets;
48 static unsigned short dcache_sets; 48 static unsigned short dcache_sets;
49 49
50 static unsigned int icache_range_cutoff; 50 static unsigned int icache_range_cutoff;
51 static unsigned int dcache_range_cutoff; 51 static unsigned int dcache_range_cutoff;
52 52
53 /* 53 /*
54 * The dcache is fully coherent to the system, with one 54 * The dcache is fully coherent to the system, with one
55 * big caveat: the instruction stream. In other words, 55 * big caveat: the instruction stream. In other words,
56 * if we miss in the icache, and have dirty data in the 56 * if we miss in the icache, and have dirty data in the
57 * L1 dcache, then we'll go out to memory (or the L2) and 57 * L1 dcache, then we'll go out to memory (or the L2) and
58 * get the not-as-recent data. 58 * get the not-as-recent data.
59 * 59 *
60 * So the only time we have to flush the dcache is when 60 * So the only time we have to flush the dcache is when
61 * we're flushing the icache. Since the L2 is fully 61 * we're flushing the icache. Since the L2 is fully
62 * coherent to everything, including I/O, we never have 62 * coherent to everything, including I/O, we never have
63 * to flush it. 63 * to flush it.
64 */ 64 */
65 65
66 #define cache_set_op(op, addr) \ 66 #define cache_set_op(op, addr) \
67 __asm__ __volatile__( \ 67 __asm__ __volatile__( \
68 " .set noreorder \n" \ 68 " .set noreorder \n" \
69 " .set mips64\n\t \n" \ 69 " .set mips64\n\t \n" \
70 " cache %0, (0<<13)(%1) \n" \ 70 " cache %0, (0<<13)(%1) \n" \
71 " cache %0, (1<<13)(%1) \n" \ 71 " cache %0, (1<<13)(%1) \n" \
72 " cache %0, (2<<13)(%1) \n" \ 72 " cache %0, (2<<13)(%1) \n" \
73 " cache %0, (3<<13)(%1) \n" \ 73 " cache %0, (3<<13)(%1) \n" \
74 " .set mips0 \n" \ 74 " .set mips0 \n" \
75 " .set reorder" \ 75 " .set reorder" \
76 : \ 76 : \
77 : "i" (op), "r" (addr)) 77 : "i" (op), "r" (addr))
78 78
79 #define sync() \ 79 #define sync() \
80 __asm__ __volatile__( \ 80 __asm__ __volatile__( \
81 " .set mips64\n\t \n" \ 81 " .set mips64\n\t \n" \
82 " sync \n" \ 82 " sync \n" \
83 " .set mips0") 83 " .set mips0")
84 84
85 #define mispredict() \ 85 #define mispredict() \
86 __asm__ __volatile__( \ 86 __asm__ __volatile__( \
87 " bnezl $0, 1f \n" /* Force mispredict */ \ 87 " bnezl $0, 1f \n" /* Force mispredict */ \
88 "1: \n"); 88 "1: \n");
89 89
90 /* 90 /*
91 * Writeback and invalidate the entire dcache 91 * Writeback and invalidate the entire dcache
92 */ 92 */
93 static inline void __sb1_writeback_inv_dcache_all(void) 93 static inline void __sb1_writeback_inv_dcache_all(void)
94 { 94 {
95 unsigned long addr = 0; 95 unsigned long addr = 0;
96 96
97 while (addr < dcache_line_size * dcache_sets) { 97 while (addr < dcache_line_size * dcache_sets) {
98 cache_set_op(Index_Writeback_Inv_D, addr); 98 cache_set_op(Index_Writeback_Inv_D, addr);
99 addr += dcache_line_size; 99 addr += dcache_line_size;
100 } 100 }
101 } 101 }
102 102
103 /* 103 /*
104 * Writeback and invalidate a range of the dcache. The addresses are 104 * Writeback and invalidate a range of the dcache. The addresses are
105 * virtual, and since we're using index ops and bit 12 is part of both 105 * virtual, and since we're using index ops and bit 12 is part of both
106 * the virtual frame and physical index, we have to clear both sets 106 * the virtual frame and physical index, we have to clear both sets
107 * (bit 12 set and cleared). 107 * (bit 12 set and cleared).
108 */ 108 */
109 static inline void __sb1_writeback_inv_dcache_range(unsigned long start, 109 static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
110 unsigned long end) 110 unsigned long end)
111 { 111 {
112 unsigned long index; 112 unsigned long index;
113 113
114 start &= ~(dcache_line_size - 1); 114 start &= ~(dcache_line_size - 1);
115 end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1); 115 end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);
116 116
117 while (start != end) { 117 while (start != end) {
118 index = start & dcache_index_mask; 118 index = start & dcache_index_mask;
119 cache_set_op(Index_Writeback_Inv_D, index); 119 cache_set_op(Index_Writeback_Inv_D, index);
120 cache_set_op(Index_Writeback_Inv_D, index ^ (1<<12)); 120 cache_set_op(Index_Writeback_Inv_D, index ^ (1<<12));
121 start += dcache_line_size; 121 start += dcache_line_size;
122 } 122 }
123 sync(); 123 sync();
124 } 124 }
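A quick worked example of the bit-12 handling above, as a compilable sketch; the 256-set, 32-byte-line geometry is hypothetical (SB1-like), and the virtual address is arbitrary:

#include <stdio.h>

int main(void)
{
	/* Hypothetical geometry: 256 sets x 32-byte lines = 8 KB of
	 * indices per way, so bit 12 is part of the index. */
	unsigned long dcache_index_mask = (256 - 1) * 32;	/* 0x1fe0 */
	unsigned long vaddr = 0x12345040;
	unsigned long index = vaddr & dcache_index_mask;

	/* With 4 KB pages, bit 12 of the physical index cannot be
	 * derived from the virtual address, so both settings get
	 * flushed, exactly as the loop above does. */
	printf("flush %#lx and %#lx\n", index, index ^ (1UL << 12));
	return 0;
}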
125 125
126 /* 126 /*
127 * Writeback and invalidate a range of the dcache. With physical 127 * Writeback and invalidate a range of the dcache. With physical
128 * addresses, we don't have to worry about possible bit 12 aliasing. 128 * addresses, we don't have to worry about possible bit 12 aliasing.
129 * XXXKW is it worth turning on KX and using hit ops with xkphys? 129 * XXXKW is it worth turning on KX and using hit ops with xkphys?
130 */ 130 */
131 static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start, 131 static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
132 unsigned long end) 132 unsigned long end)
133 { 133 {
134 start &= ~(dcache_line_size - 1); 134 start &= ~(dcache_line_size - 1);
135 end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1); 135 end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);
136 136
137 while (start != end) { 137 while (start != end) {
138 cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask); 138 cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask);
139 start += dcache_line_size; 139 start += dcache_line_size;
140 } 140 }
141 sync(); 141 sync();
142 } 142 }
143 143
144 144
145 /* 145 /*
146 * Invalidate the entire icache 146 * Invalidate the entire icache
147 */ 147 */
148 static inline void __sb1_flush_icache_all(void) 148 static inline void __sb1_flush_icache_all(void)
149 { 149 {
150 unsigned long addr = 0; 150 unsigned long addr = 0;
151 151
152 while (addr < icache_line_size * icache_sets) { 152 while (addr < icache_line_size * icache_sets) {
153 cache_set_op(Index_Invalidate_I, addr); 153 cache_set_op(Index_Invalidate_I, addr);
154 addr += icache_line_size; 154 addr += icache_line_size;
155 } 155 }
156 } 156 }
157 157
158 /* 158 /*
159 * Flush the icache for a given physical page. Need to writeback the 159 * Flush the icache for a given physical page. Need to writeback the
160 * dcache first, then invalidate the icache. If the page isn't 160 * dcache first, then invalidate the icache. If the page isn't
161 * executable, nothing is required. 161 * executable, nothing is required.
162 */ 162 */
163 static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) 163 static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
164 { 164 {
165 int cpu = smp_processor_id(); 165 int cpu = smp_processor_id();
166 166
167 #ifndef CONFIG_SMP 167 #ifndef CONFIG_SMP
168 if (!(vma->vm_flags & VM_EXEC)) 168 if (!(vma->vm_flags & VM_EXEC))
169 return; 169 return;
170 #endif 170 #endif
171 171
172 __sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE); 172 __sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);
173 173
174 /* 174 /*
175 * Bumping the ASID is probably cheaper than the flush ... 175 * Bumping the ASID is probably cheaper than the flush ...
176 */ 176 */
177 if (cpu_context(cpu, vma->vm_mm) != 0) 177 if (cpu_context(cpu, vma->vm_mm) != 0)
178 drop_mmu_context(vma->vm_mm, cpu); 178 drop_mmu_context(vma->vm_mm, cpu);
179 } 179 }
180 180
181 #ifdef CONFIG_SMP 181 #ifdef CONFIG_SMP
182 struct flush_cache_page_args { 182 struct flush_cache_page_args {
183 struct vm_area_struct *vma; 183 struct vm_area_struct *vma;
184 unsigned long addr; 184 unsigned long addr;
185 unsigned long pfn; 185 unsigned long pfn;
186 }; 186 };
187 187
188 static void sb1_flush_cache_page_ipi(void *info) 188 static void sb1_flush_cache_page_ipi(void *info)
189 { 189 {
190 struct flush_cache_page_args *args = info; 190 struct flush_cache_page_args *args = info;
191 191
192 local_sb1_flush_cache_page(args->vma, args->addr, args->pfn); 192 local_sb1_flush_cache_page(args->vma, args->addr, args->pfn);
193 } 193 }
194 194
195 /* Dirty dcache could be on another CPU, so do the IPIs */ 195 /* Dirty dcache could be on another CPU, so do the IPIs */
196 static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) 196 static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
197 { 197 {
198 struct flush_cache_page_args args; 198 struct flush_cache_page_args args;
199 199
200 if (!(vma->vm_flags & VM_EXEC)) 200 if (!(vma->vm_flags & VM_EXEC))
201 return; 201 return;
202 202
203 addr &= PAGE_MASK; 203 addr &= PAGE_MASK;
204 args.vma = vma; 204 args.vma = vma;
205 args.addr = addr; 205 args.addr = addr;
206 args.pfn = pfn; 206 args.pfn = pfn;
207 on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1); 207 on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
208 } 208 }
209 #else 209 #else
210 void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) 210 void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
211 __attribute__((alias("local_sb1_flush_cache_page"))); 211 __attribute__((alias("local_sb1_flush_cache_page")));
212 #endif 212 #endif
213 213
214 /* 214 /*
215 * Invalidate a range of the icache. The addresses are virtual, and 215 * Invalidate a range of the icache. The addresses are virtual, and
216 * the cache is virtually indexed and tagged. However, we don't 216 * the cache is virtually indexed and tagged. However, we don't
217 * necessarily have the right ASID context, so use index ops instead 217 * necessarily have the right ASID context, so use index ops instead
218 * of hit ops. 218 * of hit ops.
219 */ 219 */
220 static inline void __sb1_flush_icache_range(unsigned long start, 220 static inline void __sb1_flush_icache_range(unsigned long start,
221 unsigned long end) 221 unsigned long end)
222 { 222 {
223 start &= ~(icache_line_size - 1); 223 start &= ~(icache_line_size - 1);
224 end = (end + icache_line_size - 1) & ~(icache_line_size - 1); 224 end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
225 225
226 while (start != end) { 226 while (start != end) {
227 cache_set_op(Index_Invalidate_I, start & icache_index_mask); 227 cache_set_op(Index_Invalidate_I, start & icache_index_mask);
228 start += icache_line_size; 228 start += icache_line_size;
229 } 229 }
230 mispredict(); 230 mispredict();
231 sync(); 231 sync();
232 } 232 }
233 233
234 234
235 /* 235 /*
236 * Invalidate all caches on this CPU 236 * Invalidate all caches on this CPU
237 */ 237 */
238 static void local_sb1___flush_cache_all(void) 238 static void __attribute_used__ local_sb1___flush_cache_all(void)
239 { 239 {
240 __sb1_writeback_inv_dcache_all(); 240 __sb1_writeback_inv_dcache_all();
241 __sb1_flush_icache_all(); 241 __sb1_flush_icache_all();
242 } 242 }
243 243
244 #ifdef CONFIG_SMP 244 #ifdef CONFIG_SMP
245 void sb1___flush_cache_all_ipi(void *ignored) 245 void sb1___flush_cache_all_ipi(void *ignored)
246 __attribute__((alias("local_sb1___flush_cache_all"))); 246 __attribute__((alias("local_sb1___flush_cache_all")));
247 247
248 static void sb1___flush_cache_all(void) 248 static void sb1___flush_cache_all(void)
249 { 249 {
250 on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1); 250 on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
251 } 251 }
252 #else 252 #else
253 void sb1___flush_cache_all(void) 253 void sb1___flush_cache_all(void)
254 __attribute__((alias("local_sb1___flush_cache_all"))); 254 __attribute__((alias("local_sb1___flush_cache_all")));
255 #endif 255 #endif
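Note the GCC 4.0 fix above: local_sb1___flush_cache_all gains __attribute_used__ because GCC 4.0 may discard a static function whose only reference is the name string inside an alias attribute. A minimal sketch of the pattern, with hypothetical names:

/* Without "used", GCC 4.0 can drop the static definition, since
 * the alias below names it as a string rather than calling it. */
static void __attribute__((used)) local_impl(void)
{
	/* real flush work would go here */
}

/* public_name resolves to the same code as local_impl. */
void public_name(void) __attribute__((alias("local_impl")));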
256 256
257 /* 257 /*
258 * When flushing a range in the icache, we have to first writeback 258 * When flushing a range in the icache, we have to first writeback
259 * the dcache for the same range, so new ifetches will see any 259 * the dcache for the same range, so new ifetches will see any
260 * data that was dirty in the dcache. 260 * data that was dirty in the dcache.
261 * 261 *
262 * The start/end arguments are Kseg addresses (possibly mapped Kseg). 262 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
263 */ 263 */
264 264
265 static void local_sb1_flush_icache_range(unsigned long start, 265 static void local_sb1_flush_icache_range(unsigned long start,
266 unsigned long end) 266 unsigned long end)
267 { 267 {
268 /* Just wb-inv the whole dcache if the range is big enough */ 268 /* Just wb-inv the whole dcache if the range is big enough */
269 if ((end - start) > dcache_range_cutoff) 269 if ((end - start) > dcache_range_cutoff)
270 __sb1_writeback_inv_dcache_all(); 270 __sb1_writeback_inv_dcache_all();
271 else 271 else
272 __sb1_writeback_inv_dcache_range(start, end); 272 __sb1_writeback_inv_dcache_range(start, end);
273 273
274 /* Just flush the whole icache if the range is big enough */ 274 /* Just flush the whole icache if the range is big enough */
275 if ((end - start) > icache_range_cutoff) 275 if ((end - start) > icache_range_cutoff)
276 __sb1_flush_icache_all(); 276 __sb1_flush_icache_all();
277 else 277 else
278 __sb1_flush_icache_range(start, end); 278 __sb1_flush_icache_range(start, end);
279 } 279 }
280 280
281 #ifdef CONFIG_SMP 281 #ifdef CONFIG_SMP
282 struct flush_icache_range_args { 282 struct flush_icache_range_args {
283 unsigned long start; 283 unsigned long start;
284 unsigned long end; 284 unsigned long end;
285 }; 285 };
286 286
287 static void sb1_flush_icache_range_ipi(void *info) 287 static void sb1_flush_icache_range_ipi(void *info)
288 { 288 {
289 struct flush_icache_range_args *args = info; 289 struct flush_icache_range_args *args = info;
290 290
291 local_sb1_flush_icache_range(args->start, args->end); 291 local_sb1_flush_icache_range(args->start, args->end);
292 } 292 }
293 293
294 void sb1_flush_icache_range(unsigned long start, unsigned long end) 294 void sb1_flush_icache_range(unsigned long start, unsigned long end)
295 { 295 {
296 struct flush_icache_range_args args; 296 struct flush_icache_range_args args;
297 297
298 args.start = start; 298 args.start = start;
299 args.end = end; 299 args.end = end;
300 on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1); 300 on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
301 } 301 }
302 #else 302 #else
303 void sb1_flush_icache_range(unsigned long start, unsigned long end) 303 void sb1_flush_icache_range(unsigned long start, unsigned long end)
304 __attribute__((alias("local_sb1_flush_icache_range"))); 304 __attribute__((alias("local_sb1_flush_icache_range")));
305 #endif 305 #endif
306 306
307 /* 307 /*
308 * Flush the icache for a given physical page. Need to writeback the 308 * Flush the icache for a given physical page. Need to writeback the
309 * dcache first, then invalidate the icache. If the page isn't 309 * dcache first, then invalidate the icache. If the page isn't
310 * executable, nothing is required. 310 * executable, nothing is required.
311 */ 311 */
312 static void local_sb1_flush_icache_page(struct vm_area_struct *vma, 312 static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
313 struct page *page) 313 struct page *page)
314 { 314 {
315 unsigned long start; 315 unsigned long start;
316 int cpu = smp_processor_id(); 316 int cpu = smp_processor_id();
317 317
318 #ifndef CONFIG_SMP 318 #ifndef CONFIG_SMP
319 if (!(vma->vm_flags & VM_EXEC)) 319 if (!(vma->vm_flags & VM_EXEC))
320 return; 320 return;
321 #endif 321 #endif
322 322
323 /* Need to write back any dirty data for that page; we have the PA */ 323 /* Need to write back any dirty data for that page; we have the PA */
324 start = (unsigned long)(page-mem_map) << PAGE_SHIFT; 324 start = (unsigned long)(page-mem_map) << PAGE_SHIFT;
325 __sb1_writeback_inv_dcache_phys_range(start, start + PAGE_SIZE); 325 __sb1_writeback_inv_dcache_phys_range(start, start + PAGE_SIZE);
326 /* 326 /*
327 * If there's a context, bump the ASID (cheaper than a flush, 327 * If there's a context, bump the ASID (cheaper than a flush,
328 * since we don't know VAs!) 328 * since we don't know VAs!)
329 */ 329 */
330 if (cpu_context(cpu, vma->vm_mm) != 0) { 330 if (cpu_context(cpu, vma->vm_mm) != 0) {
331 drop_mmu_context(vma->vm_mm, cpu); 331 drop_mmu_context(vma->vm_mm, cpu);
332 } 332 }
333 } 333 }
334 334
335 #ifdef CONFIG_SMP 335 #ifdef CONFIG_SMP
336 struct flush_icache_page_args { 336 struct flush_icache_page_args {
337 struct vm_area_struct *vma; 337 struct vm_area_struct *vma;
338 struct page *page; 338 struct page *page;
339 }; 339 };
340 340
341 static void sb1_flush_icache_page_ipi(void *info) 341 static void sb1_flush_icache_page_ipi(void *info)
342 { 342 {
343 struct flush_icache_page_args *args = info; 343 struct flush_icache_page_args *args = info;
344 local_sb1_flush_icache_page(args->vma, args->page); 344 local_sb1_flush_icache_page(args->vma, args->page);
345 } 345 }
346 346
347 /* Dirty dcache could be on another CPU, so do the IPIs */ 347 /* Dirty dcache could be on another CPU, so do the IPIs */
348 static void sb1_flush_icache_page(struct vm_area_struct *vma, 348 static void sb1_flush_icache_page(struct vm_area_struct *vma,
349 struct page *page) 349 struct page *page)
350 { 350 {
351 struct flush_icache_page_args args; 351 struct flush_icache_page_args args;
352 352
353 if (!(vma->vm_flags & VM_EXEC)) 353 if (!(vma->vm_flags & VM_EXEC))
354 return; 354 return;
355 args.vma = vma; 355 args.vma = vma;
356 args.page = page; 356 args.page = page;
357 on_each_cpu(sb1_flush_icache_page_ipi, (void *) &args, 1, 1); 357 on_each_cpu(sb1_flush_icache_page_ipi, (void *) &args, 1, 1);
358 } 358 }
359 #else 359 #else
360 void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page) 360 void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page)
361 __attribute__((alias("local_sb1_flush_icache_page"))); 361 __attribute__((alias("local_sb1_flush_icache_page")));
362 #endif 362 #endif
363 363
364 /* 364 /*
365 * A signal trampoline must fit into a single cacheline. 365 * A signal trampoline must fit into a single cacheline.
366 */ 366 */
367 static void local_sb1_flush_cache_sigtramp(unsigned long addr) 367 static void local_sb1_flush_cache_sigtramp(unsigned long addr)
368 { 368 {
369 cache_set_op(Index_Writeback_Inv_D, addr & dcache_index_mask); 369 cache_set_op(Index_Writeback_Inv_D, addr & dcache_index_mask);
370 cache_set_op(Index_Writeback_Inv_D, (addr ^ (1<<12)) & dcache_index_mask); 370 cache_set_op(Index_Writeback_Inv_D, (addr ^ (1<<12)) & dcache_index_mask);
371 cache_set_op(Index_Invalidate_I, addr & icache_index_mask); 371 cache_set_op(Index_Invalidate_I, addr & icache_index_mask);
372 mispredict(); 372 mispredict();
373 } 373 }
374 374
375 #ifdef CONFIG_SMP 375 #ifdef CONFIG_SMP
376 static void sb1_flush_cache_sigtramp_ipi(void *info) 376 static void sb1_flush_cache_sigtramp_ipi(void *info)
377 { 377 {
378 unsigned long iaddr = (unsigned long) info; 378 unsigned long iaddr = (unsigned long) info;
379 local_sb1_flush_cache_sigtramp(iaddr); 379 local_sb1_flush_cache_sigtramp(iaddr);
380 } 380 }
381 381
382 static void sb1_flush_cache_sigtramp(unsigned long addr) 382 static void sb1_flush_cache_sigtramp(unsigned long addr)
383 { 383 {
384 on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1); 384 on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
385 } 385 }
386 #else 386 #else
387 void sb1_flush_cache_sigtramp(unsigned long addr) 387 void sb1_flush_cache_sigtramp(unsigned long addr)
388 __attribute__((alias("local_sb1_flush_cache_sigtramp"))); 388 __attribute__((alias("local_sb1_flush_cache_sigtramp")));
389 #endif 389 #endif
390 390
391 391
392 /* 392 /*
393 * Anything that just flushes dcache state can be ignored, as we're always 393 * Anything that just flushes dcache state can be ignored, as we're always
394 * coherent in dcache space. This is just a dummy function that all the 394 * coherent in dcache space. This is just a dummy function that all the
395 * nop'ed routines point to. 395 * nop'ed routines point to.
396 */ 396 */
397 static void sb1_nop(void) 397 static void sb1_nop(void)
398 { 398 {
399 } 399 }
400 400
401 /* 401 /*
402 * Cache set values (from the mips64 spec) 402 * Cache set values (from the mips64 spec)
403 * 0 - 64 403 * 0 - 64
404 * 1 - 128 404 * 1 - 128
405 * 2 - 256 405 * 2 - 256
406 * 3 - 512 406 * 3 - 512
407 * 4 - 1024 407 * 4 - 1024
408 * 5 - 2048 408 * 5 - 2048
409 * 6 - 4096 409 * 6 - 4096
410 * 7 - Reserved 410 * 7 - Reserved
411 */ 411 */
412 412
413 static unsigned int decode_cache_sets(unsigned int config_field) 413 static unsigned int decode_cache_sets(unsigned int config_field)
414 { 414 {
415 if (config_field == 7) { 415 if (config_field == 7) {
416 /* JDCXXX - Find a graceful way to abort. */ 416 /* JDCXXX - Find a graceful way to abort. */
417 return 0; 417 return 0;
418 } 418 }
419 return (1<<(config_field + 6)); 419 return (1<<(config_field + 6));
420 } 420 }
421 421
422 /* 422 /*
423 * Cache line size values (from the mips64 spec) 423 * Cache line size values (from the mips64 spec)
424 * 0 - No cache present. 424 * 0 - No cache present.
425 * 1 - 4 bytes 425 * 1 - 4 bytes
426 * 2 - 8 bytes 426 * 2 - 8 bytes
427 * 3 - 16 bytes 427 * 3 - 16 bytes
428 * 4 - 32 bytes 428 * 4 - 32 bytes
429 * 5 - 64 bytes 429 * 5 - 64 bytes
430 * 6 - 128 bytes 430 * 6 - 128 bytes
431 * 7 - Reserved 431 * 7 - Reserved
432 */ 432 */
433 433
434 static unsigned int decode_cache_line_size(unsigned int config_field) 434 static unsigned int decode_cache_line_size(unsigned int config_field)
435 { 435 {
436 if (config_field == 0) { 436 if (config_field == 0) {
437 return 0; 437 return 0;
438 } else if (config_field == 7) { 438 } else if (config_field == 7) {
439 /* JDCXXX - Find a graceful way to abort. */ 439 /* JDCXXX - Find a graceful way to abort. */
440 return 0; 440 return 0;
441 } 441 }
442 return (1<<(config_field + 1)); 442 return (1<<(config_field + 1));
443 } 443 }
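A quick sanity check of the two decoders against the tables above; this assumes both functions are in scope in the same file:

#include <assert.h>

static void check_cache_decoders(void)
{
	assert(decode_cache_sets(2) == 256);		/* field 2 -> 256 sets */
	assert(decode_cache_line_size(4) == 32);	/* field 4 -> 32 bytes */
	assert(decode_cache_line_size(0) == 0);		/* no cache present */
}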
444 444
445 /* 445 /*
446 * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs) 446 * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs)
447 * 447 *
448 * 24:22 Icache sets per way 448 * 24:22 Icache sets per way
449 * 21:19 Icache line size 449 * 21:19 Icache line size
450 * 18:16 Icache Associativity 450 * 18:16 Icache Associativity
451 * 15:13 Dcache sets per way 451 * 15:13 Dcache sets per way
452 * 12:10 Dcache line size 452 * 12:10 Dcache line size
453 * 9:7 Dcache Associativity 453 * 9:7 Dcache Associativity
454 */ 454 */
455 455
456 static char *way_string[] = { 456 static char *way_string[] = {
457 "direct mapped", "2-way", "3-way", "4-way", 457 "direct mapped", "2-way", "3-way", "4-way",
458 "5-way", "6-way", "7-way", "8-way", 458 "5-way", "6-way", "7-way", "8-way",
459 }; 459 };
460 460
461 static __init void probe_cache_sizes(void) 461 static __init void probe_cache_sizes(void)
462 { 462 {
463 u32 config1; 463 u32 config1;
464 464
465 config1 = read_c0_config1(); 465 config1 = read_c0_config1();
466 icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7); 466 icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
467 dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7); 467 dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
468 icache_sets = decode_cache_sets((config1 >> 22) & 0x7); 468 icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
469 dcache_sets = decode_cache_sets((config1 >> 13) & 0x7); 469 dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
470 icache_assoc = ((config1 >> 16) & 0x7) + 1; 470 icache_assoc = ((config1 >> 16) & 0x7) + 1;
471 dcache_assoc = ((config1 >> 7) & 0x7) + 1; 471 dcache_assoc = ((config1 >> 7) & 0x7) + 1;
472 icache_size = icache_line_size * icache_sets * icache_assoc; 472 icache_size = icache_line_size * icache_sets * icache_assoc;
473 dcache_size = dcache_line_size * dcache_sets * dcache_assoc; 473 dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
474 /* Need to remove non-index bits for index ops */ 474 /* Need to remove non-index bits for index ops */
475 icache_index_mask = (icache_sets - 1) * icache_line_size; 475 icache_index_mask = (icache_sets - 1) * icache_line_size;
476 dcache_index_mask = (dcache_sets - 1) * dcache_line_size; 476 dcache_index_mask = (dcache_sets - 1) * dcache_line_size;
477 /* 477 /*
478 * These are for choosing range (index ops) versus all. 478 * These are for choosing range (index ops) versus all.
479 * icache flushes all ways for each set, so drop icache_assoc. 479 * icache flushes all ways for each set, so drop icache_assoc.
480 * dcache flushes all ways and each setting of bit 12 for each 480 * dcache flushes all ways and each setting of bit 12 for each
481 * index, so drop dcache_assoc and halve the dcache_sets. 481 * index, so drop dcache_assoc and halve the dcache_sets.
482 */ 482 */
483 icache_range_cutoff = icache_sets * icache_line_size; 483 icache_range_cutoff = icache_sets * icache_line_size;
484 dcache_range_cutoff = (dcache_sets / 2) * dcache_line_size; 484 dcache_range_cutoff = (dcache_sets / 2) * dcache_line_size;
485 485
486 printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n", 486 printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n",
487 icache_size >> 10, way_string[icache_assoc - 1], 487 icache_size >> 10, way_string[icache_assoc - 1],
488 icache_line_size); 488 icache_line_size);
489 printk("Primary data cache %ldkB, %s, linesize %d bytes.\n", 489 printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
490 dcache_size >> 10, way_string[dcache_assoc - 1], 490 dcache_size >> 10, way_string[dcache_assoc - 1],
491 dcache_line_size); 491 dcache_line_size);
492 } 492 }
493 493
494 /* 494 /*
495 * This is called from loadmmu.c. We have to set up all the 495 * This is called from loadmmu.c. We have to set up all the
496 * memory management function pointers, as well as initialize 496 * memory management function pointers, as well as initialize
497 * the caches and TLBs. 497 * the caches and TLBs.
498 */ 498 */
499 void ld_mmu_sb1(void) 499 void ld_mmu_sb1(void)
500 { 500 {
501 extern char except_vec2_sb1; 501 extern char except_vec2_sb1;
502 extern char handle_vec2_sb1; 502 extern char handle_vec2_sb1;
503 503
504 /* Special cache error handler for SB1 */ 504 /* Special cache error handler for SB1 */
505 memcpy((void *)(CAC_BASE + 0x100), &except_vec2_sb1, 0x80); 505 memcpy((void *)(CAC_BASE + 0x100), &except_vec2_sb1, 0x80);
506 memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_sb1, 0x80); 506 memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_sb1, 0x80);
507 memcpy((void *)CKSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80); 507 memcpy((void *)CKSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80);
508 508
509 probe_cache_sizes(); 509 probe_cache_sizes();
510 510
511 #ifdef CONFIG_SIBYTE_DMA_PAGEOPS 511 #ifdef CONFIG_SIBYTE_DMA_PAGEOPS
512 sb1_dma_init(); 512 sb1_dma_init();
513 #endif 513 #endif
514 514
515 /* 515 /*
516 * None of these are needed for the SB1 - the Dcache is 516 * None of these are needed for the SB1 - the Dcache is
517 * physically indexed and tagged, so no virtual aliasing can 517 * physically indexed and tagged, so no virtual aliasing can
518 * occur. 518 * occur.
519 */ 519 */
520 flush_cache_range = (void *) sb1_nop; 520 flush_cache_range = (void *) sb1_nop;
521 flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop; 521 flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
522 flush_cache_all = sb1_nop; 522 flush_cache_all = sb1_nop;
523 523
524 /* These routines are for Icache coherence with the Dcache */ 524 /* These routines are for Icache coherence with the Dcache */
525 flush_icache_range = sb1_flush_icache_range; 525 flush_icache_range = sb1_flush_icache_range;
526 flush_icache_page = sb1_flush_icache_page; 526 flush_icache_page = sb1_flush_icache_page;
527 flush_icache_all = __sb1_flush_icache_all; /* local only */ 527 flush_icache_all = __sb1_flush_icache_all; /* local only */
528 528
529 /* This implies an Icache flush too, so can't be nop'ed */ 529 /* This implies an Icache flush too, so can't be nop'ed */
530 flush_cache_page = sb1_flush_cache_page; 530 flush_cache_page = sb1_flush_cache_page;
531 531
532 flush_cache_sigtramp = sb1_flush_cache_sigtramp; 532 flush_cache_sigtramp = sb1_flush_cache_sigtramp;
533 flush_data_cache_page = (void *) sb1_nop; 533 flush_data_cache_page = (void *) sb1_nop;
534 534
535 /* Full flush */ 535 /* Full flush */
536 __flush_cache_all = sb1___flush_cache_all; 536 __flush_cache_all = sb1___flush_cache_all;
537 537
538 change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT); 538 change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
539 539
540 /* 540 /*
541 * This is the only way to force the update of K0 to complete 541 * This is the only way to force the update of K0 to complete
542 * before subsequent instruction fetch. 542 * before subsequent instruction fetch.
543 */ 543 */
544 __asm__ __volatile__( 544 __asm__ __volatile__(
545 ".set push \n" 545 ".set push \n"
546 " .set noat \n" 546 " .set noat \n"
547 " .set noreorder \n" 547 " .set noreorder \n"
548 " .set mips3 \n" 548 " .set mips3 \n"
549 " " STR(PTR_LA) " $1, 1f \n" 549 " " STR(PTR_LA) " $1, 1f \n"
550 " " STR(MTC0) " $1, $14 \n" 550 " " STR(MTC0) " $1, $14 \n"
551 " eret \n" 551 " eret \n"
552 "1: .set pop" 552 "1: .set pop"
553 : 553 :
554 : 554 :
555 : "memory"); 555 : "memory");
556 556
557 flush_cache_all(); 557 flush_cache_all();
558 } 558 }
559 559
include/asm-mips/paccess.h
1 /* 1 /*
2 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle 6 * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * 8 *
9 * Protected memory access. Used for everything that might take revenge 9 * Protected memory access. Used for everything that might take revenge
10 * by sending a DBE error, such as accesses to possibly non-existent 10 * by sending a DBE error, such as accesses to possibly non-existent
11 * memory or devices. 11 * memory or devices.
12 */ 12 */
13 #ifndef _ASM_PACCESS_H 13 #ifndef _ASM_PACCESS_H
14 #define _ASM_PACCESS_H 14 #define _ASM_PACCESS_H
15 15
16 #include <linux/config.h> 16 #include <linux/config.h>
17 #include <linux/errno.h> 17 #include <linux/errno.h>
18 18
19 #ifdef CONFIG_32BIT 19 #ifdef CONFIG_32BIT
20 #define __PA_ADDR ".word" 20 #define __PA_ADDR ".word"
21 #endif 21 #endif
22 #ifdef CONFIG_64BIT 22 #ifdef CONFIG_64BIT
23 #define __PA_ADDR ".dword" 23 #define __PA_ADDR ".dword"
24 #endif 24 #endif
25 25
26 extern asmlinkage void handle_ibe(void); 26 extern asmlinkage void handle_ibe(void);
27 extern asmlinkage void handle_dbe(void); 27 extern asmlinkage void handle_dbe(void);
28 28
29 #define put_dbe(x,ptr) __put_dbe((x),(ptr),sizeof(*(ptr))) 29 #define put_dbe(x,ptr) __put_dbe((x),(ptr),sizeof(*(ptr)))
30 #define get_dbe(x,ptr) __get_dbe((x),(ptr),sizeof(*(ptr))) 30 #define get_dbe(x,ptr) __get_dbe((x),(ptr),sizeof(*(ptr)))
31 31
32 struct __large_pstruct { unsigned long buf[100]; }; 32 struct __large_pstruct { unsigned long buf[100]; };
33 #define __mp(x) (*(struct __large_pstruct *)(x)) 33 #define __mp(x) (*(struct __large_pstruct *)(x))
34 34
35 #define __get_dbe(x,ptr,size) \ 35 #define __get_dbe(x,ptr,size) \
36 ({ \ 36 ({ \
37 long __gu_err; \ 37 long __gu_err; \
38 __typeof(*(ptr)) __gu_val; \ 38 __typeof(*(ptr)) __gu_val; \
39 unsigned long __gu_addr; \ 39 unsigned long __gu_addr; \
40 __asm__("":"=r" (__gu_val)); \ 40 __asm__("":"=r" (__gu_val)); \
41 __gu_addr = (unsigned long) (ptr); \ 41 __gu_addr = (unsigned long) (ptr); \
42 __asm__("":"=r" (__gu_err)); \ 42 __asm__("":"=r" (__gu_err)); \
43 switch (size) { \ 43 switch (size) { \
44 case 1: __get_dbe_asm("lb"); break; \ 44 case 1: __get_dbe_asm("lb"); break; \
45 case 2: __get_dbe_asm("lh"); break; \ 45 case 2: __get_dbe_asm("lh"); break; \
46 case 4: __get_dbe_asm("lw"); break; \ 46 case 4: __get_dbe_asm("lw"); break; \
47 case 8: __get_dbe_asm("ld"); break; \ 47 case 8: __get_dbe_asm("ld"); break; \
48 default: __get_dbe_unknown(); break; \ 48 default: __get_dbe_unknown(); break; \
49 } \ 49 } \
50 x = (__typeof__(*(ptr))) __gu_val; \ 50 x = (__typeof__(*(ptr))) __gu_val; \
51 __gu_err; \ 51 __gu_err; \
52 }) 52 })
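For context, a sketch of a typical get_dbe caller: probing a register that may not exist, with a bus error reported as -ENODEV. The helper is hypothetical; <linux/errno.h> is already included above.

/* Hypothetical probe: returns the register value, or -ENODEV if
 * the read took a DBE (bus error). */
static int probe_reg(volatile unsigned int *reg)
{
	unsigned int val;

	if (get_dbe(val, reg))
		return -ENODEV;

	return val;
}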
53 53
54 #define __get_dbe_asm(insn) \ 54 #define __get_dbe_asm(insn) \
55 ({ \ 55 { \
56 __asm__ __volatile__( \ 56 __asm__ __volatile__( \
57 "1:\t" insn "\t%1,%2\n\t" \ 57 "1:\t" insn "\t%1,%2\n\t" \
58 "move\t%0,$0\n" \ 58 "move\t%0,$0\n" \
59 "2:\n\t" \ 59 "2:\n\t" \
60 ".section\t.fixup,\"ax\"\n" \ 60 ".section\t.fixup,\"ax\"\n" \
61 "3:\tli\t%0,%3\n\t" \ 61 "3:\tli\t%0,%3\n\t" \
62 "move\t%1,$0\n\t" \ 62 "move\t%1,$0\n\t" \
63 "j\t2b\n\t" \ 63 "j\t2b\n\t" \
64 ".previous\n\t" \ 64 ".previous\n\t" \
65 ".section\t__dbe_table,\"a\"\n\t" \ 65 ".section\t__dbe_table,\"a\"\n\t" \
66 __PA_ADDR "\t1b, 3b\n\t" \ 66 __PA_ADDR "\t1b, 3b\n\t" \
67 ".previous" \ 67 ".previous" \
68 :"=r" (__gu_err), "=r" (__gu_val) \ 68 :"=r" (__gu_err), "=r" (__gu_val) \
69 :"o" (__mp(__gu_addr)), "i" (-EFAULT)); \ 69 :"o" (__mp(__gu_addr)), "i" (-EFAULT)); \
70 }) 70 }
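The ({ ... }) to { ... } change above is one of the GCC 4.0 fixes: __get_dbe_asm is only ever expanded in statement context (see the switch in __get_dbe), so a plain block suffices. A small illustration of the distinction, with hypothetical values:

static void demo(void)
{
	/* A GNU statement expression yields a value ... */
	int three = ({ int t = 1; t + 2; });
	(void)three;

	/* ... while a plain brace block is a statement only, which
	 * is how the asm helper macros here are always used. */
	{ int t = 1; (void)(t + 2); }
}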
71 71
72 extern void __get_dbe_unknown(void); 72 extern void __get_dbe_unknown(void);
73 73
74 #define __put_dbe(x,ptr,size) \ 74 #define __put_dbe(x,ptr,size) \
75 ({ \ 75 ({ \
76 long __pu_err; \ 76 long __pu_err; \
77 __typeof__(*(ptr)) __pu_val; \ 77 __typeof__(*(ptr)) __pu_val; \
78 long __pu_addr; \ 78 long __pu_addr; \
79 __pu_val = (x); \ 79 __pu_val = (x); \
80 __pu_addr = (long) (ptr); \ 80 __pu_addr = (long) (ptr); \
81 __asm__("":"=r" (__pu_err)); \ 81 __asm__("":"=r" (__pu_err)); \
82 switch (size) { \ 82 switch (size) { \
83 case 1: __put_dbe_asm("sb"); break; \ 83 case 1: __put_dbe_asm("sb"); break; \
84 case 2: __put_dbe_asm("sh"); break; \ 84 case 2: __put_dbe_asm("sh"); break; \
85 case 4: __put_dbe_asm("sw"); break; \ 85 case 4: __put_dbe_asm("sw"); break; \
86 case 8: __put_dbe_asm("sd"); break; \ 86 case 8: __put_dbe_asm("sd"); break; \
87 default: __put_dbe_unknown(); break; \ 87 default: __put_dbe_unknown(); break; \
88 } \ 88 } \
89 __pu_err; \ 89 __pu_err; \
90 }) 90 })
91 91
92 #define __put_dbe_asm(insn) \ 92 #define __put_dbe_asm(insn) \
93 ({ \ 93 { \
94 __asm__ __volatile__( \ 94 __asm__ __volatile__( \
95 "1:\t" insn "\t%1,%2\n\t" \ 95 "1:\t" insn "\t%1,%2\n\t" \
96 "move\t%0,$0\n" \ 96 "move\t%0,$0\n" \
97 "2:\n\t" \ 97 "2:\n\t" \
98 ".section\t.fixup,\"ax\"\n" \ 98 ".section\t.fixup,\"ax\"\n" \
99 "3:\tli\t%0,%3\n\t" \ 99 "3:\tli\t%0,%3\n\t" \
100 "j\t2b\n\t" \ 100 "j\t2b\n\t" \
101 ".previous\n\t" \ 101 ".previous\n\t" \
102 ".section\t__dbe_table,\"a\"\n\t" \ 102 ".section\t__dbe_table,\"a\"\n\t" \
103 __PA_ADDR "\t1b, 3b\n\t" \ 103 __PA_ADDR "\t1b, 3b\n\t" \
104 ".previous" \ 104 ".previous" \
105 : "=r" (__pu_err) \ 105 : "=r" (__pu_err) \
106 : "r" (__pu_val), "o" (__mp(__pu_addr)), "i" (-EFAULT)); \ 106 : "r" (__pu_val), "o" (__mp(__pu_addr)), "i" (-EFAULT)); \
107 }) 107 }
108 108
109 extern void __put_dbe_unknown(void); 109 extern void __put_dbe_unknown(void);
110 110
111 extern unsigned long search_dbe_table(unsigned long addr); 111 extern unsigned long search_dbe_table(unsigned long addr);
112 112
113 #endif /* _ASM_PACCESS_H */ 113 #endif /* _ASM_PACCESS_H */
114 114