Commit 59586e5a262a29361c45c929ea3253d4aec830b0

Authored by Eric W. Biederman
Committed by Linus Torvalds
1 parent 16dcb4bbda

[PATCH] Don't export machine_restart, machine_halt, or machine_power_off.

machine_restart, machine_halt and machine_power_off are machine-specific
hooks deep in the reboot logic that modules
have no business messing with.  Code should usually call
kernel_restart, kernel_halt, kernel_power_off, or
emergency_restart instead.  So stop exporting machine_restart,
machine_halt, and machine_power_off, so that we can catch buggy users.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 29 changed files with 0 additions and 152 deletions Inline Diff

arch/alpha/kernel/process.c
1 /* 1 /*
2 * linux/arch/alpha/kernel/process.c 2 * linux/arch/alpha/kernel/process.c
3 * 3 *
4 * Copyright (C) 1995 Linus Torvalds 4 * Copyright (C) 1995 Linus Torvalds
5 */ 5 */
6 6
7 /* 7 /*
8 * This file handles the architecture-dependent parts of process handling. 8 * This file handles the architecture-dependent parts of process handling.
9 */ 9 */
10 10
11 #include <linux/config.h> 11 #include <linux/config.h>
12 #include <linux/errno.h> 12 #include <linux/errno.h>
13 #include <linux/module.h> 13 #include <linux/module.h>
14 #include <linux/sched.h> 14 #include <linux/sched.h>
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/mm.h> 16 #include <linux/mm.h>
17 #include <linux/smp.h> 17 #include <linux/smp.h>
18 #include <linux/smp_lock.h> 18 #include <linux/smp_lock.h>
19 #include <linux/stddef.h> 19 #include <linux/stddef.h>
20 #include <linux/unistd.h> 20 #include <linux/unistd.h>
21 #include <linux/ptrace.h> 21 #include <linux/ptrace.h>
22 #include <linux/slab.h> 22 #include <linux/slab.h>
23 #include <linux/user.h> 23 #include <linux/user.h>
24 #include <linux/a.out.h> 24 #include <linux/a.out.h>
25 #include <linux/utsname.h> 25 #include <linux/utsname.h>
26 #include <linux/time.h> 26 #include <linux/time.h>
27 #include <linux/major.h> 27 #include <linux/major.h>
28 #include <linux/stat.h> 28 #include <linux/stat.h>
29 #include <linux/mman.h> 29 #include <linux/mman.h>
30 #include <linux/elfcore.h> 30 #include <linux/elfcore.h>
31 #include <linux/reboot.h> 31 #include <linux/reboot.h>
32 #include <linux/tty.h> 32 #include <linux/tty.h>
33 #include <linux/console.h> 33 #include <linux/console.h>
34 34
35 #include <asm/reg.h> 35 #include <asm/reg.h>
36 #include <asm/uaccess.h> 36 #include <asm/uaccess.h>
37 #include <asm/system.h> 37 #include <asm/system.h>
38 #include <asm/io.h> 38 #include <asm/io.h>
39 #include <asm/pgtable.h> 39 #include <asm/pgtable.h>
40 #include <asm/hwrpb.h> 40 #include <asm/hwrpb.h>
41 #include <asm/fpu.h> 41 #include <asm/fpu.h>
42 42
43 #include "proto.h" 43 #include "proto.h"
44 #include "pci_impl.h" 44 #include "pci_impl.h"
45 45
46 void default_idle(void) 46 void default_idle(void)
47 { 47 {
48 barrier(); 48 barrier();
49 } 49 }
50 50
51 void 51 void
52 cpu_idle(void) 52 cpu_idle(void)
53 { 53 {
54 while (1) { 54 while (1) {
55 void (*idle)(void) = default_idle; 55 void (*idle)(void) = default_idle;
56 /* FIXME -- EV6 and LCA45 know how to power down 56 /* FIXME -- EV6 and LCA45 know how to power down
57 the CPU. */ 57 the CPU. */
58 58
59 while (!need_resched()) 59 while (!need_resched())
60 idle(); 60 idle();
61 schedule(); 61 schedule();
62 } 62 }
63 } 63 }
64 64
65 65
66 struct halt_info { 66 struct halt_info {
67 int mode; 67 int mode;
68 char *restart_cmd; 68 char *restart_cmd;
69 }; 69 };
70 70
71 static void 71 static void
72 common_shutdown_1(void *generic_ptr) 72 common_shutdown_1(void *generic_ptr)
73 { 73 {
74 struct halt_info *how = (struct halt_info *)generic_ptr; 74 struct halt_info *how = (struct halt_info *)generic_ptr;
75 struct percpu_struct *cpup; 75 struct percpu_struct *cpup;
76 unsigned long *pflags, flags; 76 unsigned long *pflags, flags;
77 int cpuid = smp_processor_id(); 77 int cpuid = smp_processor_id();
78 78
79 /* No point in taking interrupts anymore. */ 79 /* No point in taking interrupts anymore. */
80 local_irq_disable(); 80 local_irq_disable();
81 81
82 cpup = (struct percpu_struct *) 82 cpup = (struct percpu_struct *)
83 ((unsigned long)hwrpb + hwrpb->processor_offset 83 ((unsigned long)hwrpb + hwrpb->processor_offset
84 + hwrpb->processor_size * cpuid); 84 + hwrpb->processor_size * cpuid);
85 pflags = &cpup->flags; 85 pflags = &cpup->flags;
86 flags = *pflags; 86 flags = *pflags;
87 87
88 /* Clear reason to "default"; clear "bootstrap in progress". */ 88 /* Clear reason to "default"; clear "bootstrap in progress". */
89 flags &= ~0x00ff0001UL; 89 flags &= ~0x00ff0001UL;
90 90
91 #ifdef CONFIG_SMP 91 #ifdef CONFIG_SMP
92 /* Secondaries halt here. */ 92 /* Secondaries halt here. */
93 if (cpuid != boot_cpuid) { 93 if (cpuid != boot_cpuid) {
94 flags |= 0x00040000UL; /* "remain halted" */ 94 flags |= 0x00040000UL; /* "remain halted" */
95 *pflags = flags; 95 *pflags = flags;
96 clear_bit(cpuid, &cpu_present_mask); 96 clear_bit(cpuid, &cpu_present_mask);
97 halt(); 97 halt();
98 } 98 }
99 #endif 99 #endif
100 100
101 if (how->mode == LINUX_REBOOT_CMD_RESTART) { 101 if (how->mode == LINUX_REBOOT_CMD_RESTART) {
102 if (!how->restart_cmd) { 102 if (!how->restart_cmd) {
103 flags |= 0x00020000UL; /* "cold bootstrap" */ 103 flags |= 0x00020000UL; /* "cold bootstrap" */
104 } else { 104 } else {
105 /* For SRM, we could probably set environment 105 /* For SRM, we could probably set environment
106 variables to get this to work. We'd have to 106 variables to get this to work. We'd have to
107 delay this until after srm_paging_stop unless 107 delay this until after srm_paging_stop unless
108 we ever got srm_fixup working. 108 we ever got srm_fixup working.
109 109
110 At the moment, SRM will use the last boot device, 110 At the moment, SRM will use the last boot device,
111 but the file and flags will be the defaults, when 111 but the file and flags will be the defaults, when
112 doing a "warm" bootstrap. */ 112 doing a "warm" bootstrap. */
113 flags |= 0x00030000UL; /* "warm bootstrap" */ 113 flags |= 0x00030000UL; /* "warm bootstrap" */
114 } 114 }
115 } else { 115 } else {
116 flags |= 0x00040000UL; /* "remain halted" */ 116 flags |= 0x00040000UL; /* "remain halted" */
117 } 117 }
118 *pflags = flags; 118 *pflags = flags;
119 119
120 #ifdef CONFIG_SMP 120 #ifdef CONFIG_SMP
121 /* Wait for the secondaries to halt. */ 121 /* Wait for the secondaries to halt. */
122 cpu_clear(boot_cpuid, cpu_possible_map); 122 cpu_clear(boot_cpuid, cpu_possible_map);
123 while (cpus_weight(cpu_possible_map)) 123 while (cpus_weight(cpu_possible_map))
124 barrier(); 124 barrier();
125 #endif 125 #endif
126 126
127 /* If booted from SRM, reset some of the original environment. */ 127 /* If booted from SRM, reset some of the original environment. */
128 if (alpha_using_srm) { 128 if (alpha_using_srm) {
129 #ifdef CONFIG_DUMMY_CONSOLE 129 #ifdef CONFIG_DUMMY_CONSOLE
130 /* This has the effect of resetting the VGA video origin. */ 130 /* This has the effect of resetting the VGA video origin. */
131 take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1); 131 take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
132 #endif 132 #endif
133 pci_restore_srm_config(); 133 pci_restore_srm_config();
134 set_hae(srm_hae); 134 set_hae(srm_hae);
135 } 135 }
136 136
137 if (alpha_mv.kill_arch) 137 if (alpha_mv.kill_arch)
138 alpha_mv.kill_arch(how->mode); 138 alpha_mv.kill_arch(how->mode);
139 139
140 if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) { 140 if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
141 /* Unfortunately, since MILO doesn't currently understand 141 /* Unfortunately, since MILO doesn't currently understand
142 the hwrpb bits above, we can't reliably halt the 142 the hwrpb bits above, we can't reliably halt the
143 processor and keep it halted. So just loop. */ 143 processor and keep it halted. So just loop. */
144 return; 144 return;
145 } 145 }
146 146
147 if (alpha_using_srm) 147 if (alpha_using_srm)
148 srm_paging_stop(); 148 srm_paging_stop();
149 149
150 halt(); 150 halt();
151 } 151 }
152 152
153 static void 153 static void
154 common_shutdown(int mode, char *restart_cmd) 154 common_shutdown(int mode, char *restart_cmd)
155 { 155 {
156 struct halt_info args; 156 struct halt_info args;
157 args.mode = mode; 157 args.mode = mode;
158 args.restart_cmd = restart_cmd; 158 args.restart_cmd = restart_cmd;
159 on_each_cpu(common_shutdown_1, &args, 1, 0); 159 on_each_cpu(common_shutdown_1, &args, 1, 0);
160 } 160 }
161 161
162 void 162 void
163 machine_restart(char *restart_cmd) 163 machine_restart(char *restart_cmd)
164 { 164 {
165 common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd); 165 common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd);
166 } 166 }
167 167
168 EXPORT_SYMBOL(machine_restart);
169 168
170 void 169 void
171 machine_halt(void) 170 machine_halt(void)
172 { 171 {
173 common_shutdown(LINUX_REBOOT_CMD_HALT, NULL); 172 common_shutdown(LINUX_REBOOT_CMD_HALT, NULL);
174 } 173 }
175 174
176 EXPORT_SYMBOL(machine_halt);
177 175
178 void 176 void
179 machine_power_off(void) 177 machine_power_off(void)
180 { 178 {
181 common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL); 179 common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL);
182 } 180 }
183 181
184 EXPORT_SYMBOL(machine_power_off);
185 182
186 /* Used by sysrq-p, among others. I don't believe r9-r15 are ever 183 /* Used by sysrq-p, among others. I don't believe r9-r15 are ever
187 saved in the context it's used. */ 184 saved in the context it's used. */
188 185
189 void 186 void
190 show_regs(struct pt_regs *regs) 187 show_regs(struct pt_regs *regs)
191 { 188 {
192 dik_show_regs(regs, NULL); 189 dik_show_regs(regs, NULL);
193 } 190 }
194 191
195 /* 192 /*
196 * Re-start a thread when doing execve() 193 * Re-start a thread when doing execve()
197 */ 194 */
198 void 195 void
199 start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) 196 start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
200 { 197 {
201 set_fs(USER_DS); 198 set_fs(USER_DS);
202 regs->pc = pc; 199 regs->pc = pc;
203 regs->ps = 8; 200 regs->ps = 8;
204 wrusp(sp); 201 wrusp(sp);
205 } 202 }
206 203
207 /* 204 /*
208 * Free current thread data structures etc.. 205 * Free current thread data structures etc..
209 */ 206 */
210 void 207 void
211 exit_thread(void) 208 exit_thread(void)
212 { 209 {
213 } 210 }
214 211
215 void 212 void
216 flush_thread(void) 213 flush_thread(void)
217 { 214 {
218 /* Arrange for each exec'ed process to start off with a clean slate 215 /* Arrange for each exec'ed process to start off with a clean slate
219 with respect to the FPU. This is all exceptions disabled. */ 216 with respect to the FPU. This is all exceptions disabled. */
220 current_thread_info()->ieee_state = 0; 217 current_thread_info()->ieee_state = 0;
221 wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0)); 218 wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));
222 219
223 /* Clean slate for TLS. */ 220 /* Clean slate for TLS. */
224 current_thread_info()->pcb.unique = 0; 221 current_thread_info()->pcb.unique = 0;
225 } 222 }
226 223
227 void 224 void
228 release_thread(struct task_struct *dead_task) 225 release_thread(struct task_struct *dead_task)
229 { 226 {
230 } 227 }
231 228
232 /* 229 /*
233 * "alpha_clone()".. By the time we get here, the 230 * "alpha_clone()".. By the time we get here, the
234 * non-volatile registers have also been saved on the 231 * non-volatile registers have also been saved on the
235 * stack. We do some ugly pointer stuff here.. (see 232 * stack. We do some ugly pointer stuff here.. (see
236 * also copy_thread) 233 * also copy_thread)
237 * 234 *
238 * Notice that "fork()" is implemented in terms of clone, 235 * Notice that "fork()" is implemented in terms of clone,
239 * with parameters (SIGCHLD, 0). 236 * with parameters (SIGCHLD, 0).
240 */ 237 */
241 int 238 int
242 alpha_clone(unsigned long clone_flags, unsigned long usp, 239 alpha_clone(unsigned long clone_flags, unsigned long usp,
243 int __user *parent_tid, int __user *child_tid, 240 int __user *parent_tid, int __user *child_tid,
244 unsigned long tls_value, struct pt_regs *regs) 241 unsigned long tls_value, struct pt_regs *regs)
245 { 242 {
246 if (!usp) 243 if (!usp)
247 usp = rdusp(); 244 usp = rdusp();
248 245
249 return do_fork(clone_flags, usp, regs, 0, parent_tid, child_tid); 246 return do_fork(clone_flags, usp, regs, 0, parent_tid, child_tid);
250 } 247 }
251 248
252 int 249 int
253 alpha_vfork(struct pt_regs *regs) 250 alpha_vfork(struct pt_regs *regs)
254 { 251 {
255 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), 252 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(),
256 regs, 0, NULL, NULL); 253 regs, 0, NULL, NULL);
257 } 254 }
258 255
259 /* 256 /*
260 * Copy an alpha thread.. 257 * Copy an alpha thread..
261 * 258 *
262 * Note the "stack_offset" stuff: when returning to kernel mode, we need 259 * Note the "stack_offset" stuff: when returning to kernel mode, we need
263 * to have some extra stack-space for the kernel stack that still exists 260 * to have some extra stack-space for the kernel stack that still exists
264 * after the "ret_from_fork". When returning to user mode, we only want 261 * after the "ret_from_fork". When returning to user mode, we only want
265 * the space needed by the syscall stack frame (ie "struct pt_regs"). 262 * the space needed by the syscall stack frame (ie "struct pt_regs").
266 * Use the passed "regs" pointer to determine how much space we need 263 * Use the passed "regs" pointer to determine how much space we need
267 * for a kernel fork(). 264 * for a kernel fork().
268 */ 265 */
269 266
270 int 267 int
271 copy_thread(int nr, unsigned long clone_flags, unsigned long usp, 268 copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
272 unsigned long unused, 269 unsigned long unused,
273 struct task_struct * p, struct pt_regs * regs) 270 struct task_struct * p, struct pt_regs * regs)
274 { 271 {
275 extern void ret_from_fork(void); 272 extern void ret_from_fork(void);
276 273
277 struct thread_info *childti = p->thread_info; 274 struct thread_info *childti = p->thread_info;
278 struct pt_regs * childregs; 275 struct pt_regs * childregs;
279 struct switch_stack * childstack, *stack; 276 struct switch_stack * childstack, *stack;
280 unsigned long stack_offset, settls; 277 unsigned long stack_offset, settls;
281 278
282 stack_offset = PAGE_SIZE - sizeof(struct pt_regs); 279 stack_offset = PAGE_SIZE - sizeof(struct pt_regs);
283 if (!(regs->ps & 8)) 280 if (!(regs->ps & 8))
284 stack_offset = (PAGE_SIZE-1) & (unsigned long) regs; 281 stack_offset = (PAGE_SIZE-1) & (unsigned long) regs;
285 childregs = (struct pt_regs *) 282 childregs = (struct pt_regs *)
286 (stack_offset + PAGE_SIZE + (long) childti); 283 (stack_offset + PAGE_SIZE + (long) childti);
287 284
288 *childregs = *regs; 285 *childregs = *regs;
289 settls = regs->r20; 286 settls = regs->r20;
290 childregs->r0 = 0; 287 childregs->r0 = 0;
291 childregs->r19 = 0; 288 childregs->r19 = 0;
292 childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */ 289 childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */
293 regs->r20 = 0; 290 regs->r20 = 0;
294 stack = ((struct switch_stack *) regs) - 1; 291 stack = ((struct switch_stack *) regs) - 1;
295 childstack = ((struct switch_stack *) childregs) - 1; 292 childstack = ((struct switch_stack *) childregs) - 1;
296 *childstack = *stack; 293 *childstack = *stack;
297 childstack->r26 = (unsigned long) ret_from_fork; 294 childstack->r26 = (unsigned long) ret_from_fork;
298 childti->pcb.usp = usp; 295 childti->pcb.usp = usp;
299 childti->pcb.ksp = (unsigned long) childstack; 296 childti->pcb.ksp = (unsigned long) childstack;
300 childti->pcb.flags = 1; /* set FEN, clear everything else */ 297 childti->pcb.flags = 1; /* set FEN, clear everything else */
301 298
302 /* Set a new TLS for the child thread? Peek back into the 299 /* Set a new TLS for the child thread? Peek back into the
303 syscall arguments that we saved on syscall entry. Oops, 300 syscall arguments that we saved on syscall entry. Oops,
304 except we'd have clobbered it with the parent/child set 301 except we'd have clobbered it with the parent/child set
305 of r20. Read the saved copy. */ 302 of r20. Read the saved copy. */
306 /* Note: if CLONE_SETTLS is not set, then we must inherit the 303 /* Note: if CLONE_SETTLS is not set, then we must inherit the
307 value from the parent, which will have been set by the block 304 value from the parent, which will have been set by the block
308 copy in dup_task_struct. This is non-intuitive, but is 305 copy in dup_task_struct. This is non-intuitive, but is
309 required for proper operation in the case of a threaded 306 required for proper operation in the case of a threaded
310 application calling fork. */ 307 application calling fork. */
311 if (clone_flags & CLONE_SETTLS) 308 if (clone_flags & CLONE_SETTLS)
312 childti->pcb.unique = settls; 309 childti->pcb.unique = settls;
313 310
314 return 0; 311 return 0;
315 } 312 }
316 313
317 /* 314 /*
318 * Fill in the user structure for an ECOFF core dump. 315 * Fill in the user structure for an ECOFF core dump.
319 */ 316 */
320 void 317 void
321 dump_thread(struct pt_regs * pt, struct user * dump) 318 dump_thread(struct pt_regs * pt, struct user * dump)
322 { 319 {
323 /* switch stack follows right below pt_regs: */ 320 /* switch stack follows right below pt_regs: */
324 struct switch_stack * sw = ((struct switch_stack *) pt) - 1; 321 struct switch_stack * sw = ((struct switch_stack *) pt) - 1;
325 322
326 dump->magic = CMAGIC; 323 dump->magic = CMAGIC;
327 dump->start_code = current->mm->start_code; 324 dump->start_code = current->mm->start_code;
328 dump->start_data = current->mm->start_data; 325 dump->start_data = current->mm->start_data;
329 dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); 326 dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
330 dump->u_tsize = ((current->mm->end_code - dump->start_code) 327 dump->u_tsize = ((current->mm->end_code - dump->start_code)
331 >> PAGE_SHIFT); 328 >> PAGE_SHIFT);
332 dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data) 329 dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data)
333 >> PAGE_SHIFT); 330 >> PAGE_SHIFT);
334 dump->u_ssize = (current->mm->start_stack - dump->start_stack 331 dump->u_ssize = (current->mm->start_stack - dump->start_stack
335 + PAGE_SIZE-1) >> PAGE_SHIFT; 332 + PAGE_SIZE-1) >> PAGE_SHIFT;
336 333
337 /* 334 /*
338 * We store the registers in an order/format that is 335 * We store the registers in an order/format that is
339 * compatible with DEC Unix/OSF/1 as this makes life easier 336 * compatible with DEC Unix/OSF/1 as this makes life easier
340 * for gdb. 337 * for gdb.
341 */ 338 */
342 dump->regs[EF_V0] = pt->r0; 339 dump->regs[EF_V0] = pt->r0;
343 dump->regs[EF_T0] = pt->r1; 340 dump->regs[EF_T0] = pt->r1;
344 dump->regs[EF_T1] = pt->r2; 341 dump->regs[EF_T1] = pt->r2;
345 dump->regs[EF_T2] = pt->r3; 342 dump->regs[EF_T2] = pt->r3;
346 dump->regs[EF_T3] = pt->r4; 343 dump->regs[EF_T3] = pt->r4;
347 dump->regs[EF_T4] = pt->r5; 344 dump->regs[EF_T4] = pt->r5;
348 dump->regs[EF_T5] = pt->r6; 345 dump->regs[EF_T5] = pt->r6;
349 dump->regs[EF_T6] = pt->r7; 346 dump->regs[EF_T6] = pt->r7;
350 dump->regs[EF_T7] = pt->r8; 347 dump->regs[EF_T7] = pt->r8;
351 dump->regs[EF_S0] = sw->r9; 348 dump->regs[EF_S0] = sw->r9;
352 dump->regs[EF_S1] = sw->r10; 349 dump->regs[EF_S1] = sw->r10;
353 dump->regs[EF_S2] = sw->r11; 350 dump->regs[EF_S2] = sw->r11;
354 dump->regs[EF_S3] = sw->r12; 351 dump->regs[EF_S3] = sw->r12;
355 dump->regs[EF_S4] = sw->r13; 352 dump->regs[EF_S4] = sw->r13;
356 dump->regs[EF_S5] = sw->r14; 353 dump->regs[EF_S5] = sw->r14;
357 dump->regs[EF_S6] = sw->r15; 354 dump->regs[EF_S6] = sw->r15;
358 dump->regs[EF_A3] = pt->r19; 355 dump->regs[EF_A3] = pt->r19;
359 dump->regs[EF_A4] = pt->r20; 356 dump->regs[EF_A4] = pt->r20;
360 dump->regs[EF_A5] = pt->r21; 357 dump->regs[EF_A5] = pt->r21;
361 dump->regs[EF_T8] = pt->r22; 358 dump->regs[EF_T8] = pt->r22;
362 dump->regs[EF_T9] = pt->r23; 359 dump->regs[EF_T9] = pt->r23;
363 dump->regs[EF_T10] = pt->r24; 360 dump->regs[EF_T10] = pt->r24;
364 dump->regs[EF_T11] = pt->r25; 361 dump->regs[EF_T11] = pt->r25;
365 dump->regs[EF_RA] = pt->r26; 362 dump->regs[EF_RA] = pt->r26;
366 dump->regs[EF_T12] = pt->r27; 363 dump->regs[EF_T12] = pt->r27;
367 dump->regs[EF_AT] = pt->r28; 364 dump->regs[EF_AT] = pt->r28;
368 dump->regs[EF_SP] = rdusp(); 365 dump->regs[EF_SP] = rdusp();
369 dump->regs[EF_PS] = pt->ps; 366 dump->regs[EF_PS] = pt->ps;
370 dump->regs[EF_PC] = pt->pc; 367 dump->regs[EF_PC] = pt->pc;
371 dump->regs[EF_GP] = pt->gp; 368 dump->regs[EF_GP] = pt->gp;
372 dump->regs[EF_A0] = pt->r16; 369 dump->regs[EF_A0] = pt->r16;
373 dump->regs[EF_A1] = pt->r17; 370 dump->regs[EF_A1] = pt->r17;
374 dump->regs[EF_A2] = pt->r18; 371 dump->regs[EF_A2] = pt->r18;
375 memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8); 372 memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8);
376 } 373 }
377 374
378 /* 375 /*
379 * Fill in the user structure for a ELF core dump. 376 * Fill in the user structure for a ELF core dump.
380 */ 377 */
381 void 378 void
382 dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti) 379 dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
383 { 380 {
384 /* switch stack follows right below pt_regs: */ 381 /* switch stack follows right below pt_regs: */
385 struct switch_stack * sw = ((struct switch_stack *) pt) - 1; 382 struct switch_stack * sw = ((struct switch_stack *) pt) - 1;
386 383
387 dest[ 0] = pt->r0; 384 dest[ 0] = pt->r0;
388 dest[ 1] = pt->r1; 385 dest[ 1] = pt->r1;
389 dest[ 2] = pt->r2; 386 dest[ 2] = pt->r2;
390 dest[ 3] = pt->r3; 387 dest[ 3] = pt->r3;
391 dest[ 4] = pt->r4; 388 dest[ 4] = pt->r4;
392 dest[ 5] = pt->r5; 389 dest[ 5] = pt->r5;
393 dest[ 6] = pt->r6; 390 dest[ 6] = pt->r6;
394 dest[ 7] = pt->r7; 391 dest[ 7] = pt->r7;
395 dest[ 8] = pt->r8; 392 dest[ 8] = pt->r8;
396 dest[ 9] = sw->r9; 393 dest[ 9] = sw->r9;
397 dest[10] = sw->r10; 394 dest[10] = sw->r10;
398 dest[11] = sw->r11; 395 dest[11] = sw->r11;
399 dest[12] = sw->r12; 396 dest[12] = sw->r12;
400 dest[13] = sw->r13; 397 dest[13] = sw->r13;
401 dest[14] = sw->r14; 398 dest[14] = sw->r14;
402 dest[15] = sw->r15; 399 dest[15] = sw->r15;
403 dest[16] = pt->r16; 400 dest[16] = pt->r16;
404 dest[17] = pt->r17; 401 dest[17] = pt->r17;
405 dest[18] = pt->r18; 402 dest[18] = pt->r18;
406 dest[19] = pt->r19; 403 dest[19] = pt->r19;
407 dest[20] = pt->r20; 404 dest[20] = pt->r20;
408 dest[21] = pt->r21; 405 dest[21] = pt->r21;
409 dest[22] = pt->r22; 406 dest[22] = pt->r22;
410 dest[23] = pt->r23; 407 dest[23] = pt->r23;
411 dest[24] = pt->r24; 408 dest[24] = pt->r24;
412 dest[25] = pt->r25; 409 dest[25] = pt->r25;
413 dest[26] = pt->r26; 410 dest[26] = pt->r26;
414 dest[27] = pt->r27; 411 dest[27] = pt->r27;
415 dest[28] = pt->r28; 412 dest[28] = pt->r28;
416 dest[29] = pt->gp; 413 dest[29] = pt->gp;
417 dest[30] = rdusp(); 414 dest[30] = rdusp();
418 dest[31] = pt->pc; 415 dest[31] = pt->pc;
419 416
420 /* Once upon a time this was the PS value. Which is stupid 417 /* Once upon a time this was the PS value. Which is stupid
421 since that is always 8 for usermode. Usurped for the more 418 since that is always 8 for usermode. Usurped for the more
422 useful value of the thread's UNIQUE field. */ 419 useful value of the thread's UNIQUE field. */
423 dest[32] = ti->pcb.unique; 420 dest[32] = ti->pcb.unique;
424 } 421 }
425 422
426 int 423 int
427 dump_elf_task(elf_greg_t *dest, struct task_struct *task) 424 dump_elf_task(elf_greg_t *dest, struct task_struct *task)
428 { 425 {
429 struct thread_info *ti; 426 struct thread_info *ti;
430 struct pt_regs *pt; 427 struct pt_regs *pt;
431 428
432 ti = task->thread_info; 429 ti = task->thread_info;
433 pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1; 430 pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
434 431
435 dump_elf_thread(dest, pt, ti); 432 dump_elf_thread(dest, pt, ti);
436 433
437 return 1; 434 return 1;
438 } 435 }
439 436
440 int 437 int
441 dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task) 438 dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
442 { 439 {
443 struct thread_info *ti; 440 struct thread_info *ti;
444 struct pt_regs *pt; 441 struct pt_regs *pt;
445 struct switch_stack *sw; 442 struct switch_stack *sw;
446 443
447 ti = task->thread_info; 444 ti = task->thread_info;
448 pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1; 445 pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
449 sw = (struct switch_stack *)pt - 1; 446 sw = (struct switch_stack *)pt - 1;
450 447
451 memcpy(dest, sw->fp, 32 * 8); 448 memcpy(dest, sw->fp, 32 * 8);
452 449
453 return 1; 450 return 1;
454 } 451 }
455 452
456 /* 453 /*
457 * sys_execve() executes a new program. 454 * sys_execve() executes a new program.
458 */ 455 */
459 asmlinkage int 456 asmlinkage int
460 do_sys_execve(char __user *ufilename, char __user * __user *argv, 457 do_sys_execve(char __user *ufilename, char __user * __user *argv,
461 char __user * __user *envp, struct pt_regs *regs) 458 char __user * __user *envp, struct pt_regs *regs)
462 { 459 {
463 int error; 460 int error;
464 char *filename; 461 char *filename;
465 462
466 filename = getname(ufilename); 463 filename = getname(ufilename);
467 error = PTR_ERR(filename); 464 error = PTR_ERR(filename);
468 if (IS_ERR(filename)) 465 if (IS_ERR(filename))
469 goto out; 466 goto out;
470 error = do_execve(filename, argv, envp, regs); 467 error = do_execve(filename, argv, envp, regs);
471 putname(filename); 468 putname(filename);
472 out: 469 out:
473 return error; 470 return error;
474 } 471 }
475 472
476 /* 473 /*
477 * Return saved PC of a blocked thread. This assumes the frame 474 * Return saved PC of a blocked thread. This assumes the frame
478 * pointer is the 6th saved long on the kernel stack and that the 475 * pointer is the 6th saved long on the kernel stack and that the
479 * saved return address is the first long in the frame. This all 476 * saved return address is the first long in the frame. This all
480 * holds provided the thread blocked through a call to schedule() ($15 477 * holds provided the thread blocked through a call to schedule() ($15
481 * is the frame pointer in schedule() and $15 is saved at offset 48 by 478 * is the frame pointer in schedule() and $15 is saved at offset 48 by
482 * entry.S:do_switch_stack). 479 * entry.S:do_switch_stack).
483 * 480 *
484 * Under heavy swap load I've seen this lose in an ugly way. So do 481 * Under heavy swap load I've seen this lose in an ugly way. So do
485 * some extra sanity checking on the ranges we expect these pointers 482 * some extra sanity checking on the ranges we expect these pointers
486 * to be in so that we can fail gracefully. This is just for ps after 483 * to be in so that we can fail gracefully. This is just for ps after
487 * all. -- r~ 484 * all. -- r~
488 */ 485 */
489 486
490 unsigned long 487 unsigned long
491 thread_saved_pc(task_t *t) 488 thread_saved_pc(task_t *t)
492 { 489 {
493 unsigned long base = (unsigned long)t->thread_info; 490 unsigned long base = (unsigned long)t->thread_info;
494 unsigned long fp, sp = t->thread_info->pcb.ksp; 491 unsigned long fp, sp = t->thread_info->pcb.ksp;
495 492
496 if (sp > base && sp+6*8 < base + 16*1024) { 493 if (sp > base && sp+6*8 < base + 16*1024) {
497 fp = ((unsigned long*)sp)[6]; 494 fp = ((unsigned long*)sp)[6];
498 if (fp > sp && fp < base + 16*1024) 495 if (fp > sp && fp < base + 16*1024)
499 return *(unsigned long *)fp; 496 return *(unsigned long *)fp;
500 } 497 }
501 498
502 return 0; 499 return 0;
503 } 500 }
504 501
505 unsigned long 502 unsigned long
506 get_wchan(struct task_struct *p) 503 get_wchan(struct task_struct *p)
507 { 504 {
508 unsigned long schedule_frame; 505 unsigned long schedule_frame;
509 unsigned long pc; 506 unsigned long pc;
510 if (!p || p == current || p->state == TASK_RUNNING) 507 if (!p || p == current || p->state == TASK_RUNNING)
511 return 0; 508 return 0;
512 /* 509 /*
513 * This one depends on the frame size of schedule(). Do a 510 * This one depends on the frame size of schedule(). Do a
514 * "disass schedule" in gdb to find the frame size. Also, the 511 * "disass schedule" in gdb to find the frame size. Also, the
515 * code assumes that sleep_on() follows immediately after 512 * code assumes that sleep_on() follows immediately after
516 * interruptible_sleep_on() and that add_timer() follows 513 * interruptible_sleep_on() and that add_timer() follows
517 * immediately after interruptible_sleep(). Ugly, isn't it? 514 * immediately after interruptible_sleep(). Ugly, isn't it?
518 * Maybe adding a wchan field to task_struct would be better, 515 * Maybe adding a wchan field to task_struct would be better,
519 * after all... 516 * after all...
520 */ 517 */
521 518
522 pc = thread_saved_pc(p); 519 pc = thread_saved_pc(p);
523 if (in_sched_functions(pc)) { 520 if (in_sched_functions(pc)) {
524 schedule_frame = ((unsigned long *)p->thread_info->pcb.ksp)[6]; 521 schedule_frame = ((unsigned long *)p->thread_info->pcb.ksp)[6];
525 return ((unsigned long *)schedule_frame)[12]; 522 return ((unsigned long *)schedule_frame)[12];
526 } 523 }
527 return pc; 524 return pc;
528 } 525 }
529 526
arch/arm/kernel/process.c
1 /* 1 /*
2 * linux/arch/arm/kernel/process.c 2 * linux/arch/arm/kernel/process.c
3 * 3 *
4 * Copyright (C) 1996-2000 Russell King - Converted to ARM. 4 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
5 * Original Copyright (C) 1995 Linus Torvalds 5 * Original Copyright (C) 1995 Linus Torvalds
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 #include <stdarg.h> 11 #include <stdarg.h>
12 12
13 #include <linux/config.h> 13 #include <linux/config.h>
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/sched.h> 15 #include <linux/sched.h>
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/mm.h> 17 #include <linux/mm.h>
18 #include <linux/stddef.h> 18 #include <linux/stddef.h>
19 #include <linux/unistd.h> 19 #include <linux/unistd.h>
20 #include <linux/ptrace.h> 20 #include <linux/ptrace.h>
21 #include <linux/slab.h> 21 #include <linux/slab.h>
22 #include <linux/user.h> 22 #include <linux/user.h>
23 #include <linux/a.out.h> 23 #include <linux/a.out.h>
24 #include <linux/delay.h> 24 #include <linux/delay.h>
25 #include <linux/reboot.h> 25 #include <linux/reboot.h>
26 #include <linux/interrupt.h> 26 #include <linux/interrupt.h>
27 #include <linux/kallsyms.h> 27 #include <linux/kallsyms.h>
28 #include <linux/init.h> 28 #include <linux/init.h>
29 29
30 #include <asm/system.h> 30 #include <asm/system.h>
31 #include <asm/io.h> 31 #include <asm/io.h>
32 #include <asm/leds.h> 32 #include <asm/leds.h>
33 #include <asm/processor.h> 33 #include <asm/processor.h>
34 #include <asm/uaccess.h> 34 #include <asm/uaccess.h>
35 #include <asm/mach/time.h> 35 #include <asm/mach/time.h>
36 36
37 extern const char *processor_modes[]; 37 extern const char *processor_modes[];
38 extern void setup_mm_for_reboot(char mode); 38 extern void setup_mm_for_reboot(char mode);
39 39
40 static volatile int hlt_counter; 40 static volatile int hlt_counter;
41 41
42 #include <asm/arch/system.h> 42 #include <asm/arch/system.h>
43 43
44 void disable_hlt(void) 44 void disable_hlt(void)
45 { 45 {
46 hlt_counter++; 46 hlt_counter++;
47 } 47 }
48 48
49 EXPORT_SYMBOL(disable_hlt); 49 EXPORT_SYMBOL(disable_hlt);
50 50
51 void enable_hlt(void) 51 void enable_hlt(void)
52 { 52 {
53 hlt_counter--; 53 hlt_counter--;
54 } 54 }
55 55
56 EXPORT_SYMBOL(enable_hlt); 56 EXPORT_SYMBOL(enable_hlt);
57 57
/* "nohlt" boot parameter: permanently veto idle-time halt. */
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
63 63
/* "hlt" boot parameter: clear any halt veto set by "nohlt". */
static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}

__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
72 72
/*
 * Platform power-management hooks.  pm_idle, when set, is preferred
 * over default_idle by cpu_idle(); pm_power_off, when set, is called
 * from machine_power_off().
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
81 81
/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
void default_idle(void)
{
	local_irq_disable();
	/* Halt only if nothing is runnable and halting is not vetoed. */
	if (!need_resched() && !hlt_counter) {
		/* Reprogram the dynamic-tick timer before going idle. */
		timer_dyn_reprogram();
		arch_idle();
	}
	local_irq_enable();
}
95 95
/*
 * The idle thread.  We try to conserve power, while trying to keep
 * overall latency low.  The architecture specific idle is passed
 * a value to indicate the level of "idleness" of the system.
 */
void cpu_idle(void)
{
	local_fiq_enable();

	/* endless idle loop with no priority at all */
	while (1) {
		/* Prefer the platform hook; fall back to default_idle. */
		void (*idle)(void) = pm_idle;
		if (!idle)
			idle = default_idle;
		preempt_disable();
		leds_event(led_idle_start);
		while (!need_resched())
			idle();
		leds_event(led_idle_end);
		preempt_enable();
		schedule();
	}
}
119 119
/* Reboot mode, settable via "reboot=": 'h' (hard, default) or 's' (soft). */
/* NOTE(review): only str[0] is consumed -- confirm accepted values
 * against arch_reset()/setup_mm_for_reboot(). */
static char reboot_mode = 'h';

int __init reboot_setup(char *str)
{
	reboot_mode = str[0];
	return 1;
}

__setup("reboot=", reboot_setup);
129 129
/* Halting is not supported in hardware on this platform: no-op. */
void machine_halt(void)
{
}

/* Power off via the registered pm_power_off hook, if any. */
void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
}

143 141
/*
 * Restart the machine.  Flushes and disables the caches, inserts the
 * 1:1 mappings needed for a soft reboot, then calls the architecture
 * reset code.  Does not return; if the reset fails we report it and
 * spin forever.
 */
void machine_restart(char * __unused)
{
	/*
	 * Clean and disable cache, and turn off interrupts
	 */
	cpu_proc_fin();

	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot(reboot_mode);

	/*
	 * Now call the architecture specific reboot code.
	 */
	arch_reset(reboot_mode);

	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user!
	 */
	mdelay(1000);
	printk("Reboot failed -- System halted\n");
	while (1);
}

/*
 * Dump the saved user/kernel register state in @regs to the console:
 * PC/LR (with symbol names), general registers, CPSR flags, and the
 * CP15 control, translation-base and domain-access registers.
 */
void __show_regs(struct pt_regs *regs)
{
	unsigned long flags = condition_codes(regs);

	printk("CPU: %d\n", smp_processor_id());
	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", regs->ARM_lr);
	printk("pc : [<%08lx>]    lr : [<%08lx>]    %s\n"
	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
		instruction_pointer(regs),
		regs->ARM_lr, print_tainted(), regs->ARM_sp,
		regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);
	printk("Flags: %c%c%c%c",
		flags & PSR_N_BIT ? 'N' : 'n',
		flags & PSR_Z_BIT ? 'Z' : 'z',
		flags & PSR_C_BIT ? 'C' : 'c',
		flags & PSR_V_BIT ? 'V' : 'v');
	printk("  IRQs o%s  FIQs o%s  Mode %s%s  Segment %s\n",
		interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		thumb_mode(regs) ? " (T)" : "",
		get_fs() == get_ds() ? "kernel" : "user");
	{
		/* Read CP15 c1 (control), c2 (TTB) and c3 (DAC) directly. */
		unsigned int ctrl, transbase, dac;
		__asm__ (
		"	mrc p15, 0, %0, c1, c0\n"
		"	mrc p15, 0, %1, c2, c0\n"
		"	mrc p15, 0, %2, c3, c0\n"
		: "=r" (ctrl), "=r" (transbase), "=r" (dac));
		printk("Control: %04X  Table: %08X  DAC: %08X\n",
		  	ctrl, transbase, dac);
	}
}
217 213
/* Dump current task identity, register state and a backtrace. */
void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
	__show_regs(regs);
	__backtrace();
}
225 221
/*
 * Dump the eight FPA floating-point registers in @regs, tagging each
 * with its stored format ('f' single, 'd' double, 'e' extended,
 * '?' unknown/uninitialised), followed by FPSR and FPCR.
 */
void show_fpregs(struct user_fp *regs)
{
	int i;

	for (i = 0; i < 8; i++) {
		unsigned long *p;
		char type;

		p = (unsigned long *)(regs->fpregs + i);

		switch (regs->ftype[i]) {
			case 1: type = 'f'; break;
			case 2: type = 'd'; break;
			case 3: type = 'e'; break;
			default: type = '?'; break;
		}
		if (regs->init_flag)
			type = '?';

		printk("  f%d(%c): %08lx %08lx %08lx%c",
			i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' ');
	}


	printk("FPSR: %08lx FPCR: %08lx\n",
		(unsigned long)regs->fpsr,
		(unsigned long)regs->fpcr);
}
254 250
255 /* 251 /*
256 * Task structure and kernel stack allocation. 252 * Task structure and kernel stack allocation.
257 */ 253 */
258 static unsigned long *thread_info_head; 254 static unsigned long *thread_info_head;
259 static unsigned int nr_thread_info; 255 static unsigned int nr_thread_info;
260 256
261 #define EXTRA_TASK_STRUCT 4 257 #define EXTRA_TASK_STRUCT 4
262 258
/*
 * Allocate a thread_info (which also holds the kernel stack).  Up to
 * EXTRA_TASK_STRUCT recently-freed entries are recycled from a simple
 * singly-linked free list (first word of each entry is the next
 * pointer); otherwise fall back to the page allocator.
 * Note: @task is unused.  Returns NULL on allocation failure.
 */
struct thread_info *alloc_thread_info(struct task_struct *task)
{
	struct thread_info *thread = NULL;

	if (EXTRA_TASK_STRUCT) {
		unsigned long *p = thread_info_head;

		if (p) {
			thread_info_head = (unsigned long *)p[0];
			nr_thread_info -= 1;
		}
		thread = (struct thread_info *)p;
	}

	if (!thread)
		thread = (struct thread_info *)
			   __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);

#ifdef CONFIG_DEBUG_STACK_USAGE
	/*
	 * The stack must be cleared if you want SYSRQ-T to
	 * give sensible stack usage information
	 */
	if (thread)
		memzero(thread, THREAD_SIZE);
#endif
	return thread;
}
291 287
292 void free_thread_info(struct thread_info *thread) 288 void free_thread_info(struct thread_info *thread)
293 { 289 {
294 if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) { 290 if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) {
295 unsigned long *p = (unsigned long *)thread; 291 unsigned long *p = (unsigned long *)thread;
296 p[0] = (unsigned long)thread_info_head; 292 p[0] = (unsigned long)thread_info_head;
297 thread_info_head = p; 293 thread_info_head = p;
298 nr_thread_info += 1; 294 nr_thread_info += 1;
299 } else 295 } else
300 free_pages((unsigned long)thread, THREAD_SIZE_ORDER); 296 free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
301 } 297 }
302 298
/*
 * Free current thread data structures etc..
 * Nothing to do on this architecture.
 */
void exit_thread(void)
{
}
309 305
/* Default FP-state initialiser: zero the whole fp_state union. */
static void default_fp_init(union fp_state *fp)
{
	memset(fp, 0, sizeof(union fp_state));
}

/* FP initialiser hook; an FP emulator/driver may override it. */
void (*fp_init)(union fp_state *) = default_fp_init;
EXPORT_SYMBOL(fp_init);
317 313
/*
 * Reset the current thread's coprocessor/debug/FP (and, if built,
 * IWMMXT/VFP) state, e.g. across exec.
 */
void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	memset(thread->used_cp, 0, sizeof(thread->used_cp));
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
#if defined(CONFIG_IWMMXT)
	iwmmxt_task_release(thread);
#endif
	fp_init(&thread->fpstate);
#if defined(CONFIG_VFP)
	vfp_flush_thread(&thread->vfpstate);
#endif
}
333 329
/* Release per-thread VFP/IWMMXT state held for a dead task. */
void release_thread(struct task_struct *dead_task)
{
#if defined(CONFIG_VFP)
	vfp_release_thread(&dead_task->thread_info->vfpstate);
#endif
#if defined(CONFIG_IWMMXT)
	iwmmxt_task_release(dead_task->thread_info);
#endif
}
343 339
/* Assembly entry point new children start executing at. */
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

/*
 * Set up the kernel stack and CPU context of a freshly-copied task:
 * place a copy of the parent's pt_regs at the top of the child's
 * stack, make the child see a zero return value and the requested
 * user stack pointer, and aim its saved context at ret_from_fork.
 * Returns 0 (cannot fail).
 */
int
copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
	    unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *thread = p->thread_info;
	struct pt_regs *childregs;

	/* pt_regs live just below the top of the child's kernel stack. */
	childregs = ((struct pt_regs *)((unsigned long)thread + THREAD_START_SP)) - 1;
	*childregs = *regs;
	childregs->ARM_r0 = 0;		/* child's fork() return value */
	childregs->ARM_sp = stack_start;

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
	thread->cpu_context.sp = (unsigned long)childregs;
	thread->cpu_context.pc = (unsigned long)ret_from_fork;

	/* CLONE_SETTLS passes the new TLS value in r3. */
	if (clone_flags & CLONE_SETTLS)
		thread->tp_value = regs->ARM_r3;

	return 0;
}
367 363
/*
 * fill in the fpe structure for a core dump...
 * Returns non-zero iff the task has used FP (coprocessors 1 or 2),
 * in which case @fp is filled from the soft FP state.
 */
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
	struct thread_info *thread = current_thread_info();
	int used_math = thread->used_cp[1] | thread->used_cp[2];

	if (used_math)
		memcpy(fp, &thread->fpstate.soft, sizeof (*fp));

	return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);
382 378
/*
 * fill in the user structure for a core dump..
 * Sizes are in pages; stack size is estimated only when the stack
 * lies below the 0x04000000 boundary used by a.out dumps.
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	struct task_struct *tsk = current;

	dump->magic = CMAGIC;
	dump->start_code = tsk->mm->start_code;
	dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);

	dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
	dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
	dump->u_ssize = 0;

	/* Ptrace breakpoint bookkeeping, exposed via u_debugreg. */
	dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
	dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
	dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
	dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
	dump->u_debugreg[4] = tsk->thread.debug.nsaved;

	if (dump->start_stack < 0x04000000)
		dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;

	dump->regs = *regs;
	dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
}
EXPORT_SYMBOL(dump_thread);
411 407
/*
 * Shuffle the argument into the correct register before calling the
 * thread function.  r1 is the thread argument, r2 is the pointer to
 * the thread function, and r3 points to the exit function.
 */
extern void kernel_thread_helper(void);
asm(	".section .text\n"
"	.align\n"
"	.type	kernel_thread_helper, #function\n"
"kernel_thread_helper:\n"
"	mov	r0, r1\n"	/* argument -> r0 */
"	mov	lr, r3\n"	/* return into the exit function */
"	mov	pc, r2\n"	/* jump to the thread function */
"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
"	.previous");
427 423
/*
 * Create a kernel thread.
 * Builds a register frame that lands in kernel_thread_helper (which
 * calls fn(arg) and exits via do_exit) and forks with CLONE_VM.
 * Returns the new pid, or a negative error from do_fork().
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.ARM_r1 = (unsigned long)arg;
	regs.ARM_r2 = (unsigned long)fn;
	regs.ARM_r3 = (unsigned long)do_exit;
	regs.ARM_pc = (unsigned long)kernel_thread_helper;
	regs.ARM_cpsr = SVC_MODE;

	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
446 442
/*
 * Walk a sleeping task's frame-pointer chain (at most 16 frames) and
 * return the first saved LR outside the scheduler; used for /proc
 * "wchan".  Returns 0 for running/current tasks or a broken chain.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, lr;
	unsigned long stack_start, stack_end;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_start = (unsigned long)(p->thread_info + 1);
	stack_end = ((unsigned long)p->thread_info) + THREAD_SIZE;

	fp = thread_saved_fp(p);
	do {
		/* Bail out if the frame pointer leaves the kernel stack. */
		if (fp < stack_start || fp > stack_end)
			return 0;
		lr = pc_pointer (((unsigned long *)fp)[-1]);
		if (!in_sched_functions(lr))
			return lr;
		fp = *(unsigned long *) (fp - 12);	/* caller's frame */
	} while (count ++ < 16);
	return 0;
}
EXPORT_SYMBOL(get_wchan);
470 466
arch/arm26/kernel/process.c
1 /* 1 /*
2 * linux/arch/arm26/kernel/process.c 2 * linux/arch/arm26/kernel/process.c
3 * 3 *
4 * Copyright (C) 2003 Ian Molton - adapted for ARM26 4 * Copyright (C) 2003 Ian Molton - adapted for ARM26
5 * Copyright (C) 1996-2000 Russell King - Converted to ARM. 5 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
6 * Origional Copyright (C) 1995 Linus Torvalds 6 * Origional Copyright (C) 1995 Linus Torvalds
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 #include <stdarg.h> 12 #include <stdarg.h>
13 13
14 #include <linux/config.h> 14 #include <linux/config.h>
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/sched.h> 16 #include <linux/sched.h>
17 #include <linux/kernel.h> 17 #include <linux/kernel.h>
18 #include <linux/mm.h> 18 #include <linux/mm.h>
19 #include <linux/stddef.h> 19 #include <linux/stddef.h>
20 #include <linux/unistd.h> 20 #include <linux/unistd.h>
21 #include <linux/ptrace.h> 21 #include <linux/ptrace.h>
22 #include <linux/slab.h> 22 #include <linux/slab.h>
23 #include <linux/user.h> 23 #include <linux/user.h>
24 #include <linux/a.out.h> 24 #include <linux/a.out.h>
25 #include <linux/delay.h> 25 #include <linux/delay.h>
26 #include <linux/reboot.h> 26 #include <linux/reboot.h>
27 #include <linux/interrupt.h> 27 #include <linux/interrupt.h>
28 #include <linux/init.h> 28 #include <linux/init.h>
29 29
30 #include <asm/system.h> 30 #include <asm/system.h>
31 #include <asm/io.h> 31 #include <asm/io.h>
32 #include <asm/leds.h> 32 #include <asm/leds.h>
33 #include <asm/processor.h> 33 #include <asm/processor.h>
34 #include <asm/uaccess.h> 34 #include <asm/uaccess.h>
35 35
36 extern const char *processor_modes[]; 36 extern const char *processor_modes[];
37 extern void setup_mm_for_reboot(char mode); 37 extern void setup_mm_for_reboot(char mode);
38 38
39 static volatile int hlt_counter; 39 static volatile int hlt_counter;
40 40
41 void disable_hlt(void) 41 void disable_hlt(void)
42 { 42 {
43 hlt_counter++; 43 hlt_counter++;
44 } 44 }
45 45
46 EXPORT_SYMBOL(disable_hlt); 46 EXPORT_SYMBOL(disable_hlt);
47 47
48 void enable_hlt(void) 48 void enable_hlt(void)
49 { 49 {
50 hlt_counter--; 50 hlt_counter--;
51 } 51 }
52 52
53 EXPORT_SYMBOL(enable_hlt); 53 EXPORT_SYMBOL(enable_hlt);
54 54
/* "nohlt" boot parameter: permanently veto idle-time halt. */
static int __init nohlt_setup(char *__unused)
{
	hlt_counter = 1;
	return 1;
}
60 60
/* "hlt" boot parameter: clear any halt veto set by "nohlt". */
static int __init hlt_setup(char *__unused)
{
	hlt_counter = 0;
	return 1;
}

__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
69 69
70 /* 70 /*
71 * This is our default idle handler. We need to disable 71 * This is our default idle handler. We need to disable
72 * interrupts here to ensure we don't miss a wakeup call. 72 * interrupts here to ensure we don't miss a wakeup call.
73 */ 73 */
74 void cpu_idle(void) 74 void cpu_idle(void)
75 { 75 {
76 /* endless idle loop with no priority at all */ 76 /* endless idle loop with no priority at all */
77 preempt_disable(); 77 preempt_disable();
78 while (1) { 78 while (1) {
79 while (!need_resched()) { 79 while (!need_resched()) {
80 local_irq_disable(); 80 local_irq_disable();
81 if (!need_resched() && !hlt_counter) 81 if (!need_resched() && !hlt_counter)
82 local_irq_enable(); 82 local_irq_enable();
83 } 83 }
84 } 84 }
85 schedule(); 85 schedule();
86 } 86 }
87 87
/* Reboot mode, settable via "reboot="; only str[0] is consumed. */
static char reboot_mode = 'h';

int __init reboot_setup(char *str)
{
	reboot_mode = str[0];
	return 1;
}

__setup("reboot=", reboot_setup);
97 97
/* ARM26 cant do these but we still need to define them. */
void machine_halt(void)
{
}
void machine_power_off(void)
{
}

108
/*
 * Restart the machine: disable caches/interrupts, set up 1:1 reboot
 * mappings, copy the reset branch instruction to address 0 and jump
 * to it.  Does not return; if the reset fails we report and spin.
 */
void machine_restart(char * __unused)
{
	/*
	 * Clean and disable cache, and turn off interrupts
	 */
	cpu_proc_fin();

	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot(reboot_mode);

	/*
	 * copy branch instruction to reset location and call it
	 */

	*(unsigned long *)0 = *(unsigned long *)0x03800000;
	((void(*)(void))0)();

	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user! Should never happen...
	 */
	mdelay(1000);
	printk("Reboot failed -- System halted\n");
	while (1);
}

140 135
/*
 * Dump the register state in @regs: PC/LR, general registers, PSR
 * flags, IRQ/FIQ state, processor mode and address-space segment.
 */
void show_regs(struct pt_regs * regs)
{
	unsigned long flags;

	flags = condition_codes(regs);

	printk("pc : [<%08lx>]    lr : [<%08lx>]    %s\n"
	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
		instruction_pointer(regs),
		regs->ARM_lr, print_tainted(), regs->ARM_sp,
		regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);
	printk("Flags: %c%c%c%c",
		flags & PSR_N_BIT ? 'N' : 'n',
		flags & PSR_Z_BIT ? 'Z' : 'z',
		flags & PSR_C_BIT ? 'C' : 'c',
		flags & PSR_V_BIT ? 'V' : 'v');
	printk("  IRQs o%s  FIQs o%s  Mode %s  Segment %s\n",
		interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		get_fs() == get_ds() ? "kernel" : "user");
}
172 167
/*
 * Dump the eight FPA floating-point registers in @regs, tagging each
 * with its stored format ('f' single, 'd' double, 'e' extended,
 * '?' unknown/uninitialised), followed by FPSR and FPCR.
 */
void show_fpregs(struct user_fp *regs)
{
	int i;

	for (i = 0; i < 8; i++) {
		unsigned long *p;
		char type;

		p = (unsigned long *)(regs->fpregs + i);

		switch (regs->ftype[i]) {
			case 1: type = 'f'; break;
			case 2: type = 'd'; break;
			case 3: type = 'e'; break;
			default: type = '?'; break;
		}
		if (regs->init_flag)
			type = '?';

		printk("  f%d(%c): %08lx %08lx %08lx%c",
			i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' ');
	}


	printk("FPSR: %08lx FPCR: %08lx\n",
		(unsigned long)regs->fpsr,
		(unsigned long)regs->fpcr);
}
201 196
202 /* 197 /*
203 * Task structure and kernel stack allocation. 198 * Task structure and kernel stack allocation.
204 */ 199 */
205 static unsigned long *thread_info_head; 200 static unsigned long *thread_info_head;
206 static unsigned int nr_thread_info; 201 static unsigned int nr_thread_info;
207 202
208 extern unsigned long get_page_8k(int priority); 203 extern unsigned long get_page_8k(int priority);
209 extern void free_page_8k(unsigned long page); 204 extern void free_page_8k(unsigned long page);
210 205
211 // FIXME - is this valid? 206 // FIXME - is this valid?
212 #define EXTRA_TASK_STRUCT 0 207 #define EXTRA_TASK_STRUCT 0
213 #define ll_alloc_task_struct() ((struct thread_info *)get_page_8k(GFP_KERNEL)) 208 #define ll_alloc_task_struct() ((struct thread_info *)get_page_8k(GFP_KERNEL))
214 #define ll_free_task_struct(p) free_page_8k((unsigned long)(p)) 209 #define ll_free_task_struct(p) free_page_8k((unsigned long)(p))
215 210
216 //FIXME - do we use *task param below looks like we dont, which is ok? 211 //FIXME - do we use *task param below looks like we dont, which is ok?
217 //FIXME - if EXTRA_TASK_STRUCT is zero we can optimise the below away permanently. *IF* its supposed to be zero. 212 //FIXME - if EXTRA_TASK_STRUCT is zero we can optimise the below away permanently. *IF* its supposed to be zero.
218 struct thread_info *alloc_thread_info(struct task_struct *task) 213 struct thread_info *alloc_thread_info(struct task_struct *task)
219 { 214 {
220 struct thread_info *thread = NULL; 215 struct thread_info *thread = NULL;
221 216
222 if (EXTRA_TASK_STRUCT) { 217 if (EXTRA_TASK_STRUCT) {
223 unsigned long *p = thread_info_head; 218 unsigned long *p = thread_info_head;
224 219
225 if (p) { 220 if (p) {
226 thread_info_head = (unsigned long *)p[0]; 221 thread_info_head = (unsigned long *)p[0];
227 nr_thread_info -= 1; 222 nr_thread_info -= 1;
228 } 223 }
229 thread = (struct thread_info *)p; 224 thread = (struct thread_info *)p;
230 } 225 }
231 226
232 if (!thread) 227 if (!thread)
233 thread = ll_alloc_task_struct(); 228 thread = ll_alloc_task_struct();
234 229
235 #ifdef CONFIG_MAGIC_SYSRQ 230 #ifdef CONFIG_MAGIC_SYSRQ
236 /* 231 /*
237 * The stack must be cleared if you want SYSRQ-T to 232 * The stack must be cleared if you want SYSRQ-T to
238 * give sensible stack usage information 233 * give sensible stack usage information
239 */ 234 */
240 if (thread) { 235 if (thread) {
241 char *p = (char *)thread; 236 char *p = (char *)thread;
242 memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE); 237 memzero(p+KERNEL_STACK_SIZE, KERNEL_STACK_SIZE);
243 } 238 }
244 #endif 239 #endif
245 return thread; 240 return thread;
246 } 241 }
247 242
248 void free_thread_info(struct thread_info *thread) 243 void free_thread_info(struct thread_info *thread)
249 { 244 {
250 if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) { 245 if (EXTRA_TASK_STRUCT && nr_thread_info < EXTRA_TASK_STRUCT) {
251 unsigned long *p = (unsigned long *)thread; 246 unsigned long *p = (unsigned long *)thread;
252 p[0] = (unsigned long)thread_info_head; 247 p[0] = (unsigned long)thread_info_head;
253 thread_info_head = p; 248 thread_info_head = p;
254 nr_thread_info += 1; 249 nr_thread_info += 1;
255 } else 250 } else
256 ll_free_task_struct(thread); 251 ll_free_task_struct(thread);
257 } 252 }
258 253
259 /* 254 /*
260 * Free current thread data structures etc.. 255 * Free current thread data structures etc..
261 */ 256 */
262 void exit_thread(void) 257 void exit_thread(void)
263 { 258 {
264 } 259 }
265 260
266 void flush_thread(void) 261 void flush_thread(void)
267 { 262 {
268 struct thread_info *thread = current_thread_info(); 263 struct thread_info *thread = current_thread_info();
269 struct task_struct *tsk = current; 264 struct task_struct *tsk = current;
270 265
271 memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); 266 memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
272 memset(&thread->fpstate, 0, sizeof(union fp_state)); 267 memset(&thread->fpstate, 0, sizeof(union fp_state));
273 268
274 clear_used_math(); 269 clear_used_math();
275 } 270 }
276 271
277 void release_thread(struct task_struct *dead_task) 272 void release_thread(struct task_struct *dead_task)
278 { 273 {
279 } 274 }
280 275
281 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 276 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
282 277
283 int 278 int
284 copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start, 279 copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
285 unsigned long unused, struct task_struct *p, struct pt_regs *regs) 280 unsigned long unused, struct task_struct *p, struct pt_regs *regs)
286 { 281 {
287 struct thread_info *thread = p->thread_info; 282 struct thread_info *thread = p->thread_info;
288 struct pt_regs *childregs; 283 struct pt_regs *childregs;
289 284
290 childregs = __get_user_regs(thread); 285 childregs = __get_user_regs(thread);
291 *childregs = *regs; 286 *childregs = *regs;
292 childregs->ARM_r0 = 0; 287 childregs->ARM_r0 = 0;
293 childregs->ARM_sp = stack_start; 288 childregs->ARM_sp = stack_start;
294 289
295 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save)); 290 memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
296 thread->cpu_context.sp = (unsigned long)childregs; 291 thread->cpu_context.sp = (unsigned long)childregs;
297 thread->cpu_context.pc = (unsigned long)ret_from_fork | MODE_SVC26 | PSR_I_BIT; 292 thread->cpu_context.pc = (unsigned long)ret_from_fork | MODE_SVC26 | PSR_I_BIT;
298 293
299 return 0; 294 return 0;
300 } 295 }
301 296
302 /* 297 /*
303 * fill in the fpe structure for a core dump... 298 * fill in the fpe structure for a core dump...
304 */ 299 */
305 int dump_fpu (struct pt_regs *regs, struct user_fp *fp) 300 int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
306 { 301 {
307 struct thread_info *thread = current_thread_info(); 302 struct thread_info *thread = current_thread_info();
308 int used_math = !!used_math(); 303 int used_math = !!used_math();
309 304
310 if (used_math) 305 if (used_math)
311 memcpy(fp, &thread->fpstate.soft, sizeof (*fp)); 306 memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
312 307
313 return used_math; 308 return used_math;
314 } 309 }
315 310
316 /* 311 /*
317 * fill in the user structure for a core dump.. 312 * fill in the user structure for a core dump..
318 */ 313 */
319 void dump_thread(struct pt_regs * regs, struct user * dump) 314 void dump_thread(struct pt_regs * regs, struct user * dump)
320 { 315 {
321 struct task_struct *tsk = current; 316 struct task_struct *tsk = current;
322 317
323 dump->magic = CMAGIC; 318 dump->magic = CMAGIC;
324 dump->start_code = tsk->mm->start_code; 319 dump->start_code = tsk->mm->start_code;
325 dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1); 320 dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
326 321
327 dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT; 322 dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
328 dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT; 323 dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
329 dump->u_ssize = 0; 324 dump->u_ssize = 0;
330 325
331 dump->u_debugreg[0] = tsk->thread.debug.bp[0].address; 326 dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
332 dump->u_debugreg[1] = tsk->thread.debug.bp[1].address; 327 dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
333 dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn; 328 dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn;
334 dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn; 329 dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn;
335 dump->u_debugreg[4] = tsk->thread.debug.nsaved; 330 dump->u_debugreg[4] = tsk->thread.debug.nsaved;
336 331
337 if (dump->start_stack < 0x04000000) 332 if (dump->start_stack < 0x04000000)
338 dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT; 333 dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
339 334
340 dump->regs = *regs; 335 dump->regs = *regs;
341 dump->u_fpvalid = dump_fpu (regs, &dump->u_fp); 336 dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
342 } 337 }
343 338
344 /* 339 /*
345 * Shuffle the argument into the correct register before calling the 340 * Shuffle the argument into the correct register before calling the
346 * thread function. r1 is the thread argument, r2 is the pointer to 341 * thread function. r1 is the thread argument, r2 is the pointer to
347 * the thread function, and r3 points to the exit function. 342 * the thread function, and r3 points to the exit function.
348 * FIXME - make sure this is right - the older code used to zero fp 343 * FIXME - make sure this is right - the older code used to zero fp
349 * and cause the parent to call sys_exit (do_exit in this version) 344 * and cause the parent to call sys_exit (do_exit in this version)
350 */ 345 */
351 extern void kernel_thread_helper(void); 346 extern void kernel_thread_helper(void);
352 347
353 asm( ".section .text\n" 348 asm( ".section .text\n"
354 " .align\n" 349 " .align\n"
355 " .type kernel_thread_helper, #function\n" 350 " .type kernel_thread_helper, #function\n"
356 "kernel_thread_helper:\n" 351 "kernel_thread_helper:\n"
357 " mov r0, r1\n" 352 " mov r0, r1\n"
358 " mov lr, r3\n" 353 " mov lr, r3\n"
359 " mov pc, r2\n" 354 " mov pc, r2\n"
360 " .size kernel_thread_helper, . - kernel_thread_helper\n" 355 " .size kernel_thread_helper, . - kernel_thread_helper\n"
361 " .previous"); 356 " .previous");
362 357
363 /* 358 /*
364 * Create a kernel thread. 359 * Create a kernel thread.
365 */ 360 */
366 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 361 pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
367 { 362 {
368 struct pt_regs regs; 363 struct pt_regs regs;
369 364
370 memset(&regs, 0, sizeof(regs)); 365 memset(&regs, 0, sizeof(regs));
371 366
372 regs.ARM_r1 = (unsigned long)arg; 367 regs.ARM_r1 = (unsigned long)arg;
373 regs.ARM_r2 = (unsigned long)fn; 368 regs.ARM_r2 = (unsigned long)fn;
374 regs.ARM_r3 = (unsigned long)do_exit; 369 regs.ARM_r3 = (unsigned long)do_exit;
375 regs.ARM_pc = (unsigned long)kernel_thread_helper | MODE_SVC26; 370 regs.ARM_pc = (unsigned long)kernel_thread_helper | MODE_SVC26;
376 371
377 return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); 372 return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
378 } 373 }
379 EXPORT_SYMBOL(kernel_thread); 374 EXPORT_SYMBOL(kernel_thread);
380 375
381 376
382 unsigned long get_wchan(struct task_struct *p) 377 unsigned long get_wchan(struct task_struct *p)
383 { 378 {
384 unsigned long fp, lr; 379 unsigned long fp, lr;
385 unsigned long stack_page; 380 unsigned long stack_page;
386 int count = 0; 381 int count = 0;
387 if (!p || p == current || p->state == TASK_RUNNING) 382 if (!p || p == current || p->state == TASK_RUNNING)
388 return 0; 383 return 0;
389 384
390 stack_page = 4096 + (unsigned long)p; 385 stack_page = 4096 + (unsigned long)p;
391 fp = thread_saved_fp(p); 386 fp = thread_saved_fp(p);
392 do { 387 do {
393 if (fp < stack_page || fp > 4092+stack_page) 388 if (fp < stack_page || fp > 4092+stack_page)
394 return 0; 389 return 0;
395 lr = pc_pointer (((unsigned long *)fp)[-1]); 390 lr = pc_pointer (((unsigned long *)fp)[-1]);
396 if (!in_sched_functions(lr)) 391 if (!in_sched_functions(lr))
397 return lr; 392 return lr;
398 fp = *(unsigned long *) (fp - 12); 393 fp = *(unsigned long *) (fp - 12);
399 } while (count ++ < 16); 394 } while (count ++ < 16);
400 return 0; 395 return 0;
401 } 396 }
402 397
arch/cris/kernel/process.c
1 /* $Id: process.c,v 1.17 2004/04/05 13:53:48 starvik Exp $ 1 /* $Id: process.c,v 1.17 2004/04/05 13:53:48 starvik Exp $
2 * 2 *
3 * linux/arch/cris/kernel/process.c 3 * linux/arch/cris/kernel/process.c
4 * 4 *
5 * Copyright (C) 1995 Linus Torvalds 5 * Copyright (C) 1995 Linus Torvalds
6 * Copyright (C) 2000-2002 Axis Communications AB 6 * Copyright (C) 2000-2002 Axis Communications AB
7 * 7 *
8 * Authors: Bjorn Wesen (bjornw@axis.com) 8 * Authors: Bjorn Wesen (bjornw@axis.com)
9 * 9 *
10 * $Log: process.c,v $ 10 * $Log: process.c,v $
11 * Revision 1.17 2004/04/05 13:53:48 starvik 11 * Revision 1.17 2004/04/05 13:53:48 starvik
12 * Merge of Linux 2.6.5 12 * Merge of Linux 2.6.5
13 * 13 *
14 * Revision 1.16 2003/10/27 08:04:33 starvik 14 * Revision 1.16 2003/10/27 08:04:33 starvik
15 * Merge of Linux 2.6.0-test9 15 * Merge of Linux 2.6.0-test9
16 * 16 *
17 * Revision 1.15 2003/09/11 07:29:52 starvik 17 * Revision 1.15 2003/09/11 07:29:52 starvik
18 * Merge of Linux 2.6.0-test5 18 * Merge of Linux 2.6.0-test5
19 * 19 *
20 * Revision 1.14 2003/06/10 10:21:12 johana 20 * Revision 1.14 2003/06/10 10:21:12 johana
21 * Moved thread_saved_pc() from arch/cris/kernel/process.c to 21 * Moved thread_saved_pc() from arch/cris/kernel/process.c to
22 * subarch specific process.c. arch-v32 has an erp, no irp. 22 * subarch specific process.c. arch-v32 has an erp, no irp.
23 * 23 *
24 * Revision 1.13 2003/04/09 05:20:47 starvik 24 * Revision 1.13 2003/04/09 05:20:47 starvik
25 * Merge of Linux 2.5.67 25 * Merge of Linux 2.5.67
26 * 26 *
27 * Revision 1.12 2002/12/11 15:41:11 starvik 27 * Revision 1.12 2002/12/11 15:41:11 starvik
28 * Extracted v10 (ETRAX 100LX) specific stuff to arch/cris/arch-v10/kernel 28 * Extracted v10 (ETRAX 100LX) specific stuff to arch/cris/arch-v10/kernel
29 * 29 *
30 * Revision 1.11 2002/12/10 09:00:10 starvik 30 * Revision 1.11 2002/12/10 09:00:10 starvik
31 * Merge of Linux 2.5.51 31 * Merge of Linux 2.5.51
32 * 32 *
33 * Revision 1.10 2002/11/27 08:42:34 starvik 33 * Revision 1.10 2002/11/27 08:42:34 starvik
34 * Argument to user_regs() is thread_info* 34 * Argument to user_regs() is thread_info*
35 * 35 *
36 * Revision 1.9 2002/11/26 09:44:21 starvik 36 * Revision 1.9 2002/11/26 09:44:21 starvik
37 * New threads exits through ret_from_fork (necessary for preemptive scheduling) 37 * New threads exits through ret_from_fork (necessary for preemptive scheduling)
38 * 38 *
39 * Revision 1.8 2002/11/19 14:35:24 starvik 39 * Revision 1.8 2002/11/19 14:35:24 starvik
40 * Changes from linux 2.4 40 * Changes from linux 2.4
41 * Changed struct initializer syntax to the currently prefered notation 41 * Changed struct initializer syntax to the currently prefered notation
42 * 42 *
43 * Revision 1.7 2002/11/18 07:39:42 starvik 43 * Revision 1.7 2002/11/18 07:39:42 starvik
44 * thread_saved_pc moved here from processor.h 44 * thread_saved_pc moved here from processor.h
45 * 45 *
46 * Revision 1.6 2002/11/14 06:51:27 starvik 46 * Revision 1.6 2002/11/14 06:51:27 starvik
47 * Made cpu_idle more similar with other archs 47 * Made cpu_idle more similar with other archs
48 * init_task_union -> init_thread_union 48 * init_task_union -> init_thread_union
49 * Updated for new interrupt macros 49 * Updated for new interrupt macros
50 * sys_clone and do_fork have a new argument, user_tid 50 * sys_clone and do_fork have a new argument, user_tid
51 * 51 *
52 * Revision 1.5 2002/11/05 06:45:11 starvik 52 * Revision 1.5 2002/11/05 06:45:11 starvik
53 * Merge of Linux 2.5.45 53 * Merge of Linux 2.5.45
54 * 54 *
55 * Revision 1.4 2002/02/05 15:37:44 bjornw 55 * Revision 1.4 2002/02/05 15:37:44 bjornw
56 * Need init_task.h 56 * Need init_task.h
57 * 57 *
58 * Revision 1.3 2002/01/21 15:22:49 bjornw 58 * Revision 1.3 2002/01/21 15:22:49 bjornw
59 * current->counter is gone 59 * current->counter is gone
60 * 60 *
61 * Revision 1.22 2001/11/13 09:40:43 orjanf 61 * Revision 1.22 2001/11/13 09:40:43 orjanf
62 * Added dump_fpu (needed for core dumps). 62 * Added dump_fpu (needed for core dumps).
63 * 63 *
64 * Revision 1.21 2001/11/12 18:26:21 pkj 64 * Revision 1.21 2001/11/12 18:26:21 pkj
65 * Fixed compiler warnings. 65 * Fixed compiler warnings.
66 * 66 *
67 * Revision 1.20 2001/10/03 08:21:39 jonashg 67 * Revision 1.20 2001/10/03 08:21:39 jonashg
68 * cause_of_death does not exist if CONFIG_SVINTO_SIM is defined. 68 * cause_of_death does not exist if CONFIG_SVINTO_SIM is defined.
69 * 69 *
70 * Revision 1.19 2001/09/26 11:52:54 bjornw 70 * Revision 1.19 2001/09/26 11:52:54 bjornw
71 * INIT_MMAP is gone in 2.4.10 71 * INIT_MMAP is gone in 2.4.10
72 * 72 *
73 * Revision 1.18 2001/08/21 21:43:51 hp 73 * Revision 1.18 2001/08/21 21:43:51 hp
74 * Move last watchdog fix inside #ifdef CONFIG_ETRAX_WATCHDOG 74 * Move last watchdog fix inside #ifdef CONFIG_ETRAX_WATCHDOG
75 * 75 *
76 * Revision 1.17 2001/08/21 13:48:01 jonashg 76 * Revision 1.17 2001/08/21 13:48:01 jonashg
77 * Added fix by HP to avoid oops when doing a hard_reset_now. 77 * Added fix by HP to avoid oops when doing a hard_reset_now.
78 * 78 *
79 * Revision 1.16 2001/06/21 02:00:40 hp 79 * Revision 1.16 2001/06/21 02:00:40 hp
80 * * entry.S: Include asm/unistd.h. 80 * * entry.S: Include asm/unistd.h.
81 * (_sys_call_table): Use section .rodata, not .data. 81 * (_sys_call_table): Use section .rodata, not .data.
82 * (_kernel_thread): Move from... 82 * (_kernel_thread): Move from...
83 * * process.c: ... here. 83 * * process.c: ... here.
84 * * entryoffsets.c (VAL): Break out from... 84 * * entryoffsets.c (VAL): Break out from...
85 * (OF): Use VAL. 85 * (OF): Use VAL.
86 * (LCLONE_VM): New asmified value from CLONE_VM. 86 * (LCLONE_VM): New asmified value from CLONE_VM.
87 * 87 *
88 * Revision 1.15 2001/06/20 16:31:57 hp 88 * Revision 1.15 2001/06/20 16:31:57 hp
89 * Add comments to describe empty functions according to review. 89 * Add comments to describe empty functions according to review.
90 * 90 *
91 * Revision 1.14 2001/05/29 11:27:59 markusl 91 * Revision 1.14 2001/05/29 11:27:59 markusl
92 * Fixed so that hard_reset_now will do reset even if watchdog wasn't enabled 92 * Fixed so that hard_reset_now will do reset even if watchdog wasn't enabled
93 * 93 *
94 * Revision 1.13 2001/03/20 19:44:06 bjornw 94 * Revision 1.13 2001/03/20 19:44:06 bjornw
95 * Use the 7th syscall argument for regs instead of current_regs 95 * Use the 7th syscall argument for regs instead of current_regs
96 * 96 *
97 */ 97 */
98 98
99 /* 99 /*
100 * This file handles the architecture-dependent parts of process handling.. 100 * This file handles the architecture-dependent parts of process handling..
101 */ 101 */
102 102
103 #include <asm/atomic.h> 103 #include <asm/atomic.h>
104 #include <asm/pgtable.h> 104 #include <asm/pgtable.h>
105 #include <asm/uaccess.h> 105 #include <asm/uaccess.h>
106 #include <asm/irq.h> 106 #include <asm/irq.h>
107 #include <linux/module.h> 107 #include <linux/module.h>
108 #include <linux/spinlock.h> 108 #include <linux/spinlock.h>
109 #include <linux/fs_struct.h> 109 #include <linux/fs_struct.h>
110 #include <linux/init_task.h> 110 #include <linux/init_task.h>
111 #include <linux/sched.h> 111 #include <linux/sched.h>
112 #include <linux/fs.h> 112 #include <linux/fs.h>
113 #include <linux/user.h> 113 #include <linux/user.h>
114 #include <linux/elfcore.h> 114 #include <linux/elfcore.h>
115 #include <linux/mqueue.h> 115 #include <linux/mqueue.h>
116 #include <linux/reboot.h> 116 #include <linux/reboot.h>
117 117
118 //#define DEBUG 118 //#define DEBUG
119 119
120 /* 120 /*
121 * Initial task structure. Make this a per-architecture thing, 121 * Initial task structure. Make this a per-architecture thing,
122 * because different architectures tend to have different 122 * because different architectures tend to have different
123 * alignment requirements and potentially different initial 123 * alignment requirements and potentially different initial
124 * setup. 124 * setup.
125 */ 125 */
126 126
127 static struct fs_struct init_fs = INIT_FS; 127 static struct fs_struct init_fs = INIT_FS;
128 static struct files_struct init_files = INIT_FILES; 128 static struct files_struct init_files = INIT_FILES;
129 static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 129 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
130 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 130 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
131 struct mm_struct init_mm = INIT_MM(init_mm); 131 struct mm_struct init_mm = INIT_MM(init_mm);
132 132
133 EXPORT_SYMBOL(init_mm); 133 EXPORT_SYMBOL(init_mm);
134 134
135 /* 135 /*
136 * Initial thread structure. 136 * Initial thread structure.
137 * 137 *
138 * We need to make sure that this is 8192-byte aligned due to the 138 * We need to make sure that this is 8192-byte aligned due to the
139 * way process stacks are handled. This is done by having a special 139 * way process stacks are handled. This is done by having a special
140 * "init_task" linker map entry.. 140 * "init_task" linker map entry..
141 */ 141 */
142 union thread_union init_thread_union 142 union thread_union init_thread_union
143 __attribute__((__section__(".data.init_task"))) = 143 __attribute__((__section__(".data.init_task"))) =
144 { INIT_THREAD_INFO(init_task) }; 144 { INIT_THREAD_INFO(init_task) };
145 145
146 /* 146 /*
147 * Initial task structure. 147 * Initial task structure.
148 * 148 *
149 * All other task structs will be allocated on slabs in fork.c 149 * All other task structs will be allocated on slabs in fork.c
150 */ 150 */
151 struct task_struct init_task = INIT_TASK(init_task); 151 struct task_struct init_task = INIT_TASK(init_task);
152 152
153 EXPORT_SYMBOL(init_task); 153 EXPORT_SYMBOL(init_task);
154 154
155 /* 155 /*
156 * The hlt_counter, disable_hlt and enable_hlt is just here as a hook if 156 * The hlt_counter, disable_hlt and enable_hlt is just here as a hook if
157 * there would ever be a halt sequence (for power save when idle) with 157 * there would ever be a halt sequence (for power save when idle) with
158 * some largish delay when halting or resuming *and* a driver that can't 158 * some largish delay when halting or resuming *and* a driver that can't
159 * afford that delay. The hlt_counter would then be checked before 159 * afford that delay. The hlt_counter would then be checked before
160 * executing the halt sequence, and the driver marks the unhaltable 160 * executing the halt sequence, and the driver marks the unhaltable
161 * region by enable_hlt/disable_hlt. 161 * region by enable_hlt/disable_hlt.
162 */ 162 */
163 163
164 static int hlt_counter=0; 164 static int hlt_counter=0;
165 165
166 void disable_hlt(void) 166 void disable_hlt(void)
167 { 167 {
168 hlt_counter++; 168 hlt_counter++;
169 } 169 }
170 170
171 EXPORT_SYMBOL(disable_hlt); 171 EXPORT_SYMBOL(disable_hlt);
172 172
173 void enable_hlt(void) 173 void enable_hlt(void)
174 { 174 {
175 hlt_counter--; 175 hlt_counter--;
176 } 176 }
177 177
178 EXPORT_SYMBOL(enable_hlt); 178 EXPORT_SYMBOL(enable_hlt);
179 179
180 /* 180 /*
181 * The following aren't currently used. 181 * The following aren't currently used.
182 */ 182 */
183 void (*pm_idle)(void); 183 void (*pm_idle)(void);
184 184
185 extern void default_idle(void); 185 extern void default_idle(void);
186 186
187 /* 187 /*
188 * The idle thread. There's no useful work to be 188 * The idle thread. There's no useful work to be
189 * done, so just try to conserve power and have a 189 * done, so just try to conserve power and have a
190 * low exit latency (ie sit in a loop waiting for 190 * low exit latency (ie sit in a loop waiting for
191 * somebody to say that they'd like to reschedule) 191 * somebody to say that they'd like to reschedule)
192 */ 192 */
193 void cpu_idle (void) 193 void cpu_idle (void)
194 { 194 {
195 /* endless idle loop with no priority at all */ 195 /* endless idle loop with no priority at all */
196 while (1) { 196 while (1) {
197 while (!need_resched()) { 197 while (!need_resched()) {
198 void (*idle)(void) = pm_idle; 198 void (*idle)(void) = pm_idle;
199 199
200 if (!idle) 200 if (!idle)
201 idle = default_idle; 201 idle = default_idle;
202 202
203 idle(); 203 idle();
204 } 204 }
205 schedule(); 205 schedule();
206 } 206 }
207 207
208 } 208 }
209 209
210 void hard_reset_now (void); 210 void hard_reset_now (void);
211 211
212 void machine_restart(char *cmd) 212 void machine_restart(char *cmd)
213 { 213 {
214 hard_reset_now(); 214 hard_reset_now();
215 } 215 }
216 216
217 EXPORT_SYMBOL(machine_restart);
218
219 /* 217 /*
220 * Similar to machine_power_off, but don't shut off power. Add code 218 * Similar to machine_power_off, but don't shut off power. Add code
221 * here to freeze the system for e.g. post-mortem debug purpose when 219 * here to freeze the system for e.g. post-mortem debug purpose when
222 * possible. This halt has nothing to do with the idle halt. 220 * possible. This halt has nothing to do with the idle halt.
223 */ 221 */
224 222
225 void machine_halt(void) 223 void machine_halt(void)
226 { 224 {
227 } 225 }
228 226
229 EXPORT_SYMBOL(machine_halt);
230
231 /* If or when software power-off is implemented, add code here. */ 227 /* If or when software power-off is implemented, add code here. */
232 228
233 void machine_power_off(void) 229 void machine_power_off(void)
234 { 230 {
235 } 231 }
236
237 EXPORT_SYMBOL(machine_power_off);
238 232
239 /* 233 /*
240 * When a process does an "exec", machine state like FPU and debug 234 * When a process does an "exec", machine state like FPU and debug
241 * registers need to be reset. This is a hook function for that. 235 * registers need to be reset. This is a hook function for that.
242 * Currently we don't have any such state to reset, so this is empty. 236 * Currently we don't have any such state to reset, so this is empty.
243 */ 237 */
244 238
245 void flush_thread(void) 239 void flush_thread(void)
246 { 240 {
247 } 241 }
248 242
249 /* 243 /*
250 * fill in the user structure for a core dump.. 244 * fill in the user structure for a core dump..
251 */ 245 */
252 void dump_thread(struct pt_regs * regs, struct user * dump) 246 void dump_thread(struct pt_regs * regs, struct user * dump)
253 { 247 {
254 #if 0 248 #if 0
255 int i; 249 int i;
256 250
257 /* changed the size calculations - should hopefully work better. lbt */ 251 /* changed the size calculations - should hopefully work better. lbt */
258 dump->magic = CMAGIC; 252 dump->magic = CMAGIC;
259 dump->start_code = 0; 253 dump->start_code = 0;
260 dump->start_stack = regs->esp & ~(PAGE_SIZE - 1); 254 dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
261 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; 255 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
262 dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; 256 dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
263 dump->u_dsize -= dump->u_tsize; 257 dump->u_dsize -= dump->u_tsize;
264 dump->u_ssize = 0; 258 dump->u_ssize = 0;
265 for (i = 0; i < 8; i++) 259 for (i = 0; i < 8; i++)
266 dump->u_debugreg[i] = current->debugreg[i]; 260 dump->u_debugreg[i] = current->debugreg[i];
267 261
268 if (dump->start_stack < TASK_SIZE) 262 if (dump->start_stack < TASK_SIZE)
269 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; 263 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
270 264
271 dump->regs = *regs; 265 dump->regs = *regs;
272 266
273 dump->u_fpvalid = dump_fpu (regs, &dump->i387); 267 dump->u_fpvalid = dump_fpu (regs, &dump->i387);
274 #endif 268 #endif
275 } 269 }
276 270
277 /* Fill in the fpu structure for a core dump. */ 271 /* Fill in the fpu structure for a core dump. */
278 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) 272 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
279 { 273 {
280 return 0; 274 return 0;
281 } 275 }
282 276
arch/h8300/kernel/process.c
1 /* 1 /*
2 * linux/arch/h8300/kernel/process.c 2 * linux/arch/h8300/kernel/process.c
3 * 3 *
4 * Yoshinori Sato <ysato@users.sourceforge.jp> 4 * Yoshinori Sato <ysato@users.sourceforge.jp>
5 * 5 *
6 * Based on: 6 * Based on:
7 * 7 *
8 * linux/arch/m68knommu/kernel/process.c 8 * linux/arch/m68knommu/kernel/process.c
9 * 9 *
10 * Copyright (C) 1998 D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>, 10 * Copyright (C) 1998 D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>,
11 * Kenneth Albanowski <kjahds@kjahds.com>, 11 * Kenneth Albanowski <kjahds@kjahds.com>,
12 * The Silver Hammer Group, Ltd. 12 * The Silver Hammer Group, Ltd.
13 * 13 *
14 * linux/arch/m68k/kernel/process.c 14 * linux/arch/m68k/kernel/process.c
15 * 15 *
16 * Copyright (C) 1995 Hamish Macdonald 16 * Copyright (C) 1995 Hamish Macdonald
17 * 17 *
18 * 68060 fixes by Jesper Skov 18 * 68060 fixes by Jesper Skov
19 */ 19 */
20 20
21 /* 21 /*
22 * This file handles the architecture-dependent parts of process handling.. 22 * This file handles the architecture-dependent parts of process handling..
23 */ 23 */
24 24
25 #include <linux/config.h> 25 #include <linux/config.h>
26 #include <linux/errno.h> 26 #include <linux/errno.h>
27 #include <linux/module.h> 27 #include <linux/module.h>
28 #include <linux/sched.h> 28 #include <linux/sched.h>
29 #include <linux/kernel.h> 29 #include <linux/kernel.h>
30 #include <linux/mm.h> 30 #include <linux/mm.h>
31 #include <linux/smp.h> 31 #include <linux/smp.h>
32 #include <linux/smp_lock.h> 32 #include <linux/smp_lock.h>
33 #include <linux/stddef.h> 33 #include <linux/stddef.h>
34 #include <linux/unistd.h> 34 #include <linux/unistd.h>
35 #include <linux/ptrace.h> 35 #include <linux/ptrace.h>
36 #include <linux/slab.h> 36 #include <linux/slab.h>
37 #include <linux/user.h> 37 #include <linux/user.h>
38 #include <linux/a.out.h> 38 #include <linux/a.out.h>
39 #include <linux/interrupt.h> 39 #include <linux/interrupt.h>
40 #include <linux/reboot.h> 40 #include <linux/reboot.h>
41 41
42 #include <asm/uaccess.h> 42 #include <asm/uaccess.h>
43 #include <asm/system.h> 43 #include <asm/system.h>
44 #include <asm/traps.h> 44 #include <asm/traps.h>
45 #include <asm/setup.h> 45 #include <asm/setup.h>
46 #include <asm/pgtable.h> 46 #include <asm/pgtable.h>
47 47
48 asmlinkage void ret_from_fork(void); 48 asmlinkage void ret_from_fork(void);
49 49
50 /* 50 /*
51 * The idle loop on an H8/300.. 51 * The idle loop on an H8/300..
52 */ 52 */
53 #if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM) 53 #if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
54 void default_idle(void) 54 void default_idle(void)
55 { 55 {
56 while(1) { 56 while(1) {
57 if (!need_resched()) { 57 if (!need_resched()) {
58 local_irq_enable(); 58 local_irq_enable();
59 __asm__("sleep"); 59 __asm__("sleep");
60 local_irq_disable(); 60 local_irq_disable();
61 } 61 }
62 schedule(); 62 schedule();
63 } 63 }
64 } 64 }
65 #else 65 #else
66 void default_idle(void) 66 void default_idle(void)
67 { 67 {
68 while(1) { 68 while(1) {
69 if (need_resched()) 69 if (need_resched())
70 schedule(); 70 schedule();
71 } 71 }
72 } 72 }
73 #endif 73 #endif
74 void (*idle)(void) = default_idle; 74 void (*idle)(void) = default_idle;
75 75
76 /* 76 /*
77 * The idle thread. There's no useful work to be 77 * The idle thread. There's no useful work to be
78 * done, so just try to conserve power and have a 78 * done, so just try to conserve power and have a
79 * low exit latency (ie sit in a loop waiting for 79 * low exit latency (ie sit in a loop waiting for
80 * somebody to say that they'd like to reschedule) 80 * somebody to say that they'd like to reschedule)
81 */ 81 */
82 void cpu_idle(void) 82 void cpu_idle(void)
83 { 83 {
84 idle(); 84 idle();
85 } 85 }
86 86
87 void machine_restart(char * __unused) 87 void machine_restart(char * __unused)
88 { 88 {
89 local_irq_disable(); 89 local_irq_disable();
90 __asm__("jmp @@0"); 90 __asm__("jmp @@0");
91 } 91 }
92 92
93 EXPORT_SYMBOL(machine_restart);
94
95 void machine_halt(void) 93 void machine_halt(void)
96 { 94 {
97 local_irq_disable(); 95 local_irq_disable();
98 __asm__("sleep"); 96 __asm__("sleep");
99 for (;;); 97 for (;;);
100 } 98 }
101 99
102 EXPORT_SYMBOL(machine_halt);
103
104 void machine_power_off(void) 100 void machine_power_off(void)
105 { 101 {
106 local_irq_disable(); 102 local_irq_disable();
107 __asm__("sleep"); 103 __asm__("sleep");
108 for (;;); 104 for (;;);
109 } 105 }
110
111 EXPORT_SYMBOL(machine_power_off);
112 106
/*
 * Dump the register set held in *regs to the console.  The user
 * stack pointer is printed only when the saved state came from
 * user mode.
 */
void show_regs(struct pt_regs * regs)
{
	printk("\nPC: %08lx  Status: %02x",
	       regs->pc, regs->ccr);
	printk("\nORIG_ER0: %08lx ER0: %08lx ER1: %08lx",
	       regs->orig_er0, regs->er0, regs->er1);
	printk("\nER2: %08lx ER3: %08lx  ER4: %08lx  ER5: %08lx",
	       regs->er2, regs->er3, regs->er4, regs->er5);
	printk("\nER6' %08lx ",regs->er6);
	if (user_mode(regs))
		printk("USP: %08lx\n", rdusp());
	else
		printk("\n");
}
127 121
/*
 * Create a kernel thread.
 *
 * Issues the clone system call directly via "trapa #0" with fn/arg
 * staged in registers.  Parent and child both return from the trap;
 * they are told apart by comparing the stack pointer saved in er3
 * before the trap against the current one.  In the child (sp
 * changed) we call fn(arg) and then invoke the exit syscall with
 * its return value; the parent falls through to the "1:" label and
 * returns the clone result.  KERNEL_DS is forced around the trap so
 * the syscall path treats the arguments as kernel addresses.
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;
	long clone_arg;
	mm_segment_t fs;

	fs = get_fs();
	set_fs (KERNEL_DS);
	clone_arg = flags | CLONE_VM;	/* kernel threads share the mm */
	__asm__("mov.l sp,er3\n\t"	/* remember parent's sp */
		"sub.l er2,er2\n\t"	/* newsp = 0 */
		"mov.l %2,er1\n\t"	/* clone flags */
		"mov.l %1,er0\n\t"	/* __NR_clone */
		"trapa #0\n\t"		/* enter the kernel */
		"cmp.l sp,er3\n\t"	/* same stack => parent */
		"beq 1f\n\t"
		"mov.l %4,er0\n\t"	/* child: arg */
		"mov.l %3,er1\n\t"	/* child: fn */
		"jsr @er1\n\t"		/* child: fn(arg) */
		"mov.l %5,er0\n\t"	/* child: __NR_exit */
		"trapa #0\n"
		"1:\n\t"
		"mov.l er0,%0"		/* parent: clone() result */
		:"=r"(retval)
		:"i"(__NR_clone),"g"(clone_arg),"g"(fn),"g"(arg),"i"(__NR_exit)
		:"er0","er1","er2","er3");
	set_fs (fs);
	return retval;
}
160 154
/*
 * Called on exec.  Deliberately empty: there is no architecture
 * specific per-thread state to reset here.
 */
void flush_thread(void)
{
}
164 158
/*
 * "h8300_fork()"..  By the time we get here, the
 * non-volatile registers have also been saved on the
 * stack.  We do some ugly pointer stuff here.. (see
 * also copy_thread)
 *
 * fork() is not supported on this port: callers get -EINVAL and
 * must use vfork()/clone() instead (h8300_vfork/h8300_clone).
 */

asmlinkage int h8300_fork(struct pt_regs *regs)
{
	return -EINVAL;
}
176 170
/*
 * vfork(): the parent is suspended until the child execs or exits
 * (CLONE_VFORK) and the address space is shared (CLONE_VM); the
 * child starts on the caller's current user stack pointer.
 */
asmlinkage int h8300_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}
181 175
/*
 * clone(): fetch the clone flags and new stack pointer from the
 * saved registers and hand off to the generic do_fork().  A NULL
 * stack pointer means "inherit the caller's user stack".
 */
asmlinkage int h8300_clone(struct pt_regs *regs)
{
	unsigned long clone_flags;
	unsigned long newsp;

	/* syscall2 puts clone_flags in er1 and usp in er2 */
	clone_flags = regs->er1;
	newsp = regs->er2;
	if (!newsp)
		newsp = rdusp();
	return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);

}
195 189
/*
 * Set up the child's thread state at fork time: place a copy of the
 * parent's pt_regs at the top of the child's kernel stack, make the
 * child see a 0 syscall return value, and arrange for it to resume
 * at ret_from_fork.  Always succeeds (returns 0).
 */
int copy_thread(int nr, unsigned long clone_flags,
                unsigned long usp, unsigned long topstk,
                struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;

	/* pt_regs live at the very top of the child's kernel stack */
	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;

	*childregs = *regs;
	childregs->retpc = (unsigned long) ret_from_fork;
	childregs->er0 = 0;	/* fork()/clone() returns 0 in the child */

	p->thread.usp = usp;
	p->thread.ksp = (unsigned long)childregs;

	return 0;
}
213 207
/*
 * fill in the user structure for a core dump..
 * Records the text/data segment sizes (in pages) and a snapshot of
 * the register state at the time of the fault.
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk +
					  (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;

	/* offset of the register image within the dump */
	dump->u_ar0 = (struct user_regs_struct *)(((int)(&dump->regs)) -((int)(dump)));
	dump->regs.er0 = regs->er0;
	dump->regs.er1 = regs->er1;
	dump->regs.er2 = regs->er2;
	dump->regs.er3 = regs->er3;
	dump->regs.er4 = regs->er4;
	dump->regs.er5 = regs->er5;
	dump->regs.er6 = regs->er6;
	dump->regs.orig_er0 = regs->orig_er0;
	dump->regs.ccr = regs->ccr;
	dump->regs.pc = regs->pc;
}
241 235
/*
 * sys_execve() executes a new program.
 * The pt_regs pointer is recovered from the stack relative to the
 * dummy argument (NOTE(review): depends on the syscall entry stack
 * layout -- fragile, do not reorder parameters).
 */
asmlinkage int sys_execve(char *name, char **argv, char **envp,int dummy,...)
{
	int error;
	char * filename;
	struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4);

	lock_kernel();
	filename = getname(name);	/* copy the path in from user space */
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
out:
	unlock_kernel();
	return error;
}
262 256
/*
 * Return the program counter a sleeping task will resume at, read
 * from the saved register frame reachable through thread.esp0.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((struct pt_regs *)tsk->thread.esp0)->pc;
}
267 261
/*
 * Walk the sleeping task's frame-pointer (er6) chain, up to 16
 * frames, and return the first saved PC that is outside the
 * scheduler -- i.e. where the task is actually waiting.  Returns 0
 * for running tasks or when no such frame is found.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)p;
	fp = ((struct pt_regs *)p->thread.ksp)->er6;
	do {
		/* bail out if fp leaves the task's kernel stack
		 * (8184 looks like THREAD_SIZE - 8 -- TODO confirm) */
		if (fp < stack_page+sizeof(struct thread_info) ||
		    fp >= 8184+stack_page)
			return 0;
		pc = ((unsigned long *)fp)[1];	/* saved return address */
		if (!in_sched_functions(pc))
			return pc;
		fp = *(unsigned long *) fp;	/* follow the frame chain */
	} while (count++ < 16);
	return 0;
}
289 283
arch/i386/kernel/reboot.c
1 /* 1 /*
2 * linux/arch/i386/kernel/reboot.c 2 * linux/arch/i386/kernel/reboot.c
3 */ 3 */
4 4
5 #include <linux/config.h> 5 #include <linux/config.h>
6 #include <linux/mm.h> 6 #include <linux/mm.h>
7 #include <linux/module.h> 7 #include <linux/module.h>
8 #include <linux/delay.h> 8 #include <linux/delay.h>
9 #include <linux/init.h> 9 #include <linux/init.h>
10 #include <linux/interrupt.h> 10 #include <linux/interrupt.h>
11 #include <linux/mc146818rtc.h> 11 #include <linux/mc146818rtc.h>
12 #include <linux/efi.h> 12 #include <linux/efi.h>
13 #include <linux/dmi.h> 13 #include <linux/dmi.h>
14 #include <asm/uaccess.h> 14 #include <asm/uaccess.h>
15 #include <asm/apic.h> 15 #include <asm/apic.h>
16 #include "mach_reboot.h" 16 #include "mach_reboot.h"
17 #include <linux/reboot_fixups.h> 17 #include <linux/reboot_fixups.h>
18 18
19 /* 19 /*
20 * Power off function, if any 20 * Power off function, if any
21 */ 21 */
22 void (*pm_power_off)(void); 22 void (*pm_power_off)(void);
23 EXPORT_SYMBOL(pm_power_off); 23 EXPORT_SYMBOL(pm_power_off);
24 24
25 static int reboot_mode; 25 static int reboot_mode;
26 static int reboot_thru_bios; 26 static int reboot_thru_bios;
27 27
28 #ifdef CONFIG_SMP 28 #ifdef CONFIG_SMP
29 static int reboot_cpu = -1; 29 static int reboot_cpu = -1;
30 /* shamelessly grabbed from lib/vsprintf.c for readability */ 30 /* shamelessly grabbed from lib/vsprintf.c for readability */
31 #define is_digit(c) ((c) >= '0' && (c) <= '9') 31 #define is_digit(c) ((c) >= '0' && (c) <= '9')
32 #endif 32 #endif
/*
 * Parse the "reboot=" kernel command line option: a comma separated
 * list of single-character flags.
 *   w     - warm reboot (skip memory test)
 *   c     - cold reboot (with memory test)
 *   b     - reboot by jumping through the BIOS
 *   h     - hard reboot (toggle RESET / crash the CPU)
 *   s[nn] - (SMP) execute the reset on CPU nn
 * Always returns 1 (option consumed).
 */
static int __init reboot_setup(char *str)
{
	while(1) {
		switch (*str) {
		case 'w': /* "warm" reboot (no memory testing etc) */
			reboot_mode = 0x1234;
			break;
		case 'c': /* "cold" reboot (with memory testing etc) */
			reboot_mode = 0x0;
			break;
		case 'b': /* "bios" reboot by jumping through the BIOS */
			reboot_thru_bios = 1;
			break;
		case 'h': /* "hard" reboot by toggling RESET and/or crashing the CPU */
			reboot_thru_bios = 0;
			break;
#ifdef CONFIG_SMP
		case 's': /* "smp" reboot by executing reset on BSP or other CPU*/
			if (is_digit(*(str+1))) {
				reboot_cpu = (int) (*(str+1) - '0');
				if (is_digit(*(str+2)))
					reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0');
			}
			/* we will leave sorting out the final value
			   when we are ready to reboot, since we might not
			   have set up boot_cpu_id or smp_num_cpu */
			break;
#endif
		}
		/* advance to the flag after the next comma, if any */
		if((str = strchr(str,',')) != NULL)
			str++;
		else
			break;
	}
	return 1;
}
69 69
70 __setup("reboot=", reboot_setup); 70 __setup("reboot=", reboot_setup);
71 71
72 /* 72 /*
73 * Reboot options and system auto-detection code provided by 73 * Reboot options and system auto-detection code provided by
74 * Dell Inc. so their systems "just work". :-) 74 * Dell Inc. so their systems "just work". :-)
75 */ 75 */
76 76
77 /* 77 /*
78 * Some machines require the "reboot=b" commandline option, this quirk makes that automatic. 78 * Some machines require the "reboot=b" commandline option, this quirk makes that automatic.
79 */ 79 */
/*
 * DMI callback: force the BIOS reboot method on boards known to
 * need the "reboot=b" command line option.
 */
static int __init set_bios_reboot(struct dmi_system_id *d)
{
	if (!reboot_thru_bios) {
		reboot_thru_bios = 1;
		printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident);
	}
	return 0;
}
88 88
/* DMI quirk table: boards whose reboot must go through the BIOS. */
static struct dmi_system_id __initdata reboot_dmi_table[] = {
	{	/* Handle problems with rebooting on Dell 1300's */
		.callback = set_bios_reboot,
		.ident = "Dell PowerEdge 1300",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
		},
	},
	{	/* Handle problems with rebooting on Dell 300's */
		.callback = set_bios_reboot,
		.ident = "Dell PowerEdge 300",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
		},
	},
	{	/* Handle problems with rebooting on Dell 2400's */
		.callback = set_bios_reboot,
		.ident = "Dell PowerEdge 2400",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
		},
	},
	{ }	/* terminator */
};
116 116
/* Apply the DMI reboot quirks early in boot (core_initcall). */
static int __init reboot_init(void)
{
	dmi_check_system(reboot_dmi_table);
	return 0;
}
122 122
123 core_initcall(reboot_init); 123 core_initcall(reboot_init);
124 124
125 /* The following code and data reboots the machine by switching to real 125 /* The following code and data reboots the machine by switching to real
126 mode and jumping to the BIOS reset entry point, as if the CPU has 126 mode and jumping to the BIOS reset entry point, as if the CPU has
127 really been reset. The previous version asked the keyboard 127 really been reset. The previous version asked the keyboard
128 controller to pulse the CPU reset line, which is more thorough, but 128 controller to pulse the CPU reset line, which is more thorough, but
129 doesn't work with at least one type of 486 motherboard. It is easy 129 doesn't work with at least one type of 486 motherboard. It is easy
130 to stop this code working; hence the copious comments. */ 130 to stop this code working; hence the copious comments. */
131 131
132 static unsigned long long 132 static unsigned long long
133 real_mode_gdt_entries [3] = 133 real_mode_gdt_entries [3] =
134 { 134 {
135 0x0000000000000000ULL, /* Null descriptor */ 135 0x0000000000000000ULL, /* Null descriptor */
136 0x00009a000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */ 136 0x00009a000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
137 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ 137 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
138 }; 138 };
139 139
140 static struct 140 static struct
141 { 141 {
142 unsigned short size __attribute__ ((packed)); 142 unsigned short size __attribute__ ((packed));
143 unsigned long long * base __attribute__ ((packed)); 143 unsigned long long * base __attribute__ ((packed));
144 } 144 }
145 real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries }, 145 real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries },
146 real_mode_idt = { 0x3ff, NULL }, 146 real_mode_idt = { 0x3ff, NULL },
147 no_idt = { 0, NULL }; 147 no_idt = { 0, NULL };
148 148
149 149
150 /* This is 16-bit protected mode code to disable paging and the cache, 150 /* This is 16-bit protected mode code to disable paging and the cache,
151 switch to real mode and jump to the BIOS reset code. 151 switch to real mode and jump to the BIOS reset code.
152 152
153 The instruction that switches to real mode by writing to CR0 must be 153 The instruction that switches to real mode by writing to CR0 must be
154 followed immediately by a far jump instruction, which set CS to a 154 followed immediately by a far jump instruction, which set CS to a
155 valid value for real mode, and flushes the prefetch queue to avoid 155 valid value for real mode, and flushes the prefetch queue to avoid
156 running instructions that have already been decoded in protected 156 running instructions that have already been decoded in protected
157 mode. 157 mode.
158 158
159 Clears all the flags except ET, especially PG (paging), PE 159 Clears all the flags except ET, especially PG (paging), PE
160 (protected-mode enable) and TS (task switch for coprocessor state 160 (protected-mode enable) and TS (task switch for coprocessor state
161 save). Flushes the TLB after paging has been disabled. Sets CD and 161 save). Flushes the TLB after paging has been disabled. Sets CD and
162 NW, to disable the cache on a 486, and invalidates the cache. This 162 NW, to disable the cache on a 486, and invalidates the cache. This
163 is more like the state of a 486 after reset. I don't know if 163 is more like the state of a 486 after reset. I don't know if
164 something else should be done for other chips. 164 something else should be done for other chips.
165 165
166 More could be done here to set up the registers as if a CPU reset had 166 More could be done here to set up the registers as if a CPU reset had
167 occurred; hopefully real BIOSs don't assume much. */ 167 occurred; hopefully real BIOSs don't assume much. */
168 168
169 static unsigned char real_mode_switch [] = 169 static unsigned char real_mode_switch [] =
170 { 170 {
171 0x66, 0x0f, 0x20, 0xc0, /* movl %cr0,%eax */ 171 0x66, 0x0f, 0x20, 0xc0, /* movl %cr0,%eax */
172 0x66, 0x83, 0xe0, 0x11, /* andl $0x00000011,%eax */ 172 0x66, 0x83, 0xe0, 0x11, /* andl $0x00000011,%eax */
173 0x66, 0x0d, 0x00, 0x00, 0x00, 0x60, /* orl $0x60000000,%eax */ 173 0x66, 0x0d, 0x00, 0x00, 0x00, 0x60, /* orl $0x60000000,%eax */
174 0x66, 0x0f, 0x22, 0xc0, /* movl %eax,%cr0 */ 174 0x66, 0x0f, 0x22, 0xc0, /* movl %eax,%cr0 */
175 0x66, 0x0f, 0x22, 0xd8, /* movl %eax,%cr3 */ 175 0x66, 0x0f, 0x22, 0xd8, /* movl %eax,%cr3 */
176 0x66, 0x0f, 0x20, 0xc3, /* movl %cr0,%ebx */ 176 0x66, 0x0f, 0x20, 0xc3, /* movl %cr0,%ebx */
177 0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60, /* andl $0x60000000,%ebx */ 177 0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60, /* andl $0x60000000,%ebx */
178 0x74, 0x02, /* jz f */ 178 0x74, 0x02, /* jz f */
179 0x0f, 0x09, /* wbinvd */ 179 0x0f, 0x09, /* wbinvd */
180 0x24, 0x10, /* f: andb $0x10,al */ 180 0x24, 0x10, /* f: andb $0x10,al */
181 0x66, 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */ 181 0x66, 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */
182 }; 182 };
183 static unsigned char jump_to_bios [] = 183 static unsigned char jump_to_bios [] =
184 { 184 {
185 0xea, 0x00, 0x00, 0xff, 0xff /* ljmp $0xffff,$0x0000 */ 185 0xea, 0x00, 0x00, 0xff, 0xff /* ljmp $0xffff,$0x0000 */
186 }; 186 };
187 187
/*
 * Switch to real mode and then execute the code
 * specified by the code and length parameters.
 * We assume that length will aways be less that 100!
 *
 * Runs with interrupts disabled and never returns on success; the
 * supplied 16-bit code (typically jump_to_bios) is entered in real
 * mode at physical address 0x1000 - 100.
 */
void machine_real_restart(unsigned char *code, int length)
{
	unsigned long flags;

	local_irq_disable();

	/* Write zero to CMOS register number 0x0f, which the BIOS POST
	   routine will recognize as telling it to do a proper reboot.  (Well
	   that's what this book in front of me says -- it may only apply to
	   the Phoenix BIOS though, it's not clear).  At the same time,
	   disable NMIs by setting the top bit in the CMOS address register,
	   as we're about to do peculiar things to the CPU.  I'm not sure if
	   `outb_p' is needed instead of just `outb'.  Use it to be on the
	   safe side.  (Yes, CMOS_WRITE does outb_p's. -  Paul G.)
	 */

	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0x00, 0x8f);
	spin_unlock_irqrestore(&rtc_lock, flags);

	/* Remap the kernel at virtual address zero, as well as offset zero
	   from the kernel segment.  This assumes the kernel segment starts at
	   virtual address PAGE_OFFSET. */

	memcpy (swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
		sizeof (swapper_pg_dir [0]) * KERNEL_PGD_PTRS);

	/*
	 * Use `swapper_pg_dir' as our page directory.
	 */
	load_cr3(swapper_pg_dir);

	/* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
	   this on booting to tell it to "Bypass memory test (also warm
	   boot)".  This seems like a fairly standard thing that gets set by
	   REBOOT.COM programs, and the previous reset routine did this
	   too. */

	*((unsigned short *)0x472) = reboot_mode;

	/* For the switch to real mode, copy some code to low memory.  It has
	   to be in the first 64k because it is running in 16-bit mode, and it
	   has to have the same physical and virtual address, because it turns
	   off paging.  Copy it near the end of the first page, out of the way
	   of BIOS variables. */

	memcpy ((void *) (0x1000 - sizeof (real_mode_switch) - 100),
		real_mode_switch, sizeof (real_mode_switch));
	memcpy ((void *) (0x1000 - 100), code, length);

	/* Set up the IDT for real mode. */

	__asm__ __volatile__ ("lidt %0" : : "m" (real_mode_idt));

	/* Set up a GDT from which we can load segment descriptors for real
	   mode.  The GDT is not used in real mode; it is just needed here to
	   prepare the descriptors. */

	__asm__ __volatile__ ("lgdt %0" : : "m" (real_mode_gdt));

	/* Load the data segment registers, and thus the descriptors ready for
	   real mode.  The base address of each segment is 0x100, 16 times the
	   selector value being loaded here.  This is so that the segment
	   registers don't have to be reloaded after switching to real mode:
	   the values are consistent for real mode operation already. */

	__asm__ __volatile__ ("movl $0x0010,%%eax\n"
				"\tmovl %%eax,%%ds\n"
				"\tmovl %%eax,%%es\n"
				"\tmovl %%eax,%%fs\n"
				"\tmovl %%eax,%%gs\n"
				"\tmovl %%eax,%%ss" : : : "eax");

	/* Jump to the 16-bit code that we copied earlier.  It disables paging
	   and the cache, switches to real mode, and jumps to the BIOS reset
	   entry point. */

	__asm__ __volatile__ ("ljmp $0x0008,%0"
				:
				: "i" ((void *) (0x1000 - sizeof (real_mode_switch) - 100)));
}
274 #ifdef CONFIG_APM_MODULE 274 #ifdef CONFIG_APM_MODULE
275 EXPORT_SYMBOL(machine_real_restart); 275 EXPORT_SYMBOL(machine_real_restart);
276 #endif 276 #endif
277 277
/*
 * Quiesce the machine before a reboot: migrate to the chosen reboot
 * CPU, stop the other CPUs, and shut down the local APIC and (when
 * configured) the IO-APIC.
 */
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
	int reboot_cpu_id;

	/* The boot cpu is always logical cpu 0 */
	reboot_cpu_id = 0;

	/* See if there has been given a command line override */
	if ((reboot_cpu_id != -1) && (reboot_cpu < NR_CPUS) &&
		cpu_isset(reboot_cpu, cpu_online_map)) {
		reboot_cpu_id = reboot_cpu;
	}

	/* Make certain the cpu I'm rebooting on is online */
	if (!cpu_isset(reboot_cpu_id, cpu_online_map)) {
		reboot_cpu_id = smp_processor_id();
	}

	/* Make certain I only run on the appropriate processor */
	set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));

	/* O.K. Now that I'm on the appropriate processor, stop
	 * all of the others, and disable their local APICs.
	 */

	smp_send_stop();
#endif /* CONFIG_SMP */

	lapic_shutdown();

#ifdef CONFIG_X86_IO_APIC
	disable_IO_APIC();
#endif
}
313 313
/*
 * Restart the machine.  After machine_shutdown() quiesces the other
 * CPUs and APICs, either reset via mach_reboot()/triple fault (the
 * default), or -- with "reboot=b" -- re-enter the BIOS reset vector
 * in real mode.  EFI systems try the firmware reset service first.
 * Not exported; modules must use kernel_restart()/emergency_restart().
 */
void machine_restart(char * __unused)
{
	machine_shutdown();

	if (!reboot_thru_bios) {
		if (efi_enabled) {
			efi.reset_system(EFI_RESET_COLD, EFI_SUCCESS, 0, NULL);
			/* firmware reset failed: force a triple fault */
			__asm__ __volatile__("lidt %0": :"m" (no_idt));
			__asm__ __volatile__("int3");
		}
		/* rebooting needs to touch the page at absolute addr 0 */
		*((unsigned short *)__va(0x472)) = reboot_mode;
		for (;;) {
			mach_reboot_fixups(); /* for board specific fixups */
			mach_reboot();
			/* That didn't work - force a triple fault.. */
			__asm__ __volatile__("lidt %0": :"m" (no_idt));
			__asm__ __volatile__("int3");
		}
	}
	if (efi_enabled)
		efi.reset_system(EFI_RESET_WARM, EFI_SUCCESS, 0, NULL);

	machine_real_restart(jump_to_bios, sizeof(jump_to_bios));
}
/*
 * Halt hook.  Intentionally empty on i386: there is nothing
 * machine-specific to do here.
 */
void machine_halt(void)
{
}
/*
 * Power down: shut off the local APIC, then try EFI runtime
 * services and finally the registered pm_power_off hook, if any.
 * Simply returns when no power-off method is available.
 */
void machine_power_off(void)
{
	lapic_shutdown();

	if (efi_enabled)
		efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
	if (pm_power_off)
		pm_power_off();
}
359 353
360 354
arch/i386/mach-visws/reboot.c
1 #include <linux/module.h> 1 #include <linux/module.h>
2 #include <linux/smp.h> 2 #include <linux/smp.h>
3 #include <linux/delay.h> 3 #include <linux/delay.h>
4 #include <linux/platform.h> 4 #include <linux/platform.h>
5 5
6 #include <asm/io.h> 6 #include <asm/io.h>
7 #include "piix4.h" 7 #include "piix4.h"
8 8
9 void (*pm_power_off)(void); 9 void (*pm_power_off)(void);
10 EXPORT_SYMBOL(pm_power_off); 10 EXPORT_SYMBOL(pm_power_off);
11 11
12 void machine_restart(char * __unused) 12 void machine_restart(char * __unused)
13 { 13 {
14 #ifdef CONFIG_SMP 14 #ifdef CONFIG_SMP
15 smp_send_stop(); 15 smp_send_stop();
16 #endif 16 #endif
17 17
18 /* 18 /*
19 * Visual Workstations restart after this 19 * Visual Workstations restart after this
20 * register is poked on the PIIX4 20 * register is poked on the PIIX4
21 */ 21 */
22 outb(PIIX4_RESET_VAL, PIIX4_RESET_PORT); 22 outb(PIIX4_RESET_VAL, PIIX4_RESET_PORT);
23 } 23 }
24 24
25 EXPORT_SYMBOL(machine_restart);
26
27 void machine_power_off(void) 25 void machine_power_off(void)
28 { 26 {
29 unsigned short pm_status; 27 unsigned short pm_status;
30 extern unsigned int pci_bus0; 28 extern unsigned int pci_bus0;
31 29
32 while ((pm_status = inw(PMSTS_PORT)) & 0x100) 30 while ((pm_status = inw(PMSTS_PORT)) & 0x100)
33 outw(pm_status, PMSTS_PORT); 31 outw(pm_status, PMSTS_PORT);
34 32
35 outw(PM_SUSPEND_ENABLE, PMCNTRL_PORT); 33 outw(PM_SUSPEND_ENABLE, PMCNTRL_PORT);
36 34
37 mdelay(10); 35 mdelay(10);
38 36
39 #define PCI_CONF1_ADDRESS(bus, devfn, reg) \ 37 #define PCI_CONF1_ADDRESS(bus, devfn, reg) \
40 (0x80000000 | (bus << 16) | (devfn << 8) | (reg & ~3)) 38 (0x80000000 | (bus << 16) | (devfn << 8) | (reg & ~3))
41 39
42 outl(PCI_CONF1_ADDRESS(pci_bus0, SPECIAL_DEV, SPECIAL_REG), 0xCF8); 40 outl(PCI_CONF1_ADDRESS(pci_bus0, SPECIAL_DEV, SPECIAL_REG), 0xCF8);
43 outl(PIIX_SPECIAL_STOP, 0xCFC); 41 outl(PIIX_SPECIAL_STOP, 0xCFC);
44 } 42 }
45 43
46 EXPORT_SYMBOL(machine_power_off);
47
48 void machine_halt(void) 44 void machine_halt(void)
49 { 45 {
50 } 46 }
51
52 EXPORT_SYMBOL(machine_halt);
53 47
arch/i386/mach-voyager/voyager_basic.c
1 /* Copyright (C) 1999,2001 1 /* Copyright (C) 1999,2001
2 * 2 *
3 * Author: J.E.J.Bottomley@HansenPartnership.com 3 * Author: J.E.J.Bottomley@HansenPartnership.com
4 * 4 *
5 * linux/arch/i386/kernel/voyager.c 5 * linux/arch/i386/kernel/voyager.c
6 * 6 *
7 * This file contains all the voyager specific routines for getting 7 * This file contains all the voyager specific routines for getting
8 * initialisation of the architecture to function. For additional 8 * initialisation of the architecture to function. For additional
9 * features see: 9 * features see:
10 * 10 *
11 * voyager_cat.c - Voyager CAT bus interface 11 * voyager_cat.c - Voyager CAT bus interface
12 * voyager_smp.c - Voyager SMP hal (emulates linux smp.c) 12 * voyager_smp.c - Voyager SMP hal (emulates linux smp.c)
13 */ 13 */
14 14
15 #include <linux/config.h> 15 #include <linux/config.h>
16 #include <linux/module.h> 16 #include <linux/module.h>
17 #include <linux/types.h> 17 #include <linux/types.h>
18 #include <linux/sched.h> 18 #include <linux/sched.h>
19 #include <linux/ptrace.h> 19 #include <linux/ptrace.h>
20 #include <linux/ioport.h> 20 #include <linux/ioport.h>
21 #include <linux/interrupt.h> 21 #include <linux/interrupt.h>
22 #include <linux/init.h> 22 #include <linux/init.h>
23 #include <linux/delay.h> 23 #include <linux/delay.h>
24 #include <linux/reboot.h> 24 #include <linux/reboot.h>
25 #include <linux/sysrq.h> 25 #include <linux/sysrq.h>
26 #include <asm/io.h> 26 #include <asm/io.h>
27 #include <asm/voyager.h> 27 #include <asm/voyager.h>
28 #include <asm/vic.h> 28 #include <asm/vic.h>
29 #include <linux/pm.h> 29 #include <linux/pm.h>
30 #include <linux/irq.h> 30 #include <linux/irq.h>
31 #include <asm/tlbflush.h> 31 #include <asm/tlbflush.h>
32 #include <asm/arch_hooks.h> 32 #include <asm/arch_hooks.h>
33 #include <asm/i8253.h> 33 #include <asm/i8253.h>
34 34
35 /* 35 /*
36 * Power off function, if any 36 * Power off function, if any
37 */ 37 */
38 void (*pm_power_off)(void); 38 void (*pm_power_off)(void);
39 EXPORT_SYMBOL(pm_power_off); 39 EXPORT_SYMBOL(pm_power_off);
40 40
41 int voyager_level = 0; 41 int voyager_level = 0;
42 42
43 struct voyager_SUS *voyager_SUS = NULL; 43 struct voyager_SUS *voyager_SUS = NULL;
44 44
45 #ifdef CONFIG_SMP 45 #ifdef CONFIG_SMP
46 static void 46 static void
47 voyager_dump(int dummy1, struct pt_regs *dummy2, struct tty_struct *dummy3) 47 voyager_dump(int dummy1, struct pt_regs *dummy2, struct tty_struct *dummy3)
48 { 48 {
49 /* get here via a sysrq */ 49 /* get here via a sysrq */
50 voyager_smp_dump(); 50 voyager_smp_dump();
51 } 51 }
52 52
53 static struct sysrq_key_op sysrq_voyager_dump_op = { 53 static struct sysrq_key_op sysrq_voyager_dump_op = {
54 .handler = voyager_dump, 54 .handler = voyager_dump,
55 .help_msg = "Voyager", 55 .help_msg = "Voyager",
56 .action_msg = "Dump Voyager Status", 56 .action_msg = "Dump Voyager Status",
57 }; 57 };
58 #endif 58 #endif
59 59
60 void 60 void
61 voyager_detect(struct voyager_bios_info *bios) 61 voyager_detect(struct voyager_bios_info *bios)
62 { 62 {
63 if(bios->len != 0xff) { 63 if(bios->len != 0xff) {
64 int class = (bios->class_1 << 8) 64 int class = (bios->class_1 << 8)
65 | (bios->class_2 & 0xff); 65 | (bios->class_2 & 0xff);
66 66
67 printk("Voyager System detected.\n" 67 printk("Voyager System detected.\n"
68 " Class %x, Revision %d.%d\n", 68 " Class %x, Revision %d.%d\n",
69 class, bios->major, bios->minor); 69 class, bios->major, bios->minor);
70 if(class == VOYAGER_LEVEL4) 70 if(class == VOYAGER_LEVEL4)
71 voyager_level = 4; 71 voyager_level = 4;
72 else if(class < VOYAGER_LEVEL5_AND_ABOVE) 72 else if(class < VOYAGER_LEVEL5_AND_ABOVE)
73 voyager_level = 3; 73 voyager_level = 3;
74 else 74 else
75 voyager_level = 5; 75 voyager_level = 5;
76 printk(" Architecture Level %d\n", voyager_level); 76 printk(" Architecture Level %d\n", voyager_level);
77 if(voyager_level < 4) 77 if(voyager_level < 4)
78 printk("\n**WARNING**: Voyager HAL only supports Levels 4 and 5 Architectures at the moment\n\n"); 78 printk("\n**WARNING**: Voyager HAL only supports Levels 4 and 5 Architectures at the moment\n\n");
79 /* install the power off handler */ 79 /* install the power off handler */
80 pm_power_off = voyager_power_off; 80 pm_power_off = voyager_power_off;
81 #ifdef CONFIG_SMP 81 #ifdef CONFIG_SMP
82 register_sysrq_key('v', &sysrq_voyager_dump_op); 82 register_sysrq_key('v', &sysrq_voyager_dump_op);
83 #endif 83 #endif
84 } else { 84 } else {
85 printk("\n\n**WARNING**: No Voyager Subsystem Found\n"); 85 printk("\n\n**WARNING**: No Voyager Subsystem Found\n");
86 } 86 }
87 } 87 }
88 88
89 void 89 void
90 voyager_system_interrupt(int cpl, void *dev_id, struct pt_regs *regs) 90 voyager_system_interrupt(int cpl, void *dev_id, struct pt_regs *regs)
91 { 91 {
92 printk("Voyager: detected system interrupt\n"); 92 printk("Voyager: detected system interrupt\n");
93 } 93 }
94 94
95 /* Routine to read information from the extended CMOS area */ 95 /* Routine to read information from the extended CMOS area */
96 __u8 96 __u8
97 voyager_extended_cmos_read(__u16 addr) 97 voyager_extended_cmos_read(__u16 addr)
98 { 98 {
99 outb(addr & 0xff, 0x74); 99 outb(addr & 0xff, 0x74);
100 outb((addr >> 8) & 0xff, 0x75); 100 outb((addr >> 8) & 0xff, 0x75);
101 return inb(0x76); 101 return inb(0x76);
102 } 102 }
103 103
104 /* internal definitions for the SUS Click Map of memory */ 104 /* internal definitions for the SUS Click Map of memory */
105 105
106 #define CLICK_ENTRIES 16 106 #define CLICK_ENTRIES 16
107 #define CLICK_SIZE 4096 /* click to byte conversion for Length */ 107 #define CLICK_SIZE 4096 /* click to byte conversion for Length */
108 108
109 typedef struct ClickMap { 109 typedef struct ClickMap {
110 struct Entry { 110 struct Entry {
111 __u32 Address; 111 __u32 Address;
112 __u32 Length; 112 __u32 Length;
113 } Entry[CLICK_ENTRIES]; 113 } Entry[CLICK_ENTRIES];
114 } ClickMap_t; 114 } ClickMap_t;
115 115
116 116
117 /* This routine is pretty much an awful hack to read the bios clickmap by 117 /* This routine is pretty much an awful hack to read the bios clickmap by
118 * mapping it into page 0. There are usually three regions in the map: 118 * mapping it into page 0. There are usually three regions in the map:
119 * Base Memory 119 * Base Memory
120 * Extended Memory 120 * Extended Memory
121 * zero length marker for end of map 121 * zero length marker for end of map
122 * 122 *
123 * Returns are 0 for failure and 1 for success on extracting region. 123 * Returns are 0 for failure and 1 for success on extracting region.
124 */ 124 */
125 int __init 125 int __init
126 voyager_memory_detect(int region, __u32 *start, __u32 *length) 126 voyager_memory_detect(int region, __u32 *start, __u32 *length)
127 { 127 {
128 int i; 128 int i;
129 int retval = 0; 129 int retval = 0;
130 __u8 cmos[4]; 130 __u8 cmos[4];
131 ClickMap_t *map; 131 ClickMap_t *map;
132 unsigned long map_addr; 132 unsigned long map_addr;
133 unsigned long old; 133 unsigned long old;
134 134
135 if(region >= CLICK_ENTRIES) { 135 if(region >= CLICK_ENTRIES) {
136 printk("Voyager: Illegal ClickMap region %d\n", region); 136 printk("Voyager: Illegal ClickMap region %d\n", region);
137 return 0; 137 return 0;
138 } 138 }
139 139
140 for(i = 0; i < sizeof(cmos); i++) 140 for(i = 0; i < sizeof(cmos); i++)
141 cmos[i] = voyager_extended_cmos_read(VOYAGER_MEMORY_CLICKMAP + i); 141 cmos[i] = voyager_extended_cmos_read(VOYAGER_MEMORY_CLICKMAP + i);
142 142
143 map_addr = *(unsigned long *)cmos; 143 map_addr = *(unsigned long *)cmos;
144 144
145 /* steal page 0 for this */ 145 /* steal page 0 for this */
146 old = pg0[0]; 146 old = pg0[0];
147 pg0[0] = ((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT); 147 pg0[0] = ((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
148 local_flush_tlb(); 148 local_flush_tlb();
149 /* now clear everything out but page 0 */ 149 /* now clear everything out but page 0 */
150 map = (ClickMap_t *)(map_addr & (~PAGE_MASK)); 150 map = (ClickMap_t *)(map_addr & (~PAGE_MASK));
151 151
152 /* zero length is the end of the clickmap */ 152 /* zero length is the end of the clickmap */
153 if(map->Entry[region].Length != 0) { 153 if(map->Entry[region].Length != 0) {
154 *length = map->Entry[region].Length * CLICK_SIZE; 154 *length = map->Entry[region].Length * CLICK_SIZE;
155 *start = map->Entry[region].Address; 155 *start = map->Entry[region].Address;
156 retval = 1; 156 retval = 1;
157 } 157 }
158 158
159 /* replace the mapping */ 159 /* replace the mapping */
160 pg0[0] = old; 160 pg0[0] = old;
161 local_flush_tlb(); 161 local_flush_tlb();
162 return retval; 162 return retval;
163 } 163 }
164 164
165 /* voyager specific handling code for timer interrupts. Used to hand 165 /* voyager specific handling code for timer interrupts. Used to hand
166 * off the timer tick to the SMP code, since the VIC doesn't have an 166 * off the timer tick to the SMP code, since the VIC doesn't have an
167 * internal timer (The QIC does, but that's another story). */ 167 * internal timer (The QIC does, but that's another story). */
168 void 168 void
169 voyager_timer_interrupt(struct pt_regs *regs) 169 voyager_timer_interrupt(struct pt_regs *regs)
170 { 170 {
171 if((jiffies & 0x3ff) == 0) { 171 if((jiffies & 0x3ff) == 0) {
172 172
173 /* There seems to be something flaky in either 173 /* There seems to be something flaky in either
174 * hardware or software that is resetting the timer 0 174 * hardware or software that is resetting the timer 0
175 * count to something much higher than it should be 175 * count to something much higher than it should be
176 * This seems to occur in the boot sequence, just 176 * This seems to occur in the boot sequence, just
177 * before root is mounted. Therefore, every 10 177 * before root is mounted. Therefore, every 10
178 * seconds or so, we sanity check the timer zero count 178 * seconds or so, we sanity check the timer zero count
179 * and kick it back to where it should be. 179 * and kick it back to where it should be.
180 * 180 *
181 * FIXME: This is the most awful hack yet seen. I 181 * FIXME: This is the most awful hack yet seen. I
182 * should work out exactly what is interfering with 182 * should work out exactly what is interfering with
183 * the timer count settings early in the boot sequence 183 * the timer count settings early in the boot sequence
184 * and swiftly introduce it to something sharp and 184 * and swiftly introduce it to something sharp and
185 * pointy. */ 185 * pointy. */
186 __u16 val; 186 __u16 val;
187 187
188 spin_lock(&i8253_lock); 188 spin_lock(&i8253_lock);
189 189
190 outb_p(0x00, 0x43); 190 outb_p(0x00, 0x43);
191 val = inb_p(0x40); 191 val = inb_p(0x40);
192 val |= inb(0x40) << 8; 192 val |= inb(0x40) << 8;
193 spin_unlock(&i8253_lock); 193 spin_unlock(&i8253_lock);
194 194
195 if(val > LATCH) { 195 if(val > LATCH) {
196 printk("\nVOYAGER: countdown timer value too high (%d), resetting\n\n", val); 196 printk("\nVOYAGER: countdown timer value too high (%d), resetting\n\n", val);
197 spin_lock(&i8253_lock); 197 spin_lock(&i8253_lock);
198 outb(0x34,0x43); 198 outb(0x34,0x43);
199 outb_p(LATCH & 0xff , 0x40); /* LSB */ 199 outb_p(LATCH & 0xff , 0x40); /* LSB */
200 outb(LATCH >> 8 , 0x40); /* MSB */ 200 outb(LATCH >> 8 , 0x40); /* MSB */
201 spin_unlock(&i8253_lock); 201 spin_unlock(&i8253_lock);
202 } 202 }
203 } 203 }
204 #ifdef CONFIG_SMP 204 #ifdef CONFIG_SMP
205 smp_vic_timer_interrupt(regs); 205 smp_vic_timer_interrupt(regs);
206 #endif 206 #endif
207 } 207 }
208 208
209 void 209 void
210 voyager_power_off(void) 210 voyager_power_off(void)
211 { 211 {
212 printk("VOYAGER Power Off\n"); 212 printk("VOYAGER Power Off\n");
213 213
214 if(voyager_level == 5) { 214 if(voyager_level == 5) {
215 voyager_cat_power_off(); 215 voyager_cat_power_off();
216 } else if(voyager_level == 4) { 216 } else if(voyager_level == 4) {
217 /* This doesn't apparently work on most L4 machines, 217 /* This doesn't apparently work on most L4 machines,
218 * but the specs say to do this to get automatic power 218 * but the specs say to do this to get automatic power
219 * off. Unfortunately, if it doesn't power off the 219 * off. Unfortunately, if it doesn't power off the
220 * machine, it ends up doing a cold restart, which 220 * machine, it ends up doing a cold restart, which
221 * isn't really intended, so comment out the code */ 221 * isn't really intended, so comment out the code */
222 #if 0 222 #if 0
223 int port; 223 int port;
224 224
225 225
226 /* enable the voyager Configuration Space */ 226 /* enable the voyager Configuration Space */
227 outb((inb(VOYAGER_MC_SETUP) & 0xf0) | 0x8, 227 outb((inb(VOYAGER_MC_SETUP) & 0xf0) | 0x8,
228 VOYAGER_MC_SETUP); 228 VOYAGER_MC_SETUP);
229 /* the port for the power off flag is an offset from the 229 /* the port for the power off flag is an offset from the
230 floating base */ 230 floating base */
231 port = (inb(VOYAGER_SSPB_RELOCATION_PORT) << 8) + 0x21; 231 port = (inb(VOYAGER_SSPB_RELOCATION_PORT) << 8) + 0x21;
232 /* set the power off flag */ 232 /* set the power off flag */
233 outb(inb(port) | 0x1, port); 233 outb(inb(port) | 0x1, port);
234 #endif 234 #endif
235 } 235 }
236 /* and wait for it to happen */ 236 /* and wait for it to happen */
237 for(;;) { 237 for(;;) {
238 __asm("cli"); 238 __asm("cli");
239 __asm("hlt"); 239 __asm("hlt");
240 } 240 }
241 } 241 }
242 242
243 /* copied from process.c */ 243 /* copied from process.c */
244 static inline void 244 static inline void
245 kb_wait(void) 245 kb_wait(void)
246 { 246 {
247 int i; 247 int i;
248 248
249 for (i=0; i<0x10000; i++) 249 for (i=0; i<0x10000; i++)
250 if ((inb_p(0x64) & 0x02) == 0) 250 if ((inb_p(0x64) & 0x02) == 0)
251 break; 251 break;
252 } 252 }
253 253
254 void 254 void
255 machine_restart(char *cmd) 255 machine_restart(char *cmd)
256 { 256 {
257 printk("Voyager Warm Restart\n"); 257 printk("Voyager Warm Restart\n");
258 kb_wait(); 258 kb_wait();
259 259
260 if(voyager_level == 5) { 260 if(voyager_level == 5) {
261 /* write magic values to the RTC to inform system that 261 /* write magic values to the RTC to inform system that
262 * shutdown is beginning */ 262 * shutdown is beginning */
263 outb(0x8f, 0x70); 263 outb(0x8f, 0x70);
264 outb(0x5 , 0x71); 264 outb(0x5 , 0x71);
265 265
266 udelay(50); 266 udelay(50);
267 outb(0xfe,0x64); /* pull reset low */ 267 outb(0xfe,0x64); /* pull reset low */
268 } else if(voyager_level == 4) { 268 } else if(voyager_level == 4) {
269 __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8; 269 __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT)<<8;
270 __u8 basebd = inb(VOYAGER_MC_SETUP); 270 __u8 basebd = inb(VOYAGER_MC_SETUP);
271 271
272 outb(basebd | 0x08, VOYAGER_MC_SETUP); 272 outb(basebd | 0x08, VOYAGER_MC_SETUP);
273 outb(0x02, catbase + 0x21); 273 outb(0x02, catbase + 0x21);
274 } 274 }
275 for(;;) { 275 for(;;) {
276 asm("cli"); 276 asm("cli");
277 asm("hlt"); 277 asm("hlt");
278 } 278 }
279 } 279 }
280 280
281 EXPORT_SYMBOL(machine_restart);
282
283 void 281 void
284 mca_nmi_hook(void) 282 mca_nmi_hook(void)
285 { 283 {
286 __u8 dumpval __attribute__((unused)) = inb(0xf823); 284 __u8 dumpval __attribute__((unused)) = inb(0xf823);
287 __u8 swnmi __attribute__((unused)) = inb(0xf813); 285 __u8 swnmi __attribute__((unused)) = inb(0xf813);
288 286
289 /* FIXME: assume dump switch pressed */ 287 /* FIXME: assume dump switch pressed */
290 /* check to see if the dump switch was pressed */ 288 /* check to see if the dump switch was pressed */
291 VDEBUG(("VOYAGER: dumpval = 0x%x, swnmi = 0x%x\n", dumpval, swnmi)); 289 VDEBUG(("VOYAGER: dumpval = 0x%x, swnmi = 0x%x\n", dumpval, swnmi));
292 /* clear swnmi */ 290 /* clear swnmi */
293 outb(0xff, 0xf813); 291 outb(0xff, 0xf813);
294 /* tell SUS to ignore dump */ 292 /* tell SUS to ignore dump */
295 if(voyager_level == 5 && voyager_SUS != NULL) { 293 if(voyager_level == 5 && voyager_SUS != NULL) {
296 if(voyager_SUS->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI) { 294 if(voyager_SUS->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI) {
297 voyager_SUS->kernel_mbox = VOYAGER_NO_COMMAND; 295 voyager_SUS->kernel_mbox = VOYAGER_NO_COMMAND;
298 voyager_SUS->kernel_flags |= VOYAGER_OS_IN_PROGRESS; 296 voyager_SUS->kernel_flags |= VOYAGER_OS_IN_PROGRESS;
299 udelay(1000); 297 udelay(1000);
300 voyager_SUS->kernel_mbox = VOYAGER_IGNORE_DUMP; 298 voyager_SUS->kernel_mbox = VOYAGER_IGNORE_DUMP;
301 voyager_SUS->kernel_flags &= ~VOYAGER_OS_IN_PROGRESS; 299 voyager_SUS->kernel_flags &= ~VOYAGER_OS_IN_PROGRESS;
302 } 300 }
303 } 301 }
304 printk(KERN_ERR "VOYAGER: Dump switch pressed, printing CPU%d tracebacks\n", smp_processor_id()); 302 printk(KERN_ERR "VOYAGER: Dump switch pressed, printing CPU%d tracebacks\n", smp_processor_id());
305 show_stack(NULL, NULL); 303 show_stack(NULL, NULL);
306 show_state(); 304 show_state();
307 } 305 }
308 306
309 307
310 308
311 void 309 void
312 machine_halt(void) 310 machine_halt(void)
313 { 311 {
314 /* treat a halt like a power off */ 312 /* treat a halt like a power off */
315 machine_power_off(); 313 machine_power_off();
316 } 314 }
317 315
318 EXPORT_SYMBOL(machine_halt);
319
320 void machine_power_off(void) 316 void machine_power_off(void)
321 { 317 {
322 if (pm_power_off) 318 if (pm_power_off)
323 pm_power_off(); 319 pm_power_off();
324 } 320 }
325
326 EXPORT_SYMBOL(machine_power_off);
327 321
arch/ia64/kernel/process.c
1 /* 1 /*
2 * Architecture-specific setup. 2 * Architecture-specific setup.
3 * 3 *
4 * Copyright (C) 1998-2003 Hewlett-Packard Co 4 * Copyright (C) 1998-2003 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com> 5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support 6 * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
7 */ 7 */
8 #define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */ 8 #define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */
9 #include <linux/config.h> 9 #include <linux/config.h>
10 10
11 #include <linux/cpu.h> 11 #include <linux/cpu.h>
12 #include <linux/pm.h> 12 #include <linux/pm.h>
13 #include <linux/elf.h> 13 #include <linux/elf.h>
14 #include <linux/errno.h> 14 #include <linux/errno.h>
15 #include <linux/kallsyms.h> 15 #include <linux/kallsyms.h>
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/mm.h> 17 #include <linux/mm.h>
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/notifier.h> 19 #include <linux/notifier.h>
20 #include <linux/personality.h> 20 #include <linux/personality.h>
21 #include <linux/sched.h> 21 #include <linux/sched.h>
22 #include <linux/slab.h> 22 #include <linux/slab.h>
23 #include <linux/smp_lock.h> 23 #include <linux/smp_lock.h>
24 #include <linux/stddef.h> 24 #include <linux/stddef.h>
25 #include <linux/thread_info.h> 25 #include <linux/thread_info.h>
26 #include <linux/unistd.h> 26 #include <linux/unistd.h>
27 #include <linux/efi.h> 27 #include <linux/efi.h>
28 #include <linux/interrupt.h> 28 #include <linux/interrupt.h>
29 #include <linux/delay.h> 29 #include <linux/delay.h>
30 #include <linux/kprobes.h> 30 #include <linux/kprobes.h>
31 31
32 #include <asm/cpu.h> 32 #include <asm/cpu.h>
33 #include <asm/delay.h> 33 #include <asm/delay.h>
34 #include <asm/elf.h> 34 #include <asm/elf.h>
35 #include <asm/ia32.h> 35 #include <asm/ia32.h>
36 #include <asm/irq.h> 36 #include <asm/irq.h>
37 #include <asm/pgalloc.h> 37 #include <asm/pgalloc.h>
38 #include <asm/processor.h> 38 #include <asm/processor.h>
39 #include <asm/sal.h> 39 #include <asm/sal.h>
40 #include <asm/tlbflush.h> 40 #include <asm/tlbflush.h>
41 #include <asm/uaccess.h> 41 #include <asm/uaccess.h>
42 #include <asm/unwind.h> 42 #include <asm/unwind.h>
43 #include <asm/user.h> 43 #include <asm/user.h>
44 44
45 #include "entry.h" 45 #include "entry.h"
46 46
47 #ifdef CONFIG_PERFMON 47 #ifdef CONFIG_PERFMON
48 # include <asm/perfmon.h> 48 # include <asm/perfmon.h>
49 #endif 49 #endif
50 50
51 #include "sigframe.h" 51 #include "sigframe.h"
52 52
53 void (*ia64_mark_idle)(int); 53 void (*ia64_mark_idle)(int);
54 static DEFINE_PER_CPU(unsigned int, cpu_idle_state); 54 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
55 55
56 unsigned long boot_option_idle_override = 0; 56 unsigned long boot_option_idle_override = 0;
57 EXPORT_SYMBOL(boot_option_idle_override); 57 EXPORT_SYMBOL(boot_option_idle_override);
58 58
59 void 59 void
60 ia64_do_show_stack (struct unw_frame_info *info, void *arg) 60 ia64_do_show_stack (struct unw_frame_info *info, void *arg)
61 { 61 {
62 unsigned long ip, sp, bsp; 62 unsigned long ip, sp, bsp;
63 char buf[128]; /* don't make it so big that it overflows the stack! */ 63 char buf[128]; /* don't make it so big that it overflows the stack! */
64 64
65 printk("\nCall Trace:\n"); 65 printk("\nCall Trace:\n");
66 do { 66 do {
67 unw_get_ip(info, &ip); 67 unw_get_ip(info, &ip);
68 if (ip == 0) 68 if (ip == 0)
69 break; 69 break;
70 70
71 unw_get_sp(info, &sp); 71 unw_get_sp(info, &sp);
72 unw_get_bsp(info, &bsp); 72 unw_get_bsp(info, &bsp);
73 snprintf(buf, sizeof(buf), 73 snprintf(buf, sizeof(buf),
74 " [<%016lx>] %%s\n" 74 " [<%016lx>] %%s\n"
75 " sp=%016lx bsp=%016lx\n", 75 " sp=%016lx bsp=%016lx\n",
76 ip, sp, bsp); 76 ip, sp, bsp);
77 print_symbol(buf, ip); 77 print_symbol(buf, ip);
78 } while (unw_unwind(info) >= 0); 78 } while (unw_unwind(info) >= 0);
79 } 79 }
80 80
81 void 81 void
82 show_stack (struct task_struct *task, unsigned long *sp) 82 show_stack (struct task_struct *task, unsigned long *sp)
83 { 83 {
84 if (!task) 84 if (!task)
85 unw_init_running(ia64_do_show_stack, NULL); 85 unw_init_running(ia64_do_show_stack, NULL);
86 else { 86 else {
87 struct unw_frame_info info; 87 struct unw_frame_info info;
88 88
89 unw_init_from_blocked_task(&info, task); 89 unw_init_from_blocked_task(&info, task);
90 ia64_do_show_stack(&info, NULL); 90 ia64_do_show_stack(&info, NULL);
91 } 91 }
92 } 92 }
93 93
94 void 94 void
95 dump_stack (void) 95 dump_stack (void)
96 { 96 {
97 show_stack(NULL, NULL); 97 show_stack(NULL, NULL);
98 } 98 }
99 99
100 EXPORT_SYMBOL(dump_stack); 100 EXPORT_SYMBOL(dump_stack);
101 101
102 void 102 void
103 show_regs (struct pt_regs *regs) 103 show_regs (struct pt_regs *regs)
104 { 104 {
105 unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; 105 unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
106 106
107 print_modules(); 107 print_modules();
108 printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm); 108 printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm);
109 printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n", 109 printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n",
110 regs->cr_ipsr, regs->cr_ifs, ip, print_tainted()); 110 regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
111 print_symbol("ip is at %s\n", ip); 111 print_symbol("ip is at %s\n", ip);
112 printk("unat: %016lx pfs : %016lx rsc : %016lx\n", 112 printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
113 regs->ar_unat, regs->ar_pfs, regs->ar_rsc); 113 regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
114 printk("rnat: %016lx bsps: %016lx pr : %016lx\n", 114 printk("rnat: %016lx bsps: %016lx pr : %016lx\n",
115 regs->ar_rnat, regs->ar_bspstore, regs->pr); 115 regs->ar_rnat, regs->ar_bspstore, regs->pr);
116 printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n", 116 printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
117 regs->loadrs, regs->ar_ccv, regs->ar_fpsr); 117 regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
118 printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd); 118 printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
119 printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7); 119 printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7);
120 printk("f6 : %05lx%016lx f7 : %05lx%016lx\n", 120 printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
121 regs->f6.u.bits[1], regs->f6.u.bits[0], 121 regs->f6.u.bits[1], regs->f6.u.bits[0],
122 regs->f7.u.bits[1], regs->f7.u.bits[0]); 122 regs->f7.u.bits[1], regs->f7.u.bits[0]);
123 printk("f8 : %05lx%016lx f9 : %05lx%016lx\n", 123 printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
124 regs->f8.u.bits[1], regs->f8.u.bits[0], 124 regs->f8.u.bits[1], regs->f8.u.bits[0],
125 regs->f9.u.bits[1], regs->f9.u.bits[0]); 125 regs->f9.u.bits[1], regs->f9.u.bits[0]);
126 printk("f10 : %05lx%016lx f11 : %05lx%016lx\n", 126 printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
127 regs->f10.u.bits[1], regs->f10.u.bits[0], 127 regs->f10.u.bits[1], regs->f10.u.bits[0],
128 regs->f11.u.bits[1], regs->f11.u.bits[0]); 128 regs->f11.u.bits[1], regs->f11.u.bits[0]);
129 129
130 printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3); 130 printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3);
131 printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10); 131 printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
132 printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13); 132 printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13);
133 printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16); 133 printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16);
134 printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19); 134 printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19);
135 printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22); 135 printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22);
136 printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25); 136 printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25);
137 printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28); 137 printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28);
138 printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31); 138 printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31);
139 139
140 if (user_mode(regs)) { 140 if (user_mode(regs)) {
141 /* print the stacked registers */ 141 /* print the stacked registers */
142 unsigned long val, *bsp, ndirty; 142 unsigned long val, *bsp, ndirty;
143 int i, sof, is_nat = 0; 143 int i, sof, is_nat = 0;
144 144
145 sof = regs->cr_ifs & 0x7f; /* size of frame */ 145 sof = regs->cr_ifs & 0x7f; /* size of frame */
146 ndirty = (regs->loadrs >> 19); 146 ndirty = (regs->loadrs >> 19);
147 bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty); 147 bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
148 for (i = 0; i < sof; ++i) { 148 for (i = 0; i < sof; ++i) {
149 get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i)); 149 get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
150 printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val, 150 printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
151 ((i == sof - 1) || (i % 3) == 2) ? "\n" : " "); 151 ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
152 } 152 }
153 } else 153 } else
154 show_stack(NULL, NULL); 154 show_stack(NULL, NULL);
155 } 155 }
156 156
157 void 157 void
158 do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall) 158 do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
159 { 159 {
160 if (fsys_mode(current, &scr->pt)) { 160 if (fsys_mode(current, &scr->pt)) {
161 /* defer signal-handling etc. until we return to privilege-level 0. */ 161 /* defer signal-handling etc. until we return to privilege-level 0. */
162 if (!ia64_psr(&scr->pt)->lp) 162 if (!ia64_psr(&scr->pt)->lp)
163 ia64_psr(&scr->pt)->lp = 1; 163 ia64_psr(&scr->pt)->lp = 1;
164 return; 164 return;
165 } 165 }
166 166
167 #ifdef CONFIG_PERFMON 167 #ifdef CONFIG_PERFMON
168 if (current->thread.pfm_needs_checking) 168 if (current->thread.pfm_needs_checking)
169 pfm_handle_work(); 169 pfm_handle_work();
170 #endif 170 #endif
171 171
172 /* deal with pending signal delivery */ 172 /* deal with pending signal delivery */
173 if (test_thread_flag(TIF_SIGPENDING)) 173 if (test_thread_flag(TIF_SIGPENDING))
174 ia64_do_signal(oldset, scr, in_syscall); 174 ia64_do_signal(oldset, scr, in_syscall);
175 } 175 }
176 176
177 static int pal_halt = 1; 177 static int pal_halt = 1;
178 static int can_do_pal_halt = 1; 178 static int can_do_pal_halt = 1;
179 179
180 static int __init nohalt_setup(char * str) 180 static int __init nohalt_setup(char * str)
181 { 181 {
182 pal_halt = 0; 182 pal_halt = 0;
183 return 1; 183 return 1;
184 } 184 }
185 __setup("nohalt", nohalt_setup); 185 __setup("nohalt", nohalt_setup);
186 186
187 void 187 void
188 update_pal_halt_status(int status) 188 update_pal_halt_status(int status)
189 { 189 {
190 can_do_pal_halt = pal_halt && status; 190 can_do_pal_halt = pal_halt && status;
191 } 191 }
192 192
193 /* 193 /*
194 * We use this if we don't have any better idle routine.. 194 * We use this if we don't have any better idle routine..
195 */ 195 */
196 void 196 void
197 default_idle (void) 197 default_idle (void)
198 { 198 {
199 local_irq_enable(); 199 local_irq_enable();
200 while (!need_resched()) 200 while (!need_resched())
201 if (can_do_pal_halt) 201 if (can_do_pal_halt)
202 safe_halt(); 202 safe_halt();
203 else 203 else
204 cpu_relax(); 204 cpu_relax();
205 } 205 }
206 206
#ifdef CONFIG_HOTPLUG_CPU
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	extern void ia64_cpu_local_tick (void);
	unsigned int this_cpu = smp_processor_id();

	/* Ack it: tell the CPU-hotplug machinery this CPU is now dead */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	max_xtp();
	local_irq_disable();
	idle_task_exit();
	/* hand the CPU back to SAL's boot rendezvous loop */
	ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
	/*
	 * The above is a point of no-return, the processor is
	 * expected to be in SAL loop now.
	 */
	BUG();
}
#else
/* Without CPU hotplug an offline CPU reaching here is a kernel bug. */
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
233 233
/*
 * Wait until every online CPU has passed through its idle loop at
 * least once: set per_cpu(cpu_idle_state) on each online CPU and poll
 * (sleeping 1s between scans) until each CPU's idle loop has cleared
 * its flag again (see the matching clear in cpu_idle()).
 */
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map;

	/* pin ourselves to this CPU so this_cpu stays meaningful */
	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	/* don't wait for ourselves -- we are obviously not idle */
	__get_cpu_var(cpu_idle_state) = 0;

	wmb();	/* publish the flag stores before we start polling */
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		/* drop CPUs that went offline while we slept */
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
261 261
/*
 * Per-CPU idle loop: never returns.  Repeatedly runs the configured
 * idle routine (pm_idle, falling back to default_idle) until a
 * reschedule is needed, then calls schedule().  Also acknowledges
 * cpu_idle_wait() by clearing this CPU's cpu_idle_state flag, and
 * hands an offlined CPU to play_dead().
 */
void __attribute__((noreturn))
cpu_idle (void)
{
	/* optional idle-entry/exit hook; may be NULL */
	void (*mark_idle)(int) = ia64_mark_idle;

	/* endless idle loop with no priority at all */
	while (1) {
#ifdef CONFIG_SMP
		if (!need_resched())
			min_xtp();
#endif
		while (!need_resched()) {
			void (*idle)(void);

			/* ack cpu_idle_wait(): we have been through idle */
			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			rmb();	/* read pm_idle only after the flag check */
			if (mark_idle)
				(*mark_idle)(1);

			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			(*idle)();
		}

		if (mark_idle)
			(*mark_idle)(0);

#ifdef CONFIG_SMP
		normal_xtp();
#endif
		schedule();
		check_pgt_cache();
		if (cpu_is_offline(smp_processor_id()))
			play_dead();
	}
}
301 301
/*
 * Save the "extra" (non-core) per-thread state of @task on context
 * switch out: debug registers, perfmon state, and (for IA-32 tasks)
 * the ia32 emulation state.
 */
void
ia64_save_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
	unsigned long info;
#endif

	/* only save debug registers if the thread actually uses them */
	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_save_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_save_regs(task);

	/* system-wide monitoring needs a per-CPU update on switch-out (0) */
	info = __get_cpu_var(pfm_syst_info);
	if (info & PFM_CPUINFO_SYST_WIDE)
		pfm_syst_wide_update_task(task, info, 0);
#endif

#ifdef CONFIG_IA32_SUPPORT
	if (IS_IA32_PROCESS(ia64_task_regs(task)))
		ia32_save_state(task);
#endif
}
326 326
/*
 * Counterpart of ia64_save_extra(): restore the "extra" per-thread
 * state of @task on context switch in.
 */
void
ia64_load_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
	unsigned long info;
#endif

	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_load_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_load_regs(task);

	/* system-wide monitoring needs a per-CPU update on switch-in (1) */
	info = __get_cpu_var(pfm_syst_info);
	if (info & PFM_CPUINFO_SYST_WIDE)
		pfm_syst_wide_update_task(task, info, 1);
#endif

#ifdef CONFIG_IA32_SUPPORT
	if (IS_IA32_PROCESS(ia64_task_regs(task)))
		ia32_load_state(task);
#endif
}
351 351
352 /* 352 /*
353 * Copy the state of an ia-64 thread. 353 * Copy the state of an ia-64 thread.
354 * 354 *
355 * We get here through the following call chain: 355 * We get here through the following call chain:
356 * 356 *
357 * from user-level: from kernel: 357 * from user-level: from kernel:
358 * 358 *
359 * <clone syscall> <some kernel call frames> 359 * <clone syscall> <some kernel call frames>
360 * sys_clone : 360 * sys_clone :
361 * do_fork do_fork 361 * do_fork do_fork
362 * copy_thread copy_thread 362 * copy_thread copy_thread
363 * 363 *
364 * This means that the stack layout is as follows: 364 * This means that the stack layout is as follows:
365 * 365 *
366 * +---------------------+ (highest addr) 366 * +---------------------+ (highest addr)
367 * | struct pt_regs | 367 * | struct pt_regs |
368 * +---------------------+ 368 * +---------------------+
369 * | struct switch_stack | 369 * | struct switch_stack |
370 * +---------------------+ 370 * +---------------------+
371 * | | 371 * | |
372 * | memory stack | 372 * | memory stack |
373 * | | <-- sp (lowest addr) 373 * | | <-- sp (lowest addr)
374 * +---------------------+ 374 * +---------------------+
375 * 375 *
376 * Observe that we copy the unat values that are in pt_regs and switch_stack. Spilling an 376 * Observe that we copy the unat values that are in pt_regs and switch_stack. Spilling an
377 * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register, 377 * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
378 * with N=(X & 0x1ff)/8. Thus, copying the unat value preserves the NaT bits ONLY if the 378 * with N=(X & 0x1ff)/8. Thus, copying the unat value preserves the NaT bits ONLY if the
379 * pt_regs structure in the parent is congruent to that of the child, modulo 512. Since 379 * pt_regs structure in the parent is congruent to that of the child, modulo 512. Since
380 * the stack is page aligned and the page size is at least 4KB, this is always the case, 380 * the stack is page aligned and the page size is at least 4KB, this is always the case,
381 * so there is nothing to worry about. 381 * so there is nothing to worry about.
382 */ 382 */
/*
 * Architecture part of fork/clone: set up the child's kernel stack
 * (pt_regs + switch_stack), register backing store, and thread state.
 * See the large layout comment above for the stack picture and the
 * unat-preservation argument.  Returns 0 on success or a negative
 * errno from the IA-32 helpers.
 */
int
copy_thread (int nr, unsigned long clone_flags,
	     unsigned long user_stack_base, unsigned long user_stack_size,
	     struct task_struct *p, struct pt_regs *regs)
{
	extern char ia64_ret_from_clone, ia32_ret_from_clone;
	struct switch_stack *child_stack, *stack;
	unsigned long rbs, child_rbs, rbs_size;
	struct pt_regs *child_ptregs;
	int retval = 0;

#ifdef CONFIG_SMP
	/*
	 * For SMP idle threads, fork_by_hand() calls do_fork with
	 * NULL regs.
	 */
	if (!regs)
		return 0;
#endif

	/* parent's switch_stack sits immediately below its pt_regs */
	stack = ((struct switch_stack *) regs) - 1;

	/* locate the corresponding structures at the top of the child's stack */
	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
	child_stack = (struct switch_stack *) child_ptregs - 1;

	/* copy parent's switch_stack & pt_regs to child: */
	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));

	rbs = (unsigned long) current + IA64_RBS_OFFSET;
	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
	rbs_size = stack->ar_bspstore - rbs;

	/* copy the parent's register backing store to the child: */
	memcpy((void *) child_rbs, (void *) rbs, rbs_size);

	if (likely(user_mode(child_ptregs))) {
		/* user fork/clone: optionally set TLS and a new user stack */
		if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs))
			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
		if (user_stack_base) {
			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
			child_ptregs->ar_bspstore = user_stack_base;
			child_ptregs->ar_rnat = 0;
			child_ptregs->loadrs = 0;
		}
	} else {
		/*
		 * Note: we simply preserve the relative position of
		 * the stack pointer here.  There is no need to
		 * allocate a scratch area here, since that will have
		 * been taken care of by the caller of sys_clone()
		 * already.
		 */
		child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */
		child_ptregs->r13 = (unsigned long) p;	/* set `current' pointer */
	}
	child_stack->ar_bspstore = child_rbs + rbs_size;
	/* pick the correct clone return trampoline for the child */
	if (IS_IA32_PROCESS(regs))
		child_stack->b0 = (unsigned long) &ia32_ret_from_clone;
	else
		child_stack->b0 = (unsigned long) &ia64_ret_from_clone;

	/* copy parts of thread_struct: */
	p->thread.ksp = (unsigned long) child_stack - 16;

	/* stop some PSR bits from being inherited.
	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
	 * therefore we must specify them explicitly here and not include them in
	 * IA64_PSR_BITS_TO_CLEAR.
	 */
	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));

	/*
	 * NOTE: The calling convention considers all floating point
	 * registers in the high partition (fph) to be scratch.  Since
	 * the only way to get to this point is through a system call,
	 * we know that the values in fph are all dead.  Hence, there
	 * is no need to inherit the fph state from the parent to the
	 * child and all we have to do is to make sure that
	 * IA64_THREAD_FPH_VALID is cleared in the child.
	 *
	 * XXX We could push this optimization a bit further by
	 * clearing IA64_THREAD_FPH_VALID on ANY system call.
	 * However, it's not clear this is worth doing.  Also, it
	 * would be a slight deviation from the normal Linux system
	 * call behavior where scratch registers are preserved across
	 * system calls (unless used by the system call itself).
	 */
#	define THREAD_FLAGS_TO_CLEAR	(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
					 | IA64_THREAD_PM_VALID)
#	define THREAD_FLAGS_TO_SET	0
	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
			   | THREAD_FLAGS_TO_SET);
	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */
#ifdef CONFIG_IA32_SUPPORT
	/*
	 * If we're cloning an IA32 task then save the IA32 extra
	 * state from the current task to the new task
	 */
	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
		ia32_save_state(p);
		if (clone_flags & CLONE_SETTLS)
			retval = ia32_clone_tls(p, child_ptregs);

		/* Copy partially mapped page list */
		if (!retval)
			retval = ia32_copy_partial_page_list(p, clone_flags);
	}
#endif

#ifdef CONFIG_PERFMON
	if (current->thread.pfm_context)
		pfm_inherit(p, child_ptregs);
#endif
	return retval;
}
499 499
/*
 * Fill @arg (an elf_gregset_t) with @task's user-level general
 * registers in the coredump layout documented below, by unwinding
 * @info to the user frame.  On any unwind/sync failure the dest
 * buffer is left zeroed.
 */
static void
do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	unsigned long mask, sp, nat_bits = 0, ip, ar_rnat, urbs_end, cfm;
	elf_greg_t *dst = arg;
	struct pt_regs *pt;
	char nat;
	int i;

	memset(dst, 0, sizeof(elf_gregset_t));	/* don't leak any kernel bits to user-level */

	if (unw_unwind_to_user(info) < 0)
		return;

	unw_get_sp(info, &sp);
	/* the user pt_regs sits 16 bytes above the unwound sp */
	pt = (struct pt_regs *) (sp + 16);

	urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);

	if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
		return;

	/* fetch ar.rnat as seen at the end of the user RBS */
	ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
		  &ar_rnat);

	/*
	 * coredump format:
	 *	r0-r31
	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *	predicate registers (p0-p63)
	 *	b0-b7
	 *	ip cfm user-mask
	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* r0 is zero */
	for (i = 1, mask = (1UL << i); i < 32; ++i) {
		unw_get_gr(info, i, &dst[i], &nat);
		if (nat)
			nat_bits |= mask;
		mask <<= 1;
	}
	dst[32] = nat_bits;
	unw_get_pr(info, &dst[33]);

	for (i = 0; i < 8; ++i)
		unw_get_br(info, i, &dst[34 + i]);

	unw_get_rp(info, &ip);
	/* fold the restart-instruction slot (psr.ri) into the reported ip */
	dst[42] = ip + ia64_psr(pt)->ri;
	dst[43] = cfm;
	dst[44] = pt->cr_ipsr & IA64_PSR_UM;

	unw_get_ar(info, UNW_AR_RSC, &dst[45]);
	/*
	 * For bsp and bspstore, unw_get_ar() would return the kernel
	 * addresses, but we need the user-level addresses instead:
	 */
	dst[46] = urbs_end;	/* note: by convention PT_AR_BSP points to the end of the urbs! */
	dst[47] = pt->ar_bspstore;
	dst[48] = ar_rnat;
	unw_get_ar(info, UNW_AR_CCV, &dst[49]);
	unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
	unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
	dst[52] = pt->ar_pfs;	/* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
	unw_get_ar(info, UNW_AR_LC, &dst[53]);
	unw_get_ar(info, UNW_AR_EC, &dst[54]);
	unw_get_ar(info, UNW_AR_CSD, &dst[55]);
	unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}
571 571
/*
 * Fill @arg (an elf_fpregset_t) with @task's floating-point registers
 * by unwinding @info to the user frame.  f2-f31 come from the unwinder;
 * the high partition (f32-f127) is copied from thread.fph only when it
 * holds valid state.  On unwind failure the dest buffer is left zeroed.
 */
void
do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	elf_fpreg_t *dst = arg;
	int i;

	memset(dst, 0, sizeof(elf_fpregset_t));	/* don't leak any "random" bits */

	if (unw_unwind_to_user(info) < 0)
		return;

	/* f0 is 0.0, f1 is 1.0 */

	for (i = 2; i < 32; ++i)
		unw_get_fr(info, i, dst + i);

	/* make sure the live fph state is written back before copying it */
	ia64_flush_fph(task);
	if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
		memcpy(dst + 32, task->thread.fph, 96*16);
}
592 592
/* unw_init_running() callback: dump the current task's registers. */
void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
	do_copy_task_regs(current, info, arg);
}
598 598
/* unw_init_running() callback: dump the current task's FP registers. */
void
do_dump_fpu (struct unw_frame_info *info, void *arg)
{
	do_dump_task_fpu(current, info, arg);
}
604 604
605 int 605 int
606 dump_task_regs(struct task_struct *task, elf_gregset_t *regs) 606 dump_task_regs(struct task_struct *task, elf_gregset_t *regs)
607 { 607 {
608 struct unw_frame_info tcore_info; 608 struct unw_frame_info tcore_info;
609 609
610 if (current == task) { 610 if (current == task) {
611 unw_init_running(do_copy_regs, regs); 611 unw_init_running(do_copy_regs, regs);
612 } else { 612 } else {
613 memset(&tcore_info, 0, sizeof(tcore_info)); 613 memset(&tcore_info, 0, sizeof(tcore_info));
614 unw_init_from_blocked_task(&tcore_info, task); 614 unw_init_from_blocked_task(&tcore_info, task);
615 do_copy_task_regs(task, &tcore_info, regs); 615 do_copy_task_regs(task, &tcore_info, regs);
616 } 616 }
617 return 1; 617 return 1;
618 } 618 }
619 619
/* ELF coredump hook: copy the current task's registers into @dst. */
void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
	unw_init_running(do_copy_regs, dst);
}
625 625
626 int 626 int
627 dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst) 627 dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst)
628 { 628 {
629 struct unw_frame_info tcore_info; 629 struct unw_frame_info tcore_info;
630 630
631 if (current == task) { 631 if (current == task) {
632 unw_init_running(do_dump_fpu, dst); 632 unw_init_running(do_dump_fpu, dst);
633 } else { 633 } else {
634 memset(&tcore_info, 0, sizeof(tcore_info)); 634 memset(&tcore_info, 0, sizeof(tcore_info));
635 unw_init_from_blocked_task(&tcore_info, task); 635 unw_init_from_blocked_task(&tcore_info, task);
636 do_dump_task_fpu(task, &tcore_info, dst); 636 do_dump_task_fpu(task, &tcore_info, dst);
637 } 637 }
638 return 1; 638 return 1;
639 } 639 }
640 640
/* Legacy coredump hook: dump the current task's FP registers into @dst. */
int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
	unw_init_running(do_dump_fpu, dst);
	return 1;	/* f0-f31 are always valid so we always return 1 */
}
647 647
648 long 648 long
649 sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp, 649 sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp,
650 struct pt_regs *regs) 650 struct pt_regs *regs)
651 { 651 {
652 char *fname; 652 char *fname;
653 int error; 653 int error;
654 654
655 fname = getname(filename); 655 fname = getname(filename);
656 error = PTR_ERR(fname); 656 error = PTR_ERR(fname);
657 if (IS_ERR(fname)) 657 if (IS_ERR(fname))
658 goto out; 658 goto out;
659 error = do_execve(fname, argv, envp, regs); 659 error = do_execve(fname, argv, envp, regs);
660 putname(fname); 660 putname(fname);
661 out: 661 out:
662 return error; 662 return error;
663 } 663 }
664 664
/*
 * Create a kernel thread running fn(arg).  Builds a synthetic
 * switch_stack/pt_regs pair that makes the child start executing at
 * start_kernel_thread (which invokes kernel_thread_helper below), then
 * forks it with CLONE_VM | CLONE_UNTRACED.  Returns the child's pid
 * (or a negative errno from do_fork).
 */
pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
	extern void start_kernel_thread (void);
	/* an ia64 function pointer is a descriptor: [0] = entry IP, [1] = GP */
	unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
	struct {
		struct switch_stack sw;
		struct pt_regs pt;
	} regs;

	memset(&regs, 0, sizeof(regs));
	regs.pt.cr_iip = helper_fptr[0];	/* set entry point (IP) */
	regs.pt.r1 = helper_fptr[1];	/* set GP */
	regs.pt.r9 = (unsigned long) fn;	/* 1st argument */
	regs.pt.r11 = (unsigned long) arg;	/* 2nd argument */
	/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read.  */
	regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
	regs.pt.cr_ifs = 1UL << 63;		/* mark as valid, empty frame */
	regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
	regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
	regs.sw.pr = (1 << PRED_KERNEL_STACK);
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
689 689
/* This gets called from kernel_thread() via ia64_invoke_thread_helper().  */
int
kernel_thread_helper (int (*fn)(void *), void *arg)
{
#ifdef CONFIG_IA32_SUPPORT
	if (IS_IA32_PROCESS(ia64_task_regs(current))) {
		/* A kernel thread is always a 64-bit process. */
		current->thread.map_base  = DEFAULT_MAP_BASE;
		current->thread.task_size = DEFAULT_TASK_SIZE;
		/* restore the 64-bit kernel register values clobbered by ia32 mode */
		ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
		ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
	}
#endif
	return (*fn)(arg);
}
705 705
/*
 * Flush thread state.  This is called when a thread does an execve().
 */
void
flush_thread (void)
{
	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(current);

	/* drop floating-point and debug-register state if it exists: */
	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
	ia64_drop_fpu(current);
	/* an IA-32 task also drops its partially-mapped page list */
	if (IS_IA32_PROCESS(ia64_task_regs(current)))
		ia32_drop_partial_page_list(current);
}
725 725
/*
 * Clean up state associated with current thread.  This is called when
 * the thread calls exit().
 */
void
exit_thread (void)
{

	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(current);

	ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
       /* if needed, stop monitoring and flush state to perfmon context */
	if (current->thread.pfm_context)
		pfm_exit_thread(current);

	/* free debug register resources */
	if (current->thread.flags & IA64_THREAD_DBG_VALID)
		pfm_release_debug_registers(current);
#endif
	if (IS_IA32_PROCESS(ia64_task_regs(current)))
		ia32_drop_partial_page_list(current);
}
754 754
755 unsigned long 755 unsigned long
756 get_wchan (struct task_struct *p) 756 get_wchan (struct task_struct *p)
757 { 757 {
758 struct unw_frame_info info; 758 struct unw_frame_info info;
759 unsigned long ip; 759 unsigned long ip;
760 int count = 0; 760 int count = 0;
761 761
762 /* 762 /*
763 * Note: p may not be a blocked task (it could be current or 763 * Note: p may not be a blocked task (it could be current or
764 * another process running on some other CPU. Rather than 764 * another process running on some other CPU. Rather than
765 * trying to determine if p is really blocked, we just assume 765 * trying to determine if p is really blocked, we just assume
766 * it's blocked and rely on the unwind routines to fail 766 * it's blocked and rely on the unwind routines to fail
767 * gracefully if the process wasn't really blocked after all. 767 * gracefully if the process wasn't really blocked after all.
768 * --davidm 99/12/15 768 * --davidm 99/12/15
769 */ 769 */
770 unw_init_from_blocked_task(&info, p); 770 unw_init_from_blocked_task(&info, p);
771 do { 771 do {
772 if (unw_unwind(&info) < 0) 772 if (unw_unwind(&info) < 0)
773 return 0; 773 return 0;
774 unw_get_ip(&info, &ip); 774 unw_get_ip(&info, &ip);
775 if (!in_sched_functions(ip)) 775 if (!in_sched_functions(ip))
776 return ip; 776 return ip;
777 } while (count++ < 16); 777 } while (count++ < 16);
778 return 0; 778 return 0;
779 } 779 }
780 780
781 void 781 void
782 cpu_halt (void) 782 cpu_halt (void)
783 { 783 {
784 pal_power_mgmt_info_u_t power_info[8]; 784 pal_power_mgmt_info_u_t power_info[8];
785 unsigned long min_power; 785 unsigned long min_power;
786 int i, min_power_state; 786 int i, min_power_state;
787 787
788 if (ia64_pal_halt_info(power_info) != 0) 788 if (ia64_pal_halt_info(power_info) != 0)
789 return; 789 return;
790 790
791 min_power_state = 0; 791 min_power_state = 0;
792 min_power = power_info[0].pal_power_mgmt_info_s.power_consumption; 792 min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
793 for (i = 1; i < 8; ++i) 793 for (i = 1; i < 8; ++i)
794 if (power_info[i].pal_power_mgmt_info_s.im 794 if (power_info[i].pal_power_mgmt_info_s.im
795 && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) { 795 && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
796 min_power = power_info[i].pal_power_mgmt_info_s.power_consumption; 796 min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
797 min_power_state = i; 797 min_power_state = i;
798 } 798 }
799 799
800 while (1) 800 while (1)
801 ia64_pal_halt(min_power_state); 801 ia64_pal_halt(min_power_state);
802 } 802 }
803 803
804 void 804 void
805 machine_restart (char *restart_cmd) 805 machine_restart (char *restart_cmd)
806 { 806 {
807 (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL); 807 (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
808 } 808 }
809 809
810 EXPORT_SYMBOL(machine_restart);
811
812 void 810 void
813 machine_halt (void) 811 machine_halt (void)
814 { 812 {
815 cpu_halt(); 813 cpu_halt();
816 } 814 }
817 815
818 EXPORT_SYMBOL(machine_halt);
819
820 void 816 void
821 machine_power_off (void) 817 machine_power_off (void)
822 { 818 {
823 if (pm_power_off) 819 if (pm_power_off)
824 pm_power_off(); 820 pm_power_off();
825 machine_halt(); 821 machine_halt();
826 } 822 }
827
828 EXPORT_SYMBOL(machine_power_off);
829 823
arch/m32r/kernel/process.c
1 /* 1 /*
2 * linux/arch/m32r/kernel/process.c 2 * linux/arch/m32r/kernel/process.c
3 * 3 *
4 * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata, 4 * Copyright (c) 2001, 2002 Hiroyuki Kondo, Hirokazu Takata,
5 * Hitoshi Yamamoto 5 * Hitoshi Yamamoto
6 * Taken from sh version. 6 * Taken from sh version.
7 * Copyright (C) 1995 Linus Torvalds 7 * Copyright (C) 1995 Linus Torvalds
8 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima 8 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
9 */ 9 */
10 10
11 #undef DEBUG_PROCESS 11 #undef DEBUG_PROCESS
12 #ifdef DEBUG_PROCESS 12 #ifdef DEBUG_PROCESS
13 #define DPRINTK(fmt, args...) printk("%s:%d:%s: " fmt, __FILE__, __LINE__, \ 13 #define DPRINTK(fmt, args...) printk("%s:%d:%s: " fmt, __FILE__, __LINE__, \
14 __FUNCTION__, ##args) 14 __FUNCTION__, ##args)
15 #else 15 #else
16 #define DPRINTK(fmt, args...) 16 #define DPRINTK(fmt, args...)
17 #endif 17 #endif
18 18
19 /* 19 /*
20 * This file handles the architecture-dependent parts of process handling.. 20 * This file handles the architecture-dependent parts of process handling..
21 */ 21 */
22 22
23 #include <linux/fs.h> 23 #include <linux/fs.h>
24 #include <linux/config.h> 24 #include <linux/config.h>
25 #include <linux/module.h> 25 #include <linux/module.h>
26 #include <linux/ptrace.h> 26 #include <linux/ptrace.h>
27 #include <linux/unistd.h> 27 #include <linux/unistd.h>
28 #include <linux/slab.h> 28 #include <linux/slab.h>
29 #include <linux/hardirq.h> 29 #include <linux/hardirq.h>
30 30
31 #include <asm/io.h> 31 #include <asm/io.h>
32 #include <asm/uaccess.h> 32 #include <asm/uaccess.h>
33 #include <asm/mmu_context.h> 33 #include <asm/mmu_context.h>
34 #include <asm/elf.h> 34 #include <asm/elf.h>
35 #include <asm/m32r.h> 35 #include <asm/m32r.h>
36 36
37 #include <linux/err.h> 37 #include <linux/err.h>
38 38
39 static int hlt_counter=0; 39 static int hlt_counter=0;
40 40
41 /* 41 /*
42 * Return saved PC of a blocked thread. 42 * Return saved PC of a blocked thread.
43 */ 43 */
44 unsigned long thread_saved_pc(struct task_struct *tsk) 44 unsigned long thread_saved_pc(struct task_struct *tsk)
45 { 45 {
46 return tsk->thread.lr; 46 return tsk->thread.lr;
47 } 47 }
48 48
49 /* 49 /*
50 * Powermanagement idle function, if any.. 50 * Powermanagement idle function, if any..
51 */ 51 */
52 void (*pm_idle)(void) = NULL; 52 void (*pm_idle)(void) = NULL;
53 53
54 void disable_hlt(void) 54 void disable_hlt(void)
55 { 55 {
56 hlt_counter++; 56 hlt_counter++;
57 } 57 }
58 58
59 EXPORT_SYMBOL(disable_hlt); 59 EXPORT_SYMBOL(disable_hlt);
60 60
61 void enable_hlt(void) 61 void enable_hlt(void)
62 { 62 {
63 hlt_counter--; 63 hlt_counter--;
64 } 64 }
65 65
66 EXPORT_SYMBOL(enable_hlt); 66 EXPORT_SYMBOL(enable_hlt);
67 67
/*
 * We use this if we don't have any better idle routine..
 */
void default_idle(void)
{
	/* M32R_FIXME: Please use "cpu_sleep" mode. */
	cpu_relax();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive.  Use this option with caution.
 */
static void poll_idle (void)
{
	/* M32R_FIXME */
	cpu_relax();
}
88 88
89 /* 89 /*
90 * The idle thread. There's no useful work to be 90 * The idle thread. There's no useful work to be
91 * done, so just try to conserve power and have a 91 * done, so just try to conserve power and have a
92 * low exit latency (ie sit in a loop waiting for 92 * low exit latency (ie sit in a loop waiting for
93 * somebody to say that they'd like to reschedule) 93 * somebody to say that they'd like to reschedule)
94 */ 94 */
95 void cpu_idle (void) 95 void cpu_idle (void)
96 { 96 {
97 /* endless idle loop with no priority at all */ 97 /* endless idle loop with no priority at all */
98 while (1) { 98 while (1) {
99 while (!need_resched()) { 99 while (!need_resched()) {
100 void (*idle)(void) = pm_idle; 100 void (*idle)(void) = pm_idle;
101 101
102 if (!idle) 102 if (!idle)
103 idle = default_idle; 103 idle = default_idle;
104 104
105 idle(); 105 idle();
106 } 106 }
107 schedule(); 107 schedule();
108 } 108 }
109 } 109 }
110 110
/*
 * Machine-specific reboot hooks.  m32r has no software reset, so these
 * just ask the operator to press the reset button.  Deliberately not
 * exported: modules must use kernel_restart()/kernel_halt()/
 * kernel_power_off() instead.
 */
void machine_restart(char *__unused)
{
	printk("Please push reset button!\n");
	while (1)
		cpu_relax();
}

void machine_halt(void)
{
	printk("Please push reset button!\n");
	while (1)
		cpu_relax();
}

void machine_power_off(void)
{
	/* M32R_FIXME */
}
135 129
136 static int __init idle_setup (char *str) 130 static int __init idle_setup (char *str)
137 { 131 {
138 if (!strncmp(str, "poll", 4)) { 132 if (!strncmp(str, "poll", 4)) {
139 printk("using poll in idle threads.\n"); 133 printk("using poll in idle threads.\n");
140 pm_idle = poll_idle; 134 pm_idle = poll_idle;
141 } else if (!strncmp(str, "sleep", 4)) { 135 } else if (!strncmp(str, "sleep", 4)) {
142 printk("using sleep in idle threads.\n"); 136 printk("using sleep in idle threads.\n");
143 pm_idle = default_idle; 137 pm_idle = default_idle;
144 } 138 }
145 139
146 return 1; 140 return 1;
147 } 141 }
148 142
149 __setup("idle=", idle_setup); 143 __setup("idle=", idle_setup);
150 144
151 void show_regs(struct pt_regs * regs) 145 void show_regs(struct pt_regs * regs)
152 { 146 {
153 printk("\n"); 147 printk("\n");
154 printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \ 148 printk("BPC[%08lx]:PSW[%08lx]:LR [%08lx]:FP [%08lx]\n", \
155 regs->bpc, regs->psw, regs->lr, regs->fp); 149 regs->bpc, regs->psw, regs->lr, regs->fp);
156 printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \ 150 printk("BBPC[%08lx]:BBPSW[%08lx]:SPU[%08lx]:SPI[%08lx]\n", \
157 regs->bbpc, regs->bbpsw, regs->spu, regs->spi); 151 regs->bbpc, regs->bbpsw, regs->spu, regs->spi);
158 printk("R0 [%08lx]:R1 [%08lx]:R2 [%08lx]:R3 [%08lx]\n", \ 152 printk("R0 [%08lx]:R1 [%08lx]:R2 [%08lx]:R3 [%08lx]\n", \
159 regs->r0, regs->r1, regs->r2, regs->r3); 153 regs->r0, regs->r1, regs->r2, regs->r3);
160 printk("R4 [%08lx]:R5 [%08lx]:R6 [%08lx]:R7 [%08lx]\n", \ 154 printk("R4 [%08lx]:R5 [%08lx]:R6 [%08lx]:R7 [%08lx]\n", \
161 regs->r4, regs->r5, regs->r6, regs->r7); 155 regs->r4, regs->r5, regs->r6, regs->r7);
162 printk("R8 [%08lx]:R9 [%08lx]:R10[%08lx]:R11[%08lx]\n", \ 156 printk("R8 [%08lx]:R9 [%08lx]:R10[%08lx]:R11[%08lx]\n", \
163 regs->r8, regs->r9, regs->r10, regs->r11); 157 regs->r8, regs->r9, regs->r10, regs->r11);
164 printk("R12[%08lx]\n", \ 158 printk("R12[%08lx]\n", \
165 regs->r12); 159 regs->r12);
166 160
167 #if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2) 161 #if defined(CONFIG_ISA_M32R2) && defined(CONFIG_ISA_DSP_LEVEL2)
168 printk("ACC0H[%08lx]:ACC0L[%08lx]\n", \ 162 printk("ACC0H[%08lx]:ACC0L[%08lx]\n", \
169 regs->acc0h, regs->acc0l); 163 regs->acc0h, regs->acc0l);
170 printk("ACC1H[%08lx]:ACC1L[%08lx]\n", \ 164 printk("ACC1H[%08lx]:ACC1L[%08lx]\n", \
171 regs->acc1h, regs->acc1l); 165 regs->acc1h, regs->acc1l);
172 #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R) 166 #elif defined(CONFIG_ISA_M32R2) || defined(CONFIG_ISA_M32R)
173 printk("ACCH[%08lx]:ACCL[%08lx]\n", \ 167 printk("ACCH[%08lx]:ACCL[%08lx]\n", \
174 regs->acch, regs->accl); 168 regs->acch, regs->accl);
175 #else 169 #else
176 #error unknown isa configuration 170 #error unknown isa configuration
177 #endif 171 #endif
178 } 172 }
179 173
180 /* 174 /*
181 * Create a kernel thread 175 * Create a kernel thread
182 */ 176 */
183 177
184 /* 178 /*
185 * This is the mechanism for creating a new kernel thread. 179 * This is the mechanism for creating a new kernel thread.
186 * 180 *
187 * NOTE! Only a kernel-only process(ie the swapper or direct descendants 181 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
188 * who haven't done an "execve()") should use this: it will work within 182 * who haven't done an "execve()") should use this: it will work within
189 * a system call from a "real" process, but the process memory space will 183 * a system call from a "real" process, but the process memory space will
190 * not be free'd until both the parent and the child have exited. 184 * not be free'd until both the parent and the child have exited.
191 */ 185 */
192 static void kernel_thread_helper(void *nouse, int (*fn)(void *), void *arg) 186 static void kernel_thread_helper(void *nouse, int (*fn)(void *), void *arg)
193 { 187 {
194 fn(arg); 188 fn(arg);
195 do_exit(-1); 189 do_exit(-1);
196 } 190 }
197 191
198 int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) 192 int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
199 { 193 {
200 struct pt_regs regs; 194 struct pt_regs regs;
201 195
202 memset(&regs, 0, sizeof (regs)); 196 memset(&regs, 0, sizeof (regs));
203 regs.r1 = (unsigned long)fn; 197 regs.r1 = (unsigned long)fn;
204 regs.r2 = (unsigned long)arg; 198 regs.r2 = (unsigned long)arg;
205 199
206 regs.bpc = (unsigned long)kernel_thread_helper; 200 regs.bpc = (unsigned long)kernel_thread_helper;
207 201
208 regs.psw = M32R_PSW_BIE; 202 regs.psw = M32R_PSW_BIE;
209 203
210 /* Ok, create the new process. */ 204 /* Ok, create the new process. */
211 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, 205 return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
212 NULL); 206 NULL);
213 } 207 }
214 208
215 /* 209 /*
216 * Free current thread data structures etc.. 210 * Free current thread data structures etc..
217 */ 211 */
218 void exit_thread(void) 212 void exit_thread(void)
219 { 213 {
220 /* Nothing to do. */ 214 /* Nothing to do. */
221 DPRINTK("pid = %d\n", current->pid); 215 DPRINTK("pid = %d\n", current->pid);
222 } 216 }
223 217
224 void flush_thread(void) 218 void flush_thread(void)
225 { 219 {
226 DPRINTK("pid = %d\n", current->pid); 220 DPRINTK("pid = %d\n", current->pid);
227 memset(&current->thread.debug_trap, 0, sizeof(struct debug_trap)); 221 memset(&current->thread.debug_trap, 0, sizeof(struct debug_trap));
228 } 222 }
229 223
230 void release_thread(struct task_struct *dead_task) 224 void release_thread(struct task_struct *dead_task)
231 { 225 {
232 /* do nothing */ 226 /* do nothing */
233 DPRINTK("pid = %d\n", dead_task->pid); 227 DPRINTK("pid = %d\n", dead_task->pid);
234 } 228 }
235 229
236 /* Fill in the fpu structure for a core dump.. */ 230 /* Fill in the fpu structure for a core dump.. */
237 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) 231 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
238 { 232 {
239 return 0; /* Task didn't use the fpu at all. */ 233 return 0; /* Task didn't use the fpu at all. */
240 } 234 }
241 235
242 int copy_thread(int nr, unsigned long clone_flags, unsigned long spu, 236 int copy_thread(int nr, unsigned long clone_flags, unsigned long spu,
243 unsigned long unused, struct task_struct *tsk, struct pt_regs *regs) 237 unsigned long unused, struct task_struct *tsk, struct pt_regs *regs)
244 { 238 {
245 struct pt_regs *childregs; 239 struct pt_regs *childregs;
246 unsigned long sp = (unsigned long)tsk->thread_info + THREAD_SIZE; 240 unsigned long sp = (unsigned long)tsk->thread_info + THREAD_SIZE;
247 extern void ret_from_fork(void); 241 extern void ret_from_fork(void);
248 242
249 /* Copy registers */ 243 /* Copy registers */
250 sp -= sizeof (struct pt_regs); 244 sp -= sizeof (struct pt_regs);
251 childregs = (struct pt_regs *)sp; 245 childregs = (struct pt_regs *)sp;
252 *childregs = *regs; 246 *childregs = *regs;
253 247
254 childregs->spu = spu; 248 childregs->spu = spu;
255 childregs->r0 = 0; /* Child gets zero as return value */ 249 childregs->r0 = 0; /* Child gets zero as return value */
256 regs->r0 = tsk->pid; 250 regs->r0 = tsk->pid;
257 tsk->thread.sp = (unsigned long)childregs; 251 tsk->thread.sp = (unsigned long)childregs;
258 tsk->thread.lr = (unsigned long)ret_from_fork; 252 tsk->thread.lr = (unsigned long)ret_from_fork;
259 253
260 return 0; 254 return 0;
261 } 255 }
262 256
263 /* 257 /*
264 * fill in the user structure for a core dump.. 258 * fill in the user structure for a core dump..
265 */ 259 */
266 void dump_thread(struct pt_regs * regs, struct user * dump) 260 void dump_thread(struct pt_regs * regs, struct user * dump)
267 { 261 {
268 /* M32R_FIXME */ 262 /* M32R_FIXME */
269 } 263 }
270 264
271 /* 265 /*
272 * Capture the user space registers if the task is not running (in user space) 266 * Capture the user space registers if the task is not running (in user space)
273 */ 267 */
274 int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) 268 int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
275 { 269 {
276 /* M32R_FIXME */ 270 /* M32R_FIXME */
277 return 1; 271 return 1;
278 } 272 }
279 273
280 asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2, 274 asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2,
281 unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, 275 unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
282 struct pt_regs regs) 276 struct pt_regs regs)
283 { 277 {
284 #ifdef CONFIG_MMU 278 #ifdef CONFIG_MMU
285 return do_fork(SIGCHLD, regs.spu, &regs, 0, NULL, NULL); 279 return do_fork(SIGCHLD, regs.spu, &regs, 0, NULL, NULL);
286 #else 280 #else
287 return -EINVAL; 281 return -EINVAL;
288 #endif /* CONFIG_MMU */ 282 #endif /* CONFIG_MMU */
289 } 283 }
290 284
291 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, 285 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
292 unsigned long parent_tidptr, 286 unsigned long parent_tidptr,
293 unsigned long child_tidptr, 287 unsigned long child_tidptr,
294 unsigned long r4, unsigned long r5, unsigned long r6, 288 unsigned long r4, unsigned long r5, unsigned long r6,
295 struct pt_regs regs) 289 struct pt_regs regs)
296 { 290 {
297 if (!newsp) 291 if (!newsp)
298 newsp = regs.spu; 292 newsp = regs.spu;
299 293
300 return do_fork(clone_flags, newsp, &regs, 0, 294 return do_fork(clone_flags, newsp, &regs, 0,
301 (int __user *)parent_tidptr, (int __user *)child_tidptr); 295 (int __user *)parent_tidptr, (int __user *)child_tidptr);
302 } 296 }
303 297
304 /* 298 /*
305 * This is trivial, and on the face of it looks like it 299 * This is trivial, and on the face of it looks like it
306 * could equally well be done in user mode. 300 * could equally well be done in user mode.
307 * 301 *
308 * Not so, for quite unobvious reasons - register pressure. 302 * Not so, for quite unobvious reasons - register pressure.
309 * In user mode vfork() cannot have a stack frame, and if 303 * In user mode vfork() cannot have a stack frame, and if
310 * done by calling the "clone()" system call directly, you 304 * done by calling the "clone()" system call directly, you
311 * do not have enough call-clobbered registers to hold all 305 * do not have enough call-clobbered registers to hold all
312 * the information you need. 306 * the information you need.
313 */ 307 */
314 asmlinkage int sys_vfork(unsigned long r0, unsigned long r1, unsigned long r2, 308 asmlinkage int sys_vfork(unsigned long r0, unsigned long r1, unsigned long r2,
315 unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, 309 unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6,
316 struct pt_regs regs) 310 struct pt_regs regs)
317 { 311 {
318 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.spu, &regs, 0, 312 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.spu, &regs, 0,
319 NULL, NULL); 313 NULL, NULL);
320 } 314 }
321 315
322 /* 316 /*
323 * sys_execve() executes a new program. 317 * sys_execve() executes a new program.
324 */ 318 */
325 asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv, 319 asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
326 char __user * __user *uenvp, 320 char __user * __user *uenvp,
327 unsigned long r3, unsigned long r4, unsigned long r5, 321 unsigned long r3, unsigned long r4, unsigned long r5,
328 unsigned long r6, struct pt_regs regs) 322 unsigned long r6, struct pt_regs regs)
329 { 323 {
330 int error; 324 int error;
331 char *filename; 325 char *filename;
332 326
333 filename = getname(ufilename); 327 filename = getname(ufilename);
334 error = PTR_ERR(filename); 328 error = PTR_ERR(filename);
335 if (IS_ERR(filename)) 329 if (IS_ERR(filename))
336 goto out; 330 goto out;
337 331
338 error = do_execve(filename, uargv, uenvp, &regs); 332 error = do_execve(filename, uargv, uenvp, &regs);
339 if (error == 0) { 333 if (error == 0) {
340 task_lock(current); 334 task_lock(current);
341 current->ptrace &= ~PT_DTRACE; 335 current->ptrace &= ~PT_DTRACE;
342 task_unlock(current); 336 task_unlock(current);
343 } 337 }
344 putname(filename); 338 putname(filename);
345 out: 339 out:
346 return error; 340 return error;
347 } 341 }
348 342
/*
 * These bracket the sleeping functions..
 */
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)

/* Wait-channel lookup is not implemented on m32r; always returns 0. */
unsigned long get_wchan(struct task_struct *p)
{
	/* M32R_FIXME */
	return (0);
}
360 354
arch/m68k/kernel/process.c
1 /* 1 /*
2 * linux/arch/m68k/kernel/process.c 2 * linux/arch/m68k/kernel/process.c
3 * 3 *
4 * Copyright (C) 1995 Hamish Macdonald 4 * Copyright (C) 1995 Hamish Macdonald
5 * 5 *
6 * 68060 fixes by Jesper Skov 6 * 68060 fixes by Jesper Skov
7 */ 7 */
8 8
9 /* 9 /*
10 * This file handles the architecture-dependent parts of process handling.. 10 * This file handles the architecture-dependent parts of process handling..
11 */ 11 */
12 12
13 #include <linux/config.h> 13 #include <linux/config.h>
14 #include <linux/errno.h> 14 #include <linux/errno.h>
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/sched.h> 16 #include <linux/sched.h>
17 #include <linux/kernel.h> 17 #include <linux/kernel.h>
18 #include <linux/mm.h> 18 #include <linux/mm.h>
19 #include <linux/smp.h> 19 #include <linux/smp.h>
20 #include <linux/smp_lock.h> 20 #include <linux/smp_lock.h>
21 #include <linux/stddef.h> 21 #include <linux/stddef.h>
22 #include <linux/unistd.h> 22 #include <linux/unistd.h>
23 #include <linux/ptrace.h> 23 #include <linux/ptrace.h>
24 #include <linux/slab.h> 24 #include <linux/slab.h>
25 #include <linux/user.h> 25 #include <linux/user.h>
26 #include <linux/a.out.h> 26 #include <linux/a.out.h>
27 #include <linux/reboot.h> 27 #include <linux/reboot.h>
28 #include <linux/init_task.h> 28 #include <linux/init_task.h>
29 #include <linux/mqueue.h> 29 #include <linux/mqueue.h>
30 30
31 #include <asm/uaccess.h> 31 #include <asm/uaccess.h>
32 #include <asm/system.h> 32 #include <asm/system.h>
33 #include <asm/traps.h> 33 #include <asm/traps.h>
34 #include <asm/machdep.h> 34 #include <asm/machdep.h>
35 #include <asm/setup.h> 35 #include <asm/setup.h>
36 #include <asm/pgtable.h> 36 #include <asm/pgtable.h>
37 37
38 /* 38 /*
39 * Initial task/thread structure. Make this a per-architecture thing, 39 * Initial task/thread structure. Make this a per-architecture thing,
40 * because different architectures tend to have different 40 * because different architectures tend to have different
41 * alignment requirements and potentially different initial 41 * alignment requirements and potentially different initial
42 * setup. 42 * setup.
43 */ 43 */
44 static struct fs_struct init_fs = INIT_FS; 44 static struct fs_struct init_fs = INIT_FS;
45 static struct files_struct init_files = INIT_FILES; 45 static struct files_struct init_files = INIT_FILES;
46 static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 46 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
47 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 47 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
48 struct mm_struct init_mm = INIT_MM(init_mm); 48 struct mm_struct init_mm = INIT_MM(init_mm);
49 49
50 EXPORT_SYMBOL(init_mm); 50 EXPORT_SYMBOL(init_mm);
51 51
52 union thread_union init_thread_union 52 union thread_union init_thread_union
53 __attribute__((section(".data.init_task"), aligned(THREAD_SIZE))) 53 __attribute__((section(".data.init_task"), aligned(THREAD_SIZE)))
54 = { INIT_THREAD_INFO(init_task) }; 54 = { INIT_THREAD_INFO(init_task) };
55 55
56 /* initial task structure */ 56 /* initial task structure */
57 struct task_struct init_task = INIT_TASK(init_task); 57 struct task_struct init_task = INIT_TASK(init_task);
58 58
59 EXPORT_SYMBOL(init_task); 59 EXPORT_SYMBOL(init_task);
60 60
61 asmlinkage void ret_from_fork(void); 61 asmlinkage void ret_from_fork(void);
62 62
63 63
64 /* 64 /*
65 * Return saved PC from a blocked thread 65 * Return saved PC from a blocked thread
66 */ 66 */
67 unsigned long thread_saved_pc(struct task_struct *tsk) 67 unsigned long thread_saved_pc(struct task_struct *tsk)
68 { 68 {
69 struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp; 69 struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
70 /* Check whether the thread is blocked in resume() */ 70 /* Check whether the thread is blocked in resume() */
71 if (in_sched_functions(sw->retpc)) 71 if (in_sched_functions(sw->retpc))
72 return ((unsigned long *)sw->a6)[1]; 72 return ((unsigned long *)sw->a6)[1];
73 else 73 else
74 return sw->retpc; 74 return sw->retpc;
75 } 75 }
76 76
/*
 * The idle loop on an m68k..
 */
void default_idle(void)
{
	if (!need_resched())
#if defined(MACH_ATARI_ONLY) && !defined(CONFIG_HADES)
		/* block out HSYNC on the atari (falcon) */
		__asm__("stop #0x2200" : : : "cc");
#else
		__asm__("stop #0x2000" : : : "cc");
#endif
}

void (*idle)(void) = default_idle;

/*
 * The idle thread.  There's no useful work to be done, so just try to
 * conserve power and have a low exit latency (ie sit in a loop waiting
 * for somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
			idle();
		schedule();
	}
}
108 108
109 void machine_restart(char * __unused) 109 void machine_restart(char * __unused)
110 { 110 {
111 if (mach_reset) 111 if (mach_reset)
112 mach_reset(); 112 mach_reset();
113 for (;;); 113 for (;;);
114 } 114 }
115 115
116 EXPORT_SYMBOL(machine_restart);
117
118 void machine_halt(void) 116 void machine_halt(void)
119 { 117 {
120 if (mach_halt) 118 if (mach_halt)
121 mach_halt(); 119 mach_halt();
122 for (;;); 120 for (;;);
123 } 121 }
124 122
125 EXPORT_SYMBOL(machine_halt);
126
127 void machine_power_off(void) 123 void machine_power_off(void)
128 { 124 {
129 if (mach_power_off) 125 if (mach_power_off)
130 mach_power_off(); 126 mach_power_off();
131 for (;;); 127 for (;;);
132 } 128 }
133
134 EXPORT_SYMBOL(machine_power_off);
135 129
136 void show_regs(struct pt_regs * regs) 130 void show_regs(struct pt_regs * regs)
137 { 131 {
138 printk("\n"); 132 printk("\n");
139 printk("Format %02x Vector: %04x PC: %08lx Status: %04x %s\n", 133 printk("Format %02x Vector: %04x PC: %08lx Status: %04x %s\n",
140 regs->format, regs->vector, regs->pc, regs->sr, print_tainted()); 134 regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
141 printk("ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n", 135 printk("ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n",
142 regs->orig_d0, regs->d0, regs->a2, regs->a1); 136 regs->orig_d0, regs->d0, regs->a2, regs->a1);
143 printk("A0: %08lx D5: %08lx D4: %08lx\n", 137 printk("A0: %08lx D5: %08lx D4: %08lx\n",
144 regs->a0, regs->d5, regs->d4); 138 regs->a0, regs->d5, regs->d4);
145 printk("D3: %08lx D2: %08lx D1: %08lx\n", 139 printk("D3: %08lx D2: %08lx D1: %08lx\n",
146 regs->d3, regs->d2, regs->d1); 140 regs->d3, regs->d2, regs->d1);
147 if (!(regs->sr & PS_S)) 141 if (!(regs->sr & PS_S))
148 printk("USP: %08lx\n", rdusp()); 142 printk("USP: %08lx\n", rdusp());
149 } 143 }
150 144
151 /* 145 /*
152 * Create a kernel thread 146 * Create a kernel thread
153 */ 147 */
154 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 148 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
155 { 149 {
156 int pid; 150 int pid;
157 mm_segment_t fs; 151 mm_segment_t fs;
158 152
159 fs = get_fs(); 153 fs = get_fs();
160 set_fs (KERNEL_DS); 154 set_fs (KERNEL_DS);
161 155
162 { 156 {
163 register long retval __asm__ ("d0"); 157 register long retval __asm__ ("d0");
164 register long clone_arg __asm__ ("d1") = flags | CLONE_VM | CLONE_UNTRACED; 158 register long clone_arg __asm__ ("d1") = flags | CLONE_VM | CLONE_UNTRACED;
165 159
166 retval = __NR_clone; 160 retval = __NR_clone;
167 __asm__ __volatile__ 161 __asm__ __volatile__
168 ("clrl %%d2\n\t" 162 ("clrl %%d2\n\t"
169 "trap #0\n\t" /* Linux/m68k system call */ 163 "trap #0\n\t" /* Linux/m68k system call */
170 "tstl %0\n\t" /* child or parent */ 164 "tstl %0\n\t" /* child or parent */
171 "jne 1f\n\t" /* parent - jump */ 165 "jne 1f\n\t" /* parent - jump */
172 "lea %%sp@(%c7),%6\n\t" /* reload current */ 166 "lea %%sp@(%c7),%6\n\t" /* reload current */
173 "movel %6@,%6\n\t" 167 "movel %6@,%6\n\t"
174 "movel %3,%%sp@-\n\t" /* push argument */ 168 "movel %3,%%sp@-\n\t" /* push argument */
175 "jsr %4@\n\t" /* call fn */ 169 "jsr %4@\n\t" /* call fn */
176 "movel %0,%%d1\n\t" /* pass exit value */ 170 "movel %0,%%d1\n\t" /* pass exit value */
177 "movel %2,%%d0\n\t" /* exit */ 171 "movel %2,%%d0\n\t" /* exit */
178 "trap #0\n" 172 "trap #0\n"
179 "1:" 173 "1:"
180 : "+d" (retval) 174 : "+d" (retval)
181 : "i" (__NR_clone), "i" (__NR_exit), 175 : "i" (__NR_clone), "i" (__NR_exit),
182 "r" (arg), "a" (fn), "d" (clone_arg), "r" (current), 176 "r" (arg), "a" (fn), "d" (clone_arg), "r" (current),
183 "i" (-THREAD_SIZE) 177 "i" (-THREAD_SIZE)
184 : "d2"); 178 : "d2");
185 179
186 pid = retval; 180 pid = retval;
187 } 181 }
188 182
189 set_fs (fs); 183 set_fs (fs);
190 return pid; 184 return pid;
191 } 185 }
192 186
193 void flush_thread(void) 187 void flush_thread(void)
194 { 188 {
195 unsigned long zero = 0; 189 unsigned long zero = 0;
196 set_fs(USER_DS); 190 set_fs(USER_DS);
197 current->thread.fs = __USER_DS; 191 current->thread.fs = __USER_DS;
198 if (!FPU_IS_EMU) 192 if (!FPU_IS_EMU)
199 asm volatile (".chip 68k/68881\n\t" 193 asm volatile (".chip 68k/68881\n\t"
200 "frestore %0@\n\t" 194 "frestore %0@\n\t"
201 ".chip 68k" : : "a" (&zero)); 195 ".chip 68k" : : "a" (&zero));
202 } 196 }
203 197
204 /* 198 /*
205 * "m68k_fork()".. By the time we get here, the 199 * "m68k_fork()".. By the time we get here, the
206 * non-volatile registers have also been saved on the 200 * non-volatile registers have also been saved on the
207 * stack. We do some ugly pointer stuff here.. (see 201 * stack. We do some ugly pointer stuff here.. (see
208 * also copy_thread) 202 * also copy_thread)
209 */ 203 */
210 204
211 asmlinkage int m68k_fork(struct pt_regs *regs) 205 asmlinkage int m68k_fork(struct pt_regs *regs)
212 { 206 {
213 return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL); 207 return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
214 } 208 }
215 209
216 asmlinkage int m68k_vfork(struct pt_regs *regs) 210 asmlinkage int m68k_vfork(struct pt_regs *regs)
217 { 211 {
218 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, 212 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0,
219 NULL, NULL); 213 NULL, NULL);
220 } 214 }
221 215
222 asmlinkage int m68k_clone(struct pt_regs *regs) 216 asmlinkage int m68k_clone(struct pt_regs *regs)
223 { 217 {
224 unsigned long clone_flags; 218 unsigned long clone_flags;
225 unsigned long newsp; 219 unsigned long newsp;
226 int *parent_tidptr, *child_tidptr; 220 int *parent_tidptr, *child_tidptr;
227 221
228 /* syscall2 puts clone_flags in d1 and usp in d2 */ 222 /* syscall2 puts clone_flags in d1 and usp in d2 */
229 clone_flags = regs->d1; 223 clone_flags = regs->d1;
230 newsp = regs->d2; 224 newsp = regs->d2;
231 parent_tidptr = (int *)regs->d3; 225 parent_tidptr = (int *)regs->d3;
232 child_tidptr = (int *)regs->d4; 226 child_tidptr = (int *)regs->d4;
233 if (!newsp) 227 if (!newsp)
234 newsp = rdusp(); 228 newsp = rdusp();
235 return do_fork(clone_flags, newsp, regs, 0, 229 return do_fork(clone_flags, newsp, regs, 0,
236 parent_tidptr, child_tidptr); 230 parent_tidptr, child_tidptr);
237 } 231 }
238 232
239 int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, 233 int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
240 unsigned long unused, 234 unsigned long unused,
241 struct task_struct * p, struct pt_regs * regs) 235 struct task_struct * p, struct pt_regs * regs)
242 { 236 {
243 struct pt_regs * childregs; 237 struct pt_regs * childregs;
244 struct switch_stack * childstack, *stack; 238 struct switch_stack * childstack, *stack;
245 unsigned long stack_offset, *retp; 239 unsigned long stack_offset, *retp;
246 240
247 stack_offset = THREAD_SIZE - sizeof(struct pt_regs); 241 stack_offset = THREAD_SIZE - sizeof(struct pt_regs);
248 childregs = (struct pt_regs *) ((unsigned long) (p->thread_info) + stack_offset); 242 childregs = (struct pt_regs *) ((unsigned long) (p->thread_info) + stack_offset);
249 243
250 *childregs = *regs; 244 *childregs = *regs;
251 childregs->d0 = 0; 245 childregs->d0 = 0;
252 246
253 retp = ((unsigned long *) regs); 247 retp = ((unsigned long *) regs);
254 stack = ((struct switch_stack *) retp) - 1; 248 stack = ((struct switch_stack *) retp) - 1;
255 249
256 childstack = ((struct switch_stack *) childregs) - 1; 250 childstack = ((struct switch_stack *) childregs) - 1;
257 *childstack = *stack; 251 *childstack = *stack;
258 childstack->retpc = (unsigned long)ret_from_fork; 252 childstack->retpc = (unsigned long)ret_from_fork;
259 253
260 p->thread.usp = usp; 254 p->thread.usp = usp;
261 p->thread.ksp = (unsigned long)childstack; 255 p->thread.ksp = (unsigned long)childstack;
262 /* 256 /*
263 * Must save the current SFC/DFC value, NOT the value when 257 * Must save the current SFC/DFC value, NOT the value when
264 * the parent was last descheduled - RGH 10-08-96 258 * the parent was last descheduled - RGH 10-08-96
265 */ 259 */
266 p->thread.fs = get_fs().seg; 260 p->thread.fs = get_fs().seg;
267 261
268 if (!FPU_IS_EMU) { 262 if (!FPU_IS_EMU) {
269 /* Copy the current fpu state */ 263 /* Copy the current fpu state */
270 asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory"); 264 asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
271 265
272 if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) 266 if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2])
273 asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t" 267 asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
274 "fmoveml %/fpiar/%/fpcr/%/fpsr,%1" 268 "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
275 : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0]) 269 : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
276 : "memory"); 270 : "memory");
277 /* Restore the state in case the fpu was busy */ 271 /* Restore the state in case the fpu was busy */
278 asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0])); 272 asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
279 } 273 }
280 274
281 return 0; 275 return 0;
282 } 276 }
283 277
284 /* Fill in the fpu structure for a core dump. */ 278 /* Fill in the fpu structure for a core dump. */
285 279
286 int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu) 280 int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
287 { 281 {
288 char fpustate[216]; 282 char fpustate[216];
289 283
290 if (FPU_IS_EMU) { 284 if (FPU_IS_EMU) {
291 int i; 285 int i;
292 286
293 memcpy(fpu->fpcntl, current->thread.fpcntl, 12); 287 memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
294 memcpy(fpu->fpregs, current->thread.fp, 96); 288 memcpy(fpu->fpregs, current->thread.fp, 96);
295 /* Convert internal fpu reg representation 289 /* Convert internal fpu reg representation
296 * into long double format 290 * into long double format
297 */ 291 */
298 for (i = 0; i < 24; i += 3) 292 for (i = 0; i < 24; i += 3)
299 fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) | 293 fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
300 ((fpu->fpregs[i] & 0x0000ffff) << 16); 294 ((fpu->fpregs[i] & 0x0000ffff) << 16);
301 return 1; 295 return 1;
302 } 296 }
303 297
304 /* First dump the fpu context to avoid protocol violation. */ 298 /* First dump the fpu context to avoid protocol violation. */
305 asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory"); 299 asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
306 if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2]) 300 if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
307 return 0; 301 return 0;
308 302
309 asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0" 303 asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
310 :: "m" (fpu->fpcntl[0]) 304 :: "m" (fpu->fpcntl[0])
311 : "memory"); 305 : "memory");
312 asm volatile ("fmovemx %/fp0-%/fp7,%0" 306 asm volatile ("fmovemx %/fp0-%/fp7,%0"
313 :: "m" (fpu->fpregs[0]) 307 :: "m" (fpu->fpregs[0])
314 : "memory"); 308 : "memory");
315 return 1; 309 return 1;
316 } 310 }
317 311
318 /* 312 /*
319 * fill in the user structure for a core dump.. 313 * fill in the user structure for a core dump..
320 */ 314 */
321 void dump_thread(struct pt_regs * regs, struct user * dump) 315 void dump_thread(struct pt_regs * regs, struct user * dump)
322 { 316 {
323 struct switch_stack *sw; 317 struct switch_stack *sw;
324 318
325 /* changed the size calculations - should hopefully work better. lbt */ 319 /* changed the size calculations - should hopefully work better. lbt */
326 dump->magic = CMAGIC; 320 dump->magic = CMAGIC;
327 dump->start_code = 0; 321 dump->start_code = 0;
328 dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); 322 dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
329 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; 323 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
330 dump->u_dsize = ((unsigned long) (current->mm->brk + 324 dump->u_dsize = ((unsigned long) (current->mm->brk +
331 (PAGE_SIZE-1))) >> PAGE_SHIFT; 325 (PAGE_SIZE-1))) >> PAGE_SHIFT;
332 dump->u_dsize -= dump->u_tsize; 326 dump->u_dsize -= dump->u_tsize;
333 dump->u_ssize = 0; 327 dump->u_ssize = 0;
334 328
335 if (dump->start_stack < TASK_SIZE) 329 if (dump->start_stack < TASK_SIZE)
336 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; 330 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
337 331
338 dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump); 332 dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump);
339 sw = ((struct switch_stack *)regs) - 1; 333 sw = ((struct switch_stack *)regs) - 1;
340 dump->regs.d1 = regs->d1; 334 dump->regs.d1 = regs->d1;
341 dump->regs.d2 = regs->d2; 335 dump->regs.d2 = regs->d2;
342 dump->regs.d3 = regs->d3; 336 dump->regs.d3 = regs->d3;
343 dump->regs.d4 = regs->d4; 337 dump->regs.d4 = regs->d4;
344 dump->regs.d5 = regs->d5; 338 dump->regs.d5 = regs->d5;
345 dump->regs.d6 = sw->d6; 339 dump->regs.d6 = sw->d6;
346 dump->regs.d7 = sw->d7; 340 dump->regs.d7 = sw->d7;
347 dump->regs.a0 = regs->a0; 341 dump->regs.a0 = regs->a0;
348 dump->regs.a1 = regs->a1; 342 dump->regs.a1 = regs->a1;
349 dump->regs.a2 = regs->a2; 343 dump->regs.a2 = regs->a2;
350 dump->regs.a3 = sw->a3; 344 dump->regs.a3 = sw->a3;
351 dump->regs.a4 = sw->a4; 345 dump->regs.a4 = sw->a4;
352 dump->regs.a5 = sw->a5; 346 dump->regs.a5 = sw->a5;
353 dump->regs.a6 = sw->a6; 347 dump->regs.a6 = sw->a6;
354 dump->regs.d0 = regs->d0; 348 dump->regs.d0 = regs->d0;
355 dump->regs.orig_d0 = regs->orig_d0; 349 dump->regs.orig_d0 = regs->orig_d0;
356 dump->regs.stkadj = regs->stkadj; 350 dump->regs.stkadj = regs->stkadj;
357 dump->regs.sr = regs->sr; 351 dump->regs.sr = regs->sr;
358 dump->regs.pc = regs->pc; 352 dump->regs.pc = regs->pc;
359 dump->regs.fmtvec = (regs->format << 12) | regs->vector; 353 dump->regs.fmtvec = (regs->format << 12) | regs->vector;
360 /* dump floating point stuff */ 354 /* dump floating point stuff */
361 dump->u_fpvalid = dump_fpu (regs, &dump->m68kfp); 355 dump->u_fpvalid = dump_fpu (regs, &dump->m68kfp);
362 } 356 }
363 357
364 /* 358 /*
365 * sys_execve() executes a new program. 359 * sys_execve() executes a new program.
366 */ 360 */
367 asmlinkage int sys_execve(char *name, char **argv, char **envp) 361 asmlinkage int sys_execve(char *name, char **argv, char **envp)
368 { 362 {
369 int error; 363 int error;
370 char * filename; 364 char * filename;
371 struct pt_regs *regs = (struct pt_regs *) &name; 365 struct pt_regs *regs = (struct pt_regs *) &name;
372 366
373 lock_kernel(); 367 lock_kernel();
374 filename = getname(name); 368 filename = getname(name);
375 error = PTR_ERR(filename); 369 error = PTR_ERR(filename);
376 if (IS_ERR(filename)) 370 if (IS_ERR(filename))
377 goto out; 371 goto out;
378 error = do_execve(filename, argv, envp, regs); 372 error = do_execve(filename, argv, envp, regs);
379 putname(filename); 373 putname(filename);
380 out: 374 out:
381 unlock_kernel(); 375 unlock_kernel();
382 return error; 376 return error;
383 } 377 }
384 378
385 unsigned long get_wchan(struct task_struct *p) 379 unsigned long get_wchan(struct task_struct *p)
386 { 380 {
387 unsigned long fp, pc; 381 unsigned long fp, pc;
388 unsigned long stack_page; 382 unsigned long stack_page;
389 int count = 0; 383 int count = 0;
390 if (!p || p == current || p->state == TASK_RUNNING) 384 if (!p || p == current || p->state == TASK_RUNNING)
391 return 0; 385 return 0;
392 386
393 stack_page = (unsigned long)(p->thread_info); 387 stack_page = (unsigned long)(p->thread_info);
394 fp = ((struct switch_stack *)p->thread.ksp)->a6; 388 fp = ((struct switch_stack *)p->thread.ksp)->a6;
395 do { 389 do {
396 if (fp < stack_page+sizeof(struct thread_info) || 390 if (fp < stack_page+sizeof(struct thread_info) ||
397 fp >= 8184+stack_page) 391 fp >= 8184+stack_page)
398 return 0; 392 return 0;
399 pc = ((unsigned long *)fp)[1]; 393 pc = ((unsigned long *)fp)[1];
400 if (!in_sched_functions(pc)) 394 if (!in_sched_functions(pc))
401 return pc; 395 return pc;
402 fp = *(unsigned long *) fp; 396 fp = *(unsigned long *) fp;
403 } while (count++ < 16); 397 } while (count++ < 16);
404 return 0; 398 return 0;
405 } 399 }
406 400
arch/m68knommu/kernel/process.c
1 /* 1 /*
2 * linux/arch/m68knommu/kernel/process.c 2 * linux/arch/m68knommu/kernel/process.c
3 * 3 *
4 * Copyright (C) 1995 Hamish Macdonald 4 * Copyright (C) 1995 Hamish Macdonald
5 * 5 *
6 * 68060 fixes by Jesper Skov 6 * 68060 fixes by Jesper Skov
7 * 7 *
8 * uClinux changes 8 * uClinux changes
9 * Copyright (C) 2000-2002, David McCullough <davidm@snapgear.com> 9 * Copyright (C) 2000-2002, David McCullough <davidm@snapgear.com>
10 */ 10 */
11 11
12 /* 12 /*
13 * This file handles the architecture-dependent parts of process handling.. 13 * This file handles the architecture-dependent parts of process handling..
14 */ 14 */
15 15
16 #include <linux/config.h> 16 #include <linux/config.h>
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/errno.h> 18 #include <linux/errno.h>
19 #include <linux/sched.h> 19 #include <linux/sched.h>
20 #include <linux/kernel.h> 20 #include <linux/kernel.h>
21 #include <linux/mm.h> 21 #include <linux/mm.h>
22 #include <linux/smp.h> 22 #include <linux/smp.h>
23 #include <linux/smp_lock.h> 23 #include <linux/smp_lock.h>
24 #include <linux/stddef.h> 24 #include <linux/stddef.h>
25 #include <linux/unistd.h> 25 #include <linux/unistd.h>
26 #include <linux/ptrace.h> 26 #include <linux/ptrace.h>
27 #include <linux/slab.h> 27 #include <linux/slab.h>
28 #include <linux/user.h> 28 #include <linux/user.h>
29 #include <linux/a.out.h> 29 #include <linux/a.out.h>
30 #include <linux/interrupt.h> 30 #include <linux/interrupt.h>
31 #include <linux/reboot.h> 31 #include <linux/reboot.h>
32 32
33 #include <asm/uaccess.h> 33 #include <asm/uaccess.h>
34 #include <asm/system.h> 34 #include <asm/system.h>
35 #include <asm/traps.h> 35 #include <asm/traps.h>
36 #include <asm/machdep.h> 36 #include <asm/machdep.h>
37 #include <asm/setup.h> 37 #include <asm/setup.h>
38 #include <asm/pgtable.h> 38 #include <asm/pgtable.h>
39 39
40 asmlinkage void ret_from_fork(void); 40 asmlinkage void ret_from_fork(void);
41 41
42 42
43 /* 43 /*
44 * The idle loop on an m68knommu.. 44 * The idle loop on an m68knommu..
45 */ 45 */
46 void default_idle(void) 46 void default_idle(void)
47 { 47 {
48 local_irq_disable(); 48 local_irq_disable();
49 while (!need_resched()) { 49 while (!need_resched()) {
50 /* This stop will re-enable interrupts */ 50 /* This stop will re-enable interrupts */
51 __asm__("stop #0x2000" : : : "cc"); 51 __asm__("stop #0x2000" : : : "cc");
52 local_irq_disable(); 52 local_irq_disable();
53 } 53 }
54 local_irq_enable(); 54 local_irq_enable();
55 } 55 }
56 56
57 void (*idle)(void) = default_idle; 57 void (*idle)(void) = default_idle;
58 58
59 /* 59 /*
60 * The idle thread. There's no useful work to be 60 * The idle thread. There's no useful work to be
61 * done, so just try to conserve power and have a 61 * done, so just try to conserve power and have a
62 * low exit latency (ie sit in a loop waiting for 62 * low exit latency (ie sit in a loop waiting for
63 * somebody to say that they'd like to reschedule) 63 * somebody to say that they'd like to reschedule)
64 */ 64 */
65 void cpu_idle(void) 65 void cpu_idle(void)
66 { 66 {
67 /* endless idle loop with no priority at all */ 67 /* endless idle loop with no priority at all */
68 while (1) { 68 while (1) {
69 idle(); 69 idle();
70 preempt_enable_no_resched(); 70 preempt_enable_no_resched();
71 schedule(); 71 schedule();
72 preempt_disable(); 72 preempt_disable();
73 } 73 }
74 } 74 }
75 75
76 void machine_restart(char * __unused) 76 void machine_restart(char * __unused)
77 { 77 {
78 if (mach_reset) 78 if (mach_reset)
79 mach_reset(); 79 mach_reset();
80 for (;;); 80 for (;;);
81 } 81 }
82 82
83 EXPORT_SYMBOL(machine_restart);
84
85 void machine_halt(void) 83 void machine_halt(void)
86 { 84 {
87 if (mach_halt) 85 if (mach_halt)
88 mach_halt(); 86 mach_halt();
89 for (;;); 87 for (;;);
90 } 88 }
91 89
92 EXPORT_SYMBOL(machine_halt);
93
94 void machine_power_off(void) 90 void machine_power_off(void)
95 { 91 {
96 if (mach_power_off) 92 if (mach_power_off)
97 mach_power_off(); 93 mach_power_off();
98 for (;;); 94 for (;;);
99 } 95 }
100
101 EXPORT_SYMBOL(machine_power_off);
102 96
103 void show_regs(struct pt_regs * regs) 97 void show_regs(struct pt_regs * regs)
104 { 98 {
105 printk(KERN_NOTICE "\n"); 99 printk(KERN_NOTICE "\n");
106 printk(KERN_NOTICE "Format %02x Vector: %04x PC: %08lx Status: %04x %s\n", 100 printk(KERN_NOTICE "Format %02x Vector: %04x PC: %08lx Status: %04x %s\n",
107 regs->format, regs->vector, regs->pc, regs->sr, print_tainted()); 101 regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
108 printk(KERN_NOTICE "ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n", 102 printk(KERN_NOTICE "ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n",
109 regs->orig_d0, regs->d0, regs->a2, regs->a1); 103 regs->orig_d0, regs->d0, regs->a2, regs->a1);
110 printk(KERN_NOTICE "A0: %08lx D5: %08lx D4: %08lx\n", 104 printk(KERN_NOTICE "A0: %08lx D5: %08lx D4: %08lx\n",
111 regs->a0, regs->d5, regs->d4); 105 regs->a0, regs->d5, regs->d4);
112 printk(KERN_NOTICE "D3: %08lx D2: %08lx D1: %08lx\n", 106 printk(KERN_NOTICE "D3: %08lx D2: %08lx D1: %08lx\n",
113 regs->d3, regs->d2, regs->d1); 107 regs->d3, regs->d2, regs->d1);
114 if (!(regs->sr & PS_S)) 108 if (!(regs->sr & PS_S))
115 printk(KERN_NOTICE "USP: %08lx\n", rdusp()); 109 printk(KERN_NOTICE "USP: %08lx\n", rdusp());
116 } 110 }
117 111
118 /* 112 /*
119 * Create a kernel thread 113 * Create a kernel thread
120 */ 114 */
121 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 115 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
122 { 116 {
123 int retval; 117 int retval;
124 long clone_arg = flags | CLONE_VM; 118 long clone_arg = flags | CLONE_VM;
125 mm_segment_t fs; 119 mm_segment_t fs;
126 120
127 fs = get_fs(); 121 fs = get_fs();
128 set_fs(KERNEL_DS); 122 set_fs(KERNEL_DS);
129 123
130 __asm__ __volatile__ ( 124 __asm__ __volatile__ (
131 "movel %%sp, %%d2\n\t" 125 "movel %%sp, %%d2\n\t"
132 "movel %5, %%d1\n\t" 126 "movel %5, %%d1\n\t"
133 "movel %1, %%d0\n\t" 127 "movel %1, %%d0\n\t"
134 "trap #0\n\t" 128 "trap #0\n\t"
135 "cmpl %%sp, %%d2\n\t" 129 "cmpl %%sp, %%d2\n\t"
136 "jeq 1f\n\t" 130 "jeq 1f\n\t"
137 "movel %3, %%sp@-\n\t" 131 "movel %3, %%sp@-\n\t"
138 "jsr %4@\n\t" 132 "jsr %4@\n\t"
139 "movel %2, %%d0\n\t" 133 "movel %2, %%d0\n\t"
140 "trap #0\n" 134 "trap #0\n"
141 "1:\n\t" 135 "1:\n\t"
142 "movel %%d0, %0\n" 136 "movel %%d0, %0\n"
143 : "=d" (retval) 137 : "=d" (retval)
144 : "i" (__NR_clone), 138 : "i" (__NR_clone),
145 "i" (__NR_exit), 139 "i" (__NR_exit),
146 "a" (arg), 140 "a" (arg),
147 "a" (fn), 141 "a" (fn),
148 "a" (clone_arg) 142 "a" (clone_arg)
149 : "cc", "%d0", "%d1", "%d2"); 143 : "cc", "%d0", "%d1", "%d2");
150 144
151 set_fs(fs); 145 set_fs(fs);
152 return retval; 146 return retval;
153 } 147 }
154 148
155 void flush_thread(void) 149 void flush_thread(void)
156 { 150 {
157 #ifdef CONFIG_FPU 151 #ifdef CONFIG_FPU
158 unsigned long zero = 0; 152 unsigned long zero = 0;
159 #endif 153 #endif
160 set_fs(USER_DS); 154 set_fs(USER_DS);
161 current->thread.fs = __USER_DS; 155 current->thread.fs = __USER_DS;
162 #ifdef CONFIG_FPU 156 #ifdef CONFIG_FPU
163 if (!FPU_IS_EMU) 157 if (!FPU_IS_EMU)
164 asm volatile (".chip 68k/68881\n\t" 158 asm volatile (".chip 68k/68881\n\t"
165 "frestore %0@\n\t" 159 "frestore %0@\n\t"
166 ".chip 68k" : : "a" (&zero)); 160 ".chip 68k" : : "a" (&zero));
167 #endif 161 #endif
168 } 162 }
169 163
170 /* 164 /*
171 * "m68k_fork()".. By the time we get here, the 165 * "m68k_fork()".. By the time we get here, the
172 * non-volatile registers have also been saved on the 166 * non-volatile registers have also been saved on the
173 * stack. We do some ugly pointer stuff here.. (see 167 * stack. We do some ugly pointer stuff here.. (see
174 * also copy_thread) 168 * also copy_thread)
175 */ 169 */
176 170
177 asmlinkage int m68k_fork(struct pt_regs *regs) 171 asmlinkage int m68k_fork(struct pt_regs *regs)
178 { 172 {
179 /* fork almost works, enough to trick you into looking elsewhere :-( */ 173 /* fork almost works, enough to trick you into looking elsewhere :-( */
180 return(-EINVAL); 174 return(-EINVAL);
181 } 175 }
182 176
183 asmlinkage int m68k_vfork(struct pt_regs *regs) 177 asmlinkage int m68k_vfork(struct pt_regs *regs)
184 { 178 {
185 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL); 179 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL, NULL);
186 } 180 }
187 181
188 asmlinkage int m68k_clone(struct pt_regs *regs) 182 asmlinkage int m68k_clone(struct pt_regs *regs)
189 { 183 {
190 unsigned long clone_flags; 184 unsigned long clone_flags;
191 unsigned long newsp; 185 unsigned long newsp;
192 186
193 /* syscall2 puts clone_flags in d1 and usp in d2 */ 187 /* syscall2 puts clone_flags in d1 and usp in d2 */
194 clone_flags = regs->d1; 188 clone_flags = regs->d1;
195 newsp = regs->d2; 189 newsp = regs->d2;
196 if (!newsp) 190 if (!newsp)
197 newsp = rdusp(); 191 newsp = rdusp();
198 return do_fork(clone_flags, newsp, regs, 0, NULL, NULL); 192 return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
199 } 193 }
200 194
201 int copy_thread(int nr, unsigned long clone_flags, 195 int copy_thread(int nr, unsigned long clone_flags,
202 unsigned long usp, unsigned long topstk, 196 unsigned long usp, unsigned long topstk,
203 struct task_struct * p, struct pt_regs * regs) 197 struct task_struct * p, struct pt_regs * regs)
204 { 198 {
205 struct pt_regs * childregs; 199 struct pt_regs * childregs;
206 struct switch_stack * childstack, *stack; 200 struct switch_stack * childstack, *stack;
207 unsigned long stack_offset, *retp; 201 unsigned long stack_offset, *retp;
208 202
209 stack_offset = THREAD_SIZE - sizeof(struct pt_regs); 203 stack_offset = THREAD_SIZE - sizeof(struct pt_regs);
210 childregs = (struct pt_regs *) ((unsigned long) p->thread_info + stack_offset); 204 childregs = (struct pt_regs *) ((unsigned long) p->thread_info + stack_offset);
211 205
212 *childregs = *regs; 206 *childregs = *regs;
213 childregs->d0 = 0; 207 childregs->d0 = 0;
214 208
215 retp = ((unsigned long *) regs); 209 retp = ((unsigned long *) regs);
216 stack = ((struct switch_stack *) retp) - 1; 210 stack = ((struct switch_stack *) retp) - 1;
217 211
218 childstack = ((struct switch_stack *) childregs) - 1; 212 childstack = ((struct switch_stack *) childregs) - 1;
219 *childstack = *stack; 213 *childstack = *stack;
220 childstack->retpc = (unsigned long)ret_from_fork; 214 childstack->retpc = (unsigned long)ret_from_fork;
221 215
222 p->thread.usp = usp; 216 p->thread.usp = usp;
223 p->thread.ksp = (unsigned long)childstack; 217 p->thread.ksp = (unsigned long)childstack;
224 /* 218 /*
225 * Must save the current SFC/DFC value, NOT the value when 219 * Must save the current SFC/DFC value, NOT the value when
226 * the parent was last descheduled - RGH 10-08-96 220 * the parent was last descheduled - RGH 10-08-96
227 */ 221 */
228 p->thread.fs = get_fs().seg; 222 p->thread.fs = get_fs().seg;
229 223
230 #ifdef CONFIG_FPU 224 #ifdef CONFIG_FPU
231 if (!FPU_IS_EMU) { 225 if (!FPU_IS_EMU) {
232 /* Copy the current fpu state */ 226 /* Copy the current fpu state */
233 asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory"); 227 asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
234 228
235 if (p->thread.fpstate[0]) 229 if (p->thread.fpstate[0])
236 asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t" 230 asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
237 "fmoveml %/fpiar/%/fpcr/%/fpsr,%1" 231 "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
238 : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0]) 232 : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
239 : "memory"); 233 : "memory");
240 /* Restore the state in case the fpu was busy */ 234 /* Restore the state in case the fpu was busy */
241 asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0])); 235 asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
242 } 236 }
243 #endif 237 #endif
244 238
245 return 0; 239 return 0;
246 } 240 }
247 241
248 /* Fill in the fpu structure for a core dump. */ 242 /* Fill in the fpu structure for a core dump. */
249 243
250 int dump_fpu(struct pt_regs *regs, struct user_m68kfp_struct *fpu) 244 int dump_fpu(struct pt_regs *regs, struct user_m68kfp_struct *fpu)
251 { 245 {
252 #ifdef CONFIG_FPU 246 #ifdef CONFIG_FPU
253 char fpustate[216]; 247 char fpustate[216];
254 248
255 if (FPU_IS_EMU) { 249 if (FPU_IS_EMU) {
256 int i; 250 int i;
257 251
258 memcpy(fpu->fpcntl, current->thread.fpcntl, 12); 252 memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
259 memcpy(fpu->fpregs, current->thread.fp, 96); 253 memcpy(fpu->fpregs, current->thread.fp, 96);
260 /* Convert internal fpu reg representation 254 /* Convert internal fpu reg representation
261 * into long double format 255 * into long double format
262 */ 256 */
263 for (i = 0; i < 24; i += 3) 257 for (i = 0; i < 24; i += 3)
264 fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) | 258 fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
265 ((fpu->fpregs[i] & 0x0000ffff) << 16); 259 ((fpu->fpregs[i] & 0x0000ffff) << 16);
266 return 1; 260 return 1;
267 } 261 }
268 262
269 /* First dump the fpu context to avoid protocol violation. */ 263 /* First dump the fpu context to avoid protocol violation. */
270 asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory"); 264 asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
271 if (!fpustate[0]) 265 if (!fpustate[0])
272 return 0; 266 return 0;
273 267
274 asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0" 268 asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
275 :: "m" (fpu->fpcntl[0]) 269 :: "m" (fpu->fpcntl[0])
276 : "memory"); 270 : "memory");
277 asm volatile ("fmovemx %/fp0-%/fp7,%0" 271 asm volatile ("fmovemx %/fp0-%/fp7,%0"
278 :: "m" (fpu->fpregs[0]) 272 :: "m" (fpu->fpregs[0])
279 : "memory"); 273 : "memory");
280 #endif 274 #endif
281 return 1; 275 return 1;
282 } 276 }
283 277
284 /* 278 /*
285 * fill in the user structure for a core dump.. 279 * fill in the user structure for a core dump..
286 */ 280 */
287 void dump_thread(struct pt_regs * regs, struct user * dump) 281 void dump_thread(struct pt_regs * regs, struct user * dump)
288 { 282 {
289 struct switch_stack *sw; 283 struct switch_stack *sw;
290 284
291 /* changed the size calculations - should hopefully work better. lbt */ 285 /* changed the size calculations - should hopefully work better. lbt */
292 dump->magic = CMAGIC; 286 dump->magic = CMAGIC;
293 dump->start_code = 0; 287 dump->start_code = 0;
294 dump->start_stack = rdusp() & ~(PAGE_SIZE - 1); 288 dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
295 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; 289 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
296 dump->u_dsize = ((unsigned long) (current->mm->brk + 290 dump->u_dsize = ((unsigned long) (current->mm->brk +
297 (PAGE_SIZE-1))) >> PAGE_SHIFT; 291 (PAGE_SIZE-1))) >> PAGE_SHIFT;
298 dump->u_dsize -= dump->u_tsize; 292 dump->u_dsize -= dump->u_tsize;
299 dump->u_ssize = 0; 293 dump->u_ssize = 0;
300 294
301 if (dump->start_stack < TASK_SIZE) 295 if (dump->start_stack < TASK_SIZE)
302 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; 296 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
303 297
304 dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump); 298 dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump);
305 sw = ((struct switch_stack *)regs) - 1; 299 sw = ((struct switch_stack *)regs) - 1;
306 dump->regs.d1 = regs->d1; 300 dump->regs.d1 = regs->d1;
307 dump->regs.d2 = regs->d2; 301 dump->regs.d2 = regs->d2;
308 dump->regs.d3 = regs->d3; 302 dump->regs.d3 = regs->d3;
309 dump->regs.d4 = regs->d4; 303 dump->regs.d4 = regs->d4;
310 dump->regs.d5 = regs->d5; 304 dump->regs.d5 = regs->d5;
311 dump->regs.d6 = sw->d6; 305 dump->regs.d6 = sw->d6;
312 dump->regs.d7 = sw->d7; 306 dump->regs.d7 = sw->d7;
313 dump->regs.a0 = regs->a0; 307 dump->regs.a0 = regs->a0;
314 dump->regs.a1 = regs->a1; 308 dump->regs.a1 = regs->a1;
315 dump->regs.a2 = regs->a2; 309 dump->regs.a2 = regs->a2;
316 dump->regs.a3 = sw->a3; 310 dump->regs.a3 = sw->a3;
317 dump->regs.a4 = sw->a4; 311 dump->regs.a4 = sw->a4;
318 dump->regs.a5 = sw->a5; 312 dump->regs.a5 = sw->a5;
319 dump->regs.a6 = sw->a6; 313 dump->regs.a6 = sw->a6;
320 dump->regs.d0 = regs->d0; 314 dump->regs.d0 = regs->d0;
321 dump->regs.orig_d0 = regs->orig_d0; 315 dump->regs.orig_d0 = regs->orig_d0;
322 dump->regs.stkadj = regs->stkadj; 316 dump->regs.stkadj = regs->stkadj;
323 dump->regs.sr = regs->sr; 317 dump->regs.sr = regs->sr;
324 dump->regs.pc = regs->pc; 318 dump->regs.pc = regs->pc;
325 dump->regs.fmtvec = (regs->format << 12) | regs->vector; 319 dump->regs.fmtvec = (regs->format << 12) | regs->vector;
326 /* dump floating point stuff */ 320 /* dump floating point stuff */
327 dump->u_fpvalid = dump_fpu (regs, &dump->m68kfp); 321 dump->u_fpvalid = dump_fpu (regs, &dump->m68kfp);
328 } 322 }
329 323
330 /* 324 /*
331 * Generic dumping code. Used for panic and debug. 325 * Generic dumping code. Used for panic and debug.
332 */ 326 */
333 void dump(struct pt_regs *fp) 327 void dump(struct pt_regs *fp)
334 { 328 {
335 unsigned long *sp; 329 unsigned long *sp;
336 unsigned char *tp; 330 unsigned char *tp;
337 int i; 331 int i;
338 332
339 printk(KERN_EMERG "\nCURRENT PROCESS:\n\n"); 333 printk(KERN_EMERG "\nCURRENT PROCESS:\n\n");
340 printk(KERN_EMERG "COMM=%s PID=%d\n", current->comm, current->pid); 334 printk(KERN_EMERG "COMM=%s PID=%d\n", current->comm, current->pid);
341 335
342 if (current->mm) { 336 if (current->mm) {
343 printk(KERN_EMERG "TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n", 337 printk(KERN_EMERG "TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
344 (int) current->mm->start_code, 338 (int) current->mm->start_code,
345 (int) current->mm->end_code, 339 (int) current->mm->end_code,
346 (int) current->mm->start_data, 340 (int) current->mm->start_data,
347 (int) current->mm->end_data, 341 (int) current->mm->end_data,
348 (int) current->mm->end_data, 342 (int) current->mm->end_data,
349 (int) current->mm->brk); 343 (int) current->mm->brk);
350 printk(KERN_EMERG "USER-STACK=%08x KERNEL-STACK=%08x\n\n", 344 printk(KERN_EMERG "USER-STACK=%08x KERNEL-STACK=%08x\n\n",
351 (int) current->mm->start_stack, 345 (int) current->mm->start_stack,
352 (int)(((unsigned long) current) + THREAD_SIZE)); 346 (int)(((unsigned long) current) + THREAD_SIZE));
353 } 347 }
354 348
355 printk(KERN_EMERG "PC: %08lx\n", fp->pc); 349 printk(KERN_EMERG "PC: %08lx\n", fp->pc);
356 printk(KERN_EMERG "SR: %08lx SP: %08lx\n", (long) fp->sr, (long) fp); 350 printk(KERN_EMERG "SR: %08lx SP: %08lx\n", (long) fp->sr, (long) fp);
357 printk(KERN_EMERG "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n", 351 printk(KERN_EMERG "d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
358 fp->d0, fp->d1, fp->d2, fp->d3); 352 fp->d0, fp->d1, fp->d2, fp->d3);
359 printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n", 353 printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
360 fp->d4, fp->d5, fp->a0, fp->a1); 354 fp->d4, fp->d5, fp->a0, fp->a1);
361 printk(KERN_EMERG "\nUSP: %08x TRAPFRAME: %08x\n", (unsigned int) rdusp(), 355 printk(KERN_EMERG "\nUSP: %08x TRAPFRAME: %08x\n", (unsigned int) rdusp(),
362 (unsigned int) fp); 356 (unsigned int) fp);
363 357
364 printk(KERN_EMERG "\nCODE:"); 358 printk(KERN_EMERG "\nCODE:");
365 tp = ((unsigned char *) fp->pc) - 0x20; 359 tp = ((unsigned char *) fp->pc) - 0x20;
366 for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) { 360 for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
367 if ((i % 0x10) == 0) 361 if ((i % 0x10) == 0)
368 printk(KERN_EMERG "\n%08x: ", (int) (tp + i)); 362 printk(KERN_EMERG "\n%08x: ", (int) (tp + i));
369 printk(KERN_EMERG "%08x ", (int) *sp++); 363 printk(KERN_EMERG "%08x ", (int) *sp++);
370 } 364 }
371 printk(KERN_EMERG "\n"); 365 printk(KERN_EMERG "\n");
372 366
373 printk(KERN_EMERG "\nKERNEL STACK:"); 367 printk(KERN_EMERG "\nKERNEL STACK:");
374 tp = ((unsigned char *) fp) - 0x40; 368 tp = ((unsigned char *) fp) - 0x40;
375 for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) { 369 for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
376 if ((i % 0x10) == 0) 370 if ((i % 0x10) == 0)
377 printk(KERN_EMERG "\n%08x: ", (int) (tp + i)); 371 printk(KERN_EMERG "\n%08x: ", (int) (tp + i));
378 printk(KERN_EMERG "%08x ", (int) *sp++); 372 printk(KERN_EMERG "%08x ", (int) *sp++);
379 } 373 }
380 printk(KERN_EMERG "\n"); 374 printk(KERN_EMERG "\n");
381 printk(KERN_EMERG "\n"); 375 printk(KERN_EMERG "\n");
382 376
383 printk(KERN_EMERG "\nUSER STACK:"); 377 printk(KERN_EMERG "\nUSER STACK:");
384 tp = (unsigned char *) (rdusp() - 0x10); 378 tp = (unsigned char *) (rdusp() - 0x10);
385 for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) { 379 for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
386 if ((i % 0x10) == 0) 380 if ((i % 0x10) == 0)
387 printk(KERN_EMERG "\n%08x: ", (int) (tp + i)); 381 printk(KERN_EMERG "\n%08x: ", (int) (tp + i));
388 printk(KERN_EMERG "%08x ", (int) *sp++); 382 printk(KERN_EMERG "%08x ", (int) *sp++);
389 } 383 }
390 printk(KERN_EMERG "\n\n"); 384 printk(KERN_EMERG "\n\n");
391 } 385 }
392 386
393 /* 387 /*
394 * sys_execve() executes a new program. 388 * sys_execve() executes a new program.
395 */ 389 */
396 asmlinkage int sys_execve(char *name, char **argv, char **envp) 390 asmlinkage int sys_execve(char *name, char **argv, char **envp)
397 { 391 {
398 int error; 392 int error;
399 char * filename; 393 char * filename;
400 struct pt_regs *regs = (struct pt_regs *) &name; 394 struct pt_regs *regs = (struct pt_regs *) &name;
401 395
402 lock_kernel(); 396 lock_kernel();
403 filename = getname(name); 397 filename = getname(name);
404 error = PTR_ERR(filename); 398 error = PTR_ERR(filename);
405 if (IS_ERR(filename)) 399 if (IS_ERR(filename))
406 goto out; 400 goto out;
407 error = do_execve(filename, argv, envp, regs); 401 error = do_execve(filename, argv, envp, regs);
408 putname(filename); 402 putname(filename);
409 out: 403 out:
410 unlock_kernel(); 404 unlock_kernel();
411 return error; 405 return error;
412 } 406 }
413 407
414 unsigned long get_wchan(struct task_struct *p) 408 unsigned long get_wchan(struct task_struct *p)
415 { 409 {
416 unsigned long fp, pc; 410 unsigned long fp, pc;
417 unsigned long stack_page; 411 unsigned long stack_page;
418 int count = 0; 412 int count = 0;
419 if (!p || p == current || p->state == TASK_RUNNING) 413 if (!p || p == current || p->state == TASK_RUNNING)
420 return 0; 414 return 0;
421 415
422 stack_page = (unsigned long)p; 416 stack_page = (unsigned long)p;
423 fp = ((struct switch_stack *)p->thread.ksp)->a6; 417 fp = ((struct switch_stack *)p->thread.ksp)->a6;
424 do { 418 do {
425 if (fp < stack_page+sizeof(struct thread_info) || 419 if (fp < stack_page+sizeof(struct thread_info) ||
426 fp >= 8184+stack_page) 420 fp >= 8184+stack_page)
427 return 0; 421 return 0;
428 pc = ((unsigned long *)fp)[1]; 422 pc = ((unsigned long *)fp)[1];
429 if (!in_sched_functions(pc)) 423 if (!in_sched_functions(pc))
430 return pc; 424 return pc;
431 fp = *(unsigned long *) fp; 425 fp = *(unsigned long *) fp;
432 } while (count++ < 16); 426 } while (count++ < 16);
433 return 0; 427 return 0;
434 } 428 }
435 429
436 /* 430 /*
437 * Return saved PC of a blocked thread. 431 * Return saved PC of a blocked thread.
438 */ 432 */
439 unsigned long thread_saved_pc(struct task_struct *tsk) 433 unsigned long thread_saved_pc(struct task_struct *tsk)
440 { 434 {
441 struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp; 435 struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
442 436
443 /* Check whether the thread is blocked in resume() */ 437 /* Check whether the thread is blocked in resume() */
444 if (in_sched_functions(sw->retpc)) 438 if (in_sched_functions(sw->retpc))
445 return ((unsigned long *)sw->a6)[1]; 439 return ((unsigned long *)sw->a6)[1];
446 else 440 else
447 return sw->retpc; 441 return sw->retpc;
448 } 442 }
449 443
450 444
arch/mips/kernel/reset.c
1 /* 1 /*
2 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2001 by Ralf Baechle 6 * Copyright (C) 2001 by Ralf Baechle
7 * Copyright (C) 2001 MIPS Technologies, Inc. 7 * Copyright (C) 2001 MIPS Technologies, Inc.
8 */ 8 */
9 #include <linux/kernel.h> 9 #include <linux/kernel.h>
10 #include <linux/module.h> 10 #include <linux/module.h>
11 #include <linux/types.h> 11 #include <linux/types.h>
12 #include <linux/reboot.h> 12 #include <linux/reboot.h>
13 #include <asm/reboot.h> 13 #include <asm/reboot.h>
14 14
/*
 * Urgs ... Too many MIPS machines to handle this in a generic way.
 * So handle all using function pointers to machine specific
 * functions.
 *
 * Board setup code is expected to install these hooks before the
 * generic reboot paths can be used.  Note: the wrappers below are
 * deliberately NOT exported to modules; modules should go through
 * kernel_restart()/kernel_halt()/kernel_power_off() instead.
 */
void (*_machine_restart)(char *command);
void (*_machine_halt)(void);
void (*_machine_power_off)(void);

/* Delegate restart to the board-specific hook (command may be NULL). */
void machine_restart(char *command)
{
	_machine_restart(command);
}

/* Delegate halt to the board-specific hook. */
void machine_halt(void)
{
	_machine_halt();
}

/* Delegate power-off to the board-specific hook. */
void machine_power_off(void)
{
	_machine_power_off();
}
44 38
arch/parisc/kernel/process.c
1 /* 1 /*
2 * PARISC Architecture-dependent parts of process handling 2 * PARISC Architecture-dependent parts of process handling
3 * based on the work for i386 3 * based on the work for i386
4 * 4 *
5 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org> 5 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
6 * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net> 6 * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
7 * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org> 7 * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
8 * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org> 8 * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
9 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org> 9 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
10 * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org> 10 * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
11 * Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com> 11 * Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
12 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-lixux.org> 12 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-lixux.org>
13 * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org> 13 * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
14 * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org> 14 * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
15 * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org> 15 * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
16 * Copyright (C) 2001-2002 Helge Deller <deller at parisc-linux.org> 16 * Copyright (C) 2001-2002 Helge Deller <deller at parisc-linux.org>
17 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org> 17 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
18 * 18 *
19 * 19 *
20 * This program is free software; you can redistribute it and/or modify 20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by 21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or 22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version. 23 * (at your option) any later version.
24 * 24 *
25 * This program is distributed in the hope that it will be useful, 25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of 26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details. 28 * GNU General Public License for more details.
29 * 29 *
30 * You should have received a copy of the GNU General Public License 30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software 31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 */ 33 */
34 34
35 #include <stdarg.h> 35 #include <stdarg.h>
36 36
37 #include <linux/elf.h> 37 #include <linux/elf.h>
38 #include <linux/errno.h> 38 #include <linux/errno.h>
39 #include <linux/kernel.h> 39 #include <linux/kernel.h>
40 #include <linux/mm.h> 40 #include <linux/mm.h>
41 #include <linux/module.h> 41 #include <linux/module.h>
42 #include <linux/personality.h> 42 #include <linux/personality.h>
43 #include <linux/ptrace.h> 43 #include <linux/ptrace.h>
44 #include <linux/sched.h> 44 #include <linux/sched.h>
45 #include <linux/stddef.h> 45 #include <linux/stddef.h>
46 #include <linux/unistd.h> 46 #include <linux/unistd.h>
47 #include <linux/kallsyms.h> 47 #include <linux/kallsyms.h>
48 48
49 #include <asm/io.h> 49 #include <asm/io.h>
50 #include <asm/offsets.h> 50 #include <asm/offsets.h>
51 #include <asm/pdc.h> 51 #include <asm/pdc.h>
52 #include <asm/pdc_chassis.h> 52 #include <asm/pdc_chassis.h>
53 #include <asm/pgalloc.h> 53 #include <asm/pgalloc.h>
54 #include <asm/uaccess.h> 54 #include <asm/uaccess.h>
55 #include <asm/unwind.h> 55 #include <asm/unwind.h>
56 56
/* Nesting counter for disable_hlt()/enable_hlt(); nonzero disables hlt.
 * NOTE(review): nothing in this chunk reads hlt_counter — presumably the
 * idle code elsewhere consults it; confirm against the rest of the file. */
static int hlt_counter;

/*
 * Power off function, if any
 */
void (*pm_power_off)(void);

/* Forbid the idle loop from using hlt (nestable). */
void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

/* Re-allow hlt; pairs with disable_hlt(). */
void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/* Default idle: just a compiler barrier, no power-saving instruction. */
void default_idle(void)
{
	barrier();
}
82 82
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		/* Spin (barrier only) until a reschedule is requested. */
		while (!need_resched())
			barrier();
		schedule();
		/* Trim the page-table cache while we have nothing to do. */
		check_pgt_cache();
	}
}
99 99
100 100
/* Broadcast-reset command register address (width depends on LP64). */
#ifdef __LP64__
#define COMMAND_GLOBAL    0xfffffffffffe0030UL
#else
#define COMMAND_GLOBAL    0xfffe0030
#endif

#define CMD_RESET         5       /* reset any module */

/*
** The Wright Brothers and Gecko systems have a H/W problem
** (Lasi...'nuf said) may cause a broadcast reset to lockup
** the system. An HVERSION dependent PDC call was developed
** to perform a "safe", platform specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
** the PDC call will not return (the system will be reset).
*/
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
	/*
	** If user has modified the Firmware Selftest Bitmap,
	** run the tests specified in the bitmap after the
	** system is rebooted w/PDC_DO_RESET.
	**
	** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
	**
	** Using "directed resets" at each processor with the MEM_TOC
	** vector cleared will also avoid running destructive
	** memory self tests. (Not implemented yet)
	*/
	if (ftc_bitmap) {
		pdc_do_firm_test_reset(ftc_bitmap);
	}
#endif
	/* set up a new led state on systems shipped with a LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* "Normal" system reset */
	pdc_do_reset();

	/* Nope...box should reset with just CMD_RESET now */
	gsc_writel(CMD_RESET, COMMAND_GLOBAL);

	/* Wait for RESET to lay us to rest. */
	while (1) ;

}
/*
 * Nothing to do on halt itself; all visible work (LED/chassis updates)
 * happens earlier via the reboot notifier chain.
 */
void machine_halt(void)
{
	/*
	** The LED/ChassisCodes are updated by the led_halt()
	** function, called by the reboot notifier chain.
	*/
}
/*
 * This routine is called from sys_reboot to actually turn off the
 * machine
 */
void machine_power_off(void)
{
	/* If there is a registered power off handler, call it. */
	if(pm_power_off)
		pm_power_off();

	/* Put the soft power button back under hardware control.
	 * If the user had already pressed the power button, the
	 * following call will immediately power off. */
	pdc_soft_power_button(0);

	/* Update the chassis display/LED state before we give up. */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* It seems we have no way to power the system off via
	 * software. The user has to press the button himself. */

	printk(KERN_EMERG "System shut down completed.\n"
	       KERN_EMERG "Please power this system off now.");
}
191 185
192 186
/*
 * Create a kernel thread
 *
 * Thin wrapper around the assembly helper __kernel_thread; kept as a
 * real function (rather than a #define) to leave room for debugging.
 */

extern pid_t __kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{

	/*
	 * FIXME: Once we are sure we don't need any debug here,
	 *	  kernel_thread can become a #define.
	 */

	return __kernel_thread(fn, arg, flags);
}
EXPORT_SYMBOL(kernel_thread);
209 203
/*
 * Free current thread data structures etc..
 * Nothing to release on this architecture.
 */
void exit_thread(void)
{
}

/* Reset per-thread state for exec; only the address limit needs resetting. */
void flush_thread(void)
{
	/* Only needs to handle fpu stuff or perf monitors.
	** REVISIT: several arches implement a "lazy fpu state".
	*/
	set_fs(USER_DS);
}

/* No per-task resources to free when a task dies. */
void release_thread(struct task_struct *dead_task)
{
}
228 222
/*
 * Fill in the FPU structure for a core dump.
 * Returns 1 when the register set was copied, 0 when no regs are available.
 */

int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
{
	if (regs == NULL)
		return 0;

	memcpy(r, regs->fr, sizeof *r);
	return 1;
}

/* Same as dump_fpu() but reads the FP regs saved in a task's thread struct. */
int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
{
	memcpy(r, tsk->thread.regs.fr, sizeof(*r));
	return 1;
}
247 241
/* Note that "fork()" is implemented in terms of clone, with
   parameters (SIGCHLD, regs->gr[30], regs). */
int
sys_clone(unsigned long clone_flags, unsigned long usp,
	  struct pt_regs *regs)
{
	/* gr[26] carries the user's child-tid pointer on this ABI. */
	int __user *user_tid = (int __user *)regs->gr[26];

	/* usp must be word aligned.  This also prevents users from
	 * passing in the value 1 (which is the signal for a special
	 * return for a kernel thread) */
	usp = ALIGN(usp, 4);

	/* A zero value for usp means use the current stack */
	if(usp == 0)
	  usp = regs->gr[30];

	return do_fork(clone_flags, usp, regs, 0, user_tid, NULL);
}
267 261
/* vfork: share the VM and block the parent until the child execs/exits. */
int
sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gr[30], regs, 0, NULL, NULL);
}
273 267
/*
 * Set up the saved registers and kernel stack of a newly forked task.
 * Distinguishes kernel threads (usp == 1, see kernel_thread) from user
 * threads, and HP-UX personalities from Linux ones.  Always returns 0.
 */
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	    unsigned long unused,	/* in ia64 this is "user_stack_size" */
	    struct task_struct * p, struct pt_regs * pregs)
{
	struct pt_regs * cregs = &(p->thread.regs);
	struct thread_info *ti = p->thread_info;

	/* We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text */
	extern void * const ret_from_kernel_thread;
	extern void * const child_return;
#ifdef CONFIG_HPUX
	extern void * const hpux_child_return;
#endif

	/* Start from a copy of the parent's register frame. */
	*cregs = *pregs;

	/* Set the return value for the child.  Note that this is not
	   actually restored by the syscall exit path, but we put it
	   here for consistency in case of signals. */
	cregs->gr[28] = 0; /* child */

	/*
	 * We need to differentiate between a user fork and a
	 * kernel fork. We can't use user_mode, because the
	 * the syscall path doesn't save iaoq.  Right now
	 * We rely on the fact that kernel_thread passes
	 * in zero for usp.
	 */
	if (usp == 1) {
		/* kernel thread */
		cregs->ksp = (((unsigned long)(ti)) + THREAD_SZ_ALGN);
		/* Must exit via ret_from_kernel_thread in order
		 * to call schedule_tail()
		 */
		cregs->kpc = (unsigned long) &ret_from_kernel_thread;
		/*
		 * Copy function and argument to be called from
		 * ret_from_kernel_thread.
		 */
#ifdef __LP64__
		cregs->gr[27] = pregs->gr[27];
#endif
		cregs->gr[26] = pregs->gr[26];
		cregs->gr[25] = pregs->gr[25];
	} else {
		/* user thread */
		/*
		 * Note that the fork wrappers are responsible
		 * for setting gr[21].
		 */

		/* Use same stack depth as parent */
		cregs->ksp = ((unsigned long)(ti))
			+ (pregs->gr[21] & (THREAD_SIZE - 1));
		cregs->gr[30] = usp;
		if (p->personality == PER_HPUX) {
#ifdef CONFIG_HPUX
			cregs->kpc = (unsigned long) &hpux_child_return;
#else
			BUG();
#endif
		} else {
			cregs->kpc = (unsigned long) &child_return;
		}
	}

	return 0;
}
345 339
/* Return the kernel PC saved in the (blocked) task's thread struct. */
unsigned long thread_saved_pc(struct task_struct *t)
{
	return t->thread.regs.kpc;
}
350 344
/*
 * sys_execve() executes a new program.
 *
 * Arguments arrive in the saved register frame: gr[26] = path,
 * gr[25] = argv, gr[24] = envp.
 */

asmlinkage int sys_execve(struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((const char __user *) regs->gr[26]);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, (char __user **) regs->gr[25],
		(char __user **) regs->gr[24], regs);
	if (error == 0) {
		/* Successful exec: drop the single-step-on-exec flag. */
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:

	return error;
}
376 370
/*
 * Unwind a blocked task's kernel stack and return the first PC outside
 * the scheduler, i.e. where the task is sleeping.  Returns 0 when the
 * unwind fails or only scheduler frames are found within the depth limit.
 */
unsigned long
get_wchan(struct task_struct *p)
{
	struct unwind_frame_info info;
	unsigned long ip;
	int count = 0;
	/*
	 * These bracket the sleeping functions..
	 */

	unwind_frame_init_from_blocked_task(&info, p);
	do {
		if (unwind_once(&info) < 0)
			return 0;
		ip = info.ip;
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < 16);	/* bounded walk: at most 17 frames */
	return 0;
}
397 391
arch/ppc/kernel/setup.c
1 /* 1 /*
2 * Common prep/pmac/chrp boot and setup code. 2 * Common prep/pmac/chrp boot and setup code.
3 */ 3 */
4 4
5 #include <linux/config.h> 5 #include <linux/config.h>
6 #include <linux/module.h> 6 #include <linux/module.h>
7 #include <linux/string.h> 7 #include <linux/string.h>
8 #include <linux/sched.h> 8 #include <linux/sched.h>
9 #include <linux/init.h> 9 #include <linux/init.h>
10 #include <linux/kernel.h> 10 #include <linux/kernel.h>
11 #include <linux/reboot.h> 11 #include <linux/reboot.h>
12 #include <linux/delay.h> 12 #include <linux/delay.h>
13 #include <linux/initrd.h> 13 #include <linux/initrd.h>
14 #include <linux/ide.h> 14 #include <linux/ide.h>
15 #include <linux/tty.h> 15 #include <linux/tty.h>
16 #include <linux/bootmem.h> 16 #include <linux/bootmem.h>
17 #include <linux/seq_file.h> 17 #include <linux/seq_file.h>
18 #include <linux/root_dev.h> 18 #include <linux/root_dev.h>
19 #include <linux/cpu.h> 19 #include <linux/cpu.h>
20 #include <linux/console.h> 20 #include <linux/console.h>
21 21
22 #include <asm/residual.h> 22 #include <asm/residual.h>
23 #include <asm/io.h> 23 #include <asm/io.h>
24 #include <asm/prom.h> 24 #include <asm/prom.h>
25 #include <asm/processor.h> 25 #include <asm/processor.h>
26 #include <asm/pgtable.h> 26 #include <asm/pgtable.h>
27 #include <asm/bootinfo.h> 27 #include <asm/bootinfo.h>
28 #include <asm/setup.h> 28 #include <asm/setup.h>
29 #include <asm/amigappc.h> 29 #include <asm/amigappc.h>
30 #include <asm/smp.h> 30 #include <asm/smp.h>
31 #include <asm/elf.h> 31 #include <asm/elf.h>
32 #include <asm/cputable.h> 32 #include <asm/cputable.h>
33 #include <asm/bootx.h> 33 #include <asm/bootx.h>
34 #include <asm/btext.h> 34 #include <asm/btext.h>
35 #include <asm/machdep.h> 35 #include <asm/machdep.h>
36 #include <asm/uaccess.h> 36 #include <asm/uaccess.h>
37 #include <asm/system.h> 37 #include <asm/system.h>
38 #include <asm/pmac_feature.h> 38 #include <asm/pmac_feature.h>
39 #include <asm/sections.h> 39 #include <asm/sections.h>
40 #include <asm/nvram.h> 40 #include <asm/nvram.h>
41 #include <asm/xmon.h> 41 #include <asm/xmon.h>
42 #include <asm/ocp.h> 42 #include <asm/ocp.h>
43 43
44 #if defined(CONFIG_85xx) || defined(CONFIG_83xx) || defined(CONFIG_MPC10X_BRIDGE) 44 #if defined(CONFIG_85xx) || defined(CONFIG_83xx) || defined(CONFIG_MPC10X_BRIDGE)
45 #include <asm/ppc_sys.h> 45 #include <asm/ppc_sys.h>
46 #endif 46 #endif
47 47
48 #if defined CONFIG_KGDB 48 #if defined CONFIG_KGDB
49 #include <asm/kgdb.h> 49 #include <asm/kgdb.h>
50 #endif 50 #endif
51 51
52 extern void platform_init(unsigned long r3, unsigned long r4, 52 extern void platform_init(unsigned long r3, unsigned long r4,
53 unsigned long r5, unsigned long r6, unsigned long r7); 53 unsigned long r5, unsigned long r6, unsigned long r7);
54 extern void bootx_init(unsigned long r4, unsigned long phys); 54 extern void bootx_init(unsigned long r4, unsigned long phys);
55 extern void identify_cpu(unsigned long offset, unsigned long cpu); 55 extern void identify_cpu(unsigned long offset, unsigned long cpu);
56 extern void do_cpu_ftr_fixups(unsigned long offset); 56 extern void do_cpu_ftr_fixups(unsigned long offset);
57 extern void reloc_got2(unsigned long offset); 57 extern void reloc_got2(unsigned long offset);
58 58
59 extern void ppc6xx_idle(void); 59 extern void ppc6xx_idle(void);
60 extern void power4_idle(void); 60 extern void power4_idle(void);
61 61
62 extern boot_infos_t *boot_infos; 62 extern boot_infos_t *boot_infos;
63 struct ide_machdep_calls ppc_ide_md; 63 struct ide_machdep_calls ppc_ide_md;
64 64
65 /* Used with the BI_MEMSIZE bootinfo parameter to store the memory 65 /* Used with the BI_MEMSIZE bootinfo parameter to store the memory
66 size value reported by the boot loader. */ 66 size value reported by the boot loader. */
67 unsigned long boot_mem_size; 67 unsigned long boot_mem_size;
68 68
69 unsigned long ISA_DMA_THRESHOLD; 69 unsigned long ISA_DMA_THRESHOLD;
70 unsigned long DMA_MODE_READ, DMA_MODE_WRITE; 70 unsigned long DMA_MODE_READ, DMA_MODE_WRITE;
71 71
72 #ifdef CONFIG_PPC_MULTIPLATFORM 72 #ifdef CONFIG_PPC_MULTIPLATFORM
73 int _machine = 0; 73 int _machine = 0;
74 74
75 extern void prep_init(unsigned long r3, unsigned long r4, 75 extern void prep_init(unsigned long r3, unsigned long r4,
76 unsigned long r5, unsigned long r6, unsigned long r7); 76 unsigned long r5, unsigned long r6, unsigned long r7);
77 extern void pmac_init(unsigned long r3, unsigned long r4, 77 extern void pmac_init(unsigned long r3, unsigned long r4,
78 unsigned long r5, unsigned long r6, unsigned long r7); 78 unsigned long r5, unsigned long r6, unsigned long r7);
79 extern void chrp_init(unsigned long r3, unsigned long r4, 79 extern void chrp_init(unsigned long r3, unsigned long r4,
80 unsigned long r5, unsigned long r6, unsigned long r7); 80 unsigned long r5, unsigned long r6, unsigned long r7);
81 #endif /* CONFIG_PPC_MULTIPLATFORM */ 81 #endif /* CONFIG_PPC_MULTIPLATFORM */
82 82
83 #ifdef CONFIG_MAGIC_SYSRQ 83 #ifdef CONFIG_MAGIC_SYSRQ
84 unsigned long SYSRQ_KEY = 0x54; 84 unsigned long SYSRQ_KEY = 0x54;
85 #endif /* CONFIG_MAGIC_SYSRQ */ 85 #endif /* CONFIG_MAGIC_SYSRQ */
86 86
87 #ifdef CONFIG_VGA_CONSOLE 87 #ifdef CONFIG_VGA_CONSOLE
88 unsigned long vgacon_remap_base; 88 unsigned long vgacon_remap_base;
89 #endif 89 #endif
90 90
91 struct machdep_calls ppc_md; 91 struct machdep_calls ppc_md;
92 92
93 /* 93 /*
94 * These are used in binfmt_elf.c to put aux entries on the stack 94 * These are used in binfmt_elf.c to put aux entries on the stack
95 * for each elf executable being started. 95 * for each elf executable being started.
96 */ 96 */
97 int dcache_bsize; 97 int dcache_bsize;
98 int icache_bsize; 98 int icache_bsize;
99 int ucache_bsize; 99 int ucache_bsize;
100 100
101 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_FB_VGA16) || \ 101 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_FB_VGA16) || \
102 defined(CONFIG_FB_VGA16_MODULE) || defined(CONFIG_FB_VESA) 102 defined(CONFIG_FB_VGA16_MODULE) || defined(CONFIG_FB_VESA)
103 struct screen_info screen_info = { 103 struct screen_info screen_info = {
104 0, 25, /* orig-x, orig-y */ 104 0, 25, /* orig-x, orig-y */
105 0, /* unused */ 105 0, /* unused */
106 0, /* orig-video-page */ 106 0, /* orig-video-page */
107 0, /* orig-video-mode */ 107 0, /* orig-video-mode */
108 80, /* orig-video-cols */ 108 80, /* orig-video-cols */
109 0,0,0, /* ega_ax, ega_bx, ega_cx */ 109 0,0,0, /* ega_ax, ega_bx, ega_cx */
110 25, /* orig-video-lines */ 110 25, /* orig-video-lines */
111 1, /* orig-video-isVGA */ 111 1, /* orig-video-isVGA */
112 16 /* orig-video-points */ 112 16 /* orig-video-points */
113 }; 113 };
114 #endif /* CONFIG_VGA_CONSOLE || CONFIG_FB_VGA16 || CONFIG_FB_VESA */ 114 #endif /* CONFIG_VGA_CONSOLE || CONFIG_FB_VGA16 || CONFIG_FB_VESA */
115 115
/*
 * Machine-level restart hook: flush NVRAM (if configured) so pending
 * writes are not lost across the reset, then hand control to the
 * platform's restart routine via ppc_md.
 */
void machine_restart(char *cmd)
{
#ifdef CONFIG_NVRAM
	nvram_sync();
#endif
	ppc_md.restart(cmd);
}
123 123
124 EXPORT_SYMBOL(machine_restart);
125
/*
 * Machine-level power-off hook: flush NVRAM (if configured), then let
 * the platform cut the power via ppc_md.
 */
void machine_power_off(void)
{
#ifdef CONFIG_NVRAM
	nvram_sync();
#endif
	ppc_md.power_off();
}
133 131
134 EXPORT_SYMBOL(machine_power_off);
135
/*
 * Machine-level halt hook: flush NVRAM (if configured), then halt the
 * machine via the platform's ppc_md.halt routine.
 */
void machine_halt(void)
{
#ifdef CONFIG_NVRAM
	nvram_sync();
#endif
	ppc_md.halt();
}
143
144 EXPORT_SYMBOL(machine_halt);
145 139
146 void (*pm_power_off)(void) = machine_power_off; 140 void (*pm_power_off)(void) = machine_power_off;
147 141
148 #ifdef CONFIG_TAU 142 #ifdef CONFIG_TAU
149 extern u32 cpu_temp(unsigned long cpu); 143 extern u32 cpu_temp(unsigned long cpu);
150 extern u32 cpu_temp_both(unsigned long cpu); 144 extern u32 cpu_temp_both(unsigned long cpu);
151 #endif /* CONFIG_TAU */ 145 #endif /* CONFIG_TAU */
152 146
/*
 * /proc/cpuinfo "show" callback.  v carries the CPU index + 1 (see
 * c_start()); an index >= NR_CPUS selects the summary footer.  Emits
 * the CPU name, optional temperature (CONFIG_TAU), any platform
 * per-CPU lines, the decoded PVR revision, and bogomips.
 */
int show_cpuinfo(struct seq_file *m, void *v)
{
	int i = (int) v - 1;	/* undo the +1 bias from c_start() */
	int err = 0;
	unsigned int pvr;
	unsigned short maj, min;
	unsigned long lpj;

	if (i >= NR_CPUS) {
		/* Show summary information */
#ifdef CONFIG_SMP
		unsigned long bogosum = 0;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_online(i))
				bogosum += cpu_data[i].loops_per_jiffy;
		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
#endif /* CONFIG_SMP */

		/* Let the platform append its own summary lines. */
		if (ppc_md.show_cpuinfo != NULL)
			err = ppc_md.show_cpuinfo(m);
		return err;
	}

#ifdef CONFIG_SMP
	if (!cpu_online(i))
		return 0;
	pvr = cpu_data[i].pvr;
	lpj = cpu_data[i].loops_per_jiffy;
#else
	/* UP: read the PVR directly and use the global bogomips value. */
	pvr = mfspr(SPRN_PVR);
	lpj = loops_per_jiffy;
#endif

	seq_printf(m, "processor\t: %d\n", i);
	seq_printf(m, "cpu\t\t: ");

	if (cur_cpu_spec[i]->pvr_mask)
		seq_printf(m, "%s", cur_cpu_spec[i]->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);
#ifdef CONFIG_ALTIVEC
	if (cur_cpu_spec[i]->cpu_features & CPU_FTR_ALTIVEC)
		seq_printf(m, ", altivec supported");
#endif
	seq_printf(m, "\n");

#ifdef CONFIG_TAU
	/* Report the on-die temperature sensor where the CPU has one. */
	if (cur_cpu_spec[i]->cpu_features & CPU_FTR_TAU) {
#ifdef CONFIG_TAU_AVERAGE
		/* more straightforward, but potentially misleading */
		seq_printf(m, "temperature \t: %u C (uncalibrated)\n",
			   cpu_temp(i));
#else
		/* show the actual temp sensor range */
		u32 temp;
		temp = cpu_temp_both(i);
		seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
			   temp & 0xff, temp >> 16);
#endif
	}
#endif /* CONFIG_TAU */

	if (ppc_md.show_percpuinfo != NULL) {
		err = ppc_md.show_percpuinfo(m, i);
		if (err)
			return err;
	}

	/* If we are a Freescale core do a simple check so
	 * we dont have to keep adding cases in the future */
	if ((PVR_VER(pvr) & 0x8000) == 0x8000) {
		maj = PVR_MAJ(pvr);
		min = PVR_MIN(pvr);
	} else {
		/* Legacy cores encode major/minor in family-specific ways. */
		switch (PVR_VER(pvr)) {
		case 0x0020:	/* 403 family */
			maj = PVR_MAJ(pvr) + 1;
			min = PVR_MIN(pvr);
			break;
		case 0x1008:	/* 740P/750P ?? */
			maj = ((pvr >> 8) & 0xFF) - 1;
			min = pvr & 0xFF;
			break;
		default:
			maj = (pvr >> 8) & 0xFF;
			min = pvr & 0xFF;
			break;
		}
	}

	seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
		   maj, min, PVR_VER(pvr), PVR_REV(pvr));

	seq_printf(m, "bogomips\t: %lu.%02lu\n",
		   lpj / (500000/HZ), (lpj / (5000/HZ)) % 100);

#if defined(CONFIG_85xx) || defined(CONFIG_83xx) || defined(CONFIG_MPC10X_BRIDGE)
	if (cur_ppc_sys_spec->ppc_sys_name)
		seq_printf(m, "chipset\t\t: %s\n",
			   cur_ppc_sys_spec->ppc_sys_name);
#endif

#ifdef CONFIG_SMP
	seq_printf(m, "\n");
#endif

	return 0;
}
262 256
263 static void *c_start(struct seq_file *m, loff_t *pos) 257 static void *c_start(struct seq_file *m, loff_t *pos)
264 { 258 {
265 int i = *pos; 259 int i = *pos;
266 260
267 return i <= NR_CPUS? (void *) (i + 1): NULL; 261 return i <= NR_CPUS? (void *) (i + 1): NULL;
268 } 262 }
269 263
270 static void *c_next(struct seq_file *m, void *v, loff_t *pos) 264 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
271 { 265 {
272 ++*pos; 266 ++*pos;
273 return c_start(m, pos); 267 return c_start(m, pos);
274 } 268 }
275 269
/* seq_file stop callback: nothing to release for the cpuinfo iterator. */
static void c_stop(struct seq_file *m, void *v)
{
}
279 273
280 struct seq_operations cpuinfo_op = { 274 struct seq_operations cpuinfo_op = {
281 .start =c_start, 275 .start =c_start,
282 .next = c_next, 276 .next = c_next,
283 .stop = c_stop, 277 .stop = c_stop,
284 .show = show_cpuinfo, 278 .show = show_cpuinfo,
285 }; 279 };
286 280
/*
 * We're called here very early in the boot.  We determine the machine
 * type and call the appropriate low-level setup functions.
 *  -- Cort <cort@fsmlabs.com>
 *
 * Note that the kernel may be running at an address which is different
 * from the address that it was linked at, so we must use RELOC/PTRRELOC
 * to access static data (including strings).  -- paulus
 *
 * Returns the physical address the kernel should run at.
 */
__init
unsigned long
early_init(int r3, int r4, int r5)
{
	unsigned long phys;
	unsigned long offset = reloc_offset();	/* run addr - link addr */

	/* Default */
	phys = offset + KERNELBASE;

	/* First zero the BSS -- use memset, some arches don't have
	 * caches on yet */
	memset_io(PTRRELOC(&__bss_start), 0, _end - __bss_start);

	/*
	 * Identify the CPU type and fix up code sections
	 * that depend on which cpu we have.
	 */
	identify_cpu(offset, 0);
	do_cpu_ftr_fixups(offset);

#if defined(CONFIG_PPC_MULTIPLATFORM)
	/* Relocate the GOT so static data is reachable at the run address. */
	reloc_got2(offset);

	/* If we came here from BootX, clear the screen,
	 * set up some pointers and return. */
	if ((r3 == 0x426f6f58) && (r5 == 0))	/* r3 == ASCII "BooX" */
		bootx_init(r4, phys);

	/*
	 * don't do anything on prep
	 * for now, don't use bootinfo because it breaks yaboot 0.5
	 * and assume that if we didn't find a magic number, we have OF
	 */
	else if (*(unsigned long *)(0) != 0xdeadc0de)
		phys = prom_init(r3, r4, (prom_entry)r5);

	/* Undo the GOT relocation before returning. */
	reloc_got2(-offset);
#endif

	return phys;
}
338 332
339 #ifdef CONFIG_PPC_OF 333 #ifdef CONFIG_PPC_OF
/*
 * Per-CPU /proc/cpuinfo lines on Open Firmware machines: report the
 * CPU clock frequency from the device tree.
 *
 * Assume here that all clock rates are the same in a
 * smp system. -- Cort
 */
int __openfirmware
of_show_percpuinfo(struct seq_file *m, int i)
{
	struct device_node *cpu_node;
	u32 *fp;
	int s;

	cpu_node = find_type_devices("cpu");
	if (!cpu_node)
		return 0;
	/* Walk to the i-th "cpu" node; stop early if there are fewer. */
	for (s = 0; s < i && cpu_node->next; s++)
		cpu_node = cpu_node->next;
	fp = (u32 *)get_property(cpu_node, "clock-frequency", NULL);
	if (fp)
		seq_printf(m, "clock\t\t: %dMHz\n", *fp / 1000000);
	return 0;
}
361 355
362 void __init 356 void __init
363 intuit_machine_type(void) 357 intuit_machine_type(void)
364 { 358 {
365 char *model; 359 char *model;
366 struct device_node *root; 360 struct device_node *root;
367 361
368 /* ask the OF info if we're a chrp or pmac */ 362 /* ask the OF info if we're a chrp or pmac */
369 root = find_path_device("/"); 363 root = find_path_device("/");
370 if (root != 0) { 364 if (root != 0) {
371 /* assume pmac unless proven to be chrp -- Cort */ 365 /* assume pmac unless proven to be chrp -- Cort */
372 _machine = _MACH_Pmac; 366 _machine = _MACH_Pmac;
373 model = get_property(root, "device_type", NULL); 367 model = get_property(root, "device_type", NULL);
374 if (model && !strncmp("chrp", model, 4)) 368 if (model && !strncmp("chrp", model, 4))
375 _machine = _MACH_chrp; 369 _machine = _MACH_chrp;
376 else { 370 else {
377 model = get_property(root, "model", NULL); 371 model = get_property(root, "model", NULL);
378 if (model && !strncmp(model, "IBM", 3)) 372 if (model && !strncmp(model, "IBM", 3))
379 _machine = _MACH_chrp; 373 _machine = _MACH_chrp;
380 } 374 }
381 } 375 }
382 } 376 }
383 #endif 377 #endif
384 378
385 #ifdef CONFIG_PPC_MULTIPLATFORM 379 #ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * The PPC_MULTIPLATFORM version of platform_init...
 *
 * Decides whether we booted on PReP, PowerMac or CHRP, recovers the
 * kernel command line and initrd location from whichever boot loader
 * ran us (quik, BootX, or Open Firmware), and dispatches to the
 * platform-specific init routine.
 */
void __init
platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7)
{
#ifdef CONFIG_BOOTX_TEXT
	if (boot_text_mapped) {
		btext_clearscreen();
		btext_welcome();
	}
#endif

	parse_bootinfo(find_bootinfo());

	/* if we didn't get any bootinfo telling us what we are... */
	if (_machine == 0) {
		/* prep boot loader tells us if we're prep or not */
		if ( *(unsigned long *)(KERNELBASE) == (0xdeadc0de) )
			_machine = _MACH_prep;
	}

	/* not much more to do here, if prep */
	if (_machine == _MACH_prep) {
		prep_init(r3, r4, r5, r6, r7);
		return;
	}

	/* prom_init has already been called from __start */
	if (boot_infos)
		relocate_nodes();

	/* If we aren't PReP, we can find out if we're Pmac
	 * or CHRP with this. */
	if (_machine == 0)
		intuit_machine_type();

	/* finish_device_tree may need _machine defined. */
	finish_device_tree();

	/*
	 * If we were booted via quik, r3 points to the physical
	 * address of the command-line parameters.
	 * If we were booted from an xcoff image (i.e. netbooted or
	 * booted from floppy), we get the command line from the
	 * bootargs property of the /chosen node.
	 * If an initial ramdisk is present, r3 and r4
	 * are used for initrd_start and initrd_size,
	 * otherwise they contain 0xdeadbeef.
	 */
	if (r3 >= 0x4000 && r3 < 0x800000 && r4 == 0) {
		/* quik boot: command line is at physical address r3 */
		strlcpy(cmd_line, (char *)r3 + KERNELBASE,
			sizeof(cmd_line));
	} else if (boot_infos != 0) {
		/* booted by BootX - check for ramdisk */
		if (boot_infos->kernelParamsOffset != 0)
			strlcpy(cmd_line, (char *) boot_infos
				+ boot_infos->kernelParamsOffset,
				sizeof(cmd_line));
#ifdef CONFIG_BLK_DEV_INITRD
		if (boot_infos->ramDisk) {
			initrd_start = (unsigned long) boot_infos
				+ boot_infos->ramDisk;
			initrd_end = initrd_start + boot_infos->ramDiskSize;
			initrd_below_start_ok = 1;
		}
#endif
	} else {
		/* Open Firmware boot: initrd in r3/r4, args from /chosen */
		struct device_node *chosen;
		char *p;

#ifdef CONFIG_BLK_DEV_INITRD
		if (r3 && r4 && r4 != 0xdeadbeef) {
			if (r3 < KERNELBASE)
				r3 += KERNELBASE;
			initrd_start = r3;
			initrd_end = r3 + r4;
			ROOT_DEV = Root_RAM0;
			initrd_below_start_ok = 1;
		}
#endif
		chosen = find_devices("chosen");
		if (chosen != NULL) {
			p = get_property(chosen, "bootargs", NULL);
			if (p && *p) {
				strlcpy(cmd_line, p, sizeof(cmd_line));
			}
		}
	}
#ifdef CONFIG_ADB
	/* "adb_sync" on the command line forces synchronous ADB probing. */
	if (strstr(cmd_line, "adb_sync")) {
		extern int __adb_probe_sync;
		__adb_probe_sync = 1;
	}
#endif /* CONFIG_ADB */

	switch (_machine) {
	case _MACH_Pmac:
		pmac_init(r3, r4, r5, r6, r7);
		break;
	case _MACH_chrp:
		chrp_init(r3, r4, r5, r6, r7);
		break;
	}
}
492 486
493 #ifdef CONFIG_SERIAL_CORE_CONSOLE 487 #ifdef CONFIG_SERIAL_CORE_CONSOLE
494 extern char *of_stdout_device; 488 extern char *of_stdout_device;
495 489
496 static int __init set_preferred_console(void) 490 static int __init set_preferred_console(void)
497 { 491 {
498 struct device_node *prom_stdout; 492 struct device_node *prom_stdout;
499 char *name; 493 char *name;
500 int offset = 0; 494 int offset = 0;
501 495
502 if (of_stdout_device == NULL) 496 if (of_stdout_device == NULL)
503 return -ENODEV; 497 return -ENODEV;
504 498
505 /* The user has requested a console so this is already set up. */ 499 /* The user has requested a console so this is already set up. */
506 if (strstr(saved_command_line, "console=")) 500 if (strstr(saved_command_line, "console="))
507 return -EBUSY; 501 return -EBUSY;
508 502
509 prom_stdout = find_path_device(of_stdout_device); 503 prom_stdout = find_path_device(of_stdout_device);
510 if (!prom_stdout) 504 if (!prom_stdout)
511 return -ENODEV; 505 return -ENODEV;
512 506
513 name = (char *)get_property(prom_stdout, "name", NULL); 507 name = (char *)get_property(prom_stdout, "name", NULL);
514 if (!name) 508 if (!name)
515 return -ENODEV; 509 return -ENODEV;
516 510
517 if (strcmp(name, "serial") == 0) { 511 if (strcmp(name, "serial") == 0) {
518 int i; 512 int i;
519 u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i); 513 u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
520 if (i > 8) { 514 if (i > 8) {
521 switch (reg[1]) { 515 switch (reg[1]) {
522 case 0x3f8: 516 case 0x3f8:
523 offset = 0; 517 offset = 0;
524 break; 518 break;
525 case 0x2f8: 519 case 0x2f8:
526 offset = 1; 520 offset = 1;
527 break; 521 break;
528 case 0x898: 522 case 0x898:
529 offset = 2; 523 offset = 2;
530 break; 524 break;
531 case 0x890: 525 case 0x890:
532 offset = 3; 526 offset = 3;
533 break; 527 break;
534 default: 528 default:
535 /* We dont recognise the serial port */ 529 /* We dont recognise the serial port */
536 return -ENODEV; 530 return -ENODEV;
537 } 531 }
538 } 532 }
539 } else if (strcmp(name, "ch-a") == 0) 533 } else if (strcmp(name, "ch-a") == 0)
540 offset = 0; 534 offset = 0;
541 else if (strcmp(name, "ch-b") == 0) 535 else if (strcmp(name, "ch-b") == 0)
542 offset = 1; 536 offset = 1;
543 else 537 else
544 return -ENODEV; 538 return -ENODEV;
545 return add_preferred_console("ttyS", offset, NULL); 539 return add_preferred_console("ttyS", offset, NULL);
546 } 540 }
547 console_initcall(set_preferred_console); 541 console_initcall(set_preferred_console);
548 #endif /* CONFIG_SERIAL_CORE_CONSOLE */ 542 #endif /* CONFIG_SERIAL_CORE_CONSOLE */
549 #endif /* CONFIG_PPC_MULTIPLATFORM */ 543 #endif /* CONFIG_PPC_MULTIPLATFORM */
550 544
551 struct bi_record *find_bootinfo(void) 545 struct bi_record *find_bootinfo(void)
552 { 546 {
553 struct bi_record *rec; 547 struct bi_record *rec;
554 548
555 rec = (struct bi_record *)_ALIGN((ulong)__bss_start+(1<<20)-1,(1<<20)); 549 rec = (struct bi_record *)_ALIGN((ulong)__bss_start+(1<<20)-1,(1<<20));
556 if ( rec->tag != BI_FIRST ) { 550 if ( rec->tag != BI_FIRST ) {
557 /* 551 /*
558 * This 0x10000 offset is a terrible hack but it will go away when 552 * This 0x10000 offset is a terrible hack but it will go away when
559 * we have the bootloader handle all the relocation and 553 * we have the bootloader handle all the relocation and
560 * prom calls -- Cort 554 * prom calls -- Cort
561 */ 555 */
562 rec = (struct bi_record *)_ALIGN((ulong)__bss_start+0x10000+(1<<20)-1,(1<<20)); 556 rec = (struct bi_record *)_ALIGN((ulong)__bss_start+0x10000+(1<<20)-1,(1<<20));
563 if ( rec->tag != BI_FIRST ) 557 if ( rec->tag != BI_FIRST )
564 return NULL; 558 return NULL;
565 } 559 }
566 return rec; 560 return rec;
567 } 561 }
568 562
/*
 * Walk the bi_record list handed over by the boot loader and capture
 * the fields we care about: command line, initrd location, machine
 * type and memory size.  A NULL or malformed list is ignored.
 */
void parse_bootinfo(struct bi_record *rec)
{
	if (rec == NULL || rec->tag != BI_FIRST)
		return;
	while (rec->tag != BI_LAST) {
		ulong *data = rec->data;
		switch (rec->tag) {
		case BI_CMD_LINE:
			strlcpy(cmd_line, (void *)data, sizeof(cmd_line));
			break;
#ifdef CONFIG_BLK_DEV_INITRD
		case BI_INITRD:
			/* data[0] = physical start, data[1] = size */
			initrd_start = data[0] + KERNELBASE;
			initrd_end = data[0] + data[1] + KERNELBASE;
			break;
#endif /* CONFIG_BLK_DEV_INITRD */
#ifdef CONFIG_PPC_MULTIPLATFORM
		case BI_MACHTYPE:
			_machine = data[0];
			break;
#endif
		case BI_MEMSIZE:
			boot_mem_size = data[0];
			break;
		}
		/* Records are variable-sized; step by rec->size bytes. */
		rec = (struct bi_record *)((ulong)rec + rec->size);
	}
}
597 591
/*
 * Find out what kind of machine we're on and save any data we need
 * from the early boot process (devtree is copied on pmac by prom_init()).
 * This is called very early on the boot process, after a minimal
 * MMU environment has been set up but before MMU_init is called.
 */
void __init
machine_init(unsigned long r3, unsigned long r4, unsigned long r5,
	     unsigned long r6, unsigned long r7)
{
#ifdef CONFIG_CMDLINE
	/* Compiled-in command line; platform_init() may overwrite it
	 * with what the boot loader supplied. */
	strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line));
#endif /* CONFIG_CMDLINE */

	/* Install the default idle routine for the CPU family. */
#ifdef CONFIG_6xx
	ppc_md.power_save = ppc6xx_idle;
#endif
#ifdef CONFIG_POWER4
	ppc_md.power_save = power4_idle;
#endif

	platform_init(r3, r4, r5, r6, r7);

	if (ppc_md.progress)
		ppc_md.progress("id mach(): done", 0x200);
}
624 618
625 /* Checks "l2cr=xxxx" command-line option */ 619 /* Checks "l2cr=xxxx" command-line option */
626 int __init ppc_setup_l2cr(char *str) 620 int __init ppc_setup_l2cr(char *str)
627 { 621 {
628 if (cpu_has_feature(CPU_FTR_L2CR)) { 622 if (cpu_has_feature(CPU_FTR_L2CR)) {
629 unsigned long val = simple_strtoul(str, NULL, 0); 623 unsigned long val = simple_strtoul(str, NULL, 0);
630 printk(KERN_INFO "l2cr set to %lx\n", val); 624 printk(KERN_INFO "l2cr set to %lx\n", val);
631 _set_L2CR(0); /* force invalidate by disable cache */ 625 _set_L2CR(0); /* force invalidate by disable cache */
632 _set_L2CR(val); /* and enable it */ 626 _set_L2CR(val); /* and enable it */
633 } 627 }
634 return 1; 628 return 1;
635 } 629 }
636 __setup("l2cr=", ppc_setup_l2cr); 630 __setup("l2cr=", ppc_setup_l2cr);
637 631
638 #ifdef CONFIG_GENERIC_NVRAM 632 #ifdef CONFIG_GENERIC_NVRAM
639 633
640 /* Generic nvram hooks used by drivers/char/gen_nvram.c */ 634 /* Generic nvram hooks used by drivers/char/gen_nvram.c */
641 unsigned char nvram_read_byte(int addr) 635 unsigned char nvram_read_byte(int addr)
642 { 636 {
643 if (ppc_md.nvram_read_val) 637 if (ppc_md.nvram_read_val)
644 return ppc_md.nvram_read_val(addr); 638 return ppc_md.nvram_read_val(addr);
645 return 0xff; 639 return 0xff;
646 } 640 }
647 EXPORT_SYMBOL(nvram_read_byte); 641 EXPORT_SYMBOL(nvram_read_byte);
648 642
649 void nvram_write_byte(unsigned char val, int addr) 643 void nvram_write_byte(unsigned char val, int addr)
650 { 644 {
651 if (ppc_md.nvram_write_val) 645 if (ppc_md.nvram_write_val)
652 ppc_md.nvram_write_val(addr, val); 646 ppc_md.nvram_write_val(addr, val);
653 } 647 }
654 EXPORT_SYMBOL(nvram_write_byte); 648 EXPORT_SYMBOL(nvram_write_byte);
655 649
/* Flush pending NVRAM writes, if the platform provides a sync hook. */
void nvram_sync(void)
{
	if (ppc_md.nvram_sync)
		ppc_md.nvram_sync();
}
EXPORT_SYMBOL(nvram_sync);
662 656
663 #endif /* CONFIG_NVRAM */ 657 #endif /* CONFIG_NVRAM */
664 658
665 static struct cpu cpu_devices[NR_CPUS]; 659 static struct cpu cpu_devices[NR_CPUS];
666 660
667 int __init ppc_init(void) 661 int __init ppc_init(void)
668 { 662 {
669 int i; 663 int i;
670 664
671 /* clear the progress line */ 665 /* clear the progress line */
672 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff); 666 if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
673 667
674 /* register CPU devices */ 668 /* register CPU devices */
675 for (i = 0; i < NR_CPUS; i++) 669 for (i = 0; i < NR_CPUS; i++)
676 if (cpu_possible(i)) 670 if (cpu_possible(i))
677 register_cpu(&cpu_devices[i], i, NULL); 671 register_cpu(&cpu_devices[i], i, NULL);
678 672
679 /* call platform init */ 673 /* call platform init */
680 if (ppc_md.init != NULL) { 674 if (ppc_md.init != NULL) {
681 ppc_md.init(); 675 ppc_md.init();
682 } 676 }
683 return 0; 677 return 0;
684 } 678 }
685 679
686 arch_initcall(ppc_init); 680 arch_initcall(ppc_init);
687 681
688 /* Warning, IO base is not yet inited */ 682 /* Warning, IO base is not yet inited */
689 void __init setup_arch(char **cmdline_p) 683 void __init setup_arch(char **cmdline_p)
690 { 684 {
691 extern char *klimit; 685 extern char *klimit;
692 extern void do_init_bootmem(void); 686 extern void do_init_bootmem(void);
693 687
694 /* so udelay does something sensible, assume <= 1000 bogomips */ 688 /* so udelay does something sensible, assume <= 1000 bogomips */
695 loops_per_jiffy = 500000000 / HZ; 689 loops_per_jiffy = 500000000 / HZ;
696 690
697 #ifdef CONFIG_PPC_MULTIPLATFORM 691 #ifdef CONFIG_PPC_MULTIPLATFORM
698 /* This could be called "early setup arch", it must be done 692 /* This could be called "early setup arch", it must be done
699 * now because xmon need it 693 * now because xmon need it
700 */ 694 */
701 if (_machine == _MACH_Pmac) 695 if (_machine == _MACH_Pmac)
702 pmac_feature_init(); /* New cool way */ 696 pmac_feature_init(); /* New cool way */
703 #endif 697 #endif
704 698
705 #ifdef CONFIG_XMON 699 #ifdef CONFIG_XMON
706 xmon_map_scc(); 700 xmon_map_scc();
707 if (strstr(cmd_line, "xmon")) 701 if (strstr(cmd_line, "xmon"))
708 xmon(NULL); 702 xmon(NULL);
709 #endif /* CONFIG_XMON */ 703 #endif /* CONFIG_XMON */
710 if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab); 704 if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
711 705
712 #if defined(CONFIG_KGDB) 706 #if defined(CONFIG_KGDB)
713 if (ppc_md.kgdb_map_scc) 707 if (ppc_md.kgdb_map_scc)
714 ppc_md.kgdb_map_scc(); 708 ppc_md.kgdb_map_scc();
715 set_debug_traps(); 709 set_debug_traps();
716 if (strstr(cmd_line, "gdb")) { 710 if (strstr(cmd_line, "gdb")) {
717 if (ppc_md.progress) 711 if (ppc_md.progress)
718 ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000); 712 ppc_md.progress("setup_arch: kgdb breakpoint", 0x4000);
719 printk("kgdb breakpoint activated\n"); 713 printk("kgdb breakpoint activated\n");
720 breakpoint(); 714 breakpoint();
721 } 715 }
722 #endif 716 #endif
723 717
724 /* 718 /*
725 * Set cache line size based on type of cpu as a default. 719 * Set cache line size based on type of cpu as a default.
726 * Systems with OF can look in the properties on the cpu node(s) 720 * Systems with OF can look in the properties on the cpu node(s)
727 * for a possibly more accurate value. 721 * for a possibly more accurate value.
728 */ 722 */
729 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) { 723 if (cpu_has_feature(CPU_FTR_SPLIT_ID_CACHE)) {
730 dcache_bsize = cur_cpu_spec[0]->dcache_bsize; 724 dcache_bsize = cur_cpu_spec[0]->dcache_bsize;
731 icache_bsize = cur_cpu_spec[0]->icache_bsize; 725 icache_bsize = cur_cpu_spec[0]->icache_bsize;
732 ucache_bsize = 0; 726 ucache_bsize = 0;
733 } else 727 } else
734 ucache_bsize = dcache_bsize = icache_bsize 728 ucache_bsize = dcache_bsize = icache_bsize
735 = cur_cpu_spec[0]->dcache_bsize; 729 = cur_cpu_spec[0]->dcache_bsize;
736 730
737 /* reboot on panic */ 731 /* reboot on panic */
738 panic_timeout = 180; 732 panic_timeout = 180;
739 733
740 init_mm.start_code = PAGE_OFFSET; 734 init_mm.start_code = PAGE_OFFSET;
741 init_mm.end_code = (unsigned long) _etext; 735 init_mm.end_code = (unsigned long) _etext;
742 init_mm.end_data = (unsigned long) _edata; 736 init_mm.end_data = (unsigned long) _edata;
743 init_mm.brk = (unsigned long) klimit; 737 init_mm.brk = (unsigned long) klimit;
744 738
745 /* Save unparsed command line copy for /proc/cmdline */ 739 /* Save unparsed command line copy for /proc/cmdline */
746 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); 740 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
747 *cmdline_p = cmd_line; 741 *cmdline_p = cmd_line;
748 742
749 parse_early_param(); 743 parse_early_param();
750 744
751 /* set up the bootmem stuff with available memory */ 745 /* set up the bootmem stuff with available memory */
752 do_init_bootmem(); 746 do_init_bootmem();
753 if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab); 747 if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
754 748
755 #ifdef CONFIG_PPC_OCP 749 #ifdef CONFIG_PPC_OCP
756 /* Initialize OCP device list */ 750 /* Initialize OCP device list */
757 ocp_early_init(); 751 ocp_early_init();
758 if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab); 752 if ( ppc_md.progress ) ppc_md.progress("ocp: exit", 0x3eab);
759 #endif 753 #endif
760 754
761 #ifdef CONFIG_DUMMY_CONSOLE 755 #ifdef CONFIG_DUMMY_CONSOLE
762 conswitchp = &dummy_con; 756 conswitchp = &dummy_con;
763 #endif 757 #endif
764 758
765 ppc_md.setup_arch(); 759 ppc_md.setup_arch();
766 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); 760 if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
767 761
768 paging_init(); 762 paging_init();
769 763
770 /* this is for modules since _machine can be a define -- Cort */ 764 /* this is for modules since _machine can be a define -- Cort */
771 ppc_md.ppc_machine = _machine; 765 ppc_md.ppc_machine = _machine;
772 } 766 }
773 767
arch/ppc64/kernel/setup.c
1 /* 1 /*
2 * 2 *
3 * Common boot and setup code. 3 * Common boot and setup code.
4 * 4 *
5 * Copyright (C) 2001 PPC64 Team, IBM Corp 5 * Copyright (C) 2001 PPC64 Team, IBM Corp
6 * 6 *
7 * This program is free software; you can redistribute it and/or 7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License 8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 */ 11 */
12 12
13 #undef DEBUG 13 #undef DEBUG
14 14
15 #include <linux/config.h> 15 #include <linux/config.h>
16 #include <linux/module.h> 16 #include <linux/module.h>
17 #include <linux/string.h> 17 #include <linux/string.h>
18 #include <linux/sched.h> 18 #include <linux/sched.h>
19 #include <linux/init.h> 19 #include <linux/init.h>
20 #include <linux/kernel.h> 20 #include <linux/kernel.h>
21 #include <linux/reboot.h> 21 #include <linux/reboot.h>
22 #include <linux/delay.h> 22 #include <linux/delay.h>
23 #include <linux/initrd.h> 23 #include <linux/initrd.h>
24 #include <linux/ide.h> 24 #include <linux/ide.h>
25 #include <linux/seq_file.h> 25 #include <linux/seq_file.h>
26 #include <linux/ioport.h> 26 #include <linux/ioport.h>
27 #include <linux/console.h> 27 #include <linux/console.h>
28 #include <linux/version.h> 28 #include <linux/version.h>
29 #include <linux/tty.h> 29 #include <linux/tty.h>
30 #include <linux/root_dev.h> 30 #include <linux/root_dev.h>
31 #include <linux/notifier.h> 31 #include <linux/notifier.h>
32 #include <linux/cpu.h> 32 #include <linux/cpu.h>
33 #include <linux/unistd.h> 33 #include <linux/unistd.h>
34 #include <linux/serial.h> 34 #include <linux/serial.h>
35 #include <linux/serial_8250.h> 35 #include <linux/serial_8250.h>
36 #include <asm/io.h> 36 #include <asm/io.h>
37 #include <asm/prom.h> 37 #include <asm/prom.h>
38 #include <asm/processor.h> 38 #include <asm/processor.h>
39 #include <asm/pgtable.h> 39 #include <asm/pgtable.h>
40 #include <asm/bootinfo.h> 40 #include <asm/bootinfo.h>
41 #include <asm/smp.h> 41 #include <asm/smp.h>
42 #include <asm/elf.h> 42 #include <asm/elf.h>
43 #include <asm/machdep.h> 43 #include <asm/machdep.h>
44 #include <asm/paca.h> 44 #include <asm/paca.h>
45 #include <asm/ppcdebug.h> 45 #include <asm/ppcdebug.h>
46 #include <asm/time.h> 46 #include <asm/time.h>
47 #include <asm/cputable.h> 47 #include <asm/cputable.h>
48 #include <asm/sections.h> 48 #include <asm/sections.h>
49 #include <asm/btext.h> 49 #include <asm/btext.h>
50 #include <asm/nvram.h> 50 #include <asm/nvram.h>
51 #include <asm/setup.h> 51 #include <asm/setup.h>
52 #include <asm/system.h> 52 #include <asm/system.h>
53 #include <asm/rtas.h> 53 #include <asm/rtas.h>
54 #include <asm/iommu.h> 54 #include <asm/iommu.h>
55 #include <asm/serial.h> 55 #include <asm/serial.h>
56 #include <asm/cache.h> 56 #include <asm/cache.h>
57 #include <asm/page.h> 57 #include <asm/page.h>
58 #include <asm/mmu.h> 58 #include <asm/mmu.h>
59 #include <asm/lmb.h> 59 #include <asm/lmb.h>
60 #include <asm/iSeries/ItLpNaca.h> 60 #include <asm/iSeries/ItLpNaca.h>
61 61
62 #ifdef DEBUG 62 #ifdef DEBUG
63 #define DBG(fmt...) udbg_printf(fmt) 63 #define DBG(fmt...) udbg_printf(fmt)
64 #else 64 #else
65 #define DBG(fmt...) 65 #define DBG(fmt...)
66 #endif 66 #endif
67 67
68 /* 68 /*
69 * Here are some early debugging facilities. You can enable one 69 * Here are some early debugging facilities. You can enable one
70 * but your kernel will not boot on anything else if you do so 70 * but your kernel will not boot on anything else if you do so
71 */ 71 */
72 72
73 /* This one is for use on LPAR machines that support an HVC console 73 /* This one is for use on LPAR machines that support an HVC console
74 * on vterm 0 74 * on vterm 0
75 */ 75 */
76 extern void udbg_init_debug_lpar(void); 76 extern void udbg_init_debug_lpar(void);
77 /* This one is for use on Apple G5 machines 77 /* This one is for use on Apple G5 machines
78 */ 78 */
79 extern void udbg_init_pmac_realmode(void); 79 extern void udbg_init_pmac_realmode(void);
80 /* That's RTAS panel debug */ 80 /* That's RTAS panel debug */
81 extern void call_rtas_display_status_delay(unsigned char c); 81 extern void call_rtas_display_status_delay(unsigned char c);
82 /* Here's maple real mode debug */ 82 /* Here's maple real mode debug */
83 extern void udbg_init_maple_realmode(void); 83 extern void udbg_init_maple_realmode(void);
84 84
85 #define EARLY_DEBUG_INIT() do {} while(0) 85 #define EARLY_DEBUG_INIT() do {} while(0)
86 86
87 #if 0 87 #if 0
88 #define EARLY_DEBUG_INIT() udbg_init_debug_lpar() 88 #define EARLY_DEBUG_INIT() udbg_init_debug_lpar()
89 #define EARLY_DEBUG_INIT() udbg_init_maple_realmode() 89 #define EARLY_DEBUG_INIT() udbg_init_maple_realmode()
90 #define EARLY_DEBUG_INIT() udbg_init_pmac_realmode() 90 #define EARLY_DEBUG_INIT() udbg_init_pmac_realmode()
91 #define EARLY_DEBUG_INIT() \ 91 #define EARLY_DEBUG_INIT() \
92 do { ppc_md.udbg_putc = call_rtas_display_status_delay; } while(0) 92 do { ppc_md.udbg_putc = call_rtas_display_status_delay; } while(0)
93 #endif 93 #endif
94 94
95 /* extern void *stab; */ 95 /* extern void *stab; */
96 extern unsigned long klimit; 96 extern unsigned long klimit;
97 97
98 extern void mm_init_ppc64(void); 98 extern void mm_init_ppc64(void);
99 extern void stab_initialize(unsigned long stab); 99 extern void stab_initialize(unsigned long stab);
100 extern void htab_initialize(void); 100 extern void htab_initialize(void);
101 extern void early_init_devtree(void *flat_dt); 101 extern void early_init_devtree(void *flat_dt);
102 extern void unflatten_device_tree(void); 102 extern void unflatten_device_tree(void);
103 103
104 extern void smp_release_cpus(void); 104 extern void smp_release_cpus(void);
105 105
106 int have_of = 1; 106 int have_of = 1;
107 int boot_cpuid = 0; 107 int boot_cpuid = 0;
108 int boot_cpuid_phys = 0; 108 int boot_cpuid_phys = 0;
109 dev_t boot_dev; 109 dev_t boot_dev;
110 u64 ppc64_pft_size; 110 u64 ppc64_pft_size;
111 u64 ppc64_debug_switch; 111 u64 ppc64_debug_switch;
112 112
113 struct ppc64_caches ppc64_caches; 113 struct ppc64_caches ppc64_caches;
114 EXPORT_SYMBOL_GPL(ppc64_caches); 114 EXPORT_SYMBOL_GPL(ppc64_caches);
115 115
116 /* 116 /*
117 * These are used in binfmt_elf.c to put aux entries on the stack 117 * These are used in binfmt_elf.c to put aux entries on the stack
118 * for each elf executable being started. 118 * for each elf executable being started.
119 */ 119 */
120 int dcache_bsize; 120 int dcache_bsize;
121 int icache_bsize; 121 int icache_bsize;
122 int ucache_bsize; 122 int ucache_bsize;
123 123
124 /* The main machine-dep calls structure 124 /* The main machine-dep calls structure
125 */ 125 */
126 struct machdep_calls ppc_md; 126 struct machdep_calls ppc_md;
127 EXPORT_SYMBOL(ppc_md); 127 EXPORT_SYMBOL(ppc_md);
128 128
129 #ifdef CONFIG_MAGIC_SYSRQ 129 #ifdef CONFIG_MAGIC_SYSRQ
130 unsigned long SYSRQ_KEY; 130 unsigned long SYSRQ_KEY;
131 #endif /* CONFIG_MAGIC_SYSRQ */ 131 #endif /* CONFIG_MAGIC_SYSRQ */
132 132
133 133
134 static int ppc64_panic_event(struct notifier_block *, unsigned long, void *); 134 static int ppc64_panic_event(struct notifier_block *, unsigned long, void *);
135 static struct notifier_block ppc64_panic_block = { 135 static struct notifier_block ppc64_panic_block = {
136 .notifier_call = ppc64_panic_event, 136 .notifier_call = ppc64_panic_event,
137 .priority = INT_MIN /* may not return; must be done last */ 137 .priority = INT_MIN /* may not return; must be done last */
138 }; 138 };
139 139
140 /* 140 /*
141 * Perhaps we can put the pmac screen_info[] here 141 * Perhaps we can put the pmac screen_info[] here
142 * on pmac as well so we don't need the ifdef's. 142 * on pmac as well so we don't need the ifdef's.
143 * Until we get multiple-console support in here 143 * Until we get multiple-console support in here
144 * that is. -- Cort 144 * that is. -- Cort
145 * Maybe tie it to serial consoles, since this is really what 145 * Maybe tie it to serial consoles, since this is really what
146 * these processors use on existing boards. -- Dan 146 * these processors use on existing boards. -- Dan
147 */ 147 */
148 struct screen_info screen_info = { 148 struct screen_info screen_info = {
149 .orig_x = 0, 149 .orig_x = 0,
150 .orig_y = 25, 150 .orig_y = 25,
151 .orig_video_cols = 80, 151 .orig_video_cols = 80,
152 .orig_video_lines = 25, 152 .orig_video_lines = 25,
153 .orig_video_isVGA = 1, 153 .orig_video_isVGA = 1,
154 .orig_video_points = 16 154 .orig_video_points = 16
155 }; 155 };
156 156
157 /* 157 /*
158 * Initialize the PPCDBG state. Called before relocation has been enabled. 158 * Initialize the PPCDBG state. Called before relocation has been enabled.
159 */ 159 */
160 void __init ppcdbg_initialize(void) 160 void __init ppcdbg_initialize(void)
161 { 161 {
162 ppc64_debug_switch = PPC_DEBUG_DEFAULT; /* | PPCDBG_BUSWALK | */ 162 ppc64_debug_switch = PPC_DEBUG_DEFAULT; /* | PPCDBG_BUSWALK | */
163 /* PPCDBG_PHBINIT | PPCDBG_MM | PPCDBG_MMINIT | PPCDBG_TCEINIT | PPCDBG_TCE */; 163 /* PPCDBG_PHBINIT | PPCDBG_MM | PPCDBG_MMINIT | PPCDBG_TCEINIT | PPCDBG_TCE */;
164 } 164 }
165 165
166 /* 166 /*
167 * Early boot console based on udbg 167 * Early boot console based on udbg
168 */ 168 */
169 static struct console udbg_console = { 169 static struct console udbg_console = {
170 .name = "udbg", 170 .name = "udbg",
171 .write = udbg_console_write, 171 .write = udbg_console_write,
172 .flags = CON_PRINTBUFFER, 172 .flags = CON_PRINTBUFFER,
173 .index = -1, 173 .index = -1,
174 }; 174 };
175 static int early_console_initialized; 175 static int early_console_initialized;
176 176
177 void __init disable_early_printk(void) 177 void __init disable_early_printk(void)
178 { 178 {
179 if (!early_console_initialized) 179 if (!early_console_initialized)
180 return; 180 return;
181 unregister_console(&udbg_console); 181 unregister_console(&udbg_console);
182 early_console_initialized = 0; 182 early_console_initialized = 0;
183 } 183 }
184 184
185 #if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) 185 #if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP)
186 186
187 static int smt_enabled_cmdline; 187 static int smt_enabled_cmdline;
188 188
189 /* Look for ibm,smt-enabled OF option */ 189 /* Look for ibm,smt-enabled OF option */
190 static void check_smt_enabled(void) 190 static void check_smt_enabled(void)
191 { 191 {
192 struct device_node *dn; 192 struct device_node *dn;
193 char *smt_option; 193 char *smt_option;
194 194
195 /* Allow the command line to overrule the OF option */ 195 /* Allow the command line to overrule the OF option */
196 if (smt_enabled_cmdline) 196 if (smt_enabled_cmdline)
197 return; 197 return;
198 198
199 dn = of_find_node_by_path("/options"); 199 dn = of_find_node_by_path("/options");
200 200
201 if (dn) { 201 if (dn) {
202 smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL); 202 smt_option = (char *)get_property(dn, "ibm,smt-enabled", NULL);
203 203
204 if (smt_option) { 204 if (smt_option) {
205 if (!strcmp(smt_option, "on")) 205 if (!strcmp(smt_option, "on"))
206 smt_enabled_at_boot = 1; 206 smt_enabled_at_boot = 1;
207 else if (!strcmp(smt_option, "off")) 207 else if (!strcmp(smt_option, "off"))
208 smt_enabled_at_boot = 0; 208 smt_enabled_at_boot = 0;
209 } 209 }
210 } 210 }
211 } 211 }
212 212
213 /* Look for smt-enabled= cmdline option */ 213 /* Look for smt-enabled= cmdline option */
214 static int __init early_smt_enabled(char *p) 214 static int __init early_smt_enabled(char *p)
215 { 215 {
216 smt_enabled_cmdline = 1; 216 smt_enabled_cmdline = 1;
217 217
218 if (!p) 218 if (!p)
219 return 0; 219 return 0;
220 220
221 if (!strcmp(p, "on") || !strcmp(p, "1")) 221 if (!strcmp(p, "on") || !strcmp(p, "1"))
222 smt_enabled_at_boot = 1; 222 smt_enabled_at_boot = 1;
223 else if (!strcmp(p, "off") || !strcmp(p, "0")) 223 else if (!strcmp(p, "off") || !strcmp(p, "0"))
224 smt_enabled_at_boot = 0; 224 smt_enabled_at_boot = 0;
225 225
226 return 0; 226 return 0;
227 } 227 }
228 early_param("smt-enabled", early_smt_enabled); 228 early_param("smt-enabled", early_smt_enabled);
229 229
230 /** 230 /**
231 * setup_cpu_maps - initialize the following cpu maps: 231 * setup_cpu_maps - initialize the following cpu maps:
232 * cpu_possible_map 232 * cpu_possible_map
233 * cpu_present_map 233 * cpu_present_map
234 * cpu_sibling_map 234 * cpu_sibling_map
235 * 235 *
236 * Having the possible map set up early allows us to restrict allocations 236 * Having the possible map set up early allows us to restrict allocations
237 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. 237 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
238 * 238 *
239 * We do not initialize the online map here; cpus set their own bits in 239 * We do not initialize the online map here; cpus set their own bits in
240 * cpu_online_map as they come up. 240 * cpu_online_map as they come up.
241 * 241 *
242 * This function is valid only for Open Firmware systems. finish_device_tree 242 * This function is valid only for Open Firmware systems. finish_device_tree
243 * must be called before using this. 243 * must be called before using this.
244 * 244 *
245 * While we're here, we may as well set the "physical" cpu ids in the paca. 245 * While we're here, we may as well set the "physical" cpu ids in the paca.
246 */ 246 */
247 static void __init setup_cpu_maps(void) 247 static void __init setup_cpu_maps(void)
248 { 248 {
249 struct device_node *dn = NULL; 249 struct device_node *dn = NULL;
250 int cpu = 0; 250 int cpu = 0;
251 int swap_cpuid = 0; 251 int swap_cpuid = 0;
252 252
253 check_smt_enabled(); 253 check_smt_enabled();
254 254
255 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { 255 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
256 u32 *intserv; 256 u32 *intserv;
257 int j, len = sizeof(u32), nthreads; 257 int j, len = sizeof(u32), nthreads;
258 258
259 intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s", 259 intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
260 &len); 260 &len);
261 if (!intserv) 261 if (!intserv)
262 intserv = (u32 *)get_property(dn, "reg", NULL); 262 intserv = (u32 *)get_property(dn, "reg", NULL);
263 263
264 nthreads = len / sizeof(u32); 264 nthreads = len / sizeof(u32);
265 265
266 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { 266 for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
267 cpu_set(cpu, cpu_present_map); 267 cpu_set(cpu, cpu_present_map);
268 set_hard_smp_processor_id(cpu, intserv[j]); 268 set_hard_smp_processor_id(cpu, intserv[j]);
269 269
270 if (intserv[j] == boot_cpuid_phys) 270 if (intserv[j] == boot_cpuid_phys)
271 swap_cpuid = cpu; 271 swap_cpuid = cpu;
272 cpu_set(cpu, cpu_possible_map); 272 cpu_set(cpu, cpu_possible_map);
273 cpu++; 273 cpu++;
274 } 274 }
275 } 275 }
276 276
277 /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that 277 /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
278 * boot cpu is logical 0. 278 * boot cpu is logical 0.
279 */ 279 */
280 if (boot_cpuid_phys != get_hard_smp_processor_id(0)) { 280 if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
281 u32 tmp; 281 u32 tmp;
282 tmp = get_hard_smp_processor_id(0); 282 tmp = get_hard_smp_processor_id(0);
283 set_hard_smp_processor_id(0, boot_cpuid_phys); 283 set_hard_smp_processor_id(0, boot_cpuid_phys);
284 set_hard_smp_processor_id(swap_cpuid, tmp); 284 set_hard_smp_processor_id(swap_cpuid, tmp);
285 } 285 }
286 286
287 /* 287 /*
288 * On pSeries LPAR, we need to know how many cpus 288 * On pSeries LPAR, we need to know how many cpus
289 * could possibly be added to this partition. 289 * could possibly be added to this partition.
290 */ 290 */
291 if (systemcfg->platform == PLATFORM_PSERIES_LPAR && 291 if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
292 (dn = of_find_node_by_path("/rtas"))) { 292 (dn = of_find_node_by_path("/rtas"))) {
293 int num_addr_cell, num_size_cell, maxcpus; 293 int num_addr_cell, num_size_cell, maxcpus;
294 unsigned int *ireg; 294 unsigned int *ireg;
295 295
296 num_addr_cell = prom_n_addr_cells(dn); 296 num_addr_cell = prom_n_addr_cells(dn);
297 num_size_cell = prom_n_size_cells(dn); 297 num_size_cell = prom_n_size_cells(dn);
298 298
299 ireg = (unsigned int *) 299 ireg = (unsigned int *)
300 get_property(dn, "ibm,lrdr-capacity", NULL); 300 get_property(dn, "ibm,lrdr-capacity", NULL);
301 301
302 if (!ireg) 302 if (!ireg)
303 goto out; 303 goto out;
304 304
305 maxcpus = ireg[num_addr_cell + num_size_cell]; 305 maxcpus = ireg[num_addr_cell + num_size_cell];
306 306
307 /* Double maxcpus for processors which have SMT capability */ 307 /* Double maxcpus for processors which have SMT capability */
308 if (cpu_has_feature(CPU_FTR_SMT)) 308 if (cpu_has_feature(CPU_FTR_SMT))
309 maxcpus *= 2; 309 maxcpus *= 2;
310 310
311 if (maxcpus > NR_CPUS) { 311 if (maxcpus > NR_CPUS) {
312 printk(KERN_WARNING 312 printk(KERN_WARNING
313 "Partition configured for %d cpus, " 313 "Partition configured for %d cpus, "
314 "operating system maximum is %d.\n", 314 "operating system maximum is %d.\n",
315 maxcpus, NR_CPUS); 315 maxcpus, NR_CPUS);
316 maxcpus = NR_CPUS; 316 maxcpus = NR_CPUS;
317 } else 317 } else
318 printk(KERN_INFO "Partition configured for %d cpus.\n", 318 printk(KERN_INFO "Partition configured for %d cpus.\n",
319 maxcpus); 319 maxcpus);
320 320
321 for (cpu = 0; cpu < maxcpus; cpu++) 321 for (cpu = 0; cpu < maxcpus; cpu++)
322 cpu_set(cpu, cpu_possible_map); 322 cpu_set(cpu, cpu_possible_map);
323 out: 323 out:
324 of_node_put(dn); 324 of_node_put(dn);
325 } 325 }
326 326
327 /* 327 /*
328 * Do the sibling map; assume only two threads per processor. 328 * Do the sibling map; assume only two threads per processor.
329 */ 329 */
330 for_each_cpu(cpu) { 330 for_each_cpu(cpu) {
331 cpu_set(cpu, cpu_sibling_map[cpu]); 331 cpu_set(cpu, cpu_sibling_map[cpu]);
332 if (cpu_has_feature(CPU_FTR_SMT)) 332 if (cpu_has_feature(CPU_FTR_SMT))
333 cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]); 333 cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
334 } 334 }
335 335
336 systemcfg->processorCount = num_present_cpus(); 336 systemcfg->processorCount = num_present_cpus();
337 } 337 }
338 #endif /* defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) */ 338 #endif /* defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_SMP) */
339 339
340 340
341 #ifdef CONFIG_PPC_MULTIPLATFORM 341 #ifdef CONFIG_PPC_MULTIPLATFORM
342 342
343 extern struct machdep_calls pSeries_md; 343 extern struct machdep_calls pSeries_md;
344 extern struct machdep_calls pmac_md; 344 extern struct machdep_calls pmac_md;
345 extern struct machdep_calls maple_md; 345 extern struct machdep_calls maple_md;
346 extern struct machdep_calls bpa_md; 346 extern struct machdep_calls bpa_md;
347 347
348 /* Ultimately, stuff them in an elf section like initcalls... */ 348 /* Ultimately, stuff them in an elf section like initcalls... */
349 static struct machdep_calls __initdata *machines[] = { 349 static struct machdep_calls __initdata *machines[] = {
350 #ifdef CONFIG_PPC_PSERIES 350 #ifdef CONFIG_PPC_PSERIES
351 &pSeries_md, 351 &pSeries_md,
352 #endif /* CONFIG_PPC_PSERIES */ 352 #endif /* CONFIG_PPC_PSERIES */
353 #ifdef CONFIG_PPC_PMAC 353 #ifdef CONFIG_PPC_PMAC
354 &pmac_md, 354 &pmac_md,
355 #endif /* CONFIG_PPC_PMAC */ 355 #endif /* CONFIG_PPC_PMAC */
356 #ifdef CONFIG_PPC_MAPLE 356 #ifdef CONFIG_PPC_MAPLE
357 &maple_md, 357 &maple_md,
358 #endif /* CONFIG_PPC_MAPLE */ 358 #endif /* CONFIG_PPC_MAPLE */
359 #ifdef CONFIG_PPC_BPA 359 #ifdef CONFIG_PPC_BPA
360 &bpa_md, 360 &bpa_md,
361 #endif 361 #endif
362 NULL 362 NULL
363 }; 363 };
364 364
365 /* 365 /*
366 * Early initialization entry point. This is called by head.S 366 * Early initialization entry point. This is called by head.S
367 * with MMU translation disabled. We rely on the "feature" of 367 * with MMU translation disabled. We rely on the "feature" of
368 * the CPU that ignores the top 2 bits of the address in real 368 * the CPU that ignores the top 2 bits of the address in real
369 * mode so we can access kernel globals normally provided we 369 * mode so we can access kernel globals normally provided we
370 * only toy with things in the RMO region. From here, we do 370 * only toy with things in the RMO region. From here, we do
371 * some early parsing of the device-tree to setup out LMB 371 * some early parsing of the device-tree to setup out LMB
372 * data structures, and allocate & initialize the hash table 372 * data structures, and allocate & initialize the hash table
373 * and segment tables so we can start running with translation 373 * and segment tables so we can start running with translation
374 * enabled. 374 * enabled.
375 * 375 *
376 * It is this function which will call the probe() callback of 376 * It is this function which will call the probe() callback of
377 * the various platform types and copy the matching one to the 377 * the various platform types and copy the matching one to the
378 * global ppc_md structure. Your platform can eventually do 378 * global ppc_md structure. Your platform can eventually do
379 * some very early initializations from the probe() routine, but 379 * some very early initializations from the probe() routine, but
380 * this is not recommended, be very careful as, for example, the 380 * this is not recommended, be very careful as, for example, the
381 * device-tree is not accessible via normal means at this point. 381 * device-tree is not accessible via normal means at this point.
382 */ 382 */
383 383
384 void __init early_setup(unsigned long dt_ptr) 384 void __init early_setup(unsigned long dt_ptr)
385 { 385 {
386 struct paca_struct *lpaca = get_paca(); 386 struct paca_struct *lpaca = get_paca();
387 static struct machdep_calls **mach; 387 static struct machdep_calls **mach;
388 388
389 /* 389 /*
390 * Enable early debugging if any specified (see top of 390 * Enable early debugging if any specified (see top of
391 * this file) 391 * this file)
392 */ 392 */
393 EARLY_DEBUG_INIT(); 393 EARLY_DEBUG_INIT();
394 394
395 DBG(" -> early_setup()\n"); 395 DBG(" -> early_setup()\n");
396 396
397 /* 397 /*
398 * Fill the default DBG level (do we want to keep 398 * Fill the default DBG level (do we want to keep
399 * that old mecanism around forever ?) 399 * that old mecanism around forever ?)
400 */ 400 */
401 ppcdbg_initialize(); 401 ppcdbg_initialize();
402 402
403 /* 403 /*
404 * Do early initializations using the flattened device 404 * Do early initializations using the flattened device
405 * tree, like retreiving the physical memory map or 405 * tree, like retreiving the physical memory map or
406 * calculating/retreiving the hash table size 406 * calculating/retreiving the hash table size
407 */ 407 */
408 early_init_devtree(__va(dt_ptr)); 408 early_init_devtree(__va(dt_ptr));
409 409
410 /* 410 /*
411 * Iterate all ppc_md structures until we find the proper 411 * Iterate all ppc_md structures until we find the proper
412 * one for the current machine type 412 * one for the current machine type
413 */ 413 */
414 DBG("Probing machine type for platform %x...\n", 414 DBG("Probing machine type for platform %x...\n",
415 systemcfg->platform); 415 systemcfg->platform);
416 416
417 for (mach = machines; *mach; mach++) { 417 for (mach = machines; *mach; mach++) {
418 if ((*mach)->probe(systemcfg->platform)) 418 if ((*mach)->probe(systemcfg->platform))
419 break; 419 break;
420 } 420 }
421 /* What can we do if we didn't find ? */ 421 /* What can we do if we didn't find ? */
422 if (*mach == NULL) { 422 if (*mach == NULL) {
423 DBG("No suitable machine found !\n"); 423 DBG("No suitable machine found !\n");
424 for (;;); 424 for (;;);
425 } 425 }
426 ppc_md = **mach; 426 ppc_md = **mach;
427 427
428 /* our udbg callbacks got overriden by the above, let's put them 428 /* our udbg callbacks got overriden by the above, let's put them
429 * back in. Ultimately, I want those things to be split from the 429 * back in. Ultimately, I want those things to be split from the
430 * main ppc_md 430 * main ppc_md
431 */ 431 */
432 EARLY_DEBUG_INIT(); 432 EARLY_DEBUG_INIT();
433 433
434 DBG("Found, Initializing memory management...\n"); 434 DBG("Found, Initializing memory management...\n");
435 435
436 /* 436 /*
437 * Initialize stab / SLB management 437 * Initialize stab / SLB management
438 */ 438 */
439 stab_initialize(lpaca->stab_real); 439 stab_initialize(lpaca->stab_real);
440 440
441 /* 441 /*
442 * Initialize the MMU Hash table and create the linear mapping 442 * Initialize the MMU Hash table and create the linear mapping
443 * of memory 443 * of memory
444 */ 444 */
445 htab_initialize(); 445 htab_initialize();
446 446
447 DBG(" <- early_setup()\n"); 447 DBG(" <- early_setup()\n");
448 } 448 }
449 449
450 450
451 /* 451 /*
452 * Initialize some remaining members of the ppc64_caches and systemcfg structures 452 * Initialize some remaining members of the ppc64_caches and systemcfg structures
453 * (at least until we get rid of them completely). This is mostly some 453 * (at least until we get rid of them completely). This is mostly some
454 * cache informations about the CPU that will be used by cache flush 454 * cache informations about the CPU that will be used by cache flush
455 * routines and/or provided to userland 455 * routines and/or provided to userland
456 */ 456 */
457 static void __init initialize_cache_info(void) 457 static void __init initialize_cache_info(void)
458 { 458 {
459 struct device_node *np; 459 struct device_node *np;
460 unsigned long num_cpus = 0; 460 unsigned long num_cpus = 0;
461 461
462 DBG(" -> initialize_cache_info()\n"); 462 DBG(" -> initialize_cache_info()\n");
463 463
464 for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) { 464 for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) {
465 num_cpus += 1; 465 num_cpus += 1;
466 466
467 /* We're assuming *all* of the CPUs have the same 467 /* We're assuming *all* of the CPUs have the same
468 * d-cache and i-cache sizes... -Peter 468 * d-cache and i-cache sizes... -Peter
469 */ 469 */
470 470
471 if ( num_cpus == 1 ) { 471 if ( num_cpus == 1 ) {
472 u32 *sizep, *lsizep; 472 u32 *sizep, *lsizep;
473 u32 size, lsize; 473 u32 size, lsize;
474 const char *dc, *ic; 474 const char *dc, *ic;
475 475
476 /* Then read cache informations */ 476 /* Then read cache informations */
477 if (systemcfg->platform == PLATFORM_POWERMAC) { 477 if (systemcfg->platform == PLATFORM_POWERMAC) {
478 dc = "d-cache-block-size"; 478 dc = "d-cache-block-size";
479 ic = "i-cache-block-size"; 479 ic = "i-cache-block-size";
480 } else { 480 } else {
481 dc = "d-cache-line-size"; 481 dc = "d-cache-line-size";
482 ic = "i-cache-line-size"; 482 ic = "i-cache-line-size";
483 } 483 }
484 484
485 size = 0; 485 size = 0;
486 lsize = cur_cpu_spec->dcache_bsize; 486 lsize = cur_cpu_spec->dcache_bsize;
487 sizep = (u32 *)get_property(np, "d-cache-size", NULL); 487 sizep = (u32 *)get_property(np, "d-cache-size", NULL);
488 if (sizep != NULL) 488 if (sizep != NULL)
489 size = *sizep; 489 size = *sizep;
490 lsizep = (u32 *) get_property(np, dc, NULL); 490 lsizep = (u32 *) get_property(np, dc, NULL);
491 if (lsizep != NULL) 491 if (lsizep != NULL)
492 lsize = *lsizep; 492 lsize = *lsizep;
493 if (sizep == 0 || lsizep == 0) 493 if (sizep == 0 || lsizep == 0)
494 DBG("Argh, can't find dcache properties ! " 494 DBG("Argh, can't find dcache properties ! "
495 "sizep: %p, lsizep: %p\n", sizep, lsizep); 495 "sizep: %p, lsizep: %p\n", sizep, lsizep);
496 496
497 systemcfg->dcache_size = ppc64_caches.dsize = size; 497 systemcfg->dcache_size = ppc64_caches.dsize = size;
498 systemcfg->dcache_line_size = 498 systemcfg->dcache_line_size =
499 ppc64_caches.dline_size = lsize; 499 ppc64_caches.dline_size = lsize;
500 ppc64_caches.log_dline_size = __ilog2(lsize); 500 ppc64_caches.log_dline_size = __ilog2(lsize);
501 ppc64_caches.dlines_per_page = PAGE_SIZE / lsize; 501 ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
502 502
503 size = 0; 503 size = 0;
504 lsize = cur_cpu_spec->icache_bsize; 504 lsize = cur_cpu_spec->icache_bsize;
505 sizep = (u32 *)get_property(np, "i-cache-size", NULL); 505 sizep = (u32 *)get_property(np, "i-cache-size", NULL);
506 if (sizep != NULL) 506 if (sizep != NULL)
507 size = *sizep; 507 size = *sizep;
508 lsizep = (u32 *)get_property(np, ic, NULL); 508 lsizep = (u32 *)get_property(np, ic, NULL);
509 if (lsizep != NULL) 509 if (lsizep != NULL)
510 lsize = *lsizep; 510 lsize = *lsizep;
511 if (sizep == 0 || lsizep == 0) 511 if (sizep == 0 || lsizep == 0)
512 DBG("Argh, can't find icache properties ! " 512 DBG("Argh, can't find icache properties ! "
513 "sizep: %p, lsizep: %p\n", sizep, lsizep); 513 "sizep: %p, lsizep: %p\n", sizep, lsizep);
514 514
515 systemcfg->icache_size = ppc64_caches.isize = size; 515 systemcfg->icache_size = ppc64_caches.isize = size;
516 systemcfg->icache_line_size = 516 systemcfg->icache_line_size =
517 ppc64_caches.iline_size = lsize; 517 ppc64_caches.iline_size = lsize;
518 ppc64_caches.log_iline_size = __ilog2(lsize); 518 ppc64_caches.log_iline_size = __ilog2(lsize);
519 ppc64_caches.ilines_per_page = PAGE_SIZE / lsize; 519 ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
520 } 520 }
521 } 521 }
522 522
523 /* Add an eye catcher and the systemcfg layout version number */ 523 /* Add an eye catcher and the systemcfg layout version number */
524 strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64"); 524 strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
525 systemcfg->version.major = SYSTEMCFG_MAJOR; 525 systemcfg->version.major = SYSTEMCFG_MAJOR;
526 systemcfg->version.minor = SYSTEMCFG_MINOR; 526 systemcfg->version.minor = SYSTEMCFG_MINOR;
527 systemcfg->processor = mfspr(SPRN_PVR); 527 systemcfg->processor = mfspr(SPRN_PVR);
528 528
529 DBG(" <- initialize_cache_info()\n"); 529 DBG(" <- initialize_cache_info()\n");
530 } 530 }
531 531
/*
 * Look for an initrd handed to us via the device-tree
 * (linux,initrd-start / linux,initrd-end under /chosen) and set up
 * initrd_start/initrd_end and ROOT_DEV accordingly.  No-op unless
 * CONFIG_BLK_DEV_INITRD is enabled.
 */
static void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	u64 *prop;

	DBG(" -> check_for_initrd()\n");

	prop = (u64 *)get_property(of_chosen, "linux,initrd-start", NULL);
	if (prop != NULL) {
		initrd_start = (unsigned long)__va(*prop);
		prop = (u64 *)get_property(of_chosen, "linux,initrd-end", NULL);
		if (prop != NULL) {
			initrd_end = (unsigned long)__va(*prop);
			initrd_below_start_ok = 1;
		} else
			/* Start without end is useless: discard it. */
			initrd_start = 0;
	}

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear initrd reference.
	 */
	if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}
565 565
566 #endif /* CONFIG_PPC_MULTIPLATFORM */ 566 #endif /* CONFIG_PPC_MULTIPLATFORM */
567 567
568 /* 568 /*
569 * Do some initial setup of the system. The parameters are those which 569 * Do some initial setup of the system. The parameters are those which
570 * were passed in from the bootloader. 570 * were passed in from the bootloader.
571 */ 571 */
572 void __init setup_system(void) 572 void __init setup_system(void)
573 { 573 {
574 DBG(" -> setup_system()\n"); 574 DBG(" -> setup_system()\n");
575 575
576 #ifdef CONFIG_PPC_ISERIES 576 #ifdef CONFIG_PPC_ISERIES
577 /* pSeries systems are identified in prom.c via OF. */ 577 /* pSeries systems are identified in prom.c via OF. */
578 if (itLpNaca.xLparInstalled == 1) 578 if (itLpNaca.xLparInstalled == 1)
579 systemcfg->platform = PLATFORM_ISERIES_LPAR; 579 systemcfg->platform = PLATFORM_ISERIES_LPAR;
580 580
581 ppc_md.init_early(); 581 ppc_md.init_early();
582 #else /* CONFIG_PPC_ISERIES */ 582 #else /* CONFIG_PPC_ISERIES */
583 583
584 /* 584 /*
585 * Unflatten the device-tree passed by prom_init or kexec 585 * Unflatten the device-tree passed by prom_init or kexec
586 */ 586 */
587 unflatten_device_tree(); 587 unflatten_device_tree();
588 588
589 /* 589 /*
590 * Fill the ppc64_caches & systemcfg structures with informations 590 * Fill the ppc64_caches & systemcfg structures with informations
591 * retreived from the device-tree. Need to be called before 591 * retreived from the device-tree. Need to be called before
592 * finish_device_tree() since the later requires some of the 592 * finish_device_tree() since the later requires some of the
593 * informations filled up here to properly parse the interrupt 593 * informations filled up here to properly parse the interrupt
594 * tree. 594 * tree.
595 * It also sets up the cache line sizes which allows to call 595 * It also sets up the cache line sizes which allows to call
596 * routines like flush_icache_range (used by the hash init 596 * routines like flush_icache_range (used by the hash init
597 * later on). 597 * later on).
598 */ 598 */
599 initialize_cache_info(); 599 initialize_cache_info();
600 600
601 #ifdef CONFIG_PPC_RTAS 601 #ifdef CONFIG_PPC_RTAS
602 /* 602 /*
603 * Initialize RTAS if available 603 * Initialize RTAS if available
604 */ 604 */
605 rtas_initialize(); 605 rtas_initialize();
606 #endif /* CONFIG_PPC_RTAS */ 606 #endif /* CONFIG_PPC_RTAS */
607 607
608 /* 608 /*
609 * Check if we have an initrd provided via the device-tree 609 * Check if we have an initrd provided via the device-tree
610 */ 610 */
611 check_for_initrd(); 611 check_for_initrd();
612 612
613 /* 613 /*
614 * Do some platform specific early initializations, that includes 614 * Do some platform specific early initializations, that includes
615 * setting up the hash table pointers. It also sets up some interrupt-mapping 615 * setting up the hash table pointers. It also sets up some interrupt-mapping
616 * related options that will be used by finish_device_tree() 616 * related options that will be used by finish_device_tree()
617 */ 617 */
618 ppc_md.init_early(); 618 ppc_md.init_early();
619 619
620 /* 620 /*
621 * "Finish" the device-tree, that is do the actual parsing of 621 * "Finish" the device-tree, that is do the actual parsing of
622 * some of the properties like the interrupt map 622 * some of the properties like the interrupt map
623 */ 623 */
624 finish_device_tree(); 624 finish_device_tree();
625 625
626 /* 626 /*
627 * Initialize xmon 627 * Initialize xmon
628 */ 628 */
629 #ifdef CONFIG_XMON_DEFAULT 629 #ifdef CONFIG_XMON_DEFAULT
630 xmon_init(); 630 xmon_init();
631 #endif 631 #endif
632 /* 632 /*
633 * Register early console 633 * Register early console
634 */ 634 */
635 early_console_initialized = 1; 635 early_console_initialized = 1;
636 register_console(&udbg_console); 636 register_console(&udbg_console);
637 637
638 /* Save unparsed command line copy for /proc/cmdline */ 638 /* Save unparsed command line copy for /proc/cmdline */
639 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); 639 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
640 640
641 parse_early_param(); 641 parse_early_param();
642 #endif /* !CONFIG_PPC_ISERIES */ 642 #endif /* !CONFIG_PPC_ISERIES */
643 643
644 #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) 644 #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES)
645 /* 645 /*
646 * iSeries has already initialized the cpu maps at this point. 646 * iSeries has already initialized the cpu maps at this point.
647 */ 647 */
648 setup_cpu_maps(); 648 setup_cpu_maps();
649 649
650 /* Release secondary cpus out of their spinloops at 0x60 now that 650 /* Release secondary cpus out of their spinloops at 0x60 now that
651 * we can map physical -> logical CPU ids 651 * we can map physical -> logical CPU ids
652 */ 652 */
653 smp_release_cpus(); 653 smp_release_cpus();
654 #endif /* defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) */ 654 #endif /* defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) */
655 655
656 printk("Starting Linux PPC64 %s\n", UTS_RELEASE); 656 printk("Starting Linux PPC64 %s\n", UTS_RELEASE);
657 657
658 printk("-----------------------------------------------------\n"); 658 printk("-----------------------------------------------------\n");
659 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size); 659 printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
660 printk("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch); 660 printk("ppc64_debug_switch = 0x%lx\n", ppc64_debug_switch);
661 printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller); 661 printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller);
662 printk("systemcfg = 0x%p\n", systemcfg); 662 printk("systemcfg = 0x%p\n", systemcfg);
663 printk("systemcfg->platform = 0x%x\n", systemcfg->platform); 663 printk("systemcfg->platform = 0x%x\n", systemcfg->platform);
664 printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount); 664 printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount);
665 printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize); 665 printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
666 printk("ppc64_caches.dcache_line_size = 0x%x\n", 666 printk("ppc64_caches.dcache_line_size = 0x%x\n",
667 ppc64_caches.dline_size); 667 ppc64_caches.dline_size);
668 printk("ppc64_caches.icache_line_size = 0x%x\n", 668 printk("ppc64_caches.icache_line_size = 0x%x\n",
669 ppc64_caches.iline_size); 669 ppc64_caches.iline_size);
670 printk("htab_address = 0x%p\n", htab_address); 670 printk("htab_address = 0x%p\n", htab_address);
671 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); 671 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
672 printk("-----------------------------------------------------\n"); 672 printk("-----------------------------------------------------\n");
673 673
674 mm_init_ppc64(); 674 mm_init_ppc64();
675 675
676 DBG(" <- setup_system()\n"); 676 DBG(" <- setup_system()\n");
677 } 677 }
678 678
679 /* also used by kexec */ 679 /* also used by kexec */
680 void machine_shutdown(void) 680 void machine_shutdown(void)
681 { 681 {
682 if (ppc_md.nvram_sync) 682 if (ppc_md.nvram_sync)
683 ppc_md.nvram_sync(); 683 ppc_md.nvram_sync();
684 } 684 }
685 685
/*
 * Reboot the machine via the platform hook.  Not exported: modules must
 * use kernel_restart()/emergency_restart() instead.
 *
 * NOTE(review): smp_send_stop() and the spin loop below only run if
 * ppc_md.restart() returns — confirm the hook is expected not to.
 */
void machine_restart(char *cmd)
{
	machine_shutdown();
	ppc_md.restart(cmd);
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}
698 697
/*
 * Power the machine down via the platform hook.  Not exported: modules
 * must use kernel_power_off() instead.  Falls through to a halted spin
 * loop if ppc_md.power_off() returns.
 */
void machine_power_off(void)
{
	machine_shutdown();
	ppc_md.power_off();
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}
711 709
/*
 * Halt the machine via the platform hook.  Not exported: modules must
 * use kernel_halt() instead.  Falls through to a halted spin loop if
 * ppc_md.halt() returns.
 */
void machine_halt(void)
{
	machine_shutdown();
	ppc_md.halt();
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}
724 721
725 static int ppc64_panic_event(struct notifier_block *this, 722 static int ppc64_panic_event(struct notifier_block *this,
726 unsigned long event, void *ptr) 723 unsigned long event, void *ptr)
727 { 724 {
728 ppc_md.panic((char *)ptr); /* May not return */ 725 ppc_md.panic((char *)ptr); /* May not return */
729 return NOTIFY_DONE; 726 return NOTIFY_DONE;
730 } 727 }
731 728
732 729
#ifdef CONFIG_SMP
/* Per-cpu copy of the Processor Version Register, read by show_cpuinfo().
 * NOTE(review): presumably filled during CPU bringup elsewhere — confirm. */
DEFINE_PER_CPU(unsigned int, pvr);
#endif
736 733
/*
 * /proc/cpuinfo .show callback.  The iterator token v encodes cpu_id+1;
 * the token one past the last cpu prints the global section (timebase
 * and platform info) instead of a per-cpu entry.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	unsigned int pvr;
	unsigned short maj;
	unsigned short min;

	if (cpu_id == NR_CPUS) {
		seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);

		if (ppc_md.get_cpuinfo != NULL)
			ppc_md.get_cpuinfo(m);

		return 0;
	}

	/* We only show online cpus: disable preempt (overzealous, I
	 * knew) to prevent cpu going down. */
	preempt_disable();
	if (!cpu_online(cpu_id)) {
		preempt_enable();
		return 0;
	}

#ifdef CONFIG_SMP
	pvr = per_cpu(pvr, cpu_id);
#else
	pvr = mfspr(SPRN_PVR);
#endif
	/* PVR layout: major revision in bits 15:8, minor in bits 7:0. */
	maj = (pvr >> 8) & 0xFF;
	min = pvr & 0xFF;

	seq_printf(m, "processor\t: %lu\n", cpu_id);
	seq_printf(m, "cpu\t\t: ");

	if (cur_cpu_spec->pvr_mask)
		seq_printf(m, "%s", cur_cpu_spec->cpu_name);
	else
		seq_printf(m, "unknown (%08x)", pvr);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		seq_printf(m, ", altivec supported");
#endif /* CONFIG_ALTIVEC */

	seq_printf(m, "\n");

	/*
	 * Assume here that all clock rates are the same in a
	 * smp system. -- Cort
	 */
	seq_printf(m, "clock\t\t: %lu.%06luMHz\n", ppc_proc_freq / 1000000,
		   ppc_proc_freq % 1000000);

	seq_printf(m, "revision\t: %hd.%hd\n\n", maj, min);

	preempt_enable();
	return 0;
}
796 793
797 static void *c_start(struct seq_file *m, loff_t *pos) 794 static void *c_start(struct seq_file *m, loff_t *pos)
798 { 795 {
799 return *pos <= NR_CPUS ? (void *)((*pos)+1) : NULL; 796 return *pos <= NR_CPUS ? (void *)((*pos)+1) : NULL;
800 } 797 }
801 static void *c_next(struct seq_file *m, void *v, loff_t *pos) 798 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
802 { 799 {
803 ++*pos; 800 ++*pos;
804 return c_start(m, pos); 801 return c_start(m, pos);
805 } 802 }
/* seq_file .stop hook: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
/* seq_file iterator backing /proc/cpuinfo. */
struct seq_operations cpuinfo_op = {
	.start =c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
815 812
816 /* 813 /*
817 * These three variables are used to save values passed to us by prom_init() 814 * These three variables are used to save values passed to us by prom_init()
818 * via the device tree. The TCE variables are needed because with a memory_limit 815 * via the device tree. The TCE variables are needed because with a memory_limit
819 * in force we may need to explicitly map the TCE are at the top of RAM. 816 * in force we may need to explicitly map the TCE are at the top of RAM.
820 */ 817 */
821 unsigned long memory_limit; 818 unsigned long memory_limit;
822 unsigned long tce_alloc_start; 819 unsigned long tce_alloc_start;
823 unsigned long tce_alloc_end; 820 unsigned long tce_alloc_end;
824 821
825 #ifdef CONFIG_PPC_ISERIES 822 #ifdef CONFIG_PPC_ISERIES
826 /* 823 /*
827 * On iSeries we just parse the mem=X option from the command line. 824 * On iSeries we just parse the mem=X option from the command line.
828 * On pSeries it's a bit more complicated, see prom_init_mem() 825 * On pSeries it's a bit more complicated, see prom_init_mem()
829 */ 826 */
830 static int __init early_parsemem(char *p) 827 static int __init early_parsemem(char *p)
831 { 828 {
832 if (!p) 829 if (!p)
833 return 0; 830 return 0;
834 831
835 memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE); 832 memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
836 833
837 return 0; 834 return 0;
838 } 835 }
839 early_param("mem", early_parsemem); 836 early_param("mem", early_parsemem);
840 #endif /* CONFIG_PPC_ISERIES */ 837 #endif /* CONFIG_PPC_ISERIES */
841 838
842 #ifdef CONFIG_PPC_MULTIPLATFORM 839 #ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * Pick a default console from the Open Firmware "linux,stdout-path"
 * property in /chosen, unless the user already asked for one with
 * console= on the command line.  Returns 0 on success, -EBUSY if a
 * console was requested explicitly, -ENODEV when none can be derived.
 */
static int __init set_preferred_console(void)
{
	struct device_node *prom_stdout = NULL;
	char *name;
	u32 *spd;
	int offset = 0;

	DBG(" -> set_preferred_console()\n");

	/* The user has requested a console so this is already set up. */
	if (strstr(saved_command_line, "console=")) {
		DBG(" console was specified !\n");
		return -EBUSY;
	}

	if (!of_chosen) {
		DBG(" of_chosen is NULL !\n");
		return -ENODEV;
	}
	/* We are getting a weird phandle from OF ... */
	/* ... So use the full path instead */
	name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
	if (name == NULL) {
		DBG(" no linux,stdout-path !\n");
		return -ENODEV;
	}
	prom_stdout = of_find_node_by_path(name);
	if (!prom_stdout) {
		DBG(" can't find stdout package %s !\n", name);
		return -ENODEV;
	}
	DBG("stdout is %s\n", prom_stdout->full_name);

	name = (char *)get_property(prom_stdout, "name", NULL);
	if (!name) {
		DBG(" stdout package has no name !\n");
		goto not_found;
	}
	spd = (u32 *)get_property(prom_stdout, "current-speed", NULL);

	/* The if (0) head lets each #ifdef branch start with "else if". */
	if (0)
		;
#ifdef CONFIG_SERIAL_8250_CONSOLE
	else if (strcmp(name, "serial") == 0) {
		int i;
		u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i);
		/* Map legacy serial port I/O addresses to ttyS numbers. */
		if (i > 8) {
			switch (reg[1]) {
			case 0x3f8:
				offset = 0;
				break;
			case 0x2f8:
				offset = 1;
				break;
			case 0x898:
				offset = 2;
				break;
			case 0x890:
				offset = 3;
				break;
			default:
				/* We dont recognise the serial port */
				goto not_found;
			}
		}
	}
#endif /* CONFIG_SERIAL_8250_CONSOLE */
#ifdef CONFIG_PPC_PSERIES
	else if (strcmp(name, "vty") == 0) {
		u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL);
		char *compat = (char *)get_property(prom_stdout, "compatible", NULL);

		if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) {
			/* Host Virtual Serial Interface */
			/* NOTE(review): this inner 'offset' intentionally
			 * shadows the outer one; it is only used for hvsi. */
			int offset;
			switch (reg[0]) {
			case 0x30000000:
				offset = 0;
				break;
			case 0x30000001:
				offset = 1;
				break;
			default:
				goto not_found;
			}
			of_node_put(prom_stdout);
			DBG("Found hvsi console at offset %d\n", offset);
			return add_preferred_console("hvsi", offset, NULL);
		} else {
			/* pSeries LPAR virtual console */
			of_node_put(prom_stdout);
			DBG("Found hvc console\n");
			return add_preferred_console("hvc", 0, NULL);
		}
	}
#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
	else if (strcmp(name, "ch-a") == 0)
		offset = 0;
	else if (strcmp(name, "ch-b") == 0)
		offset = 1;
#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
	else
		goto not_found;
	of_node_put(prom_stdout);

	DBG("Found serial console at ttyS%d\n", offset);

	/* Pass the OF-reported baud rate along when we have one. */
	if (spd) {
		static char __initdata opt[16];
		sprintf(opt, "%d", *spd);
		return add_preferred_console("ttyS", offset, opt);
	} else
		return add_preferred_console("ttyS", offset, NULL);

 not_found:
	DBG("No preferred console found !\n");
	of_node_put(prom_stdout);
	return -ENODEV;
}
console_initcall(set_preferred_console);
964 #endif /* CONFIG_PPC_MULTIPLATFORM */ 961 #endif /* CONFIG_PPC_MULTIPLATFORM */
965 962
966 #ifdef CONFIG_IRQSTACKS 963 #ifdef CONFIG_IRQSTACKS
/*
 * Allocate the per-cpu softirq/hardirq stacks from the LMB allocator
 * before the buddy allocator is up.
 */
static void __init irqstack_early_init(void)
{
	unsigned int i;

	/*
	 * interrupt stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them.
	 */
	for_each_cpu(i) {
		softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
					THREAD_SIZE, 0x10000000));
		hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
					THREAD_SIZE, 0x10000000));
	}
}
982 #else 979 #else
983 #define irqstack_early_init() 980 #define irqstack_early_init()
984 #endif 981 #endif
985 982
986 /* 983 /*
987 * Stack space used when we detect a bad kernel stack pointer, and 984 * Stack space used when we detect a bad kernel stack pointer, and
988 * early in SMP boots before relocation is enabled. 985 * early in SMP boots before relocation is enabled.
989 */ 986 */
990 static void __init emergency_stack_init(void) 987 static void __init emergency_stack_init(void)
991 { 988 {
992 unsigned long limit; 989 unsigned long limit;
993 unsigned int i; 990 unsigned int i;
994 991
995 /* 992 /*
996 * Emergency stacks must be under 256MB, we cannot afford to take 993 * Emergency stacks must be under 256MB, we cannot afford to take
997 * SLB misses on them. The ABI also requires them to be 128-byte 994 * SLB misses on them. The ABI also requires them to be 128-byte
998 * aligned. 995 * aligned.
999 * 996 *
1000 * Since we use these as temporary stacks during secondary CPU 997 * Since we use these as temporary stacks during secondary CPU
1001 * bringup, we need to get at them in real mode. This means they 998 * bringup, we need to get at them in real mode. This means they
1002 * must also be within the RMO region. 999 * must also be within the RMO region.
1003 */ 1000 */
1004 limit = min(0x10000000UL, lmb.rmo_size); 1001 limit = min(0x10000000UL, lmb.rmo_size);
1005 1002
1006 for_each_cpu(i) 1003 for_each_cpu(i)
1007 paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128, 1004 paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
1008 limit)) + PAGE_SIZE; 1005 limit)) + PAGE_SIZE;
1009 } 1006 }
1010 1007
1011 /* 1008 /*
1012 * Called from setup_arch to initialize the bitmap of available 1009 * Called from setup_arch to initialize the bitmap of available
1013 * syscalls in the systemcfg page 1010 * syscalls in the systemcfg page
1014 */ 1011 */
1015 void __init setup_syscall_map(void) 1012 void __init setup_syscall_map(void)
1016 { 1013 {
1017 unsigned int i, count64 = 0, count32 = 0; 1014 unsigned int i, count64 = 0, count32 = 0;
1018 extern unsigned long *sys_call_table; 1015 extern unsigned long *sys_call_table;
1019 extern unsigned long *sys_call_table32; 1016 extern unsigned long *sys_call_table32;
1020 extern unsigned long sys_ni_syscall; 1017 extern unsigned long sys_ni_syscall;
1021 1018
1022 1019
1023 for (i = 0; i < __NR_syscalls; i++) { 1020 for (i = 0; i < __NR_syscalls; i++) {
1024 if (sys_call_table[i] == sys_ni_syscall) 1021 if (sys_call_table[i] == sys_ni_syscall)
1025 continue; 1022 continue;
1026 count64++; 1023 count64++;
1027 systemcfg->syscall_map_64[i >> 5] |= 0x80000000UL >> (i & 0x1f); 1024 systemcfg->syscall_map_64[i >> 5] |= 0x80000000UL >> (i & 0x1f);
1028 } 1025 }
1029 for (i = 0; i < __NR_syscalls; i++) { 1026 for (i = 0; i < __NR_syscalls; i++) {
1030 if (sys_call_table32[i] == sys_ni_syscall) 1027 if (sys_call_table32[i] == sys_ni_syscall)
1031 continue; 1028 continue;
1032 count32++; 1029 count32++;
1033 systemcfg->syscall_map_32[i >> 5] |= 0x80000000UL >> (i & 0x1f); 1030 systemcfg->syscall_map_32[i >> 5] |= 0x80000000UL >> (i & 0x1f);
1034 } 1031 }
1035 printk(KERN_INFO "Syscall map setup, %d 32 bits and %d 64 bits syscalls\n", 1032 printk(KERN_INFO "Syscall map setup, %d 32 bits and %d 64 bits syscalls\n",
1036 count32, count64); 1033 count32, count64);
1037 } 1034 }
1038 1035
1039 /* 1036 /*
1040 * Called into from start_kernel, after lock_kernel has been called. 1037 * Called into from start_kernel, after lock_kernel has been called.
1041 * Initializes bootmem, which is unsed to manage page allocation until 1038 * Initializes bootmem, which is unsed to manage page allocation until
1042 * mem_init is called. 1039 * mem_init is called.
1043 */ 1040 */
1044 void __init setup_arch(char **cmdline_p) 1041 void __init setup_arch(char **cmdline_p)
1045 { 1042 {
1046 extern void do_init_bootmem(void); 1043 extern void do_init_bootmem(void);
1047 1044
1048 ppc64_boot_msg(0x12, "Setup Arch"); 1045 ppc64_boot_msg(0x12, "Setup Arch");
1049 1046
1050 *cmdline_p = cmd_line; 1047 *cmdline_p = cmd_line;
1051 1048
1052 /* 1049 /*
1053 * Set cache line size based on type of cpu as a default. 1050 * Set cache line size based on type of cpu as a default.
1054 * Systems with OF can look in the properties on the cpu node(s) 1051 * Systems with OF can look in the properties on the cpu node(s)
1055 * for a possibly more accurate value. 1052 * for a possibly more accurate value.
1056 */ 1053 */
1057 dcache_bsize = ppc64_caches.dline_size; 1054 dcache_bsize = ppc64_caches.dline_size;
1058 icache_bsize = ppc64_caches.iline_size; 1055 icache_bsize = ppc64_caches.iline_size;
1059 1056
1060 /* reboot on panic */ 1057 /* reboot on panic */
1061 panic_timeout = 180; 1058 panic_timeout = 180;
1062 1059
1063 if (ppc_md.panic) 1060 if (ppc_md.panic)
1064 notifier_chain_register(&panic_notifier_list, &ppc64_panic_block); 1061 notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);
1065 1062
1066 init_mm.start_code = PAGE_OFFSET; 1063 init_mm.start_code = PAGE_OFFSET;
1067 init_mm.end_code = (unsigned long) _etext; 1064 init_mm.end_code = (unsigned long) _etext;
1068 init_mm.end_data = (unsigned long) _edata; 1065 init_mm.end_data = (unsigned long) _edata;
1069 init_mm.brk = klimit; 1066 init_mm.brk = klimit;
1070 1067
1071 irqstack_early_init(); 1068 irqstack_early_init();
1072 emergency_stack_init(); 1069 emergency_stack_init();
1073 1070
1074 /* set up the bootmem stuff with available memory */ 1071 /* set up the bootmem stuff with available memory */
1075 do_init_bootmem(); 1072 do_init_bootmem();
1076 sparse_init(); 1073 sparse_init();
1077 1074
1078 /* initialize the syscall map in systemcfg */ 1075 /* initialize the syscall map in systemcfg */
1079 setup_syscall_map(); 1076 setup_syscall_map();
1080 1077
1081 ppc_md.setup_arch(); 1078 ppc_md.setup_arch();
1082 1079
1083 /* Use the default idle loop if the platform hasn't provided one. */ 1080 /* Use the default idle loop if the platform hasn't provided one. */
1084 if (NULL == ppc_md.idle_loop) { 1081 if (NULL == ppc_md.idle_loop) {
1085 ppc_md.idle_loop = default_idle; 1082 ppc_md.idle_loop = default_idle;
1086 printk(KERN_INFO "Using default idle loop\n"); 1083 printk(KERN_INFO "Using default idle loop\n");
1087 } 1084 }
1088 1085
1089 paging_init(); 1086 paging_init();
1090 ppc64_boot_msg(0x15, "Setup Done"); 1087 ppc64_boot_msg(0x15, "Setup Done");
1091 } 1088 }
1092 1089
1093 1090
1094 /* ToDo: do something useful if ppc_md is not yet setup. */ 1091 /* ToDo: do something useful if ppc_md is not yet setup. */
1095 #define PPC64_LINUX_FUNCTION 0x0f000000 1092 #define PPC64_LINUX_FUNCTION 0x0f000000
1096 #define PPC64_IPL_MESSAGE 0xc0000000 1093 #define PPC64_IPL_MESSAGE 0xc0000000
1097 #define PPC64_TERM_MESSAGE 0xb0000000 1094 #define PPC64_TERM_MESSAGE 0xb0000000
1098 #define PPC64_ATTN_MESSAGE 0xa0000000 1095 #define PPC64_ATTN_MESSAGE 0xa0000000
1099 #define PPC64_DUMP_MESSAGE 0xd0000000 1096 #define PPC64_DUMP_MESSAGE 0xd0000000
1100 1097
1101 static void ppc64_do_msg(unsigned int src, const char *msg) 1098 static void ppc64_do_msg(unsigned int src, const char *msg)
1102 { 1099 {
1103 if (ppc_md.progress) { 1100 if (ppc_md.progress) {
1104 char buf[128]; 1101 char buf[128];
1105 1102
1106 sprintf(buf, "%08X\n", src); 1103 sprintf(buf, "%08X\n", src);
1107 ppc_md.progress(buf, 0); 1104 ppc_md.progress(buf, 0);
1108 snprintf(buf, 128, "%s", msg); 1105 snprintf(buf, 128, "%s", msg);
1109 ppc_md.progress(buf, 0); 1106 ppc_md.progress(buf, 0);
1110 } 1107 }
1111 } 1108 }
1112 1109
1113 /* Print a boot progress message. */ 1110 /* Print a boot progress message. */
1114 void ppc64_boot_msg(unsigned int src, const char *msg) 1111 void ppc64_boot_msg(unsigned int src, const char *msg)
1115 { 1112 {
1116 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg); 1113 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
1117 printk("[boot]%04x %s\n", src, msg); 1114 printk("[boot]%04x %s\n", src, msg);
1118 } 1115 }
1119 1116
1120 /* Print a termination message (print only -- does not stop the kernel) */ 1117 /* Print a termination message (print only -- does not stop the kernel) */
1121 void ppc64_terminate_msg(unsigned int src, const char *msg) 1118 void ppc64_terminate_msg(unsigned int src, const char *msg)
1122 { 1119 {
1123 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg); 1120 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_TERM_MESSAGE|src, msg);
1124 printk("[terminate]%04x %s\n", src, msg); 1121 printk("[terminate]%04x %s\n", src, msg);
1125 } 1122 }
1126 1123
1127 /* Print something that needs attention (device error, etc) */ 1124 /* Print something that needs attention (device error, etc) */
1128 void ppc64_attention_msg(unsigned int src, const char *msg) 1125 void ppc64_attention_msg(unsigned int src, const char *msg)
1129 { 1126 {
1130 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_ATTN_MESSAGE|src, msg); 1127 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_ATTN_MESSAGE|src, msg);
1131 printk("[attention]%04x %s\n", src, msg); 1128 printk("[attention]%04x %s\n", src, msg);
1132 } 1129 }
1133 1130
1134 /* Print a dump progress message. */ 1131 /* Print a dump progress message. */
1135 void ppc64_dump_msg(unsigned int src, const char *msg) 1132 void ppc64_dump_msg(unsigned int src, const char *msg)
1136 { 1133 {
1137 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_DUMP_MESSAGE|src, msg); 1134 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_DUMP_MESSAGE|src, msg);
1138 printk("[dump]%04x %s\n", src, msg); 1135 printk("[dump]%04x %s\n", src, msg);
1139 } 1136 }
1140 1137
1141 /* This should only be called on processor 0 during calibrate decr */ 1138 /* This should only be called on processor 0 during calibrate decr */
1142 void __init setup_default_decr(void) 1139 void __init setup_default_decr(void)
1143 { 1140 {
1144 struct paca_struct *lpaca = get_paca(); 1141 struct paca_struct *lpaca = get_paca();
1145 1142
1146 lpaca->default_decr = tb_ticks_per_jiffy; 1143 lpaca->default_decr = tb_ticks_per_jiffy;
1147 lpaca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy; 1144 lpaca->next_jiffy_update_tb = get_tb() + tb_ticks_per_jiffy;
1148 } 1145 }
1149 1146
1150 #ifndef CONFIG_PPC_ISERIES 1147 #ifndef CONFIG_PPC_ISERIES
1151 /* 1148 /*
1152 * This function can be used by platforms to "find" legacy serial ports. 1149 * This function can be used by platforms to "find" legacy serial ports.
1153 * It works for "serial" nodes under an "isa" node, and will try to 1150 * It works for "serial" nodes under an "isa" node, and will try to
1154 * respect the "ibm,aix-loc" property if any. It works with up to 8 1151 * respect the "ibm,aix-loc" property if any. It works with up to 8
1155 * ports. 1152 * ports.
1156 */ 1153 */
1157 1154
1158 #define MAX_LEGACY_SERIAL_PORTS 8 1155 #define MAX_LEGACY_SERIAL_PORTS 8
1159 static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1]; 1156 static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1];
1160 static unsigned int old_serial_count; 1157 static unsigned int old_serial_count;
1161 1158
1162 void __init generic_find_legacy_serial_ports(u64 *physport, 1159 void __init generic_find_legacy_serial_ports(u64 *physport,
1163 unsigned int *default_speed) 1160 unsigned int *default_speed)
1164 { 1161 {
1165 struct device_node *np; 1162 struct device_node *np;
1166 u32 *sizeprop; 1163 u32 *sizeprop;
1167 1164
1168 struct isa_reg_property { 1165 struct isa_reg_property {
1169 u32 space; 1166 u32 space;
1170 u32 address; 1167 u32 address;
1171 u32 size; 1168 u32 size;
1172 }; 1169 };
1173 struct pci_reg_property { 1170 struct pci_reg_property {
1174 struct pci_address addr; 1171 struct pci_address addr;
1175 u32 size_hi; 1172 u32 size_hi;
1176 u32 size_lo; 1173 u32 size_lo;
1177 }; 1174 };
1178 1175
1179 DBG(" -> generic_find_legacy_serial_port()\n"); 1176 DBG(" -> generic_find_legacy_serial_port()\n");
1180 1177
1181 *physport = 0; 1178 *physport = 0;
1182 if (default_speed) 1179 if (default_speed)
1183 *default_speed = 0; 1180 *default_speed = 0;
1184 1181
1185 np = of_find_node_by_path("/"); 1182 np = of_find_node_by_path("/");
1186 if (!np) 1183 if (!np)
1187 return; 1184 return;
1188 1185
1189 /* First fill our array */ 1186 /* First fill our array */
1190 for (np = NULL; (np = of_find_node_by_type(np, "serial"));) { 1187 for (np = NULL; (np = of_find_node_by_type(np, "serial"));) {
1191 struct device_node *isa, *pci; 1188 struct device_node *isa, *pci;
1192 struct isa_reg_property *reg; 1189 struct isa_reg_property *reg;
1193 unsigned long phys_size, addr_size, io_base; 1190 unsigned long phys_size, addr_size, io_base;
1194 u32 *rangesp; 1191 u32 *rangesp;
1195 u32 *interrupts, *clk, *spd; 1192 u32 *interrupts, *clk, *spd;
1196 char *typep; 1193 char *typep;
1197 int index, rlen, rentsize; 1194 int index, rlen, rentsize;
1198 1195
1199 /* Ok, first check if it's under an "isa" parent */ 1196 /* Ok, first check if it's under an "isa" parent */
1200 isa = of_get_parent(np); 1197 isa = of_get_parent(np);
1201 if (!isa || strcmp(isa->name, "isa")) { 1198 if (!isa || strcmp(isa->name, "isa")) {
1202 DBG("%s: no isa parent found\n", np->full_name); 1199 DBG("%s: no isa parent found\n", np->full_name);
1203 continue; 1200 continue;
1204 } 1201 }
1205 1202
1206 /* Now look for an "ibm,aix-loc" property that gives us ordering 1203 /* Now look for an "ibm,aix-loc" property that gives us ordering
1207 * if any... 1204 * if any...
1208 */ 1205 */
1209 typep = (char *)get_property(np, "ibm,aix-loc", NULL); 1206 typep = (char *)get_property(np, "ibm,aix-loc", NULL);
1210 1207
1211 /* Get the ISA port number */ 1208 /* Get the ISA port number */
1212 reg = (struct isa_reg_property *)get_property(np, "reg", NULL); 1209 reg = (struct isa_reg_property *)get_property(np, "reg", NULL);
1213 if (reg == NULL) 1210 if (reg == NULL)
1214 goto next_port; 1211 goto next_port;
1215 /* We assume the interrupt number isn't translated ... */ 1212 /* We assume the interrupt number isn't translated ... */
1216 interrupts = (u32 *)get_property(np, "interrupts", NULL); 1213 interrupts = (u32 *)get_property(np, "interrupts", NULL);
1217 /* get clock freq. if present */ 1214 /* get clock freq. if present */
1218 clk = (u32 *)get_property(np, "clock-frequency", NULL); 1215 clk = (u32 *)get_property(np, "clock-frequency", NULL);
1219 /* get default speed if present */ 1216 /* get default speed if present */
1220 spd = (u32 *)get_property(np, "current-speed", NULL); 1217 spd = (u32 *)get_property(np, "current-speed", NULL);
1221 /* Default to locate at end of array */ 1218 /* Default to locate at end of array */
1222 index = old_serial_count; /* end of the array by default */ 1219 index = old_serial_count; /* end of the array by default */
1223 1220
1224 /* If we have a location index, then use it */ 1221 /* If we have a location index, then use it */
1225 if (typep && *typep == 'S') { 1222 if (typep && *typep == 'S') {
1226 index = simple_strtol(typep+1, NULL, 0) - 1; 1223 index = simple_strtol(typep+1, NULL, 0) - 1;
1227 /* if index is out of range, use end of array instead */ 1224 /* if index is out of range, use end of array instead */
1228 if (index >= MAX_LEGACY_SERIAL_PORTS) 1225 if (index >= MAX_LEGACY_SERIAL_PORTS)
1229 index = old_serial_count; 1226 index = old_serial_count;
1230 /* if our index is still out of range, that mean that 1227 /* if our index is still out of range, that mean that
1231 * array is full, we could scan for a free slot but that 1228 * array is full, we could scan for a free slot but that
1232 * make little sense to bother, just skip the port 1229 * make little sense to bother, just skip the port
1233 */ 1230 */
1234 if (index >= MAX_LEGACY_SERIAL_PORTS) 1231 if (index >= MAX_LEGACY_SERIAL_PORTS)
1235 goto next_port; 1232 goto next_port;
1236 if (index >= old_serial_count) 1233 if (index >= old_serial_count)
1237 old_serial_count = index + 1; 1234 old_serial_count = index + 1;
1238 /* Check if there is a port who already claimed our slot */ 1235 /* Check if there is a port who already claimed our slot */
1239 if (serial_ports[index].iobase != 0) { 1236 if (serial_ports[index].iobase != 0) {
1240 /* if we still have some room, move it, else override */ 1237 /* if we still have some room, move it, else override */
1241 if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) { 1238 if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) {
1242 DBG("Moved legacy port %d -> %d\n", index, 1239 DBG("Moved legacy port %d -> %d\n", index,
1243 old_serial_count); 1240 old_serial_count);
1244 serial_ports[old_serial_count++] = 1241 serial_ports[old_serial_count++] =
1245 serial_ports[index]; 1242 serial_ports[index];
1246 } else { 1243 } else {
1247 DBG("Replacing legacy port %d\n", index); 1244 DBG("Replacing legacy port %d\n", index);
1248 } 1245 }
1249 } 1246 }
1250 } 1247 }
1251 if (index >= MAX_LEGACY_SERIAL_PORTS) 1248 if (index >= MAX_LEGACY_SERIAL_PORTS)
1252 goto next_port; 1249 goto next_port;
1253 if (index >= old_serial_count) 1250 if (index >= old_serial_count)
1254 old_serial_count = index + 1; 1251 old_serial_count = index + 1;
1255 1252
1256 /* Now fill the entry */ 1253 /* Now fill the entry */
1257 memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port)); 1254 memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port));
1258 serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16; 1255 serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16;
1259 serial_ports[index].iobase = reg->address; 1256 serial_ports[index].iobase = reg->address;
1260 serial_ports[index].irq = interrupts ? interrupts[0] : 0; 1257 serial_ports[index].irq = interrupts ? interrupts[0] : 0;
1261 serial_ports[index].flags = ASYNC_BOOT_AUTOCONF; 1258 serial_ports[index].flags = ASYNC_BOOT_AUTOCONF;
1262 1259
1263 DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n", 1260 DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n",
1264 index, 1261 index,
1265 serial_ports[index].iobase, 1262 serial_ports[index].iobase,
1266 serial_ports[index].irq, 1263 serial_ports[index].irq,
1267 serial_ports[index].uartclk); 1264 serial_ports[index].uartclk);
1268 1265
1269 /* Get phys address of IO reg for port 1 */ 1266 /* Get phys address of IO reg for port 1 */
1270 if (index != 0) 1267 if (index != 0)
1271 goto next_port; 1268 goto next_port;
1272 1269
1273 pci = of_get_parent(isa); 1270 pci = of_get_parent(isa);
1274 if (!pci) { 1271 if (!pci) {
1275 DBG("%s: no pci parent found\n", np->full_name); 1272 DBG("%s: no pci parent found\n", np->full_name);
1276 goto next_port; 1273 goto next_port;
1277 } 1274 }
1278 1275
1279 rangesp = (u32 *)get_property(pci, "ranges", &rlen); 1276 rangesp = (u32 *)get_property(pci, "ranges", &rlen);
1280 if (rangesp == NULL) { 1277 if (rangesp == NULL) {
1281 of_node_put(pci); 1278 of_node_put(pci);
1282 goto next_port; 1279 goto next_port;
1283 } 1280 }
1284 rlen /= 4; 1281 rlen /= 4;
1285 1282
1286 /* we need the #size-cells of the PCI bridge node itself */ 1283 /* we need the #size-cells of the PCI bridge node itself */
1287 phys_size = 1; 1284 phys_size = 1;
1288 sizeprop = (u32 *)get_property(pci, "#size-cells", NULL); 1285 sizeprop = (u32 *)get_property(pci, "#size-cells", NULL);
1289 if (sizeprop != NULL) 1286 if (sizeprop != NULL)
1290 phys_size = *sizeprop; 1287 phys_size = *sizeprop;
1291 /* we need the parent #addr-cells */ 1288 /* we need the parent #addr-cells */
1292 addr_size = prom_n_addr_cells(pci); 1289 addr_size = prom_n_addr_cells(pci);
1293 rentsize = 3 + addr_size + phys_size; 1290 rentsize = 3 + addr_size + phys_size;
1294 io_base = 0; 1291 io_base = 0;
1295 for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) { 1292 for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) {
1296 if (((rangesp[0] >> 24) & 0x3) != 1) 1293 if (((rangesp[0] >> 24) & 0x3) != 1)
1297 continue; /* not IO space */ 1294 continue; /* not IO space */
1298 io_base = rangesp[3]; 1295 io_base = rangesp[3];
1299 if (addr_size == 2) 1296 if (addr_size == 2)
1300 io_base = (io_base << 32) | rangesp[4]; 1297 io_base = (io_base << 32) | rangesp[4];
1301 } 1298 }
1302 if (io_base != 0) { 1299 if (io_base != 0) {
1303 *physport = io_base + reg->address; 1300 *physport = io_base + reg->address;
1304 if (default_speed && spd) 1301 if (default_speed && spd)
1305 *default_speed = *spd; 1302 *default_speed = *spd;
1306 } 1303 }
1307 of_node_put(pci); 1304 of_node_put(pci);
1308 next_port: 1305 next_port:
1309 of_node_put(isa); 1306 of_node_put(isa);
1310 } 1307 }
1311 1308
1312 DBG(" <- generic_find_legacy_serial_port()\n"); 1309 DBG(" <- generic_find_legacy_serial_port()\n");
1313 } 1310 }
1314 1311
1315 static struct platform_device serial_device = { 1312 static struct platform_device serial_device = {
1316 .name = "serial8250", 1313 .name = "serial8250",
1317 .id = 0, 1314 .id = 0,
1318 .dev = { 1315 .dev = {
1319 .platform_data = serial_ports, 1316 .platform_data = serial_ports,
1320 }, 1317 },
1321 }; 1318 };
1322 1319
1323 static int __init serial_dev_init(void) 1320 static int __init serial_dev_init(void)
1324 { 1321 {
1325 return platform_device_register(&serial_device); 1322 return platform_device_register(&serial_device);
1326 } 1323 }
1327 arch_initcall(serial_dev_init); 1324 arch_initcall(serial_dev_init);
1328 1325
1329 #endif /* CONFIG_PPC_ISERIES */ 1326 #endif /* CONFIG_PPC_ISERIES */
1330 1327
1331 int check_legacy_ioport(unsigned long base_port) 1328 int check_legacy_ioport(unsigned long base_port)
1332 { 1329 {
1333 if (ppc_md.check_legacy_ioport == NULL) 1330 if (ppc_md.check_legacy_ioport == NULL)
1334 return 0; 1331 return 0;
1335 return ppc_md.check_legacy_ioport(base_port); 1332 return ppc_md.check_legacy_ioport(base_port);
1336 } 1333 }
1337 EXPORT_SYMBOL(check_legacy_ioport); 1334 EXPORT_SYMBOL(check_legacy_ioport);
1338 1335
1339 #ifdef CONFIG_XMON 1336 #ifdef CONFIG_XMON
1340 static int __init early_xmon(char *p) 1337 static int __init early_xmon(char *p)
1341 { 1338 {
1342 /* ensure xmon is enabled */ 1339 /* ensure xmon is enabled */
1343 if (p) { 1340 if (p) {
1344 if (strncmp(p, "on", 2) == 0) 1341 if (strncmp(p, "on", 2) == 0)
1345 xmon_init(); 1342 xmon_init();
1346 if (strncmp(p, "early", 5) != 0) 1343 if (strncmp(p, "early", 5) != 0)
1347 return 0; 1344 return 0;
1348 } 1345 }
1349 xmon_init(); 1346 xmon_init();
1350 debugger(NULL); 1347 debugger(NULL);
1351 1348
1352 return 0; 1349 return 0;
1353 } 1350 }
1354 early_param("xmon", early_xmon); 1351 early_param("xmon", early_xmon);
1355 #endif 1352 #endif
1356 1353
1357 void cpu_die(void) 1354 void cpu_die(void)
1358 { 1355 {
1359 if (ppc_md.cpu_die) 1356 if (ppc_md.cpu_die)
1360 ppc_md.cpu_die(); 1357 ppc_md.cpu_die();
1361 } 1358 }
1362 1359
arch/s390/kernel/setup.c
1 /* 1 /*
2 * arch/s390/kernel/setup.c 2 * arch/s390/kernel/setup.c
3 * 3 *
4 * S390 version 4 * S390 version
5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 5 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 * Author(s): Hartmut Penner (hp@de.ibm.com), 6 * Author(s): Hartmut Penner (hp@de.ibm.com),
7 * Martin Schwidefsky (schwidefsky@de.ibm.com) 7 * Martin Schwidefsky (schwidefsky@de.ibm.com)
8 * 8 *
9 * Derived from "arch/i386/kernel/setup.c" 9 * Derived from "arch/i386/kernel/setup.c"
10 * Copyright (C) 1995, Linus Torvalds 10 * Copyright (C) 1995, Linus Torvalds
11 */ 11 */
12 12
13 /* 13 /*
14 * This file handles the architecture-dependent parts of initialization 14 * This file handles the architecture-dependent parts of initialization
15 */ 15 */
16 16
17 #include <linux/errno.h> 17 #include <linux/errno.h>
18 #include <linux/module.h> 18 #include <linux/module.h>
19 #include <linux/sched.h> 19 #include <linux/sched.h>
20 #include <linux/kernel.h> 20 #include <linux/kernel.h>
21 #include <linux/mm.h> 21 #include <linux/mm.h>
22 #include <linux/stddef.h> 22 #include <linux/stddef.h>
23 #include <linux/unistd.h> 23 #include <linux/unistd.h>
24 #include <linux/ptrace.h> 24 #include <linux/ptrace.h>
25 #include <linux/slab.h> 25 #include <linux/slab.h>
26 #include <linux/user.h> 26 #include <linux/user.h>
27 #include <linux/a.out.h> 27 #include <linux/a.out.h>
28 #include <linux/tty.h> 28 #include <linux/tty.h>
29 #include <linux/ioport.h> 29 #include <linux/ioport.h>
30 #include <linux/delay.h> 30 #include <linux/delay.h>
31 #include <linux/config.h> 31 #include <linux/config.h>
32 #include <linux/init.h> 32 #include <linux/init.h>
33 #include <linux/initrd.h> 33 #include <linux/initrd.h>
34 #include <linux/bootmem.h> 34 #include <linux/bootmem.h>
35 #include <linux/root_dev.h> 35 #include <linux/root_dev.h>
36 #include <linux/console.h> 36 #include <linux/console.h>
37 #include <linux/seq_file.h> 37 #include <linux/seq_file.h>
38 #include <linux/kernel_stat.h> 38 #include <linux/kernel_stat.h>
39 39
40 #include <asm/uaccess.h> 40 #include <asm/uaccess.h>
41 #include <asm/system.h> 41 #include <asm/system.h>
42 #include <asm/smp.h> 42 #include <asm/smp.h>
43 #include <asm/mmu_context.h> 43 #include <asm/mmu_context.h>
44 #include <asm/cpcmd.h> 44 #include <asm/cpcmd.h>
45 #include <asm/lowcore.h> 45 #include <asm/lowcore.h>
46 #include <asm/irq.h> 46 #include <asm/irq.h>
47 #include <asm/page.h> 47 #include <asm/page.h>
48 #include <asm/ptrace.h> 48 #include <asm/ptrace.h>
49 49
50 /* 50 /*
51 * Machine setup.. 51 * Machine setup..
52 */ 52 */
53 unsigned int console_mode = 0; 53 unsigned int console_mode = 0;
54 unsigned int console_devno = -1; 54 unsigned int console_devno = -1;
55 unsigned int console_irq = -1; 55 unsigned int console_irq = -1;
56 unsigned long memory_size = 0; 56 unsigned long memory_size = 0;
57 unsigned long machine_flags = 0; 57 unsigned long machine_flags = 0;
58 struct { 58 struct {
59 unsigned long addr, size, type; 59 unsigned long addr, size, type;
60 } memory_chunk[MEMORY_CHUNKS] = { { 0 } }; 60 } memory_chunk[MEMORY_CHUNKS] = { { 0 } };
61 #define CHUNK_READ_WRITE 0 61 #define CHUNK_READ_WRITE 0
62 #define CHUNK_READ_ONLY 1 62 #define CHUNK_READ_ONLY 1
63 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */ 63 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
64 unsigned long __initdata zholes_size[MAX_NR_ZONES]; 64 unsigned long __initdata zholes_size[MAX_NR_ZONES];
65 static unsigned long __initdata memory_end; 65 static unsigned long __initdata memory_end;
66 66
67 /* 67 /*
68 * Setup options 68 * Setup options
69 */ 69 */
70 extern int _text,_etext, _edata, _end; 70 extern int _text,_etext, _edata, _end;
71 71
72 /* 72 /*
73 * This is set up by the setup-routine at boot-time 73 * This is set up by the setup-routine at boot-time
74 * for S390 need to find out, what we have to setup 74 * for S390 need to find out, what we have to setup
75 * using address 0x10400 ... 75 * using address 0x10400 ...
76 */ 76 */
77 77
78 #include <asm/setup.h> 78 #include <asm/setup.h>
79 79
80 static char command_line[COMMAND_LINE_SIZE] = { 0, }; 80 static char command_line[COMMAND_LINE_SIZE] = { 0, };
81 81
82 static struct resource code_resource = { 82 static struct resource code_resource = {
83 .name = "Kernel code", 83 .name = "Kernel code",
84 .start = (unsigned long) &_text, 84 .start = (unsigned long) &_text,
85 .end = (unsigned long) &_etext - 1, 85 .end = (unsigned long) &_etext - 1,
86 .flags = IORESOURCE_BUSY | IORESOURCE_MEM, 86 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
87 }; 87 };
88 88
89 static struct resource data_resource = { 89 static struct resource data_resource = {
90 .name = "Kernel data", 90 .name = "Kernel data",
91 .start = (unsigned long) &_etext, 91 .start = (unsigned long) &_etext,
92 .end = (unsigned long) &_edata - 1, 92 .end = (unsigned long) &_edata - 1,
93 .flags = IORESOURCE_BUSY | IORESOURCE_MEM, 93 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
94 }; 94 };
95 95
96 /* 96 /*
97 * cpu_init() initializes state that is per-CPU. 97 * cpu_init() initializes state that is per-CPU.
98 */ 98 */
99 void __devinit cpu_init (void) 99 void __devinit cpu_init (void)
100 { 100 {
101 int addr = hard_smp_processor_id(); 101 int addr = hard_smp_processor_id();
102 102
103 /* 103 /*
104 * Store processor id in lowcore (used e.g. in timer_interrupt) 104 * Store processor id in lowcore (used e.g. in timer_interrupt)
105 */ 105 */
106 asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id)); 106 asm volatile ("stidp %0": "=m" (S390_lowcore.cpu_data.cpu_id));
107 S390_lowcore.cpu_data.cpu_addr = addr; 107 S390_lowcore.cpu_data.cpu_addr = addr;
108 108
109 /* 109 /*
110 * Force FPU initialization: 110 * Force FPU initialization:
111 */ 111 */
112 clear_thread_flag(TIF_USEDFPU); 112 clear_thread_flag(TIF_USEDFPU);
113 clear_used_math(); 113 clear_used_math();
114 114
115 atomic_inc(&init_mm.mm_count); 115 atomic_inc(&init_mm.mm_count);
116 current->active_mm = &init_mm; 116 current->active_mm = &init_mm;
117 if (current->mm) 117 if (current->mm)
118 BUG(); 118 BUG();
119 enter_lazy_tlb(&init_mm, current); 119 enter_lazy_tlb(&init_mm, current);
120 } 120 }
121 121
122 /* 122 /*
123 * VM halt and poweroff setup routines 123 * VM halt and poweroff setup routines
124 */ 124 */
125 char vmhalt_cmd[128] = ""; 125 char vmhalt_cmd[128] = "";
126 char vmpoff_cmd[128] = ""; 126 char vmpoff_cmd[128] = "";
127 127
128 static inline void strncpy_skip_quote(char *dst, char *src, int n) 128 static inline void strncpy_skip_quote(char *dst, char *src, int n)
129 { 129 {
130 int sx, dx; 130 int sx, dx;
131 131
132 dx = 0; 132 dx = 0;
133 for (sx = 0; src[sx] != 0; sx++) { 133 for (sx = 0; src[sx] != 0; sx++) {
134 if (src[sx] == '"') continue; 134 if (src[sx] == '"') continue;
135 dst[dx++] = src[sx]; 135 dst[dx++] = src[sx];
136 if (dx >= n) break; 136 if (dx >= n) break;
137 } 137 }
138 } 138 }
139 139
140 static int __init vmhalt_setup(char *str) 140 static int __init vmhalt_setup(char *str)
141 { 141 {
142 strncpy_skip_quote(vmhalt_cmd, str, 127); 142 strncpy_skip_quote(vmhalt_cmd, str, 127);
143 vmhalt_cmd[127] = 0; 143 vmhalt_cmd[127] = 0;
144 return 1; 144 return 1;
145 } 145 }
146 146
147 __setup("vmhalt=", vmhalt_setup); 147 __setup("vmhalt=", vmhalt_setup);
148 148
149 static int __init vmpoff_setup(char *str) 149 static int __init vmpoff_setup(char *str)
150 { 150 {
151 strncpy_skip_quote(vmpoff_cmd, str, 127); 151 strncpy_skip_quote(vmpoff_cmd, str, 127);
152 vmpoff_cmd[127] = 0; 152 vmpoff_cmd[127] = 0;
153 return 1; 153 return 1;
154 } 154 }
155 155
156 __setup("vmpoff=", vmpoff_setup); 156 __setup("vmpoff=", vmpoff_setup);
157 157
158 /* 158 /*
159 * condev= and conmode= setup parameter. 159 * condev= and conmode= setup parameter.
160 */ 160 */
161 161
162 static int __init condev_setup(char *str) 162 static int __init condev_setup(char *str)
163 { 163 {
164 int vdev; 164 int vdev;
165 165
166 vdev = simple_strtoul(str, &str, 0); 166 vdev = simple_strtoul(str, &str, 0);
167 if (vdev >= 0 && vdev < 65536) { 167 if (vdev >= 0 && vdev < 65536) {
168 console_devno = vdev; 168 console_devno = vdev;
169 console_irq = -1; 169 console_irq = -1;
170 } 170 }
171 return 1; 171 return 1;
172 } 172 }
173 173
174 __setup("condev=", condev_setup); 174 __setup("condev=", condev_setup);
175 175
176 static int __init conmode_setup(char *str) 176 static int __init conmode_setup(char *str)
177 { 177 {
178 #if defined(CONFIG_SCLP_CONSOLE) 178 #if defined(CONFIG_SCLP_CONSOLE)
179 if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0) 179 if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
180 SET_CONSOLE_SCLP; 180 SET_CONSOLE_SCLP;
181 #endif 181 #endif
182 #if defined(CONFIG_TN3215_CONSOLE) 182 #if defined(CONFIG_TN3215_CONSOLE)
183 if (strncmp(str, "3215", 5) == 0) 183 if (strncmp(str, "3215", 5) == 0)
184 SET_CONSOLE_3215; 184 SET_CONSOLE_3215;
185 #endif 185 #endif
186 #if defined(CONFIG_TN3270_CONSOLE) 186 #if defined(CONFIG_TN3270_CONSOLE)
187 if (strncmp(str, "3270", 5) == 0) 187 if (strncmp(str, "3270", 5) == 0)
188 SET_CONSOLE_3270; 188 SET_CONSOLE_3270;
189 #endif 189 #endif
190 return 1; 190 return 1;
191 } 191 }
192 192
193 __setup("conmode=", conmode_setup); 193 __setup("conmode=", conmode_setup);
194 194
195 static void __init conmode_default(void) 195 static void __init conmode_default(void)
196 { 196 {
197 char query_buffer[1024]; 197 char query_buffer[1024];
198 char *ptr; 198 char *ptr;
199 199
200 if (MACHINE_IS_VM) { 200 if (MACHINE_IS_VM) {
201 __cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL); 201 __cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
202 console_devno = simple_strtoul(query_buffer + 5, NULL, 16); 202 console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
203 ptr = strstr(query_buffer, "SUBCHANNEL ="); 203 ptr = strstr(query_buffer, "SUBCHANNEL =");
204 console_irq = simple_strtoul(ptr + 13, NULL, 16); 204 console_irq = simple_strtoul(ptr + 13, NULL, 16);
205 __cpcmd("QUERY TERM", query_buffer, 1024, NULL); 205 __cpcmd("QUERY TERM", query_buffer, 1024, NULL);
206 ptr = strstr(query_buffer, "CONMODE"); 206 ptr = strstr(query_buffer, "CONMODE");
207 /* 207 /*
208 * Set the conmode to 3215 so that the device recognition 208 * Set the conmode to 3215 so that the device recognition
209 * will set the cu_type of the console to 3215. If the 209 * will set the cu_type of the console to 3215. If the
210 * conmode is 3270 and we don't set it back then both 210 * conmode is 3270 and we don't set it back then both
211 * 3215 and the 3270 driver will try to access the console 211 * 3215 and the 3270 driver will try to access the console
212 * device (3215 as console and 3270 as normal tty). 212 * device (3215 as console and 3270 as normal tty).
213 */ 213 */
214 __cpcmd("TERM CONMODE 3215", NULL, 0, NULL); 214 __cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
215 if (ptr == NULL) { 215 if (ptr == NULL) {
216 #if defined(CONFIG_SCLP_CONSOLE) 216 #if defined(CONFIG_SCLP_CONSOLE)
217 SET_CONSOLE_SCLP; 217 SET_CONSOLE_SCLP;
218 #endif 218 #endif
219 return; 219 return;
220 } 220 }
221 if (strncmp(ptr + 8, "3270", 4) == 0) { 221 if (strncmp(ptr + 8, "3270", 4) == 0) {
222 #if defined(CONFIG_TN3270_CONSOLE) 222 #if defined(CONFIG_TN3270_CONSOLE)
223 SET_CONSOLE_3270; 223 SET_CONSOLE_3270;
224 #elif defined(CONFIG_TN3215_CONSOLE) 224 #elif defined(CONFIG_TN3215_CONSOLE)
225 SET_CONSOLE_3215; 225 SET_CONSOLE_3215;
226 #elif defined(CONFIG_SCLP_CONSOLE) 226 #elif defined(CONFIG_SCLP_CONSOLE)
227 SET_CONSOLE_SCLP; 227 SET_CONSOLE_SCLP;
228 #endif 228 #endif
229 } else if (strncmp(ptr + 8, "3215", 4) == 0) { 229 } else if (strncmp(ptr + 8, "3215", 4) == 0) {
230 #if defined(CONFIG_TN3215_CONSOLE) 230 #if defined(CONFIG_TN3215_CONSOLE)
231 SET_CONSOLE_3215; 231 SET_CONSOLE_3215;
232 #elif defined(CONFIG_TN3270_CONSOLE) 232 #elif defined(CONFIG_TN3270_CONSOLE)
233 SET_CONSOLE_3270; 233 SET_CONSOLE_3270;
234 #elif defined(CONFIG_SCLP_CONSOLE) 234 #elif defined(CONFIG_SCLP_CONSOLE)
235 SET_CONSOLE_SCLP; 235 SET_CONSOLE_SCLP;
236 #endif 236 #endif
237 } 237 }
238 } else if (MACHINE_IS_P390) { 238 } else if (MACHINE_IS_P390) {
239 #if defined(CONFIG_TN3215_CONSOLE) 239 #if defined(CONFIG_TN3215_CONSOLE)
240 SET_CONSOLE_3215; 240 SET_CONSOLE_3215;
241 #elif defined(CONFIG_TN3270_CONSOLE) 241 #elif defined(CONFIG_TN3270_CONSOLE)
242 SET_CONSOLE_3270; 242 SET_CONSOLE_3270;
243 #endif 243 #endif
244 } else { 244 } else {
245 #if defined(CONFIG_SCLP_CONSOLE) 245 #if defined(CONFIG_SCLP_CONSOLE)
246 SET_CONSOLE_SCLP; 246 SET_CONSOLE_SCLP;
247 #endif 247 #endif
248 } 248 }
249 } 249 }
250 250
#ifdef CONFIG_SMP
extern void machine_restart_smp(char *);
extern void machine_halt_smp(void);
extern void machine_power_off_smp(void);

/*
 * On SMP the real restart/halt/power-off work is done by the SMP
 * variants (defined elsewhere), reached through these pointers.
 */
void (*_machine_restart)(char *command) = machine_restart_smp;
void (*_machine_halt)(void) = machine_halt_smp;
void (*_machine_power_off)(void) = machine_power_off_smp;
#else
/*
 * Reboot, halt and power_off routines for non SMP.
 */
extern void reipl(unsigned long devno);

/* Restart: under VM issue the CP "IPL" command, natively re-IPL from
 * the device we originally booted from (recorded in the lowcore). */
static void do_machine_restart_nonsmp(char * __unused)
{
	if (MACHINE_IS_VM)
		cpcmd ("IPL", NULL, 0);
	else
		reipl (0x10000 | S390_lowcore.ipl_device);
}

/* Halt: optionally run the configured VM halt command, then stop this
 * (the only) cpu with its status stored. */
static void do_machine_halt_nonsmp(void)
{
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		cpcmd(vmhalt_cmd, NULL, 0);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

/* Power off: like halt, but runs the configured VM power-off command
 * first; the cpu is then stopped the same way. */
static void do_machine_power_off_nonsmp(void)
{
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		cpcmd(vmpoff_cmd, NULL, 0);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
void (*_machine_halt)(void) = do_machine_halt_nonsmp;
void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
#endif
290 290
/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

/* Unblank the console so late reboot messages stay visible, then hand
 * off to the SMP/non-SMP specific restart routine. */
void machine_restart(char *command)
{
	console_unblank();
	_machine_restart(command);
}
/* Halt the machine: unblank the console, then call the halt backend. */
void machine_halt(void)
{
	console_unblank();
	_machine_halt();
}
/* Power the machine off: unblank the console, then call the backend. */
void machine_power_off(void)
{
	console_unblank();
	_machine_power_off();
}
319 313
320 static void __init 314 static void __init
321 add_memory_hole(unsigned long start, unsigned long end) 315 add_memory_hole(unsigned long start, unsigned long end)
322 { 316 {
323 unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT; 317 unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
324 318
325 if (end <= dma_pfn) 319 if (end <= dma_pfn)
326 zholes_size[ZONE_DMA] += end - start + 1; 320 zholes_size[ZONE_DMA] += end - start + 1;
327 else if (start > dma_pfn) 321 else if (start > dma_pfn)
328 zholes_size[ZONE_NORMAL] += end - start + 1; 322 zholes_size[ZONE_NORMAL] += end - start + 1;
329 else { 323 else {
330 zholes_size[ZONE_DMA] += dma_pfn - start + 1; 324 zholes_size[ZONE_DMA] += dma_pfn - start + 1;
331 zholes_size[ZONE_NORMAL] += end - dma_pfn; 325 zholes_size[ZONE_NORMAL] += end - dma_pfn;
332 } 326 }
333 } 327 }
334 328
335 static void __init 329 static void __init
336 parse_cmdline_early(char **cmdline_p) 330 parse_cmdline_early(char **cmdline_p)
337 { 331 {
338 char c = ' ', cn, *to = command_line, *from = COMMAND_LINE; 332 char c = ' ', cn, *to = command_line, *from = COMMAND_LINE;
339 unsigned long delay = 0; 333 unsigned long delay = 0;
340 334
341 /* Save unparsed command line copy for /proc/cmdline */ 335 /* Save unparsed command line copy for /proc/cmdline */
342 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE); 336 memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
343 saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; 337 saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
344 338
345 for (;;) { 339 for (;;) {
346 /* 340 /*
347 * "mem=XXX[kKmM]" sets memsize 341 * "mem=XXX[kKmM]" sets memsize
348 */ 342 */
349 if (c == ' ' && strncmp(from, "mem=", 4) == 0) { 343 if (c == ' ' && strncmp(from, "mem=", 4) == 0) {
350 memory_end = simple_strtoul(from+4, &from, 0); 344 memory_end = simple_strtoul(from+4, &from, 0);
351 if ( *from == 'K' || *from == 'k' ) { 345 if ( *from == 'K' || *from == 'k' ) {
352 memory_end = memory_end << 10; 346 memory_end = memory_end << 10;
353 from++; 347 from++;
354 } else if ( *from == 'M' || *from == 'm' ) { 348 } else if ( *from == 'M' || *from == 'm' ) {
355 memory_end = memory_end << 20; 349 memory_end = memory_end << 20;
356 from++; 350 from++;
357 } 351 }
358 } 352 }
359 /* 353 /*
360 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes 354 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
361 */ 355 */
362 if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) { 356 if (c == ' ' && strncmp(from, "ipldelay=", 9) == 0) {
363 delay = simple_strtoul(from+9, &from, 0); 357 delay = simple_strtoul(from+9, &from, 0);
364 if (*from == 's' || *from == 'S') { 358 if (*from == 's' || *from == 'S') {
365 delay = delay*1000000; 359 delay = delay*1000000;
366 from++; 360 from++;
367 } else if (*from == 'm' || *from == 'M') { 361 } else if (*from == 'm' || *from == 'M') {
368 delay = delay*60*1000000; 362 delay = delay*60*1000000;
369 from++; 363 from++;
370 } 364 }
371 /* now wait for the requested amount of time */ 365 /* now wait for the requested amount of time */
372 udelay(delay); 366 udelay(delay);
373 } 367 }
374 cn = *(from++); 368 cn = *(from++);
375 if (!cn) 369 if (!cn)
376 break; 370 break;
377 if (cn == '\n') 371 if (cn == '\n')
378 cn = ' '; /* replace newlines with space */ 372 cn = ' '; /* replace newlines with space */
379 if (cn == 0x0d) 373 if (cn == 0x0d)
380 cn = ' '; /* replace 0x0d with space */ 374 cn = ' '; /* replace 0x0d with space */
381 if (cn == ' ' && c == ' ') 375 if (cn == ' ' && c == ' ')
382 continue; /* remove additional spaces */ 376 continue; /* remove additional spaces */
383 c = cn; 377 c = cn;
384 if (to - command_line >= COMMAND_LINE_SIZE) 378 if (to - command_line >= COMMAND_LINE_SIZE)
385 break; 379 break;
386 *(to++) = c; 380 *(to++) = c;
387 } 381 }
388 if (c == ' ' && to > command_line) to--; 382 if (c == ' ' && to > command_line) to--;
389 *to = '\0'; 383 *to = '\0';
390 *cmdline_p = command_line; 384 *cmdline_p = command_line;
391 } 385 }
392 386
/*
 * Allocate and initialize the lowcore (prefix area) for the boot cpu:
 * new PSWs for every interruption class, the kernel/async/panic
 * stacks, and the current-task pointers.  Finally point the hardware
 * prefix register at the new lowcore.
 */
static void __init
setup_lowcore(void)
{
	struct _lowcore *lc;
	int lc_pages;

	/*
	 * Setup lowcore for boot cpu
	 */
	/* 64 bit needs a two-page lowcore, 31 bit only one page. */
	lc_pages = sizeof(void *) == 8 ? 2 : 1;
	lc = (struct _lowcore *)
		__alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
	memset(lc, 0, lc_pages * PAGE_SIZE);
	/* Entry PSWs for the various interruption classes. */
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = PSW_KERNEL_BITS;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	/* System calls run with I/O and external interrupts enabled. */
	lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = PSW_KERNEL_BITS;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
	/* Machine checks enter with DAT and machine checks disabled. */
	lc->mcck_new_psw.mask =
		PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = PSW_KERNEL_BITS;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->ipl_device = S390_lowcore.ipl_device;
	lc->jiffy_timer = -1LL;
	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
#ifndef CONFIG_ARCH_S390X
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = (__u32)
			__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
		/* enable extended save area */
		ctl_set_bit(14, 29);
	}
#endif
#ifdef CONFIG_ARCH_S390X
	/* Real "diag 0x44" opcode if the machine supports it, otherwise
	 * 0x07000700 -- presumably two bcr 0,0 no-ops; confirm against
	 * the instruction reference. */
	if (MACHINE_HAS_DIAG44)
		lc->diag44_opcode = 0x83000044;
	else
		lc->diag44_opcode = 0x07000700;
#endif /* CONFIG_ARCH_S390X */
	set_prefix((u32)(unsigned long) lc);
}
448 442
/*
 * Register the detected memory chunks in the iomem resource tree,
 * with the kernel code/data resources nested inside the chunk that
 * contains them.
 */
static void __init
setup_resources(void)
{
	struct resource *res;
	int i;

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		res = alloc_bootmem_low(sizeof(struct resource));
		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
		switch (memory_chunk[i].type) {
		case CHUNK_READ_WRITE:
			res->name = "System RAM";
			break;
		case CHUNK_READ_ONLY:
			res->name = "System ROM";
			res->flags |= IORESOURCE_READONLY;
			break;
		default:
			res->name = "reserved";
		}
		res->start = memory_chunk[i].addr;
		res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
		request_resource(&iomem_resource, res);
		/* NOTE(review): return values ignored -- code/data can
		 * only fall inside one chunk, the other attempts fail. */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
}
476 470
477 static void __init 471 static void __init
478 setup_memory(void) 472 setup_memory(void)
479 { 473 {
480 unsigned long bootmap_size; 474 unsigned long bootmap_size;
481 unsigned long start_pfn, end_pfn, init_pfn; 475 unsigned long start_pfn, end_pfn, init_pfn;
482 unsigned long last_rw_end; 476 unsigned long last_rw_end;
483 int i; 477 int i;
484 478
485 /* 479 /*
486 * partially used pages are not usable - thus 480 * partially used pages are not usable - thus
487 * we are rounding upwards: 481 * we are rounding upwards:
488 */ 482 */
489 start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT; 483 start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT;
490 end_pfn = max_pfn = memory_end >> PAGE_SHIFT; 484 end_pfn = max_pfn = memory_end >> PAGE_SHIFT;
491 485
492 /* Initialize storage key for kernel pages */ 486 /* Initialize storage key for kernel pages */
493 for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++) 487 for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++)
494 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); 488 page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
495 489
496 /* 490 /*
497 * Initialize the boot-time allocator (with low memory only): 491 * Initialize the boot-time allocator (with low memory only):
498 */ 492 */
499 bootmap_size = init_bootmem(start_pfn, end_pfn); 493 bootmap_size = init_bootmem(start_pfn, end_pfn);
500 494
501 /* 495 /*
502 * Register RAM areas with the bootmem allocator. 496 * Register RAM areas with the bootmem allocator.
503 */ 497 */
504 last_rw_end = start_pfn; 498 last_rw_end = start_pfn;
505 499
506 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { 500 for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
507 unsigned long start_chunk, end_chunk; 501 unsigned long start_chunk, end_chunk;
508 502
509 if (memory_chunk[i].type != CHUNK_READ_WRITE) 503 if (memory_chunk[i].type != CHUNK_READ_WRITE)
510 continue; 504 continue;
511 start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1); 505 start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
512 start_chunk >>= PAGE_SHIFT; 506 start_chunk >>= PAGE_SHIFT;
513 end_chunk = (memory_chunk[i].addr + memory_chunk[i].size); 507 end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
514 end_chunk >>= PAGE_SHIFT; 508 end_chunk >>= PAGE_SHIFT;
515 if (start_chunk < start_pfn) 509 if (start_chunk < start_pfn)
516 start_chunk = start_pfn; 510 start_chunk = start_pfn;
517 if (end_chunk > end_pfn) 511 if (end_chunk > end_pfn)
518 end_chunk = end_pfn; 512 end_chunk = end_pfn;
519 if (start_chunk < end_chunk) { 513 if (start_chunk < end_chunk) {
520 /* Initialize storage key for RAM pages */ 514 /* Initialize storage key for RAM pages */
521 for (init_pfn = start_chunk ; init_pfn < end_chunk; 515 for (init_pfn = start_chunk ; init_pfn < end_chunk;
522 init_pfn++) 516 init_pfn++)
523 page_set_storage_key(init_pfn << PAGE_SHIFT, 517 page_set_storage_key(init_pfn << PAGE_SHIFT,
524 PAGE_DEFAULT_KEY); 518 PAGE_DEFAULT_KEY);
525 free_bootmem(start_chunk << PAGE_SHIFT, 519 free_bootmem(start_chunk << PAGE_SHIFT,
526 (end_chunk - start_chunk) << PAGE_SHIFT); 520 (end_chunk - start_chunk) << PAGE_SHIFT);
527 if (last_rw_end < start_chunk) 521 if (last_rw_end < start_chunk)
528 add_memory_hole(last_rw_end, start_chunk - 1); 522 add_memory_hole(last_rw_end, start_chunk - 1);
529 last_rw_end = end_chunk; 523 last_rw_end = end_chunk;
530 } 524 }
531 } 525 }
532 526
533 psw_set_key(PAGE_DEFAULT_KEY); 527 psw_set_key(PAGE_DEFAULT_KEY);
534 528
535 if (last_rw_end < end_pfn - 1) 529 if (last_rw_end < end_pfn - 1)
536 add_memory_hole(last_rw_end, end_pfn - 1); 530 add_memory_hole(last_rw_end, end_pfn - 1);
537 531
538 /* 532 /*
539 * Reserve the bootmem bitmap itself as well. We do this in two 533 * Reserve the bootmem bitmap itself as well. We do this in two
540 * steps (first step was init_bootmem()) because this catches 534 * steps (first step was init_bootmem()) because this catches
541 * the (very unlikely) case of us accidentally initializing the 535 * the (very unlikely) case of us accidentally initializing the
542 * bootmem allocator with an invalid RAM area. 536 * bootmem allocator with an invalid RAM area.
543 */ 537 */
544 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size); 538 reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);
545 539
546 #ifdef CONFIG_BLK_DEV_INITRD 540 #ifdef CONFIG_BLK_DEV_INITRD
547 if (INITRD_START) { 541 if (INITRD_START) {
548 if (INITRD_START + INITRD_SIZE <= memory_end) { 542 if (INITRD_START + INITRD_SIZE <= memory_end) {
549 reserve_bootmem(INITRD_START, INITRD_SIZE); 543 reserve_bootmem(INITRD_START, INITRD_SIZE);
550 initrd_start = INITRD_START; 544 initrd_start = INITRD_START;
551 initrd_end = initrd_start + INITRD_SIZE; 545 initrd_end = initrd_start + INITRD_SIZE;
552 } else { 546 } else {
553 printk("initrd extends beyond end of memory " 547 printk("initrd extends beyond end of memory "
554 "(0x%08lx > 0x%08lx)\ndisabling initrd\n", 548 "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
555 initrd_start + INITRD_SIZE, memory_end); 549 initrd_start + INITRD_SIZE, memory_end);
556 initrd_start = initrd_end = 0; 550 initrd_start = initrd_end = 0;
557 } 551 }
558 } 552 }
559 #endif 553 #endif
560 } 554 }
561 555
/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init
setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
#ifndef CONFIG_ARCH_S390X
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (31 bit mode)\n" :
	       "We are running native (31 bit mode)\n");
	printk((MACHINE_HAS_IEEE) ?
	       "This machine has an IEEE fpu\n" :
	       "This machine has no IEEE fpu\n");
#else /* CONFIG_ARCH_S390X */
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (64 bit mode)\n" :
	       "We are running native (64 bit mode)\n");
#endif /* CONFIG_ARCH_S390X */

	ROOT_DEV = Root_RAM0;
#ifndef CONFIG_ARCH_S390X
	memory_end = memory_size & ~0x400000UL;	/* align memory end to 4MB */
	/* NOTE(review): ~0x400000UL clears only bit 22; rounding down to
	 * a 4MB boundary would be ~0x3fffffUL -- confirm intent. */
	/*
	 * We need some free virtual space to be able to do vmalloc.
	 * On a machine with 2GB memory we make sure that we have at
	 * least 128 MB free space for vmalloc.
	 */
	if (memory_end > 1920*1024*1024)
		memory_end = 1920*1024*1024;
#else /* CONFIG_ARCH_S390X */
	memory_end = memory_size & ~0x200000UL;	/* detected in head.s */
#endif /* CONFIG_ARCH_S390X */

	/* Describe the kernel image layout to the initial mm. */
	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	parse_cmdline_early(cmdline_p);

	setup_memory();
	setup_resources();
	setup_lowcore();

	cpu_init();
	__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();
}
622 616
/* Print one cpu's identification (version, ident, machine type) to
 * the console; on SMP the physical cpu address is included too. */
void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
	printk("cpu %d "
#ifdef CONFIG_SMP
	       "phys_idx=%d "
#endif
	       "vers=%02X ident=%06X machine=%04X unused=%04X\n",
	       cpuinfo->cpu_nr,
#ifdef CONFIG_SMP
	       cpuinfo->cpu_addr,
#endif
	       cpuinfo->cpu_id.version,
	       cpuinfo->cpu_id.ident,
	       cpuinfo->cpu_id.machine,
	       cpuinfo->cpu_id.unused);
}
639 633
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_S390 *cpuinfo;
	/* c_start() hands out cpu number + 1; undo that bias. */
	unsigned long n = (unsigned long) v - 1;

	/* Print the summary header once, before the first cpu. */
	if (!n) {
		seq_printf(m, "vendor_id : IBM/S390\n"
			   "# processors : %i\n"
			   "bogomips per cpu: %lu.%02lu\n",
			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ))%100);
	}
	if (cpu_online(n)) {
#ifdef CONFIG_SMP
		/* Remote cpus are described via their lowcore copy. */
		if (smp_processor_id() == n)
			cpuinfo = &S390_lowcore.cpu_data;
		else
			cpuinfo = &lowcore_ptr[n]->cpu_data;
#else
		cpuinfo = &S390_lowcore.cpu_data;
#endif
		seq_printf(m, "processor %li: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   n, cpuinfo->cpu_id.version,
			   cpuinfo->cpu_id.ident,
			   cpuinfo->cpu_id.machine);
	}
	return 0;
}
675 669
676 static void *c_start(struct seq_file *m, loff_t *pos) 670 static void *c_start(struct seq_file *m, loff_t *pos)
677 { 671 {
678 return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL; 672 return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
679 } 673 }
680 static void *c_next(struct seq_file *m, void *v, loff_t *pos) 674 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
681 { 675 {
682 ++*pos; 676 ++*pos;
683 return c_start(m, pos); 677 return c_start(m, pos);
684 } 678 }
/* seq_file .stop: nothing to clean up after a /proc/cpuinfo walk. */
static void c_stop(struct seq_file *m, void *v)
{
}
/* seq_file iterator backing /proc/cpuinfo. */
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
694 688
695 689
arch/sh/kernel/process.c
1 /* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $ 1 /* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $
2 * 2 *
3 * linux/arch/sh/kernel/process.c 3 * linux/arch/sh/kernel/process.c
4 * 4 *
5 * Copyright (C) 1995 Linus Torvalds 5 * Copyright (C) 1995 Linus Torvalds
6 * 6 *
7 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima 7 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
8 */ 8 */
9 9
10 /* 10 /*
11 * This file handles the architecture-dependent parts of process handling.. 11 * This file handles the architecture-dependent parts of process handling..
12 */ 12 */
13 13
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/unistd.h> 15 #include <linux/unistd.h>
16 #include <linux/mm.h> 16 #include <linux/mm.h>
17 #include <linux/elfcore.h> 17 #include <linux/elfcore.h>
18 #include <linux/slab.h> 18 #include <linux/slab.h>
19 #include <linux/a.out.h> 19 #include <linux/a.out.h>
20 #include <linux/ptrace.h> 20 #include <linux/ptrace.h>
21 #include <linux/platform.h> 21 #include <linux/platform.h>
22 #include <linux/kallsyms.h> 22 #include <linux/kallsyms.h>
23 23
24 #include <asm/io.h> 24 #include <asm/io.h>
25 #include <asm/uaccess.h> 25 #include <asm/uaccess.h>
26 #include <asm/mmu_context.h> 26 #include <asm/mmu_context.h>
27 #include <asm/elf.h> 27 #include <asm/elf.h>
28 #if defined(CONFIG_SH_HS7751RVOIP) 28 #if defined(CONFIG_SH_HS7751RVOIP)
29 #include <asm/hs7751rvoip/hs7751rvoip.h> 29 #include <asm/hs7751rvoip/hs7751rvoip.h>
30 #elif defined(CONFIG_SH_RTS7751R2D) 30 #elif defined(CONFIG_SH_RTS7751R2D)
31 #include <asm/rts7751r2d/rts7751r2d.h> 31 #include <asm/rts7751r2d/rts7751r2d.h>
32 #endif 32 #endif
33 33
/* Non-zero while some caller has cpu sleeping disabled via
 * disable_hlt(); checked by the idle loop. */
static int hlt_counter=0;

/* Count of threads currently holding a UBC (user break controller)
 * breakpoint; decremented in exit_thread() when one is released. */
int ubc_usercnt = 0;

/* NOTE(review): not referenced by the code visible in this file. */
#define HARD_IDLE_TIMEOUT (HZ / 3)
39 39
/* Forbid the idle loop to sleep the cpu (see default_idle()). */
void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);
46 46
/* Re-allow the idle loop to sleep the cpu; pairs with disable_hlt(). */
void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);
53 53
/*
 * Idle loop: if sleeping is disabled (hlt_counter non-zero) busy-wait
 * until a reschedule is needed, otherwise sleep the cpu between
 * checks; in either case call schedule() when work arrives.
 */
void default_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		if (hlt_counter) {
			/* hlt disabled: spin until rescheduling is due */
			while (1)
				if (need_resched())
					break;
		} else {
			/* sleep the cpu until the next wakeup event */
			while (!need_resched())
				cpu_sleep();
		}

		schedule();
	}
}
70 70
/* Architecture entry point of the idle thread; never returns. */
void cpu_idle(void)
{
	default_idle();
}
75 75
/* Restart by forcing a cpu reset: block exceptions (SR.BL=1), then
 * trigger an address error, which then escalates to a manual reset. */
void machine_restart(char * __unused)
{
	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
	asm volatile("ldc %0, sr\n\t"
		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}
/*
 * Halt: boards with a software power latch (HS7751RVoIP, RTS7751R2D)
 * drop the power line first, then the cpu sleeps forever.
 */
void machine_halt(void)
{
#if defined(CONFIG_SH_HS7751RVOIP)
	unsigned short value;

	/* Clear bit 5 of the output port -- presumably the board's
	 * power latch; verify against the board documentation. */
	value = ctrl_inw(PA_OUTPORTR);
	ctrl_outw((value & 0xffdf), PA_OUTPORTR);
#elif defined(CONFIG_SH_RTS7751R2D)
	ctrl_outw(0x0001, PA_POWOFF);
#endif
	while (1)
		cpu_sleep();
}
/*
 * Power off: boards with a software power latch drop the power line;
 * on any other board this simply returns (no generic SH mechanism).
 */
void machine_power_off(void)
{
#if defined(CONFIG_SH_HS7751RVOIP)
	unsigned short value;

	/* Clear bit 5 of the output port -- presumably the board's
	 * power latch; verify against the board documentation. */
	value = ctrl_inw(PA_OUTPORTR);
	ctrl_outw((value & 0xffdf), PA_OUTPORTR);
#elif defined(CONFIG_SH_RTS7751R2D)
	ctrl_outw(0x0001, PA_POWOFF);
#endif
}
114 108
/* Dump the register state in *regs (and, for kernel-mode contexts,
 * the kernel stack) to the console. */
void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Pid : %d, Comm: %20s\n", current->pid, current->comm);
	print_symbol("PC is at %s\n", regs->pc);
	printk("PC : %08lx SP : %08lx SR : %08lx ",
	       regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
	/* TEA holds the faulting effective address, if any. */
	printk("TEA : %08x ", ctrl_inl(MMU_TEA));
#else
	printk(" ");
#endif
	printk("%s\n", print_tainted());

	printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
	       regs->regs[0],regs->regs[1],
	       regs->regs[2],regs->regs[3]);
	printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
	       regs->regs[4],regs->regs[5],
	       regs->regs[6],regs->regs[7]);
	printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n",
	       regs->regs[8],regs->regs[9],
	       regs->regs[10],regs->regs[11]);
	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
	       regs->regs[12],regs->regs[13],
	       regs->regs[14]);
	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
	       regs->mach, regs->macl, regs->gbr, regs->pr);

	/*
	 * If we're in kernel mode, dump the stack too..
	 */
	if (!user_mode(regs)) {
		extern void show_task(unsigned long *sp);
		unsigned long sp = regs->regs[15];

		show_task((unsigned long *)sp);
	}
}
154 148
155 /* 149 /*
156 * Create a kernel thread 150 * Create a kernel thread
157 */ 151 */
158 152
159 /* 153 /*
160 * This is the mechanism for creating a new kernel thread. 154 * This is the mechanism for creating a new kernel thread.
161 * 155 *
162 */ 156 */
/*
 * Assembly trampoline for new kernel threads.  kernel_thread() below
 * arranges for r4 = arg and r5 = fn; the trampoline calls fn(arg) via
 * jsr @r5, then forwards fn's return value (r0, moved to r4 in the
 * branch delay slot) to do_exit().
 */
extern void kernel_thread_helper(void);
__asm__(".align 5\n"
	"kernel_thread_helper:\n\t"
	"jsr	@r5\n\t"
	" nop\n\t"
	"mov.l	1f, r1\n\t"
	"jsr	@r1\n\t"
	" mov	r0, r4\n\t"
	".align 2\n\t"
	"1:.long do_exit");
173 167
/*
 * Create a kernel thread executing fn(arg) with the given clone flags.
 * Returns the child's pid or a negative errno from do_fork().
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{	/* Don't use this in BL=1(cli). Or else, CPU resets! */
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	/* Argument and entry point for kernel_thread_helper (r4/r5). */
	regs.regs[4] = (unsigned long) arg;
	regs.regs[5] = (unsigned long) fn;

	regs.pc = (unsigned long) kernel_thread_helper;
	/* NOTE(review): SR bit 30 -- presumably privileged-mode SR for the
	 * child; confirm against the SH SR layout. */
	regs.sr = (1 << 30);

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
188 182
189 /* 183 /*
190 * Free current thread data structures etc.. 184 * Free current thread data structures etc..
191 */ 185 */
192 void exit_thread(void) 186 void exit_thread(void)
193 { 187 {
194 if (current->thread.ubc_pc) { 188 if (current->thread.ubc_pc) {
195 current->thread.ubc_pc = 0; 189 current->thread.ubc_pc = 0;
196 ubc_usercnt -= 1; 190 ubc_usercnt -= 1;
197 } 191 }
198 } 192 }
199 193
/*
 * Reset thread state on exec: discard any lazily-held FPU contents so
 * the new program starts with clean math state.
 */
void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;
	/* Saved user-mode registers sit at the top of the kernel stack,
	 * below one trailing unsigned long. */
	struct pt_regs *regs = (struct pt_regs *)
		((unsigned long)tsk->thread_info
		 + THREAD_SIZE - sizeof(struct pt_regs)
		 - sizeof(unsigned long));

	/* Forget lazy FPU state */
	clear_fpu(tsk, regs);
	clear_used_math();
#endif
}
214 208
/*
 * Architecture hook invoked when a dead task's last reference is
 * dropped; SH keeps no extra per-thread resources, so this is a no-op.
 */
void release_thread(struct task_struct *dead_task)
{
	/* nothing to release */
}
219 213
220 /* Fill in the fpu structure for a core dump.. */ 214 /* Fill in the fpu structure for a core dump.. */
221 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) 215 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
222 { 216 {
223 int fpvalid = 0; 217 int fpvalid = 0;
224 218
225 #if defined(CONFIG_SH_FPU) 219 #if defined(CONFIG_SH_FPU)
226 struct task_struct *tsk = current; 220 struct task_struct *tsk = current;
227 221
228 fpvalid = !!tsk_used_math(tsk); 222 fpvalid = !!tsk_used_math(tsk);
229 if (fpvalid) { 223 if (fpvalid) {
230 unlazy_fpu(tsk, regs); 224 unlazy_fpu(tsk, regs);
231 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu)); 225 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
232 } 226 }
233 #endif 227 #endif
234 228
235 return fpvalid; 229 return fpvalid;
236 } 230 }
237 231
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs;

	/* The saved user registers live at the top of the kernel stack,
	 * below the optional DSP state and a trailing unsigned long. */
	ptregs = *(struct pt_regs *)
		((unsigned long)tsk->thread_info + THREAD_SIZE
		 - sizeof(struct pt_regs)
#ifdef CONFIG_SH_DSP
		 - sizeof(struct pt_dspregs)
#endif
		 - sizeof(unsigned long));
	elf_core_copy_regs(regs, &ptregs);

	return 1;	/* always succeeds */
}
256 250
/*
 * Capture a task's FP state for a core dump.  Returns 1 when the task
 * has used math and *fpu was filled in, 0 otherwise.
 */
int
dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		/* Locate the task's saved pt_regs on its kernel stack. */
		struct pt_regs *regs = (struct pt_regs *)
			((unsigned long)tsk->thread_info
			 + THREAD_SIZE - sizeof(struct pt_regs)
			 - sizeof(unsigned long));
		/* Make sure any live FPU contents reach tsk->thread.fpu. */
		unlazy_fpu(tsk, regs);
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	return fpvalid;
}
276 270
asmlinkage void ret_from_fork(void);

/*
 * Set up the child's thread state for do_fork(): copy the parent's
 * register frame onto the child's kernel stack, zero the child's
 * return value, and point its saved PC at ret_from_fork.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	/* Flush the parent's live FPU state so the copy below is current. */
	unlazy_fpu(tsk, regs);
	p->thread.fpu = tsk->thread.fpu;
	copy_to_stopped_child_used_math(p);
#endif

	/* Carve a pt_regs frame out of the top of the child's kernel stack. */
	childregs = ((struct pt_regs *)
		(THREAD_SIZE + (unsigned long) p->thread_info)
#ifdef CONFIG_SH_DSP
		- sizeof(struct pt_dspregs)
#endif
		- sizeof(unsigned long)) - 1;
	*childregs = *regs;

	if (user_mode(regs)) {
		childregs->regs[15] = usp;
	} else {
		/* Kernel thread: stack starts at the top of the stack area. */
		childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
	}
	if (clone_flags & CLONE_SETTLS) {
		/* NOTE(review): new TLS value appears to be passed in r0
		 * and installed into GBR -- confirm against the SH clone ABI. */
		childregs->gbr = childregs->regs[0];
	}
	childregs->regs[0] = 0; /* Set return value for child */

	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	p->thread.ubc_pc = 0;	/* child does not inherit UBC breakpoints */

	return 0;
}
317 311
318 /* 312 /*
319 * fill in the user structure for a core dump.. 313 * fill in the user structure for a core dump..
320 */ 314 */
321 void dump_thread(struct pt_regs * regs, struct user * dump) 315 void dump_thread(struct pt_regs * regs, struct user * dump)
322 { 316 {
323 dump->magic = CMAGIC; 317 dump->magic = CMAGIC;
324 dump->start_code = current->mm->start_code; 318 dump->start_code = current->mm->start_code;
325 dump->start_data = current->mm->start_data; 319 dump->start_data = current->mm->start_data;
326 dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1); 320 dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1);
327 dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT; 321 dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT;
328 dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT; 322 dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT;
329 dump->u_ssize = (current->mm->start_stack - dump->start_stack + 323 dump->u_ssize = (current->mm->start_stack - dump->start_stack +
330 PAGE_SIZE - 1) >> PAGE_SHIFT; 324 PAGE_SIZE - 1) >> PAGE_SHIFT;
331 /* Debug registers will come here. */ 325 /* Debug registers will come here. */
332 326
333 dump->regs = *regs; 327 dump->regs = *regs;
334 328
335 dump->u_fpvalid = dump_fpu(regs, &dump->fpu); 329 dump->u_fpvalid = dump_fpu(regs, &dump->fpu);
336 } 330 }
337 331
/* Tracing by user break controller. */
/*
 * Program UBC channel A to break on an instruction fetch at "pc" in
 * address space "asid".
 */
static void
ubc_set_tracing(int asid, unsigned long pc)
{
	ctrl_outl(pc, UBC_BARA);

	/* We don't have any ASID settings for the SH-2! */
	if (cpu_data->type != CPU_SH7604)
		ctrl_outb(asid, UBC_BASRA);

	/* NOTE(review): BAMRA = 0 -- presumably no address masking (exact
	 * match); confirm against the UBC hardware manual. */
	ctrl_outl(0, UBC_BAMRA);

	if (cpu_data->type == CPU_SH7729) {
		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
	} else {
		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
		ctrl_outw(BRCR_PCBA, UBC_BRCR);
	}
}
358 352
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * Saves the outgoing task's lazy FPU state, handles the preempt
 * critical-region rewind, installs the incoming task's kernel-mode
 * stack register, and re-arms/clears UBC breakpoints as needed.
 */
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
	/* Save prev's FPU state before it loses the CPU. */
	struct pt_regs *regs = (struct pt_regs *)
		((unsigned long)prev->thread_info
		 + THREAD_SIZE - sizeof(struct pt_regs)
		 - sizeof(unsigned long));
	unlazy_fpu(prev, regs);
#endif

#ifdef CONFIG_PREEMPT
	{
		unsigned long flags;
		struct pt_regs *regs;

		local_irq_save(flags);
		regs = (struct pt_regs *)
			((unsigned long)prev->thread_info
			 + THREAD_SIZE - sizeof(struct pt_regs)
#ifdef CONFIG_SH_DSP
			 - sizeof(struct pt_dspregs)
#endif
			 - sizeof(unsigned long));
		/* NOTE(review): regs[15] >= 0xc0000000 with user_mode()
		 * appears to mark a userspace critical region; regs[0]/[1]
		 * hold the rewind PC and real SP -- confirm against the SH
		 * gUSA/atomic-sequence convention. */
		if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
			int offset = (int)regs->regs[15];

			/* Reset stack pointer: clear critical region mark */
			regs->regs[15] = regs->regs[1];
			if (regs->pc < regs->regs[0])
				/* Go to rewind point */
				regs->pc = regs->regs[0] + offset;
		}
		local_irq_restore(flags);
	}
#endif

	/*
	 * Restore the kernel mode register
	 *	k7 (r7_bank1)
	 */
	asm volatile("ldc	%0, r7_bank"
		     : /* no output */
		     : "r" (next->thread_info));

#ifdef CONFIG_MMU
	/* If no tasks are using the UBC, we're done */
	if (ubc_usercnt == 0)
		/* If no tasks are using the UBC, we're done */;
	else if (next->thread.ubc_pc && next->mm) {
		/* Re-arm the incoming task's UBC breakpoint. */
		ubc_set_tracing(next->mm->context & MMU_CONTEXT_ASID_MASK,
				next->thread.ubc_pc);
	} else {
		/* Otherwise make sure no stale breakpoint stays armed. */
		ctrl_outw(0, UBC_BBRA);
		ctrl_outw(0, UBC_BBRB);
	}
#endif

	return prev;
}
422 416
/*
 * fork() system call: the child keeps the parent's user stack pointer
 * (regs[15]).  Not supported on nommu configurations.
 */
asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs regs)
{
#ifdef CONFIG_MMU
	return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
#else
	/* fork almost works, enough to trick you into looking elsewhere :-( */
	return -EINVAL;
#endif
}
434 428
435 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, 429 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
436 unsigned long parent_tidptr, 430 unsigned long parent_tidptr,
437 unsigned long child_tidptr, 431 unsigned long child_tidptr,
438 struct pt_regs regs) 432 struct pt_regs regs)
439 { 433 {
440 if (!newsp) 434 if (!newsp)
441 newsp = regs.regs[15]; 435 newsp = regs.regs[15];
442 return do_fork(clone_flags, newsp, &regs, 0, 436 return do_fork(clone_flags, newsp, &regs, 0,
443 (int __user *)parent_tidptr, (int __user *)child_tidptr); 437 (int __user *)parent_tidptr, (int __user *)child_tidptr);
444 } 438 }
445 439
446 /* 440 /*
447 * This is trivial, and on the face of it looks like it 441 * This is trivial, and on the face of it looks like it
448 * could equally well be done in user mode. 442 * could equally well be done in user mode.
449 * 443 *
450 * Not so, for quite unobvious reasons - register pressure. 444 * Not so, for quite unobvious reasons - register pressure.
451 * In user mode vfork() cannot have a stack frame, and if 445 * In user mode vfork() cannot have a stack frame, and if
452 * done by calling the "clone()" system call directly, you 446 * done by calling the "clone()" system call directly, you
453 * do not have enough call-clobbered registers to hold all 447 * do not have enough call-clobbered registers to hold all
454 * the information you need. 448 * the information you need.
455 */ 449 */
456 asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, 450 asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
457 unsigned long r6, unsigned long r7, 451 unsigned long r6, unsigned long r7,
458 struct pt_regs regs) 452 struct pt_regs regs)
459 { 453 {
460 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs, 454 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
461 0, NULL, NULL); 455 0, NULL, NULL);
462 } 456 }
463 457
464 /* 458 /*
465 * sys_execve() executes a new program. 459 * sys_execve() executes a new program.
466 */ 460 */
467 asmlinkage int sys_execve(char *ufilename, char **uargv, 461 asmlinkage int sys_execve(char *ufilename, char **uargv,
468 char **uenvp, unsigned long r7, 462 char **uenvp, unsigned long r7,
469 struct pt_regs regs) 463 struct pt_regs regs)
470 { 464 {
471 int error; 465 int error;
472 char *filename; 466 char *filename;
473 467
474 filename = getname((char __user *)ufilename); 468 filename = getname((char __user *)ufilename);
475 error = PTR_ERR(filename); 469 error = PTR_ERR(filename);
476 if (IS_ERR(filename)) 470 if (IS_ERR(filename))
477 goto out; 471 goto out;
478 472
479 error = do_execve(filename, 473 error = do_execve(filename,
480 (char __user * __user *)uargv, 474 (char __user * __user *)uargv,
481 (char __user * __user *)uenvp, 475 (char __user * __user *)uenvp,
482 &regs); 476 &regs);
483 if (error == 0) { 477 if (error == 0) {
484 task_lock(current); 478 task_lock(current);
485 current->ptrace &= ~PT_DTRACE; 479 current->ptrace &= ~PT_DTRACE;
486 task_unlock(current); 480 task_unlock(current);
487 } 481 }
488 putname(filename); 482 putname(filename);
489 out: 483 out:
490 return error; 484 return error;
491 } 485 }
492 486
/*
 * Return the PC a sleeping task is blocked at (for /proc wchan);
 * 0 for the current or a runnable task.  If the saved PC is inside
 * the scheduler, walk up one frame to report the caller instead.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long schedule_frame;
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(p);
	if (in_sched_functions(pc)) {
		/* NOTE(review): second word of the saved frame is taken as
		 * the caller's frame, and its second word as the caller's
		 * PC -- confirm against the SH stack-frame layout. */
		schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
		return (unsigned long)((unsigned long *)schedule_frame)[1];
	}
	return pc;
}
511 505
/*
 * UBC hardware breakpoint trap handler: disarm both break channels,
 * drop this task's UBC usage, and deliver SIGTRAP.
 */
asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs regs)
{
	/* Clear tracing. */
	ctrl_outw(0, UBC_BBRA);
	ctrl_outw(0, UBC_BBRB);
	current->thread.ubc_pc = 0;
	ubc_usercnt -= 1;

	force_sig(SIGTRAP, current);
}
524 518
/*
 * Software breakpoint trap handler: rewind the PC over the 2-byte
 * trap instruction, then deliver SIGTRAP.
 */
asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
					  unsigned long r6, unsigned long r7,
					  struct pt_regs regs)
{
	regs.pc -= 2;
	force_sig(SIGTRAP, current);
}
532 526
arch/sparc/kernel/process.c
1 /* $Id: process.c,v 1.161 2002/01/23 11:27:32 davem Exp $ 1 /* $Id: process.c,v 1.161 2002/01/23 11:27:32 davem Exp $
2 * linux/arch/sparc/kernel/process.c 2 * linux/arch/sparc/kernel/process.c
3 * 3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) 5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 */ 6 */
7 7
8 /* 8 /*
9 * This file handles the architecture-dependent parts of process handling.. 9 * This file handles the architecture-dependent parts of process handling..
10 */ 10 */
11 11
12 #include <stdarg.h> 12 #include <stdarg.h>
13 13
14 #include <linux/errno.h> 14 #include <linux/errno.h>
15 #include <linux/module.h> 15 #include <linux/module.h>
16 #include <linux/sched.h> 16 #include <linux/sched.h>
17 #include <linux/kernel.h> 17 #include <linux/kernel.h>
18 #include <linux/kallsyms.h> 18 #include <linux/kallsyms.h>
19 #include <linux/mm.h> 19 #include <linux/mm.h>
20 #include <linux/stddef.h> 20 #include <linux/stddef.h>
21 #include <linux/ptrace.h> 21 #include <linux/ptrace.h>
22 #include <linux/slab.h> 22 #include <linux/slab.h>
23 #include <linux/user.h> 23 #include <linux/user.h>
24 #include <linux/a.out.h> 24 #include <linux/a.out.h>
25 #include <linux/config.h> 25 #include <linux/config.h>
26 #include <linux/smp.h> 26 #include <linux/smp.h>
27 #include <linux/smp_lock.h> 27 #include <linux/smp_lock.h>
28 #include <linux/reboot.h> 28 #include <linux/reboot.h>
29 #include <linux/delay.h> 29 #include <linux/delay.h>
30 #include <linux/pm.h> 30 #include <linux/pm.h>
31 #include <linux/init.h> 31 #include <linux/init.h>
32 32
33 #include <asm/auxio.h> 33 #include <asm/auxio.h>
34 #include <asm/oplib.h> 34 #include <asm/oplib.h>
35 #include <asm/uaccess.h> 35 #include <asm/uaccess.h>
36 #include <asm/system.h> 36 #include <asm/system.h>
37 #include <asm/page.h> 37 #include <asm/page.h>
38 #include <asm/pgalloc.h> 38 #include <asm/pgalloc.h>
39 #include <asm/pgtable.h> 39 #include <asm/pgtable.h>
40 #include <asm/delay.h> 40 #include <asm/delay.h>
41 #include <asm/processor.h> 41 #include <asm/processor.h>
42 #include <asm/psr.h> 42 #include <asm/psr.h>
43 #include <asm/elf.h> 43 #include <asm/elf.h>
44 #include <asm/unistd.h> 44 #include <asm/unistd.h>
45 45
/*
 * Power management idle function
 * Set in pm platform drivers (apc.c and pmc.c)
 */
void (*pm_idle)(void);

/*
 * Power-off handler instantiation for pm.h compliance
 * This is done via auxio, but could be used as a fallback
 * handler when auxio is not present-- unused for now...
 */
void (*pm_power_off)(void);

/*
 * sysctl - toggle power-off restriction for serial console
 * systems in machine_power_off()
 */
int scons_pwroff = 1;

/* Assembly routine that saves the current FP register state. */
extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);

/* Task that last used the FPU -- presumably for lazy FPU context
 * switching; confirm against the FPU trap handlers. */
struct task_struct *last_task_used_math = NULL;
/* Per-CPU pointer to the currently running thread's thread_info. */
struct thread_info *current_set[NR_CPUS];
69 69
/*
 * default_idle is new in 2.5. XXX Review, currently stolen from sparc64.
 * Intentionally empty: the idle loop just spins when no pm_idle hook
 * is installed.
 */
void default_idle(void)
{
}
76 76
#ifndef CONFIG_SMP

/* Kernel fault rate (faults/second) above which the sun4c kernel
 * mapping ring is grown. */
#define SUN4C_FAULT_HIGH 100

/*
 * the idle loop on a Sparc... ;)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	for (;;) {
		if (ARCH_SUN4C_SUN4) {
			static int count = HZ;
			static unsigned long last_jiffies;
			static unsigned long last_faults;
			static unsigned long fps;
			unsigned long now;
			unsigned long faults;
			unsigned long flags;

			extern unsigned long sun4c_kernel_faults;
			extern void sun4c_grow_kernel_ring(void);

			local_irq_save(flags);
			now = jiffies;
			count -= (now - last_jiffies);
			last_jiffies = now;
			if (count < 0) {
				/* Roughly once per second: update a decaying
				 * average of the kernel fault rate and grow
				 * the sun4c kernel ring when it runs high. */
				count += HZ;
				faults = sun4c_kernel_faults;
				fps = (fps + (faults - last_faults)) >> 1;
				last_faults = faults;
#if 0
				printk("kernel faults / second = %ld\n", fps);
#endif
				if (fps >= SUN4C_FAULT_HIGH) {
					sun4c_grow_kernel_ring();
				}
			}
			local_irq_restore(flags);
		}

		/* Spin in the PM idle hook (if installed) until there is
		 * work to do. */
		while((!need_resched()) && pm_idle) {
			(*pm_idle)();
		}

		schedule();
		check_pgt_cache();
	}
}

#else

/* This is being executed in task 0 'user space'. */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while(1) {
		if(need_resched()) {
			schedule();
			check_pgt_cache();
		}
		barrier(); /* or else gcc optimizes... */
	}
}

#endif
144 144
145 extern char reboot_command []; 145 extern char reboot_command [];
146 146
147 extern void (*prom_palette)(int); 147 extern void (*prom_palette)(int);
148 148
149 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ 149 /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
150 void machine_halt(void) 150 void machine_halt(void)
151 { 151 {
152 local_irq_enable(); 152 local_irq_enable();
153 mdelay(8); 153 mdelay(8);
154 local_irq_disable(); 154 local_irq_disable();
155 if (!serial_console && prom_palette) 155 if (!serial_console && prom_palette)
156 prom_palette (1); 156 prom_palette (1);
157 prom_halt(); 157 prom_halt();
158 panic("Halt failed!"); 158 panic("Halt failed!");
159 } 159 }
160 160
161 EXPORT_SYMBOL(machine_halt);
162
163 void machine_restart(char * cmd) 161 void machine_restart(char * cmd)
164 { 162 {
165 char *p; 163 char *p;
166 164
167 local_irq_enable(); 165 local_irq_enable();
168 mdelay(8); 166 mdelay(8);
169 local_irq_disable(); 167 local_irq_disable();
170 168
171 p = strchr (reboot_command, '\n'); 169 p = strchr (reboot_command, '\n');
172 if (p) *p = 0; 170 if (p) *p = 0;
173 if (!serial_console && prom_palette) 171 if (!serial_console && prom_palette)
174 prom_palette (1); 172 prom_palette (1);
175 if (cmd) 173 if (cmd)
176 prom_reboot(cmd); 174 prom_reboot(cmd);
177 if (*reboot_command) 175 if (*reboot_command)
178 prom_reboot(reboot_command); 176 prom_reboot(reboot_command);
179 prom_feval ("reset"); 177 prom_feval ("reset");
180 panic("Reboot failed!"); 178 panic("Reboot failed!");
181 } 179 }
182 180
183 EXPORT_SYMBOL(machine_restart);
184
/*
 * Power the machine off through the auxio power register when present
 * (and allowed on serial consoles via the scons_pwroff sysctl), then
 * fall back to halting in the PROM.
 *
 * Deliberately NOT exported: modules must use kernel_power_off(), not
 * this machine-specific hook.
 */
void machine_power_off(void)
{
#ifdef CONFIG_SUN_AUXIO
	if (auxio_power_register && (!serial_console || scons_pwroff))
		*auxio_power_register |= AUXIO_POWER_OFF;
#endif
	machine_halt();
}
/* Serializes backtrace output between CPUs. */
static DEFINE_SPINLOCK(sparc_backtrace_lock);

/*
 * Print a backtrace by walking the chain of register windows starting
 * at frame pointer "fp".  The walk stops at a NULL, below-PAGE_OFFSET
 * (user-space), or misaligned frame.  ins[6] is the saved frame
 * pointer and ins[7] the caller's PC (see the FP/CALLER labels).
 */
void __show_backtrace(unsigned long fp)
{
	struct reg_window *rw;
	unsigned long flags;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&sparc_backtrace_lock, flags);

	rw = (struct reg_window *)fp;
	while(rw && (((unsigned long) rw) >= PAGE_OFFSET) &&
	    !(((unsigned long) rw) & 0x7)) {
		printk("CPU[%d]: ARGS[%08lx,%08lx,%08lx,%08lx,%08lx,%08lx] "
		       "FP[%08lx] CALLER[%08lx]: ", cpu,
		       rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
		       rw->ins[4], rw->ins[5],
		       rw->ins[6],
		       rw->ins[7]);
		print_symbol("%s\n", rw->ins[7]);
		rw = (struct reg_window *) rw->ins[6];
	}
	spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
}
220 214
/* NOTE(review): the save/restore pairs below presumably force the
 * CPU's live register windows out to the stack so __show_backtrace()
 * can read them through memory -- confirm against the SPARC window
 * overflow semantics. */
#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
#define __GET_FP(fp) __asm__ __volatile__("mov %%i6, %0" : "=r" (fp))

/* Print a backtrace of the current call chain. */
void show_backtrace(void)
{
	unsigned long fp;

	__SAVE; __SAVE; __SAVE; __SAVE;
	__SAVE; __SAVE; __SAVE; __SAVE;
	__RESTORE; __RESTORE; __RESTORE; __RESTORE;
	__RESTORE; __RESTORE; __RESTORE; __RESTORE;

	/* %i6 is the frame pointer -- start the walk from our own frame. */
	__GET_FP(fp);

	__show_backtrace(fp);
}
238 232
#ifdef CONFIG_SMP
/* Run show_backtrace() on the other CPUs via cross-call (xc0 --
 * presumably a zero-argument broadcast; confirm), then locally. */
void smp_show_backtrace_all_cpus(void)
{
	xc0((smpfunc_t) show_backtrace);
	show_backtrace();
}
#endif
246 240
#if 0
/* Debug helper, currently compiled out: dump one sparc stack frame's
 * locals, ins, outgoing-argument area, and then every raw word between
 * the fixed frame header and the next frame. */
void show_stackframe(struct sparc_stackf *sf)
{
	unsigned long size;
	unsigned long *stk;
	int i;

	printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx "
	       "l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n",
	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
	printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx "
	       "i4: %08lx i5: %08lx fp: %08lx i7: %08lx\n",
	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
	       sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
	printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx "
	       "x3: %08lx x4: %08lx x5: %08lx xx: %08lx\n",
	       (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
	       sf->xxargs[0]);
	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
	size -= STACKFRAME_SZ;
	stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
	i = 0;
	do {
		printk("s%d: %08lx\n", i++, *stk++);
	} while ((size -= sizeof(unsigned long)));
}
#endif
276 270
277 void show_regs(struct pt_regs *r) 271 void show_regs(struct pt_regs *r)
278 { 272 {
279 struct reg_window *rw = (struct reg_window *) r->u_regs[14]; 273 struct reg_window *rw = (struct reg_window *) r->u_regs[14];
280 274
281 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", 275 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
282 r->psr, r->pc, r->npc, r->y, print_tainted()); 276 r->psr, r->pc, r->npc, r->y, print_tainted());
283 print_symbol("PC: <%s>\n", r->pc); 277 print_symbol("PC: <%s>\n", r->pc);
284 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 278 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
285 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3], 279 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
286 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]); 280 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
287 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 281 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
288 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11], 282 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
289 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]); 283 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
290 print_symbol("RPC: <%s>\n", r->u_regs[15]); 284 print_symbol("RPC: <%s>\n", r->u_regs[15]);
291 285
292 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 286 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
293 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3], 287 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
294 rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]); 288 rw->locals[4], rw->locals[5], rw->locals[6], rw->locals[7]);
295 printk("%%I: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", 289 printk("%%I: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
296 rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3], 290 rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3],
297 rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]); 291 rw->ins[4], rw->ins[5], rw->ins[6], rw->ins[7]);
298 } 292 }
299 293
300 /* 294 /*
301 * The show_stack is an external API which we do not use ourselves. 295 * The show_stack is an external API which we do not use ourselves.
302 * The oops is printed in die_if_kernel. 296 * The oops is printed in die_if_kernel.
303 */ 297 */
304 void show_stack(struct task_struct *tsk, unsigned long *_ksp) 298 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
305 { 299 {
306 unsigned long pc, fp; 300 unsigned long pc, fp;
307 unsigned long task_base; 301 unsigned long task_base;
308 struct reg_window *rw; 302 struct reg_window *rw;
309 int count = 0; 303 int count = 0;
310 304
311 if (tsk != NULL) 305 if (tsk != NULL)
312 task_base = (unsigned long) tsk->thread_info; 306 task_base = (unsigned long) tsk->thread_info;
313 else 307 else
314 task_base = (unsigned long) current_thread_info(); 308 task_base = (unsigned long) current_thread_info();
315 309
316 fp = (unsigned long) _ksp; 310 fp = (unsigned long) _ksp;
317 do { 311 do {
318 /* Bogus frame pointer? */ 312 /* Bogus frame pointer? */
319 if (fp < (task_base + sizeof(struct thread_info)) || 313 if (fp < (task_base + sizeof(struct thread_info)) ||
320 fp >= (task_base + (PAGE_SIZE << 1))) 314 fp >= (task_base + (PAGE_SIZE << 1)))
321 break; 315 break;
322 rw = (struct reg_window *) fp; 316 rw = (struct reg_window *) fp;
323 pc = rw->ins[7]; 317 pc = rw->ins[7];
324 printk("[%08lx : ", pc); 318 printk("[%08lx : ", pc);
325 print_symbol("%s ] ", pc); 319 print_symbol("%s ] ", pc);
326 fp = rw->ins[6]; 320 fp = rw->ins[6];
327 } while (++count < 16); 321 } while (++count < 16);
328 printk("\n"); 322 printk("\n");
329 } 323 }
330 324
331 void dump_stack(void) 325 void dump_stack(void)
332 { 326 {
333 unsigned long *ksp; 327 unsigned long *ksp;
334 328
335 __asm__ __volatile__("mov %%fp, %0" 329 __asm__ __volatile__("mov %%fp, %0"
336 : "=r" (ksp)); 330 : "=r" (ksp));
337 show_stack(current, ksp); 331 show_stack(current, ksp);
338 } 332 }
339 333
340 EXPORT_SYMBOL(dump_stack); 334 EXPORT_SYMBOL(dump_stack);
341 335
342 /* 336 /*
343 * Note: sparc64 has a pretty intricated thread_saved_pc, check it out. 337 * Note: sparc64 has a pretty intricated thread_saved_pc, check it out.
344 */ 338 */
345 unsigned long thread_saved_pc(struct task_struct *tsk) 339 unsigned long thread_saved_pc(struct task_struct *tsk)
346 { 340 {
347 return tsk->thread_info->kpc; 341 return tsk->thread_info->kpc;
348 } 342 }
349 343
350 /* 344 /*
351 * Free current thread data structures etc.. 345 * Free current thread data structures etc..
352 */ 346 */
353 void exit_thread(void) 347 void exit_thread(void)
354 { 348 {
355 #ifndef CONFIG_SMP 349 #ifndef CONFIG_SMP
356 if(last_task_used_math == current) { 350 if(last_task_used_math == current) {
357 #else 351 #else
358 if(current_thread_info()->flags & _TIF_USEDFPU) { 352 if(current_thread_info()->flags & _TIF_USEDFPU) {
359 #endif 353 #endif
360 /* Keep process from leaving FPU in a bogon state. */ 354 /* Keep process from leaving FPU in a bogon state. */
361 put_psr(get_psr() | PSR_EF); 355 put_psr(get_psr() | PSR_EF);
362 fpsave(&current->thread.float_regs[0], &current->thread.fsr, 356 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
363 &current->thread.fpqueue[0], &current->thread.fpqdepth); 357 &current->thread.fpqueue[0], &current->thread.fpqdepth);
364 #ifndef CONFIG_SMP 358 #ifndef CONFIG_SMP
365 last_task_used_math = NULL; 359 last_task_used_math = NULL;
366 #else 360 #else
367 current_thread_info()->flags &= ~_TIF_USEDFPU; 361 current_thread_info()->flags &= ~_TIF_USEDFPU;
368 #endif 362 #endif
369 } 363 }
370 } 364 }
371 365
372 void flush_thread(void) 366 void flush_thread(void)
373 { 367 {
374 current_thread_info()->w_saved = 0; 368 current_thread_info()->w_saved = 0;
375 369
376 /* No new signal delivery by default */ 370 /* No new signal delivery by default */
377 current->thread.new_signal = 0; 371 current->thread.new_signal = 0;
378 #ifndef CONFIG_SMP 372 #ifndef CONFIG_SMP
379 if(last_task_used_math == current) { 373 if(last_task_used_math == current) {
380 #else 374 #else
381 if(current_thread_info()->flags & _TIF_USEDFPU) { 375 if(current_thread_info()->flags & _TIF_USEDFPU) {
382 #endif 376 #endif
383 /* Clean the fpu. */ 377 /* Clean the fpu. */
384 put_psr(get_psr() | PSR_EF); 378 put_psr(get_psr() | PSR_EF);
385 fpsave(&current->thread.float_regs[0], &current->thread.fsr, 379 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
386 &current->thread.fpqueue[0], &current->thread.fpqdepth); 380 &current->thread.fpqueue[0], &current->thread.fpqdepth);
387 #ifndef CONFIG_SMP 381 #ifndef CONFIG_SMP
388 last_task_used_math = NULL; 382 last_task_used_math = NULL;
389 #else 383 #else
390 current_thread_info()->flags &= ~_TIF_USEDFPU; 384 current_thread_info()->flags &= ~_TIF_USEDFPU;
391 #endif 385 #endif
392 } 386 }
393 387
394 /* Now, this task is no longer a kernel thread. */ 388 /* Now, this task is no longer a kernel thread. */
395 current->thread.current_ds = USER_DS; 389 current->thread.current_ds = USER_DS;
396 if (current->thread.flags & SPARC_FLAG_KTHREAD) { 390 if (current->thread.flags & SPARC_FLAG_KTHREAD) {
397 current->thread.flags &= ~SPARC_FLAG_KTHREAD; 391 current->thread.flags &= ~SPARC_FLAG_KTHREAD;
398 392
399 /* We must fixup kregs as well. */ 393 /* We must fixup kregs as well. */
400 /* XXX This was not fixed for ti for a while, worked. Unused? */ 394 /* XXX This was not fixed for ti for a while, worked. Unused? */
401 current->thread.kregs = (struct pt_regs *) 395 current->thread.kregs = (struct pt_regs *)
402 ((char *)current->thread_info + (THREAD_SIZE - TRACEREG_SZ)); 396 ((char *)current->thread_info + (THREAD_SIZE - TRACEREG_SZ));
403 } 397 }
404 } 398 }
405 399
406 static __inline__ struct sparc_stackf __user * 400 static __inline__ struct sparc_stackf __user *
407 clone_stackframe(struct sparc_stackf __user *dst, 401 clone_stackframe(struct sparc_stackf __user *dst,
408 struct sparc_stackf __user *src) 402 struct sparc_stackf __user *src)
409 { 403 {
410 unsigned long size, fp; 404 unsigned long size, fp;
411 struct sparc_stackf *tmp; 405 struct sparc_stackf *tmp;
412 struct sparc_stackf __user *sp; 406 struct sparc_stackf __user *sp;
413 407
414 if (get_user(tmp, &src->fp)) 408 if (get_user(tmp, &src->fp))
415 return NULL; 409 return NULL;
416 410
417 fp = (unsigned long) tmp; 411 fp = (unsigned long) tmp;
418 size = (fp - ((unsigned long) src)); 412 size = (fp - ((unsigned long) src));
419 fp = (unsigned long) dst; 413 fp = (unsigned long) dst;
420 sp = (struct sparc_stackf __user *)(fp - size); 414 sp = (struct sparc_stackf __user *)(fp - size);
421 415
422 /* do_fork() grabs the parent semaphore, we must release it 416 /* do_fork() grabs the parent semaphore, we must release it
423 * temporarily so we can build the child clone stack frame 417 * temporarily so we can build the child clone stack frame
424 * without deadlocking. 418 * without deadlocking.
425 */ 419 */
426 if (__copy_user(sp, src, size)) 420 if (__copy_user(sp, src, size))
427 sp = NULL; 421 sp = NULL;
428 else if (put_user(fp, &sp->fp)) 422 else if (put_user(fp, &sp->fp))
429 sp = NULL; 423 sp = NULL;
430 424
431 return sp; 425 return sp;
432 } 426 }
433 427
434 asmlinkage int sparc_do_fork(unsigned long clone_flags, 428 asmlinkage int sparc_do_fork(unsigned long clone_flags,
435 unsigned long stack_start, 429 unsigned long stack_start,
436 struct pt_regs *regs, 430 struct pt_regs *regs,
437 unsigned long stack_size) 431 unsigned long stack_size)
438 { 432 {
439 unsigned long parent_tid_ptr, child_tid_ptr; 433 unsigned long parent_tid_ptr, child_tid_ptr;
440 434
441 parent_tid_ptr = regs->u_regs[UREG_I2]; 435 parent_tid_ptr = regs->u_regs[UREG_I2];
442 child_tid_ptr = regs->u_regs[UREG_I4]; 436 child_tid_ptr = regs->u_regs[UREG_I4];
443 437
444 return do_fork(clone_flags, stack_start, 438 return do_fork(clone_flags, stack_start,
445 regs, stack_size, 439 regs, stack_size,
446 (int __user *) parent_tid_ptr, 440 (int __user *) parent_tid_ptr,
447 (int __user *) child_tid_ptr); 441 (int __user *) child_tid_ptr);
448 } 442 }
449 443
450 /* Copy a Sparc thread. The fork() return value conventions 444 /* Copy a Sparc thread. The fork() return value conventions
451 * under SunOS are nothing short of bletcherous: 445 * under SunOS are nothing short of bletcherous:
452 * Parent --> %o0 == childs pid, %o1 == 0 446 * Parent --> %o0 == childs pid, %o1 == 0
453 * Child --> %o0 == parents pid, %o1 == 1 447 * Child --> %o0 == parents pid, %o1 == 1
454 * 448 *
455 * NOTE: We have a separate fork kpsr/kwim because 449 * NOTE: We have a separate fork kpsr/kwim because
456 * the parent could change these values between 450 * the parent could change these values between
457 * sys_fork invocation and when we reach here 451 * sys_fork invocation and when we reach here
458 * if the parent should sleep while trying to 452 * if the parent should sleep while trying to
459 * allocate the task_struct and kernel stack in 453 * allocate the task_struct and kernel stack in
460 * do_fork(). 454 * do_fork().
461 * XXX See comment above sys_vfork in sparc64. todo. 455 * XXX See comment above sys_vfork in sparc64. todo.
462 */ 456 */
463 extern void ret_from_fork(void); 457 extern void ret_from_fork(void);
464 458
465 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, 459 int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
466 unsigned long unused, 460 unsigned long unused,
467 struct task_struct *p, struct pt_regs *regs) 461 struct task_struct *p, struct pt_regs *regs)
468 { 462 {
469 struct thread_info *ti = p->thread_info; 463 struct thread_info *ti = p->thread_info;
470 struct pt_regs *childregs; 464 struct pt_regs *childregs;
471 char *new_stack; 465 char *new_stack;
472 466
473 #ifndef CONFIG_SMP 467 #ifndef CONFIG_SMP
474 if(last_task_used_math == current) { 468 if(last_task_used_math == current) {
475 #else 469 #else
476 if(current_thread_info()->flags & _TIF_USEDFPU) { 470 if(current_thread_info()->flags & _TIF_USEDFPU) {
477 #endif 471 #endif
478 put_psr(get_psr() | PSR_EF); 472 put_psr(get_psr() | PSR_EF);
479 fpsave(&p->thread.float_regs[0], &p->thread.fsr, 473 fpsave(&p->thread.float_regs[0], &p->thread.fsr,
480 &p->thread.fpqueue[0], &p->thread.fpqdepth); 474 &p->thread.fpqueue[0], &p->thread.fpqdepth);
481 #ifdef CONFIG_SMP 475 #ifdef CONFIG_SMP
482 current_thread_info()->flags &= ~_TIF_USEDFPU; 476 current_thread_info()->flags &= ~_TIF_USEDFPU;
483 #endif 477 #endif
484 } 478 }
485 479
486 /* 480 /*
487 * p->thread_info new_stack childregs 481 * p->thread_info new_stack childregs
488 * ! ! ! {if(PSR_PS) } 482 * ! ! ! {if(PSR_PS) }
489 * V V (stk.fr.) V (pt_regs) { (stk.fr.) } 483 * V V (stk.fr.) V (pt_regs) { (stk.fr.) }
490 * +----- - - - - - ------+===========+============={+==========}+ 484 * +----- - - - - - ------+===========+============={+==========}+
491 */ 485 */
492 new_stack = (char*)ti + THREAD_SIZE; 486 new_stack = (char*)ti + THREAD_SIZE;
493 if (regs->psr & PSR_PS) 487 if (regs->psr & PSR_PS)
494 new_stack -= STACKFRAME_SZ; 488 new_stack -= STACKFRAME_SZ;
495 new_stack -= STACKFRAME_SZ + TRACEREG_SZ; 489 new_stack -= STACKFRAME_SZ + TRACEREG_SZ;
496 memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ); 490 memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
497 childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ); 491 childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ);
498 492
499 /* 493 /*
500 * A new process must start with interrupts closed in 2.5, 494 * A new process must start with interrupts closed in 2.5,
501 * because this is how Mingo's scheduler works (see schedule_tail 495 * because this is how Mingo's scheduler works (see schedule_tail
502 * and finish_arch_switch). If we do not do it, a timer interrupt hits 496 * and finish_arch_switch). If we do not do it, a timer interrupt hits
503 * before we unlock, attempts to re-take the rq->lock, and then we die. 497 * before we unlock, attempts to re-take the rq->lock, and then we die.
504 * Thus, kpsr|=PSR_PIL. 498 * Thus, kpsr|=PSR_PIL.
505 */ 499 */
506 ti->ksp = (unsigned long) new_stack; 500 ti->ksp = (unsigned long) new_stack;
507 ti->kpc = (((unsigned long) ret_from_fork) - 0x8); 501 ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
508 ti->kpsr = current->thread.fork_kpsr | PSR_PIL; 502 ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
509 ti->kwim = current->thread.fork_kwim; 503 ti->kwim = current->thread.fork_kwim;
510 504
511 if(regs->psr & PSR_PS) { 505 if(regs->psr & PSR_PS) {
512 extern struct pt_regs fake_swapper_regs; 506 extern struct pt_regs fake_swapper_regs;
513 507
514 p->thread.kregs = &fake_swapper_regs; 508 p->thread.kregs = &fake_swapper_regs;
515 new_stack += STACKFRAME_SZ + TRACEREG_SZ; 509 new_stack += STACKFRAME_SZ + TRACEREG_SZ;
516 childregs->u_regs[UREG_FP] = (unsigned long) new_stack; 510 childregs->u_regs[UREG_FP] = (unsigned long) new_stack;
517 p->thread.flags |= SPARC_FLAG_KTHREAD; 511 p->thread.flags |= SPARC_FLAG_KTHREAD;
518 p->thread.current_ds = KERNEL_DS; 512 p->thread.current_ds = KERNEL_DS;
519 memcpy(new_stack, (void *)regs->u_regs[UREG_FP], STACKFRAME_SZ); 513 memcpy(new_stack, (void *)regs->u_regs[UREG_FP], STACKFRAME_SZ);
520 childregs->u_regs[UREG_G6] = (unsigned long) ti; 514 childregs->u_regs[UREG_G6] = (unsigned long) ti;
521 } else { 515 } else {
522 p->thread.kregs = childregs; 516 p->thread.kregs = childregs;
523 childregs->u_regs[UREG_FP] = sp; 517 childregs->u_regs[UREG_FP] = sp;
524 p->thread.flags &= ~SPARC_FLAG_KTHREAD; 518 p->thread.flags &= ~SPARC_FLAG_KTHREAD;
525 p->thread.current_ds = USER_DS; 519 p->thread.current_ds = USER_DS;
526 520
527 if (sp != regs->u_regs[UREG_FP]) { 521 if (sp != regs->u_regs[UREG_FP]) {
528 struct sparc_stackf __user *childstack; 522 struct sparc_stackf __user *childstack;
529 struct sparc_stackf __user *parentstack; 523 struct sparc_stackf __user *parentstack;
530 524
531 /* 525 /*
532 * This is a clone() call with supplied user stack. 526 * This is a clone() call with supplied user stack.
533 * Set some valid stack frames to give to the child. 527 * Set some valid stack frames to give to the child.
534 */ 528 */
535 childstack = (struct sparc_stackf __user *) 529 childstack = (struct sparc_stackf __user *)
536 (sp & ~0x7UL); 530 (sp & ~0x7UL);
537 parentstack = (struct sparc_stackf __user *) 531 parentstack = (struct sparc_stackf __user *)
538 regs->u_regs[UREG_FP]; 532 regs->u_regs[UREG_FP];
539 533
540 #if 0 534 #if 0
541 printk("clone: parent stack:\n"); 535 printk("clone: parent stack:\n");
542 show_stackframe(parentstack); 536 show_stackframe(parentstack);
543 #endif 537 #endif
544 538
545 childstack = clone_stackframe(childstack, parentstack); 539 childstack = clone_stackframe(childstack, parentstack);
546 if (!childstack) 540 if (!childstack)
547 return -EFAULT; 541 return -EFAULT;
548 542
549 #if 0 543 #if 0
550 printk("clone: child stack:\n"); 544 printk("clone: child stack:\n");
551 show_stackframe(childstack); 545 show_stackframe(childstack);
552 #endif 546 #endif
553 547
554 childregs->u_regs[UREG_FP] = (unsigned long)childstack; 548 childregs->u_regs[UREG_FP] = (unsigned long)childstack;
555 } 549 }
556 } 550 }
557 551
558 #ifdef CONFIG_SMP 552 #ifdef CONFIG_SMP
559 /* FPU must be disabled on SMP. */ 553 /* FPU must be disabled on SMP. */
560 childregs->psr &= ~PSR_EF; 554 childregs->psr &= ~PSR_EF;
561 #endif 555 #endif
562 556
563 /* Set the return value for the child. */ 557 /* Set the return value for the child. */
564 childregs->u_regs[UREG_I0] = current->pid; 558 childregs->u_regs[UREG_I0] = current->pid;
565 childregs->u_regs[UREG_I1] = 1; 559 childregs->u_regs[UREG_I1] = 1;
566 560
567 /* Set the return value for the parent. */ 561 /* Set the return value for the parent. */
568 regs->u_regs[UREG_I1] = 0; 562 regs->u_regs[UREG_I1] = 0;
569 563
570 if (clone_flags & CLONE_SETTLS) 564 if (clone_flags & CLONE_SETTLS)
571 childregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3]; 565 childregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
572 566
573 return 0; 567 return 0;
574 } 568 }
575 569
576 /* 570 /*
577 * fill in the user structure for a core dump.. 571 * fill in the user structure for a core dump..
578 */ 572 */
579 void dump_thread(struct pt_regs * regs, struct user * dump) 573 void dump_thread(struct pt_regs * regs, struct user * dump)
580 { 574 {
581 unsigned long first_stack_page; 575 unsigned long first_stack_page;
582 576
583 dump->magic = SUNOS_CORE_MAGIC; 577 dump->magic = SUNOS_CORE_MAGIC;
584 dump->len = sizeof(struct user); 578 dump->len = sizeof(struct user);
585 dump->regs.psr = regs->psr; 579 dump->regs.psr = regs->psr;
586 dump->regs.pc = regs->pc; 580 dump->regs.pc = regs->pc;
587 dump->regs.npc = regs->npc; 581 dump->regs.npc = regs->npc;
588 dump->regs.y = regs->y; 582 dump->regs.y = regs->y;
589 /* fuck me plenty */ 583 /* fuck me plenty */
590 memcpy(&dump->regs.regs[0], &regs->u_regs[1], (sizeof(unsigned long) * 15)); 584 memcpy(&dump->regs.regs[0], &regs->u_regs[1], (sizeof(unsigned long) * 15));
591 dump->uexec = current->thread.core_exec; 585 dump->uexec = current->thread.core_exec;
592 dump->u_tsize = (((unsigned long) current->mm->end_code) - 586 dump->u_tsize = (((unsigned long) current->mm->end_code) -
593 ((unsigned long) current->mm->start_code)) & ~(PAGE_SIZE - 1); 587 ((unsigned long) current->mm->start_code)) & ~(PAGE_SIZE - 1);
594 dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))); 588 dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1)));
595 dump->u_dsize -= dump->u_tsize; 589 dump->u_dsize -= dump->u_tsize;
596 dump->u_dsize &= ~(PAGE_SIZE - 1); 590 dump->u_dsize &= ~(PAGE_SIZE - 1);
597 first_stack_page = (regs->u_regs[UREG_FP] & ~(PAGE_SIZE - 1)); 591 first_stack_page = (regs->u_regs[UREG_FP] & ~(PAGE_SIZE - 1));
598 dump->u_ssize = (TASK_SIZE - first_stack_page) & ~(PAGE_SIZE - 1); 592 dump->u_ssize = (TASK_SIZE - first_stack_page) & ~(PAGE_SIZE - 1);
599 memcpy(&dump->fpu.fpstatus.fregs.regs[0], &current->thread.float_regs[0], (sizeof(unsigned long) * 32)); 593 memcpy(&dump->fpu.fpstatus.fregs.regs[0], &current->thread.float_regs[0], (sizeof(unsigned long) * 32));
600 dump->fpu.fpstatus.fsr = current->thread.fsr; 594 dump->fpu.fpstatus.fsr = current->thread.fsr;
601 dump->fpu.fpstatus.flags = dump->fpu.fpstatus.extra = 0; 595 dump->fpu.fpstatus.flags = dump->fpu.fpstatus.extra = 0;
602 dump->fpu.fpstatus.fpq_count = current->thread.fpqdepth; 596 dump->fpu.fpstatus.fpq_count = current->thread.fpqdepth;
603 memcpy(&dump->fpu.fpstatus.fpq[0], &current->thread.fpqueue[0], 597 memcpy(&dump->fpu.fpstatus.fpq[0], &current->thread.fpqueue[0],
604 ((sizeof(unsigned long) * 2) * 16)); 598 ((sizeof(unsigned long) * 2) * 16));
605 dump->sigcode = 0; 599 dump->sigcode = 0;
606 } 600 }
607 601
608 /* 602 /*
609 * fill in the fpu structure for a core dump. 603 * fill in the fpu structure for a core dump.
610 */ 604 */
611 int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) 605 int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
612 { 606 {
613 if (used_math()) { 607 if (used_math()) {
614 memset(fpregs, 0, sizeof(*fpregs)); 608 memset(fpregs, 0, sizeof(*fpregs));
615 fpregs->pr_q_entrysize = 8; 609 fpregs->pr_q_entrysize = 8;
616 return 1; 610 return 1;
617 } 611 }
618 #ifdef CONFIG_SMP 612 #ifdef CONFIG_SMP
619 if (current_thread_info()->flags & _TIF_USEDFPU) { 613 if (current_thread_info()->flags & _TIF_USEDFPU) {
620 put_psr(get_psr() | PSR_EF); 614 put_psr(get_psr() | PSR_EF);
621 fpsave(&current->thread.float_regs[0], &current->thread.fsr, 615 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
622 &current->thread.fpqueue[0], &current->thread.fpqdepth); 616 &current->thread.fpqueue[0], &current->thread.fpqdepth);
623 if (regs != NULL) { 617 if (regs != NULL) {
624 regs->psr &= ~(PSR_EF); 618 regs->psr &= ~(PSR_EF);
625 current_thread_info()->flags &= ~(_TIF_USEDFPU); 619 current_thread_info()->flags &= ~(_TIF_USEDFPU);
626 } 620 }
627 } 621 }
628 #else 622 #else
629 if (current == last_task_used_math) { 623 if (current == last_task_used_math) {
630 put_psr(get_psr() | PSR_EF); 624 put_psr(get_psr() | PSR_EF);
631 fpsave(&current->thread.float_regs[0], &current->thread.fsr, 625 fpsave(&current->thread.float_regs[0], &current->thread.fsr,
632 &current->thread.fpqueue[0], &current->thread.fpqdepth); 626 &current->thread.fpqueue[0], &current->thread.fpqdepth);
633 if (regs != NULL) { 627 if (regs != NULL) {
634 regs->psr &= ~(PSR_EF); 628 regs->psr &= ~(PSR_EF);
635 last_task_used_math = NULL; 629 last_task_used_math = NULL;
636 } 630 }
637 } 631 }
638 #endif 632 #endif
639 memcpy(&fpregs->pr_fr.pr_regs[0], 633 memcpy(&fpregs->pr_fr.pr_regs[0],
640 &current->thread.float_regs[0], 634 &current->thread.float_regs[0],
641 (sizeof(unsigned long) * 32)); 635 (sizeof(unsigned long) * 32));
642 fpregs->pr_fsr = current->thread.fsr; 636 fpregs->pr_fsr = current->thread.fsr;
643 fpregs->pr_qcnt = current->thread.fpqdepth; 637 fpregs->pr_qcnt = current->thread.fpqdepth;
644 fpregs->pr_q_entrysize = 8; 638 fpregs->pr_q_entrysize = 8;
645 fpregs->pr_en = 1; 639 fpregs->pr_en = 1;
646 if(fpregs->pr_qcnt != 0) { 640 if(fpregs->pr_qcnt != 0) {
647 memcpy(&fpregs->pr_q[0], 641 memcpy(&fpregs->pr_q[0],
648 &current->thread.fpqueue[0], 642 &current->thread.fpqueue[0],
649 sizeof(struct fpq) * fpregs->pr_qcnt); 643 sizeof(struct fpq) * fpregs->pr_qcnt);
650 } 644 }
651 /* Zero out the rest. */ 645 /* Zero out the rest. */
652 memset(&fpregs->pr_q[fpregs->pr_qcnt], 0, 646 memset(&fpregs->pr_q[fpregs->pr_qcnt], 0,
653 sizeof(struct fpq) * (32 - fpregs->pr_qcnt)); 647 sizeof(struct fpq) * (32 - fpregs->pr_qcnt));
654 return 1; 648 return 1;
655 } 649 }
656 650
657 /* 651 /*
658 * sparc_execve() executes a new program after the asm stub has set 652 * sparc_execve() executes a new program after the asm stub has set
659 * things up for us. This should basically do what I want it to. 653 * things up for us. This should basically do what I want it to.
660 */ 654 */
661 asmlinkage int sparc_execve(struct pt_regs *regs) 655 asmlinkage int sparc_execve(struct pt_regs *regs)
662 { 656 {
663 int error, base = 0; 657 int error, base = 0;
664 char *filename; 658 char *filename;
665 659
666 /* Check for indirect call. */ 660 /* Check for indirect call. */
667 if(regs->u_regs[UREG_G1] == 0) 661 if(regs->u_regs[UREG_G1] == 0)
668 base = 1; 662 base = 1;
669 663
670 filename = getname((char __user *)regs->u_regs[base + UREG_I0]); 664 filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
671 error = PTR_ERR(filename); 665 error = PTR_ERR(filename);
672 if(IS_ERR(filename)) 666 if(IS_ERR(filename))
673 goto out; 667 goto out;
674 error = do_execve(filename, 668 error = do_execve(filename,
675 (char __user * __user *)regs->u_regs[base + UREG_I1], 669 (char __user * __user *)regs->u_regs[base + UREG_I1],
676 (char __user * __user *)regs->u_regs[base + UREG_I2], 670 (char __user * __user *)regs->u_regs[base + UREG_I2],
677 regs); 671 regs);
678 putname(filename); 672 putname(filename);
679 if (error == 0) { 673 if (error == 0) {
680 task_lock(current); 674 task_lock(current);
681 current->ptrace &= ~PT_DTRACE; 675 current->ptrace &= ~PT_DTRACE;
682 task_unlock(current); 676 task_unlock(current);
683 } 677 }
684 out: 678 out:
685 return error; 679 return error;
686 } 680 }
687 681
688 /* 682 /*
689 * This is the mechanism for creating a new kernel thread. 683 * This is the mechanism for creating a new kernel thread.
690 * 684 *
691 * NOTE! Only a kernel-only process(ie the swapper or direct descendants 685 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
692 * who haven't done an "execve()") should use this: it will work within 686 * who haven't done an "execve()") should use this: it will work within
693 * a system call from a "real" process, but the process memory space will 687 * a system call from a "real" process, but the process memory space will
694 * not be free'd until both the parent and the child have exited. 688 * not be free'd until both the parent and the child have exited.
695 */ 689 */
696 pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 690 pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
697 { 691 {
698 long retval; 692 long retval;
699 693
700 __asm__ __volatile__("mov %4, %%g2\n\t" /* Set aside fn ptr... */ 694 __asm__ __volatile__("mov %4, %%g2\n\t" /* Set aside fn ptr... */
701 "mov %5, %%g3\n\t" /* and arg. */ 695 "mov %5, %%g3\n\t" /* and arg. */
702 "mov %1, %%g1\n\t" 696 "mov %1, %%g1\n\t"
703 "mov %2, %%o0\n\t" /* Clone flags. */ 697 "mov %2, %%o0\n\t" /* Clone flags. */
704 "mov 0, %%o1\n\t" /* usp arg == 0 */ 698 "mov 0, %%o1\n\t" /* usp arg == 0 */
705 "t 0x10\n\t" /* Linux/Sparc clone(). */ 699 "t 0x10\n\t" /* Linux/Sparc clone(). */
706 "cmp %%o1, 0\n\t" 700 "cmp %%o1, 0\n\t"
707 "be 1f\n\t" /* The parent, just return. */ 701 "be 1f\n\t" /* The parent, just return. */
708 " nop\n\t" /* Delay slot. */ 702 " nop\n\t" /* Delay slot. */
709 "jmpl %%g2, %%o7\n\t" /* Call the function. */ 703 "jmpl %%g2, %%o7\n\t" /* Call the function. */
710 " mov %%g3, %%o0\n\t" /* Get back the arg in delay. */ 704 " mov %%g3, %%o0\n\t" /* Get back the arg in delay. */
711 "mov %3, %%g1\n\t" 705 "mov %3, %%g1\n\t"
712 "t 0x10\n\t" /* Linux/Sparc exit(). */ 706 "t 0x10\n\t" /* Linux/Sparc exit(). */
713 /* Notreached by child. */ 707 /* Notreached by child. */
714 "1: mov %%o0, %0\n\t" : 708 "1: mov %%o0, %0\n\t" :
715 "=r" (retval) : 709 "=r" (retval) :
716 "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED), 710 "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
717 "i" (__NR_exit), "r" (fn), "r" (arg) : 711 "i" (__NR_exit), "r" (fn), "r" (arg) :
718 "g1", "g2", "g3", "o0", "o1", "memory", "cc"); 712 "g1", "g2", "g3", "o0", "o1", "memory", "cc");
719 return retval; 713 return retval;
720 } 714 }
721 715
722 unsigned long get_wchan(struct task_struct *task) 716 unsigned long get_wchan(struct task_struct *task)
723 { 717 {
724 unsigned long pc, fp, bias = 0; 718 unsigned long pc, fp, bias = 0;
725 unsigned long task_base = (unsigned long) task; 719 unsigned long task_base = (unsigned long) task;
726 unsigned long ret = 0; 720 unsigned long ret = 0;
727 struct reg_window *rw; 721 struct reg_window *rw;
728 int count = 0; 722 int count = 0;
729 723
730 if (!task || task == current || 724 if (!task || task == current ||
731 task->state == TASK_RUNNING) 725 task->state == TASK_RUNNING)
732 goto out; 726 goto out;
733 727
734 fp = task->thread_info->ksp + bias; 728 fp = task->thread_info->ksp + bias;
735 do { 729 do {
736 /* Bogus frame pointer? */ 730 /* Bogus frame pointer? */
737 if (fp < (task_base + sizeof(struct thread_info)) || 731 if (fp < (task_base + sizeof(struct thread_info)) ||
738 fp >= (task_base + (2 * PAGE_SIZE))) 732 fp >= (task_base + (2 * PAGE_SIZE)))
739 break; 733 break;
740 rw = (struct reg_window *) fp; 734 rw = (struct reg_window *) fp;
741 pc = rw->ins[7]; 735 pc = rw->ins[7];
742 if (!in_sched_functions(pc)) { 736 if (!in_sched_functions(pc)) {
743 ret = pc; 737 ret = pc;
744 goto out; 738 goto out;
745 } 739 }
746 fp = rw->ins[6] + bias; 740 fp = rw->ins[6] + bias;
747 } while (++count < 16); 741 } while (++count < 16);
748 742
749 out: 743 out:
750 return ret; 744 return ret;
751 } 745 }
752 746
753 747
arch/sparc64/kernel/power.c
1 /* $Id: power.c,v 1.10 2001/12/11 01:57:16 davem Exp $ 1 /* $Id: power.c,v 1.10 2001/12/11 01:57:16 davem Exp $
2 * power.c: Power management driver. 2 * power.c: Power management driver.
3 * 3 *
4 * Copyright (C) 1999 David S. Miller (davem@redhat.com) 4 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
5 */ 5 */
6 6
7 #define __KERNEL_SYSCALLS__ 7 #define __KERNEL_SYSCALLS__
8 8
9 #include <linux/config.h> 9 #include <linux/config.h>
10 #include <linux/kernel.h> 10 #include <linux/kernel.h>
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/init.h> 12 #include <linux/init.h>
13 #include <linux/sched.h> 13 #include <linux/sched.h>
14 #include <linux/signal.h> 14 #include <linux/signal.h>
15 #include <linux/delay.h> 15 #include <linux/delay.h>
16 #include <linux/interrupt.h> 16 #include <linux/interrupt.h>
17 17
18 #include <asm/system.h> 18 #include <asm/system.h>
19 #include <asm/ebus.h> 19 #include <asm/ebus.h>
20 #include <asm/auxio.h> 20 #include <asm/auxio.h>
21 21
22 #include <linux/unistd.h> 22 #include <linux/unistd.h>
23 23
24 /* 24 /*
25 * sysctl - toggle power-off restriction for serial console 25 * sysctl - toggle power-off restriction for serial console
26 * systems in machine_power_off() 26 * systems in machine_power_off()
27 */ 27 */
28 int scons_pwroff = 1; 28 int scons_pwroff = 1;
29 29
30 #ifdef CONFIG_PCI 30 #ifdef CONFIG_PCI
31 static void __iomem *power_reg; 31 static void __iomem *power_reg;
32 32
33 static DECLARE_WAIT_QUEUE_HEAD(powerd_wait); 33 static DECLARE_WAIT_QUEUE_HEAD(powerd_wait);
34 static int button_pressed; 34 static int button_pressed;
35 35
/*
 * IRQ handler for the power button.  Latches the press (once) and
 * wakes the powerd kernel thread, which performs the actual shutdown.
 */
static irqreturn_t power_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	if (button_pressed == 0) {
		button_pressed = 1;
		wake_up(&powerd_wait);
	}

	/* FIXME: Check registers for status... */
	return IRQ_HANDLED;
}
46 #endif /* CONFIG_PCI */ 46 #endif /* CONFIG_PCI */
47 47
48 extern void machine_halt(void); 48 extern void machine_halt(void);
49 extern void machine_alt_power_off(void); 49 extern void machine_alt_power_off(void);
50 static void (*poweroff_method)(void) = machine_alt_power_off; 50 static void (*poweroff_method)(void) = machine_alt_power_off;
51 51
void machine_power_off(void)
{
	/* On serial-console systems power-off is suppressed unless the
	 * scons_pwroff sysctl permits it (cutting power kills the console).
	 */
	if (!serial_console || scons_pwroff) {
#ifdef CONFIG_PCI
		if (power_reg) {
			/* Both register bits seem to have the
			 * same effect, so until I figure out
			 * what the difference is...
			 */
			writel(AUXIO_PCIO_CPWR_OFF | AUXIO_PCIO_SPWR_OFF, power_reg);
		} else
#endif /* CONFIG_PCI */
		if (poweroff_method != NULL) {
			poweroff_method();
			/* not reached */
		}
	}
	/* Fall back to halting in the PROM if power could not be cut. */
	machine_halt();
}
71 71
72 EXPORT_SYMBOL(machine_power_off);
73
74 #ifdef CONFIG_PCI 72 #ifdef CONFIG_PCI
/*
 * Kernel thread that sleeps until the power button is pressed and then
 * execs /sbin/shutdown for an orderly halt.  Started from power_init()
 * only when the EBus power device provides a button interrupt.
 */
static int powerd(void *__unused)
{
	static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
	char *argv[] = { "/sbin/shutdown", "-h", "now", NULL };
	DECLARE_WAITQUEUE(wait, current);

	daemonize("powerd");

	add_wait_queue(&powerd_wait, &wait);
again:
	/* Sleep interruptibly until power_handler() sets button_pressed. */
	for (;;) {
		set_task_state(current, TASK_INTERRUPTIBLE);
		if (button_pressed)
			break;
		flush_signals(current);
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&powerd_wait, &wait);

	/* Ok, down we go... */
	button_pressed = 0;
	if (execve("/sbin/shutdown", argv, envp) < 0) {
		printk("powerd: shutdown execution failed\n");
		/* Re-arm and wait for another button press. */
		add_wait_queue(&powerd_wait, &wait);
		goto again;
	}
	return 0;
}
104 102
105 static int __init has_button_interrupt(struct linux_ebus_device *edev) 103 static int __init has_button_interrupt(struct linux_ebus_device *edev)
106 { 104 {
107 if (edev->irqs[0] == PCI_IRQ_NONE) 105 if (edev->irqs[0] == PCI_IRQ_NONE)
108 return 0; 106 return 0;
109 if (!prom_node_has_property(edev->prom_node, "button")) 107 if (!prom_node_has_property(edev->prom_node, "button"))
110 return 0; 108 return 0;
111 109
112 return 1; 110 return 1;
113 } 111 }
114 112
/*
 * Probe the EBus for a "power" device.  If present, map its control
 * register (enabling register-based power-off and the standard halt
 * path) and, when the device has a button interrupt, start the powerd
 * thread and register the button IRQ handler.  Idempotent.
 */
void __init power_init(void)
{
	struct linux_ebus *ebus;
	struct linux_ebus_device *edev;
	static int invoked;	/* guards against a second invocation */

	if (invoked)
		return;
	invoked = 1;

	for_each_ebus(ebus) {
		for_each_ebusdev(edev, ebus) {
			if (!strcmp(edev->prom_name, "power"))
				goto found;
		}
	}
	return;		/* no power device on this machine */

found:
	power_reg = ioremap(edev->resource[0].start, 0x4);
	printk("power: Control reg at %p ... ", power_reg);
	poweroff_method = machine_halt;	/* able to use the standard halt */
	if (has_button_interrupt(edev)) {
		if (kernel_thread(powerd, NULL, CLONE_FS) < 0) {
			printk("Failed to start power daemon.\n");
			return;
		}
		printk("powerd running.\n");

		if (request_irq(edev->irqs[0],
				power_handler, SA_SHIRQ, "power", NULL) < 0)
			printk("power: Error, cannot register IRQ handler.\n");
	} else {
		printk("not using powerd.\n");
	}
}
151 #endif /* CONFIG_PCI */ 149 #endif /* CONFIG_PCI */
152 150
arch/sparc64/kernel/process.c
1 /* $Id: process.c,v 1.131 2002/02/09 19:49:30 davem Exp $ 1 /* $Id: process.c,v 1.131 2002/02/09 19:49:30 davem Exp $
2 * arch/sparc64/kernel/process.c 2 * arch/sparc64/kernel/process.c
3 * 3 *
4 * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu) 4 * Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) 5 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
6 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 6 * Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */ 7 */
8 8
9 /* 9 /*
10 * This file handles the architecture-dependent parts of process handling.. 10 * This file handles the architecture-dependent parts of process handling..
11 */ 11 */
12 12
13 #include <stdarg.h> 13 #include <stdarg.h>
14 14
15 #include <linux/config.h> 15 #include <linux/config.h>
16 #include <linux/errno.h> 16 #include <linux/errno.h>
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/sched.h> 18 #include <linux/sched.h>
19 #include <linux/kernel.h> 19 #include <linux/kernel.h>
20 #include <linux/kallsyms.h> 20 #include <linux/kallsyms.h>
21 #include <linux/mm.h> 21 #include <linux/mm.h>
22 #include <linux/smp.h> 22 #include <linux/smp.h>
23 #include <linux/smp_lock.h> 23 #include <linux/smp_lock.h>
24 #include <linux/stddef.h> 24 #include <linux/stddef.h>
25 #include <linux/ptrace.h> 25 #include <linux/ptrace.h>
26 #include <linux/slab.h> 26 #include <linux/slab.h>
27 #include <linux/user.h> 27 #include <linux/user.h>
28 #include <linux/a.out.h> 28 #include <linux/a.out.h>
29 #include <linux/config.h> 29 #include <linux/config.h>
30 #include <linux/reboot.h> 30 #include <linux/reboot.h>
31 #include <linux/delay.h> 31 #include <linux/delay.h>
32 #include <linux/compat.h> 32 #include <linux/compat.h>
33 #include <linux/init.h> 33 #include <linux/init.h>
34 34
35 #include <asm/oplib.h> 35 #include <asm/oplib.h>
36 #include <asm/uaccess.h> 36 #include <asm/uaccess.h>
37 #include <asm/system.h> 37 #include <asm/system.h>
38 #include <asm/page.h> 38 #include <asm/page.h>
39 #include <asm/pgalloc.h> 39 #include <asm/pgalloc.h>
40 #include <asm/pgtable.h> 40 #include <asm/pgtable.h>
41 #include <asm/processor.h> 41 #include <asm/processor.h>
42 #include <asm/pstate.h> 42 #include <asm/pstate.h>
43 #include <asm/elf.h> 43 #include <asm/elf.h>
44 #include <asm/fpumacro.h> 44 #include <asm/fpumacro.h>
45 #include <asm/head.h> 45 #include <asm/head.h>
46 #include <asm/cpudata.h> 46 #include <asm/cpudata.h>
47 #include <asm/unistd.h> 47 #include <asm/unistd.h>
48 48
49 /* #define VERBOSE_SHOWREGS */ 49 /* #define VERBOSE_SHOWREGS */
50 50
/*
 * Nothing special yet... arch idle hook, currently a no-op on sparc64.
 */
void default_idle(void)
{
}
57 57
58 #ifndef CONFIG_SMP 58 #ifndef CONFIG_SMP
59 59
/*
 * the idle loop on a Sparc... ;)
 *
 * Uniprocessor version: busy-wait until a reschedule is needed, then
 * run the scheduler and trim the page-table cache.
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	for (;;) {
		/* If current->work.need_resched is zero we should really
		 * setup for a system wakup event and execute a shutdown
		 * instruction.
		 *
		 * But this requires writing back the contents of the
		 * L2 cache etc. so implement this later. -DaveM
		 */
		while (!need_resched())
			barrier();

		schedule();
		check_pgt_cache();
	}
}
81 81
82 #else 82 #else
83 83
/*
 * the idle loop on a UltraMultiPenguin...
 *
 * SMP version: polls need_resched() (with TIF_POLLING_NRFLAG set so
 * other cpus need not IPI us) while bumping the per-cpu idle_volume
 * counter that the IRQ buddy-redistribution code reads.
 */
#define idle_me_harder()	(cpu_data(smp_processor_id()).idle_volume += 1)
#define unidle_me()		(cpu_data(smp_processor_id()).idle_volume = 0)
void cpu_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
	while(1) {
		if (need_resched()) {
			unidle_me();
			clear_thread_flag(TIF_POLLING_NRFLAG);
			schedule();
			set_thread_flag(TIF_POLLING_NRFLAG);
			check_pgt_cache();
		}
		idle_me_harder();

		/* The store ordering is so that IRQ handlers on
		 * other cpus see our increasing idleness for the buddy
		 * redistribution algorithm. -DaveM
		 */
		membar("#StoreStore | #StoreLoad");
	}
}
109 109
110 #endif 110 #endif
111 111
112 extern char reboot_command []; 112 extern char reboot_command [];
113 113
114 extern void (*prom_palette)(int); 114 extern void (*prom_palette)(int);
115 extern void (*prom_keyboard)(void); 115 extern void (*prom_keyboard)(void);
116 116
/*
 * Halt into the PROM.  Restores the PROM palette and keyboard state
 * when a frame-buffer console is in use, then calls prom_halt().
 * Does not return; panics if the PROM call somehow does.
 */
void machine_halt(void)
{
	if (!serial_console && prom_palette)
		prom_palette (1);
	if (prom_keyboard)
		prom_keyboard();
	prom_halt();
	panic("Halt failed!");
}
126 126
127 EXPORT_SYMBOL(machine_halt);
128
/*
 * Power off via prom_halt_power_off(); used as the poweroff_method
 * fallback when no PCI power register is available.  Does not return;
 * panics if the PROM call does.
 */
void machine_alt_power_off(void)
{
	if (!serial_console && prom_palette)
		prom_palette(1);
	if (prom_keyboard)
		prom_keyboard();
	prom_halt_power_off();
	panic("Power-off failed!");
}
138 136
/*
 * Reboot via the PROM.  Tries the caller-supplied command first, then
 * the boot-time reboot_command string (trailing newline stripped),
 * then a plain reboot.  Does not return; panics on total failure.
 */
void machine_restart(char * cmd)
{
	char *p;

	/* Strip any trailing newline from the saved reboot command. */
	p = strchr (reboot_command, '\n');
	if (p) *p = 0;
	if (!serial_console && prom_palette)
		prom_palette (1);
	if (prom_keyboard)
		prom_keyboard();
	if (cmd)
		prom_reboot(cmd);
	if (*reboot_command)
		prom_reboot(reboot_command);
	prom_reboot("");
	panic("Reboot failed!");
}
156
157 EXPORT_SYMBOL(machine_restart);
158 154
/*
 * Dump the 32-bit user register window (locals and ins) addressed by
 * the stack pointer in u_regs[14], copying it from user space under a
 * temporary USER_DS segment.  Returns silently if the copy faults.
 */
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;
	mm_segment_t old_fs;

	/* Flush register windows to the stack before reading them back. */
	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned)regs->u_regs[14]);
	old_fs = get_fs();
	set_fs (USER_DS);
	if (copy_from_user (&r_w, rw, sizeof(r_w))) {
		set_fs (old_fs);
		return;
	}

	set_fs (old_fs);
	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
184 180
/*
 * Dump the register window for a trap frame.  Kernel traps read the
 * window in place; 64-bit user traps copy it from user space (with
 * STACK_BIAS applied); 32-bit user traps are delegated to
 * show_regwindow32().  For kernel frames, %i7 is also symbolized.
 */
static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;
	mm_segment_t old_fs;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			/* User frame: fetch a private copy under USER_DS. */
			old_fs = get_fs();
			set_fs (USER_DS);
			if (copy_from_user (&r_w, rw, sizeof(r_w))) {
				set_fs (old_fs);
				return;
			}
			rwk = &r_w;
			set_fs (old_fs);
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	if (regs->tstate & TSTATE_PRIV)
		print_symbol("I7: <%s>\n", rwk->ins[7]);
}
223 219
/*
 * Pretty-print a 64-bit kernel stack frame: locals, ins, outgoing
 * args, then every extra stack word between the fixed frame area and
 * the saved frame pointer.
 */
void show_stackframe(struct sparc_stackf *sf)
{
	unsigned long size;
	unsigned long *stk;
	int i;

	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n"
	       "l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3],
	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n"
	       "i4: %016lx i5: %016lx fp: %016lx ret_pc: %016lx\n",
	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3],
	       sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc);
	printk("sp: %016lx x0: %016lx x1: %016lx x2: %016lx\n"
	       "x3: %016lx x4: %016lx x5: %016lx xx: %016lx\n",
	       (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1],
	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
	       sf->xxargs[0]);
	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
	size -= STACKFRAME_SZ;
	stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ);
	i = 0;
	/* NOTE(review): do/while prints at least one word even when
	 * size is 0 — presumably frames always carry extra words here;
	 * confirm before relying on this for minimal frames.
	 */
	do {
		printk("s%d: %016lx\n", i++, *stk++);
	} while ((size -= sizeof(unsigned long)));
}
251 247
/*
 * 32-bit counterpart of show_stackframe(): dump a compat stack frame's
 * locals, ins, outgoing args and trailing stack words.
 */
void show_stackframe32(struct sparc_stackf32 *sf)
{
	unsigned long size;
	unsigned *stk;
	int i;

	printk("l0: %08x l1: %08x l2: %08x l3: %08x\n",
	       sf->locals[0], sf->locals[1], sf->locals[2], sf->locals[3]);
	printk("l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x\n",
	       sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3]);
	printk("i4: %08x i5: %08x fp: %08x ret_pc: %08x\n",
	       sf->ins[4], sf->ins[5], sf->fp, sf->callers_pc);
	printk("sp: %08x x0: %08x x1: %08x x2: %08x\n"
	       "x3: %08x x4: %08x x5: %08x xx: %08x\n",
	       sf->structptr, sf->xargs[0], sf->xargs[1],
	       sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5],
	       sf->xxargs[0]);
	size = ((unsigned long)sf->fp) - ((unsigned long)sf);
	size -= STACKFRAME32_SZ;
	stk = (unsigned *)((unsigned long)sf + STACKFRAME32_SZ);
	i = 0;
	do {
		printk("s%d: %08x\n", i++, *stk++);
	} while ((size -= sizeof(unsigned)));
}
279 275
280 #ifdef CONFIG_SMP 276 #ifdef CONFIG_SMP
281 static DEFINE_SPINLOCK(regdump_lock); 277 static DEFINE_SPINLOCK(regdump_lock);
282 #endif 278 #endif
283 279
/*
 * Print the full register state of a trap frame: %tstate and trap
 * PCs, globals, outs and the register window.  On SMP, local
 * interrupts are disabled (PSTATE_IE cleared) before taking the dump
 * lock so cross-call register dumps cannot interleave with ours.
 */
void __show_regs(struct pt_regs * regs)
{
#ifdef CONFIG_SMP
	unsigned long flags;

	/* Protect against xcall ipis which might lead to livelock on the lock */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (flags)
			     : "i" (PSTATE_IE));
	spin_lock(&regdump_lock);
#endif
	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	print_symbol("TPC: <%s>\n", regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	print_symbol("RPC: <%s>\n", regs->u_regs[15]);
	show_regwindow(regs);
#ifdef CONFIG_SMP
	spin_unlock(&regdump_lock);
	/* Restore the previously saved %pstate (re-enables interrupts). */
	__asm__ __volatile__("wrpr %0, 0, %%pstate"
			     : : "r" (flags));
#endif
}
319 315
320 #ifdef VERBOSE_SHOWREGS 316 #ifdef VERBOSE_SHOWREGS
/*
 * Dump the nine user-space instructions around 'pc' (three before,
 * the faulting one, five after), marking the instruction at pc with
 * <...>.  Bails out if pc is not word-aligned.
 */
static void idump_from_user (unsigned int *pc)
{
	int i;
	int code;

	if((((unsigned long) pc) & 3))
		return;

	pc -= 3;
	for(i = -3; i < 6; i++) {
		get_user(code, pc);
		printk("%c%08x%c",i?' ':'<',code,i?' ':'>');
		pc++;
	}
	printk("\n");
}
337 #endif 333 #endif
338 334
/*
 * Top-level register dump: print this cpu's trap frame, then (SMP)
 * ask every other cpu to report its registers.  With VERBOSE_SHOWREGS
 * the parent trap frame is dumped too when the trap PC lies inside
 * etrap and the stack pointer is near 'current'.
 */
void show_regs(struct pt_regs *regs)
{
#ifdef VERBOSE_SHOWREGS
	extern long etrap, etraptl1;
#endif
	__show_regs(regs);
#ifdef CONFIG_SMP
	{
		extern void smp_report_regs(void);

		smp_report_regs();
	}
#endif

#ifdef VERBOSE_SHOWREGS
	if (regs->tpc >= &etrap && regs->tpc < &etraptl1 &&
	    regs->u_regs[14] >= (long)current - PAGE_SIZE &&
	    regs->u_regs[14] < (long)current + 6 * PAGE_SIZE) {
		printk ("*********parent**********\n");
		__show_regs((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF));
		idump_from_user(((struct pt_regs *)(regs->u_regs[14] + PTREGS_OFF))->tpc);
		printk ("*********endpar**********\n");
	}
#endif
}
364 360
/*
 * Print a 32-bit (compat) trap frame: %psr, PCs, %y, globals and outs.
 */
void show_regs32(struct pt_regs32 *regs)
{
	printk("PSR: %08x PC: %08x NPC: %08x Y: %08x %s\n", regs->psr,
	       regs->pc, regs->npc, regs->y, print_tainted());
	printk("g0: %08x g1: %08x g2: %08x g3: %08x ",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %08x g5: %08x g6: %08x g7: %08x\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %08x o1: %08x o2: %08x o3: %08x ",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %08x o5: %08x sp: %08x ret_pc: %08x\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
}
382 378
383 unsigned long thread_saved_pc(struct task_struct *tsk) 379 unsigned long thread_saved_pc(struct task_struct *tsk)
384 { 380 {
385 struct thread_info *ti = tsk->thread_info; 381 struct thread_info *ti = tsk->thread_info;
386 unsigned long ret = 0xdeadbeefUL; 382 unsigned long ret = 0xdeadbeefUL;
387 383
388 if (ti && ti->ksp) { 384 if (ti && ti->ksp) {
389 unsigned long *sp; 385 unsigned long *sp;
390 sp = (unsigned long *)(ti->ksp + STACK_BIAS); 386 sp = (unsigned long *)(ti->ksp + STACK_BIAS);
391 if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL && 387 if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
392 sp[14]) { 388 sp[14]) {
393 unsigned long *fp; 389 unsigned long *fp;
394 fp = (unsigned long *)(sp[14] + STACK_BIAS); 390 fp = (unsigned long *)(sp[14] + STACK_BIAS);
395 if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL) 391 if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
396 ret = fp[15]; 392 ret = fp[15];
397 } 393 }
398 } 394 }
399 return ret; 395 return ret;
400 } 396 }
401 397
/* Free current thread data structures etc.. */
void exit_thread(void)
{
	struct thread_info *t = current_thread_info();

	/* utraps[0] looks like a share count (freed when < 2, otherwise
	 * decremented) — the table is shared across clones.
	 */
	if (t->utraps) {
		if (t->utraps[0] < 2)
			kfree (t->utraps);
		else
			t->utraps[0]--;
	}

	/* Turn off user access to the performance counters. */
	if (test_and_clear_thread_flag(TIF_PERFCTR)) {
		t->user_cntd0 = t->user_cntd1 = NULL;
		t->pcr_reg = 0;
		write_pcr(0);
	}
}
420 416
/*
 * Reset the current thread for a fresh exec: resolve a pending
 * 32/64-bit ABI switch, reload the MMU's pgd cache, drop saved
 * register windows, disable performance counters, clear FPU state and
 * restore the default user address-space limit and signal disposition.
 */
void flush_thread(void)
{
	struct thread_info *t = current_thread_info();

	/* An exec across the 32<->64-bit boundary flips the ABI here. */
	if (t->flags & _TIF_ABI_PENDING)
		t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);

	if (t->task->mm) {
		unsigned long pgd_cache = 0UL;
		if (test_thread_flag(TIF_32BIT)) {
			struct mm_struct *mm = t->task->mm;
			pgd_t *pgd0 = &mm->pgd[0];
			pud_t *pud0 = pud_offset(pgd0, 0);

			/* Make sure the first pud is populated so the
			 * pgd cache below points at a real pmd page.
			 */
			if (pud_none(*pud0)) {
				pmd_t *page = pmd_alloc_one(mm, 0);
				pud_set(pud0, page);
			}
			pgd_cache = get_pgd_cache(pgd0);
		}
		/* Load the pgd cache into the DMMU TSB register. */
		__asm__ __volatile__("stxa %0, [%1] %2\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (pgd_cache),
				     "r" (TSB_REG),
				     "i" (ASI_DMMU));
	}
	set_thread_wsaved(0);

	/* Turn off performance counters if on. */
	if (test_and_clear_thread_flag(TIF_PERFCTR)) {
		t->user_cntd0 = t->user_cntd1 = NULL;
		t->pcr_reg = 0;
		write_pcr(0);
	}

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;

	if (get_thread_current_ds() != ASI_AIUS)
		set_fs(USER_DS);

	/* Init new signal delivery disposition. */
	clear_thread_flag(TIF_NEWSIGNALS);
}
466 462
/* It's a bit more tricky when 64-bit tasks are involved... */
/*
 * clone_stackframe() - build the child's initial stack when clone()
 * supplies a new stack pointer: copy the parent's frames (from @psp up
 * to the parent's saved frame pointer) onto the child stack at @csp,
 * then patch the copied frame's saved %fp (ins[6]) to point into the
 * new stack.  Returns the (re-biased) child stack pointer, or 0 if any
 * user access faults.
 */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	unsigned long fp, distance, rval;

	/* 64-bit tasks carry STACK_BIAS in their stack pointers;
	 * un-bias before touching memory. */
	if (!(test_thread_flag(TIF_32BIT))) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* Now 8-byte align the stack as this is mandatory in the
	 * Sparc ABI due to how register windows work.  This hides
	 * the restriction from thread libraries etc.  -DaveM
	 */
	csp &= ~7UL;

	/* Copy everything between psp and the parent's saved %fp. */
	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
		rval = 0;
	else if (test_thread_flag(TIF_32BIT)) {
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;	/* re-apply bias for caller */
	}

	return rval;
}
504 500
505 /* Standard stuff. */ 501 /* Standard stuff. */
506 static inline void shift_window_buffer(int first_win, int last_win, 502 static inline void shift_window_buffer(int first_win, int last_win,
507 struct thread_info *t) 503 struct thread_info *t)
508 { 504 {
509 int i; 505 int i;
510 506
511 for (i = first_win; i < last_win; i++) { 507 for (i = first_win; i < last_win; i++) {
512 t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1]; 508 t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
513 memcpy(&t->reg_window[i], &t->reg_window[i+1], 509 memcpy(&t->reg_window[i], &t->reg_window[i+1],
514 sizeof(struct reg_window)); 510 sizeof(struct reg_window));
515 } 511 }
516 } 512 }
517 513
/*
 * synchronize_user_stack() - opportunistically write any user register
 * windows buffered in the kernel back out to the user stack.  Windows
 * whose stack pages cannot be written stay buffered; wsaved is only
 * decremented for windows that were actually pushed out.  No error is
 * reported here (contrast fault_in_user_windows()).
 */
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		int winsize = sizeof(struct reg_window);
		int bias = 0;

		/* 32-bit tasks use the smaller window layout and an
		 * unbiased stack pointer; 64-bit stacks carry STACK_BIAS. */
		if (test_thread_flag(TIF_32BIT))
			winsize = sizeof(struct reg_window32);
		else
			bias = STACK_BIAS;

		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				/* Pushed out: close the hole in the buffer. */
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}
545 541
/*
 * fault_in_user_windows() - flush every kernel-buffered user register
 * window out to the user stack, faulting stack pages in as needed.
 * Unlike synchronize_user_stack(), failure here is fatal: the count of
 * windows still unsaved is recorded and the task is killed with SIGILL.
 */
void fault_in_user_windows(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;
	int winsize = sizeof(struct reg_window);
	int bias = 0;

	/* Pick window size/stack bias to match the task's ABI. */
	if (test_thread_flag(TIF_32BIT))
		winsize = sizeof(struct reg_window32);
	else
		bias = STACK_BIAS;

	flush_user_windows();
	window = get_thread_wsaved();

	if (window != 0) {
		window -= 1;
		do {
			unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
			struct reg_window *rwin = &t->reg_window[window];

			if (copy_to_user((char __user *)sp, rwin, winsize))
				goto barf;
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	/* Record how many windows remain unsaved, then kill the task. */
	set_thread_wsaved(window + 1);
	do_exit(SIGILL);
}
578 574
/*
 * sparc_do_fork() - common arch entry point for the fork/vfork/clone
 * system calls.  Fetches the parent/child TID pointer arguments from
 * the trap registers (decoded as compat 32-bit pointers for 32-bit
 * tasks) and hands everything to the generic do_fork().
 */
asmlinkage long sparc_do_fork(unsigned long clone_flags,
			      unsigned long stack_start,
			      struct pt_regs *regs,
			      unsigned long stack_size)
{
	int __user *parent_tid_ptr, *child_tid_ptr;

#ifdef CONFIG_COMPAT
	if (test_thread_flag(TIF_32BIT)) {
		parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
		child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
	} else
#endif
	{
		parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
		child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
	}

	return do_fork(clone_flags, stack_start,
		       regs, stack_size,
		       parent_tid_ptr, child_tid_ptr);
}
601 597
/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 *   Parent -->  %o0 == childs  pid, %o1 == 0
 *   Child  -->  %o0 == parents pid, %o1 == 1
 *
 * Builds the child's kernel stack: the parent's trap frame is copied
 * to the top of the child's thread area, and the child's thread_info
 * (ksp/kregs/flags) is pointed at it.  Kernel-thread children
 * (TSTATE_PRIV set) also get a copy of the parent's top stack frame;
 * user children may get a freshly cloned user stack frame when a new
 * stack pointer was requested.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *t = p->thread_info;
	char *child_trap_frame;

#ifdef CONFIG_DEBUG_SPINLOCK
	p->thread.smp_lock_count = 0;
	p->thread.smp_lock_pc = 0;
#endif

	/* Calculate offset to stack_frame & pt_regs */
	child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
	memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));

	/* Install the child's CWP in the flags word; the current_ds
	 * field is filled in below depending on kernel vs. user child. */
	t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
		(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *)(child_trap_frame+sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (regs->tstate & TSTATE_PRIV) {
		/* Special case, if we are spawning a kernel thread from
		 * a userspace task (via KMOD, NFS, or similar) we must
		 * disable performance counters in the child because the
		 * address space and protection realm are changing.
		 */
		if (t->flags & _TIF_PERFCTR) {
			t->user_cntd0 = t->user_cntd1 = NULL;
			t->pcr_reg = 0;
			t->flags &= ~_TIF_PERFCTR;
		}
		t->kregs->u_regs[UREG_FP] = t->ksp;
		t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
		/* Copy the parent's current top stack frame for the child. */
		flush_register_windows();
		memcpy((void *)(t->ksp + STACK_BIAS),
		       (void *)(regs->u_regs[UREG_FP] + STACK_BIAS),
		       sizeof(struct sparc_stackf));
		t->kregs->u_regs[UREG_G6] = (unsigned long) t;
		t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
	} else {
		if (t->flags & _TIF_32BIT) {
			/* 32-bit tasks only use the low 32 bits of sp. */
			sp &= 0x00000000ffffffffUL;
			regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
		}
		t->kregs->u_regs[UREG_FP] = sp;
		t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
		if (sp != regs->u_regs[UREG_FP]) {
			unsigned long csp;

			/* clone() with a new stack: replicate the parent's
			 * stack frame onto it. */
			csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
			if (!csp)
				return -EFAULT;
			t->kregs->u_regs[UREG_FP] = csp;
		}
		if (t->utraps)
			t->utraps[0]++;	/* utraps[0] is a refcount, see exit_thread() */
	}

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

	return 0;
}
680 676
681 /* 677 /*
682 * This is the mechanism for creating a new kernel thread. 678 * This is the mechanism for creating a new kernel thread.
683 * 679 *
684 * NOTE! Only a kernel-only process(ie the swapper or direct descendants 680 * NOTE! Only a kernel-only process(ie the swapper or direct descendants
685 * who haven't done an "execve()") should use this: it will work within 681 * who haven't done an "execve()") should use this: it will work within
686 * a system call from a "real" process, but the process memory space will 682 * a system call from a "real" process, but the process memory space will
687 * not be free'd until both the parent and the child have exited. 683 * not be free'd until both the parent and the child have exited.
688 */ 684 */
689 pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) 685 pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
690 { 686 {
691 long retval; 687 long retval;
692 688
693 /* If the parent runs before fn(arg) is called by the child, 689 /* If the parent runs before fn(arg) is called by the child,
694 * the input registers of this function can be clobbered. 690 * the input registers of this function can be clobbered.
695 * So we stash 'fn' and 'arg' into global registers which 691 * So we stash 'fn' and 'arg' into global registers which
696 * will not be modified by the parent. 692 * will not be modified by the parent.
697 */ 693 */
698 __asm__ __volatile__("mov %4, %%g2\n\t" /* Save FN into global */ 694 __asm__ __volatile__("mov %4, %%g2\n\t" /* Save FN into global */
699 "mov %5, %%g3\n\t" /* Save ARG into global */ 695 "mov %5, %%g3\n\t" /* Save ARG into global */
700 "mov %1, %%g1\n\t" /* Clone syscall nr. */ 696 "mov %1, %%g1\n\t" /* Clone syscall nr. */
701 "mov %2, %%o0\n\t" /* Clone flags. */ 697 "mov %2, %%o0\n\t" /* Clone flags. */
702 "mov 0, %%o1\n\t" /* usp arg == 0 */ 698 "mov 0, %%o1\n\t" /* usp arg == 0 */
703 "t 0x6d\n\t" /* Linux/Sparc clone(). */ 699 "t 0x6d\n\t" /* Linux/Sparc clone(). */
704 "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */ 700 "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
705 " mov %%o0, %0\n\t" 701 " mov %%o0, %0\n\t"
706 "jmpl %%g2, %%o7\n\t" /* Call the function. */ 702 "jmpl %%g2, %%o7\n\t" /* Call the function. */
707 " mov %%g3, %%o0\n\t" /* Set arg in delay. */ 703 " mov %%g3, %%o0\n\t" /* Set arg in delay. */
708 "mov %3, %%g1\n\t" 704 "mov %3, %%g1\n\t"
709 "t 0x6d\n\t" /* Linux/Sparc exit(). */ 705 "t 0x6d\n\t" /* Linux/Sparc exit(). */
710 /* Notreached by child. */ 706 /* Notreached by child. */
711 "1:" : 707 "1:" :
712 "=r" (retval) : 708 "=r" (retval) :
713 "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED), 709 "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
714 "i" (__NR_exit), "r" (fn), "r" (arg) : 710 "i" (__NR_exit), "r" (fn), "r" (arg) :
715 "g1", "g2", "g3", "o0", "o1", "memory", "cc"); 711 "g1", "g2", "g3", "o0", "o1", "memory", "cc");
716 return retval; 712 return retval;
717 } 713 }
718 714
719 /* 715 /*
720 * fill in the user structure for a core dump.. 716 * fill in the user structure for a core dump..
721 */ 717 */
722 void dump_thread(struct pt_regs * regs, struct user * dump) 718 void dump_thread(struct pt_regs * regs, struct user * dump)
723 { 719 {
724 /* Only should be used for SunOS and ancient a.out 720 /* Only should be used for SunOS and ancient a.out
725 * SparcLinux binaries... Not worth implementing. 721 * SparcLinux binaries... Not worth implementing.
726 */ 722 */
727 memset(dump, 0, sizeof(struct user)); 723 memset(dump, 0, sizeof(struct user));
728 } 724 }
729 725
/*
 * 32-bit layout of the ELF FPU register set, used by dump_fpu() when
 * the dumped task runs the 32-bit ABI (the caller's elf_fpregset_t
 * buffer is reinterpreted with this layout).
 */
typedef struct {
	union {
		unsigned int pr_regs[32];	/* FP registers as 32-bit words */
		unsigned long pr_dregs[16];	/* same storage as 64-bit doubles */
	} pr_fr;
	unsigned int __unused;
	unsigned int pr_fsr;		/* %fsr (truncated to 32 bits) */
	unsigned char pr_qcnt;		/* FP queue entry count (dumped as 0) */
	unsigned char pr_q_entrysize;	/* FP queue entry size in bytes */
	unsigned char pr_en;		/* 1 if the FPU was enabled (FPRS_FEF) */
	unsigned int pr_q[64];		/* FP queue contents (dumped zeroed) */
} elf_fpregset_t32;
742 738
743 /* 739 /*
744 * fill in the fpu structure for a core dump. 740 * fill in the fpu structure for a core dump.
745 */ 741 */
746 int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) 742 int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
747 { 743 {
748 unsigned long *kfpregs = current_thread_info()->fpregs; 744 unsigned long *kfpregs = current_thread_info()->fpregs;
749 unsigned long fprs = current_thread_info()->fpsaved[0]; 745 unsigned long fprs = current_thread_info()->fpsaved[0];
750 746
751 if (test_thread_flag(TIF_32BIT)) { 747 if (test_thread_flag(TIF_32BIT)) {
752 elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs; 748 elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;
753 749
754 if (fprs & FPRS_DL) 750 if (fprs & FPRS_DL)
755 memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs, 751 memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
756 sizeof(unsigned int) * 32); 752 sizeof(unsigned int) * 32);
757 else 753 else
758 memset(&fpregs32->pr_fr.pr_regs[0], 0, 754 memset(&fpregs32->pr_fr.pr_regs[0], 0,
759 sizeof(unsigned int) * 32); 755 sizeof(unsigned int) * 32);
760 fpregs32->pr_qcnt = 0; 756 fpregs32->pr_qcnt = 0;
761 fpregs32->pr_q_entrysize = 8; 757 fpregs32->pr_q_entrysize = 8;
762 memset(&fpregs32->pr_q[0], 0, 758 memset(&fpregs32->pr_q[0], 0,
763 (sizeof(unsigned int) * 64)); 759 (sizeof(unsigned int) * 64));
764 if (fprs & FPRS_FEF) { 760 if (fprs & FPRS_FEF) {
765 fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0]; 761 fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
766 fpregs32->pr_en = 1; 762 fpregs32->pr_en = 1;
767 } else { 763 } else {
768 fpregs32->pr_fsr = 0; 764 fpregs32->pr_fsr = 0;
769 fpregs32->pr_en = 0; 765 fpregs32->pr_en = 0;
770 } 766 }
771 } else { 767 } else {
772 if(fprs & FPRS_DL) 768 if(fprs & FPRS_DL)
773 memcpy(&fpregs->pr_regs[0], kfpregs, 769 memcpy(&fpregs->pr_regs[0], kfpregs,
774 sizeof(unsigned int) * 32); 770 sizeof(unsigned int) * 32);
775 else 771 else
776 memset(&fpregs->pr_regs[0], 0, 772 memset(&fpregs->pr_regs[0], 0,
777 sizeof(unsigned int) * 32); 773 sizeof(unsigned int) * 32);
778 if(fprs & FPRS_DU) 774 if(fprs & FPRS_DU)
779 memcpy(&fpregs->pr_regs[16], kfpregs+16, 775 memcpy(&fpregs->pr_regs[16], kfpregs+16,
780 sizeof(unsigned int) * 32); 776 sizeof(unsigned int) * 32);
781 else 777 else
782 memset(&fpregs->pr_regs[16], 0, 778 memset(&fpregs->pr_regs[16], 0,
783 sizeof(unsigned int) * 32); 779 sizeof(unsigned int) * 32);
784 if(fprs & FPRS_FEF) { 780 if(fprs & FPRS_FEF) {
785 fpregs->pr_fsr = current_thread_info()->xfsr[0]; 781 fpregs->pr_fsr = current_thread_info()->xfsr[0];
786 fpregs->pr_gsr = current_thread_info()->gsr[0]; 782 fpregs->pr_gsr = current_thread_info()->gsr[0];
787 } else { 783 } else {
788 fpregs->pr_fsr = fpregs->pr_gsr = 0; 784 fpregs->pr_fsr = fpregs->pr_gsr = 0;
789 } 785 }
790 fpregs->pr_fprs = fprs; 786 fpregs->pr_fprs = fprs;
791 } 787 }
792 return 1; 788 return 1;
793 } 789 }
794 790
795 /* 791 /*
796 * sparc_execve() executes a new program after the asm stub has set 792 * sparc_execve() executes a new program after the asm stub has set
797 * things up for us. This should basically do what I want it to. 793 * things up for us. This should basically do what I want it to.
798 */ 794 */
799 asmlinkage int sparc_execve(struct pt_regs *regs) 795 asmlinkage int sparc_execve(struct pt_regs *regs)
800 { 796 {
801 int error, base = 0; 797 int error, base = 0;
802 char *filename; 798 char *filename;
803 799
804 /* User register window flush is done by entry.S */ 800 /* User register window flush is done by entry.S */
805 801
806 /* Check for indirect call. */ 802 /* Check for indirect call. */
807 if (regs->u_regs[UREG_G1] == 0) 803 if (regs->u_regs[UREG_G1] == 0)
808 base = 1; 804 base = 1;
809 805
810 filename = getname((char __user *)regs->u_regs[base + UREG_I0]); 806 filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
811 error = PTR_ERR(filename); 807 error = PTR_ERR(filename);
812 if (IS_ERR(filename)) 808 if (IS_ERR(filename))
813 goto out; 809 goto out;
814 error = do_execve(filename, 810 error = do_execve(filename,
815 (char __user * __user *) 811 (char __user * __user *)
816 regs->u_regs[base + UREG_I1], 812 regs->u_regs[base + UREG_I1],
817 (char __user * __user *) 813 (char __user * __user *)
818 regs->u_regs[base + UREG_I2], regs); 814 regs->u_regs[base + UREG_I2], regs);
819 putname(filename); 815 putname(filename);
820 if (!error) { 816 if (!error) {
821 fprs_write(0); 817 fprs_write(0);
822 current_thread_info()->xfsr[0] = 0; 818 current_thread_info()->xfsr[0] = 0;
823 current_thread_info()->fpsaved[0] = 0; 819 current_thread_info()->fpsaved[0] = 0;
824 regs->tstate &= ~TSTATE_PEF; 820 regs->tstate &= ~TSTATE_PEF;
825 task_lock(current); 821 task_lock(current);
826 current->ptrace &= ~PT_DTRACE; 822 current->ptrace &= ~PT_DTRACE;
827 task_unlock(current); 823 task_unlock(current);
828 } 824 }
829 out: 825 out:
830 return error; 826 return error;
831 } 827 }
832 828
/*
 * get_wchan() - report where a sleeping task is blocked: walk up to 16
 * saved register windows on its kernel stack and return the first
 * return address outside the scheduler.  Returns 0 for NULL, current,
 * or running tasks, or when no such frame is found.
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	unsigned long thread_info_base;
	struct reg_window *rw;
	unsigned long ret = 0;
	int count = 0;

	if (!task || task == current ||
	    task->state == TASK_RUNNING)
		goto out;

	thread_info_base = (unsigned long) task->thread_info;
	bias = STACK_BIAS;
	fp = task->thread_info->ksp + bias;

	do {
		/* Bogus frame pointer? Stop if it leaves the task's
		 * own kernel stack area. */
		if (fp < (thread_info_base + sizeof(struct thread_info)) ||
		    fp >= (thread_info_base + THREAD_SIZE))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];	/* saved return address */
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;	/* caller's frame pointer */
	} while (++count < 16);

out:
	return ret;
}
866 862
arch/um/kernel/reboot.c
1 /* 1 /*
2 * Copyright (C) 2000, 2002 Jeff Dike (jdike@karaya.com) 2 * Copyright (C) 2000, 2002 Jeff Dike (jdike@karaya.com)
3 * Licensed under the GPL 3 * Licensed under the GPL
4 */ 4 */
5 5
6 #include "linux/module.h" 6 #include "linux/module.h"
7 #include "linux/sched.h" 7 #include "linux/sched.h"
8 #include "user_util.h" 8 #include "user_util.h"
9 #include "kern_util.h" 9 #include "kern_util.h"
10 #include "kern.h" 10 #include "kern.h"
11 #include "os.h" 11 #include "os.h"
12 #include "mode.h" 12 #include "mode.h"
13 #include "choose-mode.h" 13 #include "choose-mode.h"
14 14
#ifdef CONFIG_SMP
/*
 * Kill the host processes backing the other CPUs' idle threads.
 * @me is the caller's own host pid, which is spared.  Idle threads
 * exist as separate host processes only in tt mode, hence the inner
 * #ifdef (in skas mode this is a no-op).
 */
static void kill_idlers(int me)
{
#ifdef CONFIG_MODE_TT
	struct task_struct *p;
	int i;

	for(i = 0; i < sizeof(idle_threads)/sizeof(idle_threads[0]); i++){
		p = idle_threads[i];
		if((p != NULL) && (p->thread.mode.tt.extern_pid != me))
			os_kill_process(p->thread.mode.tt.extern_pid, 0);
	}
#endif
}
#endif
30 30
/*
 * Tear down all host processes backing UML tasks (mode-specific),
 * plus, on SMP, the idle threads of the other virtual CPUs.
 */
static void kill_off_processes(void)
{
	CHOOSE_MODE(kill_off_processes_tt(), kill_off_processes_skas());
#ifdef CONFIG_SMP
	kill_idlers(os_getpid());
#endif
}
38 38
/*
 * Common teardown before halting or rebooting the UML instance:
 * clear kmalloc_ok first (presumably signals that allocation is no
 * longer safe during teardown — TODO confirm), then run the
 * registered exitcalls and kill the backing host processes.
 */
void uml_cleanup(void)
{
	kmalloc_ok = 0;
	do_uml_exitcalls();
	kill_off_processes();
}
45 45
/*
 * machine_restart() - low-level arch hook to reboot the UML instance:
 * full UML teardown, then reboot in the running mode (tt or skas).
 * Deliberately not exported: modules must use kernel_restart() or
 * emergency_restart() instead of calling this directly.
 */
void machine_restart(char * __unused)
{
	uml_cleanup();
	CHOOSE_MODE(reboot_tt(), reboot_skas());
}

/*
 * machine_power_off() - low-level arch hook to power off: same UML
 * teardown as restart, but halts instead of rebooting.  Not exported;
 * modules go through kernel_power_off().
 */
void machine_power_off(void)
{
	uml_cleanup();
	CHOOSE_MODE(halt_tt(), halt_skas());
}

/*
 * machine_halt() - on UML, halting and powering off are the same
 * operation.  Not exported; modules go through kernel_halt().
 */
void machine_halt(void)
{
	machine_power_off();
}

69 /* 63 /*
70 * Overrides for Emacs so that we follow Linus's tabbing style. 64 * Overrides for Emacs so that we follow Linus's tabbing style.
71 * Emacs will notice this stuff at the end of the file and automatically 65 * Emacs will notice this stuff at the end of the file and automatically
72 * adjust the settings for this buffer only. This must remain at the end 66 * adjust the settings for this buffer only. This must remain at the end
73 * of the file. 67 * of the file.
74 * --------------------------------------------------------------------------- 68 * ---------------------------------------------------------------------------
75 * Local variables: 69 * Local variables:
76 * c-file-style: "linux" 70 * c-file-style: "linux"
77 * End: 71 * End:
78 */ 72 */
79 73
arch/v850/kernel/anna.c
1 /* 1 /*
2 * arch/v850/kernel/anna.c -- Anna V850E2 evaluation chip/board 2 * arch/v850/kernel/anna.c -- Anna V850E2 evaluation chip/board
3 * 3 *
4 * Copyright (C) 2002,03 NEC Electronics Corporation 4 * Copyright (C) 2002,03 NEC Electronics Corporation
5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org> 5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General 7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this 8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details. 9 * archive for more details.
10 * 10 *
11 * Written by Miles Bader <miles@gnu.org> 11 * Written by Miles Bader <miles@gnu.org>
12 */ 12 */
13 13
14 #include <linux/config.h> 14 #include <linux/config.h>
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/module.h> 16 #include <linux/module.h>
17 #include <linux/init.h> 17 #include <linux/init.h>
18 #include <linux/bootmem.h> 18 #include <linux/bootmem.h>
19 #include <linux/major.h> 19 #include <linux/major.h>
20 #include <linux/irq.h> 20 #include <linux/irq.h>
21 21
22 #include <asm/machdep.h> 22 #include <asm/machdep.h>
23 #include <asm/atomic.h> 23 #include <asm/atomic.h>
24 #include <asm/page.h> 24 #include <asm/page.h>
25 #include <asm/v850e_timer_d.h> 25 #include <asm/v850e_timer_d.h>
26 #include <asm/v850e_uart.h> 26 #include <asm/v850e_uart.h>
27 27
28 #include "mach.h" 28 #include "mach.h"
29 29
30 30
31 /* SRAM and SDRAM are vaguely contiguous (with a big hole in between; see 31 /* SRAM and SDRAM are vaguely contiguous (with a big hole in between; see
32 mach_reserve_bootmem for details); use both as one big area. */ 32 mach_reserve_bootmem for details); use both as one big area. */
33 #define RAM_START SRAM_ADDR 33 #define RAM_START SRAM_ADDR
34 #define RAM_END (SDRAM_ADDR + SDRAM_SIZE) 34 #define RAM_END (SDRAM_ADDR + SDRAM_SIZE)
35 35
36 /* The bits of this port are connected to an 8-LED bar-graph. */ 36 /* The bits of this port are connected to an 8-LED bar-graph. */
37 #define LEDS_PORT 0 37 #define LEDS_PORT 0
38 38
39 39
40 static void anna_led_tick (void); 40 static void anna_led_tick (void);
41 41
42 42
/*
 * Early chip setup for the Anna V850E2 evaluation board: program the
 * external-bus chip-select / bus-cycle / SDRAM controller registers
 * and mask all interrupts.
 * NOTE(review): the register values below are board-specific magic
 * numbers — confirm against the Anna hardware manual before changing.
 */
void __init mach_early_init (void)
{
	ANNA_ILBEN = 0;		/* NOTE(review): confirm ILBEN semantics */

	V850E2_CSC(0) = 0x402F;
	V850E2_CSC(1) = 0x4000;
	V850E2_BPC = 0;
	V850E2_BSC = 0xAAAA;
	V850E2_BEC = 0;

#if 0
	V850E2_BHC = 0xFFFF; /* icache all memory, dcache all */
#else
	V850E2_BHC = 0; /* cache no memory */
#endif
	V850E2_BCT(0) = 0xB088;
	V850E2_BCT(1) = 0x0008;
	V850E2_DWC(0) = 0x0027;
	V850E2_DWC(1) = 0;
	V850E2_BCC = 0x0006;
	V850E2_ASC = 0;
	V850E2_LBS = 0x0089;
	V850E2_SCR(3) = 0x21A9;
	V850E2_RFS(3) = 0x8121;

	/* Mask everything until the IRQ machinery is set up. */
	v850e_intc_disable_irqs ();
}
70 70
/*
 * Board-level setup: configure the LED bar-graph port and hook the
 * per-tick LED update routine into mach_tick.
 */
void __init mach_setup (char **cmdline)
{
	ANNA_PORT_PM (LEDS_PORT) = 0; /* Make all LED pins output pins. */
	mach_tick = anna_led_tick;
}
76 76
77 void __init mach_get_physical_ram (unsigned long *ram_start, 77 void __init mach_get_physical_ram (unsigned long *ram_start,
78 unsigned long *ram_len) 78 unsigned long *ram_len)
79 { 79 {
80 *ram_start = RAM_START; 80 *ram_start = RAM_START;
81 *ram_len = RAM_END - RAM_START; 81 *ram_len = RAM_END - RAM_START;
82 } 82 }
83 83
/* Reserve physical address ranges the kernel must not allocate from. */
void __init mach_reserve_bootmem ()
{
	/* The space between SRAM and SDRAM is filled with duplicate
	   images of SRAM.  Prevent the kernel from using them.  */
	reserve_bootmem (SRAM_ADDR + SRAM_SIZE,
			 SDRAM_ADDR - (SRAM_ADDR + SRAM_SIZE));
}
91 91
92 void mach_gettimeofday (struct timespec *tv) 92 void mach_gettimeofday (struct timespec *tv)
93 { 93 {
94 tv->tv_sec = 0; 94 tv->tv_sec = 0;
95 tv->tv_nsec = 0; 95 tv->tv_nsec = 0;
96 } 96 }
97 97
98 void __init mach_sched_init (struct irqaction *timer_action) 98 void __init mach_sched_init (struct irqaction *timer_action)
99 { 99 {
100 /* Start hardware timer. */ 100 /* Start hardware timer. */
101 v850e_timer_d_configure (0, HZ); 101 v850e_timer_d_configure (0, HZ);
102 /* Install timer interrupt handler. */ 102 /* Install timer interrupt handler. */
103 setup_irq (IRQ_INTCMD(0), timer_action); 103 setup_irq (IRQ_INTCMD(0), timer_action);
104 } 104 }
105 105
106 static struct v850e_intc_irq_init irq_inits[] = { 106 static struct v850e_intc_irq_init irq_inits[] = {
107 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 }, 107 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 },
108 { "PIN", IRQ_INTP(0), IRQ_INTP_NUM, 1, 4 }, 108 { "PIN", IRQ_INTP(0), IRQ_INTP_NUM, 1, 4 },
109 { "CCC", IRQ_INTCCC(0), IRQ_INTCCC_NUM, 1, 5 }, 109 { "CCC", IRQ_INTCCC(0), IRQ_INTCCC_NUM, 1, 5 },
110 { "CMD", IRQ_INTCMD(0), IRQ_INTCMD_NUM, 1, 5 }, 110 { "CMD", IRQ_INTCMD(0), IRQ_INTCMD_NUM, 1, 5 },
111 { "DMA", IRQ_INTDMA(0), IRQ_INTDMA_NUM, 1, 2 }, 111 { "DMA", IRQ_INTDMA(0), IRQ_INTDMA_NUM, 1, 2 },
112 { "DMXER", IRQ_INTDMXER,1, 1, 2 }, 112 { "DMXER", IRQ_INTDMXER,1, 1, 2 },
113 { "SRE", IRQ_INTSRE(0), IRQ_INTSRE_NUM, 3, 3 }, 113 { "SRE", IRQ_INTSRE(0), IRQ_INTSRE_NUM, 3, 3 },
114 { "SR", IRQ_INTSR(0), IRQ_INTSR_NUM, 3, 4 }, 114 { "SR", IRQ_INTSR(0), IRQ_INTSR_NUM, 3, 4 },
115 { "ST", IRQ_INTST(0), IRQ_INTST_NUM, 3, 5 }, 115 { "ST", IRQ_INTST(0), IRQ_INTST_NUM, 3, 5 },
116 { 0 } 116 { 0 }
117 }; 117 };
118 #define NUM_IRQ_INITS ((sizeof irq_inits / sizeof irq_inits[0]) - 1) 118 #define NUM_IRQ_INITS ((sizeof irq_inits / sizeof irq_inits[0]) - 1)
119 119
120 static struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS]; 120 static struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS];
121 121
122 void __init mach_init_irqs (void) 122 void __init mach_init_irqs (void)
123 { 123 {
124 v850e_intc_init_irq_types (irq_inits, hw_itypes); 124 v850e_intc_init_irq_types (irq_inits, hw_itypes);
125 } 125 }
126 126
127 void machine_restart (char *__unused) 127 void machine_restart (char *__unused)
128 { 128 {
129 #ifdef CONFIG_RESET_GUARD 129 #ifdef CONFIG_RESET_GUARD
130 disable_reset_guard (); 130 disable_reset_guard ();
131 #endif 131 #endif
132 asm ("jmp r0"); /* Jump to the reset vector. */ 132 asm ("jmp r0"); /* Jump to the reset vector. */
133 } 133 }
134 134
135 EXPORT_SYMBOL(machine_restart);
136
137 void machine_halt (void) 135 void machine_halt (void)
138 { 136 {
139 #ifdef CONFIG_RESET_GUARD 137 #ifdef CONFIG_RESET_GUARD
140 disable_reset_guard (); 138 disable_reset_guard ();
141 #endif 139 #endif
142 local_irq_disable (); /* Ignore all interrupts. */ 140 local_irq_disable (); /* Ignore all interrupts. */
143 ANNA_PORT_IO(LEDS_PORT) = 0xAA; /* Note that we halted. */ 141 ANNA_PORT_IO(LEDS_PORT) = 0xAA; /* Note that we halted. */
144 for (;;) 142 for (;;)
145 asm ("halt; nop; nop; nop; nop; nop"); 143 asm ("halt; nop; nop; nop; nop; nop");
146 } 144 }
147 145
148 EXPORT_SYMBOL(machine_halt);
149
150 void machine_power_off (void) 146 void machine_power_off (void)
151 { 147 {
152 machine_halt (); 148 machine_halt ();
153 } 149 }
154
155 EXPORT_SYMBOL(machine_power_off);
156 150
157 /* Called before configuring an on-chip UART. */ 151 /* Called before configuring an on-chip UART. */
158 void anna_uart_pre_configure (unsigned chan, unsigned cflags, unsigned baud) 152 void anna_uart_pre_configure (unsigned chan, unsigned cflags, unsigned baud)
159 { 153 {
160 /* The Anna connects some general-purpose I/O pins on the CPU to 154 /* The Anna connects some general-purpose I/O pins on the CPU to
161 the RTS/CTS lines of UART 1's serial connection. I/O pins P07 155 the RTS/CTS lines of UART 1's serial connection. I/O pins P07
162 and P37 are RTS and CTS respectively. */ 156 and P37 are RTS and CTS respectively. */
163 if (chan == 1) { 157 if (chan == 1) {
164 ANNA_PORT_PM(0) &= ~0x80; /* P07 in output mode */ 158 ANNA_PORT_PM(0) &= ~0x80; /* P07 in output mode */
165 ANNA_PORT_PM(3) |= 0x80; /* P37 in input mode */ 159 ANNA_PORT_PM(3) |= 0x80; /* P37 in input mode */
166 } 160 }
167 } 161 }
168 162
169 /* Minimum and maximum bounds for the moving upper LED boundary in the 163 /* Minimum and maximum bounds for the moving upper LED boundary in the
170 clock tick display. We can't use the last bit because it's used for 164 clock tick display. We can't use the last bit because it's used for
171 UART0's CTS output. */ 165 UART0's CTS output. */
172 #define MIN_MAX_POS 0 166 #define MIN_MAX_POS 0
173 #define MAX_MAX_POS 6 167 #define MAX_MAX_POS 6
174 168
175 /* There are MAX_MAX_POS^2 - MIN_MAX_POS^2 cycles in the animation, so if 169 /* There are MAX_MAX_POS^2 - MIN_MAX_POS^2 cycles in the animation, so if
176 we pick 6 and 0 as above, we get 49 cycles, which is when divided into 170 we pick 6 and 0 as above, we get 49 cycles, which is when divided into
177 the standard 100 value for HZ, gives us an almost 1s total time. */ 171 the standard 100 value for HZ, gives us an almost 1s total time. */
178 #define TICKS_PER_FRAME \ 172 #define TICKS_PER_FRAME \
179 (HZ / (MAX_MAX_POS * MAX_MAX_POS - MIN_MAX_POS * MIN_MAX_POS)) 173 (HZ / (MAX_MAX_POS * MAX_MAX_POS - MIN_MAX_POS * MIN_MAX_POS))
180 174
181 static void anna_led_tick () 175 static void anna_led_tick ()
182 { 176 {
183 static unsigned counter = 0; 177 static unsigned counter = 0;
184 178
185 if (++counter == TICKS_PER_FRAME) { 179 if (++counter == TICKS_PER_FRAME) {
186 static int pos = 0, max_pos = MAX_MAX_POS, dir = 1; 180 static int pos = 0, max_pos = MAX_MAX_POS, dir = 1;
187 181
188 if (dir > 0 && pos == max_pos) { 182 if (dir > 0 && pos == max_pos) {
189 dir = -1; 183 dir = -1;
190 if (max_pos == MIN_MAX_POS) 184 if (max_pos == MIN_MAX_POS)
191 max_pos = MAX_MAX_POS; 185 max_pos = MAX_MAX_POS;
192 else 186 else
193 max_pos--; 187 max_pos--;
194 } else { 188 } else {
195 if (dir < 0 && pos == 0) 189 if (dir < 0 && pos == 0)
196 dir = 1; 190 dir = 1;
197 191
198 if (pos + dir <= max_pos) { 192 if (pos + dir <= max_pos) {
199 /* Each bit of port 0 has a LED. */ 193 /* Each bit of port 0 has a LED. */
200 clear_bit (pos, &ANNA_PORT_IO(LEDS_PORT)); 194 clear_bit (pos, &ANNA_PORT_IO(LEDS_PORT));
201 pos += dir; 195 pos += dir;
202 set_bit (pos, &ANNA_PORT_IO(LEDS_PORT)); 196 set_bit (pos, &ANNA_PORT_IO(LEDS_PORT));
203 } 197 }
204 } 198 }
205 199
206 counter = 0; 200 counter = 0;
207 } 201 }
208 } 202 }
209 203
arch/v850/kernel/as85ep1.c
1 /* 1 /*
2 * arch/v850/kernel/as85ep1.c -- AS85EP1 V850E evaluation chip/board 2 * arch/v850/kernel/as85ep1.c -- AS85EP1 V850E evaluation chip/board
3 * 3 *
4 * Copyright (C) 2002,03 NEC Electronics Corporation 4 * Copyright (C) 2002,03 NEC Electronics Corporation
5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org> 5 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General 7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this 8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details. 9 * archive for more details.
10 * 10 *
11 * Written by Miles Bader <miles@gnu.org> 11 * Written by Miles Bader <miles@gnu.org>
12 */ 12 */
13 13
14 #include <linux/config.h> 14 #include <linux/config.h>
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/module.h> 16 #include <linux/module.h>
17 #include <linux/init.h> 17 #include <linux/init.h>
18 #include <linux/bootmem.h> 18 #include <linux/bootmem.h>
19 #include <linux/major.h> 19 #include <linux/major.h>
20 #include <linux/irq.h> 20 #include <linux/irq.h>
21 21
22 #include <asm/machdep.h> 22 #include <asm/machdep.h>
23 #include <asm/atomic.h> 23 #include <asm/atomic.h>
24 #include <asm/page.h> 24 #include <asm/page.h>
25 #include <asm/v850e_timer_d.h> 25 #include <asm/v850e_timer_d.h>
26 #include <asm/v850e_uart.h> 26 #include <asm/v850e_uart.h>
27 27
28 #include "mach.h" 28 #include "mach.h"
29 29
30 30
31 /* SRAM and SDRAM are vaguely contiguous (with a big hole in between; see 31 /* SRAM and SDRAM are vaguely contiguous (with a big hole in between; see
32 mach_reserve_bootmem for details); use both as one big area. */ 32 mach_reserve_bootmem for details); use both as one big area. */
33 #define RAM_START SRAM_ADDR 33 #define RAM_START SRAM_ADDR
34 #define RAM_END (SDRAM_ADDR + SDRAM_SIZE) 34 #define RAM_END (SDRAM_ADDR + SDRAM_SIZE)
35 35
36 /* The bits of this port are connected to an 8-LED bar-graph. */ 36 /* The bits of this port are connected to an 8-LED bar-graph. */
37 #define LEDS_PORT 4 37 #define LEDS_PORT 4
38 38
39 39
40 static void as85ep1_led_tick (void); 40 static void as85ep1_led_tick (void);
41 41
42 extern char _intv_copy_src_start, _intv_copy_src_end; 42 extern char _intv_copy_src_start, _intv_copy_src_end;
43 extern char _intv_copy_dst_start; 43 extern char _intv_copy_dst_start;
44 44
45 45
46 void __init mach_early_init (void) 46 void __init mach_early_init (void)
47 { 47 {
48 #ifndef CONFIG_ROM_KERNEL 48 #ifndef CONFIG_ROM_KERNEL
49 const u32 *src; 49 const u32 *src;
50 register u32 *dst asm ("ep"); 50 register u32 *dst asm ("ep");
51 #endif 51 #endif
52 52
53 AS85EP1_CSC(0) = 0x0403; 53 AS85EP1_CSC(0) = 0x0403;
54 AS85EP1_BCT(0) = 0xB8B8; 54 AS85EP1_BCT(0) = 0xB8B8;
55 AS85EP1_DWC(0) = 0x0104; 55 AS85EP1_DWC(0) = 0x0104;
56 AS85EP1_BCC = 0x0012; 56 AS85EP1_BCC = 0x0012;
57 AS85EP1_ASC = 0; 57 AS85EP1_ASC = 0;
58 AS85EP1_LBS = 0x00A9; 58 AS85EP1_LBS = 0x00A9;
59 59
60 AS85EP1_PORT_PMC(6) = 0xFF; /* valid A0,A1,A20-A25 */ 60 AS85EP1_PORT_PMC(6) = 0xFF; /* valid A0,A1,A20-A25 */
61 AS85EP1_PORT_PMC(7) = 0x0E; /* valid CS1-CS3 */ 61 AS85EP1_PORT_PMC(7) = 0x0E; /* valid CS1-CS3 */
62 AS85EP1_PORT_PMC(9) = 0xFF; /* valid D16-D23 */ 62 AS85EP1_PORT_PMC(9) = 0xFF; /* valid D16-D23 */
63 AS85EP1_PORT_PMC(10) = 0xFF; /* valid D24-D31 */ 63 AS85EP1_PORT_PMC(10) = 0xFF; /* valid D24-D31 */
64 64
65 AS85EP1_RFS(1) = 0x800c; 65 AS85EP1_RFS(1) = 0x800c;
66 AS85EP1_RFS(3) = 0x800c; 66 AS85EP1_RFS(3) = 0x800c;
67 AS85EP1_SCR(1) = 0x20A9; 67 AS85EP1_SCR(1) = 0x20A9;
68 AS85EP1_SCR(3) = 0x20A9; 68 AS85EP1_SCR(3) = 0x20A9;
69 69
70 #ifndef CONFIG_ROM_KERNEL 70 #ifndef CONFIG_ROM_KERNEL
71 /* The early chip we have is buggy, and writing the interrupt 71 /* The early chip we have is buggy, and writing the interrupt
72 vectors into low RAM may screw up, so for non-ROM kernels, we 72 vectors into low RAM may screw up, so for non-ROM kernels, we
73 only rely on the reset vector being downloaded, and copy the 73 only rely on the reset vector being downloaded, and copy the
74 rest of the interrupt vectors into place here. The specific bug 74 rest of the interrupt vectors into place here. The specific bug
75 is that writing address N, where (N & 0x10) == 0x10, will _also_ 75 is that writing address N, where (N & 0x10) == 0x10, will _also_
76 write to address (N - 0x10). We avoid this (effectively) by 76 write to address (N - 0x10). We avoid this (effectively) by
77 writing in 16-byte chunks backwards from the end. */ 77 writing in 16-byte chunks backwards from the end. */
78 78
79 AS85EP1_IRAMM = 0x3; /* "write-mode" for the internal instruction memory */ 79 AS85EP1_IRAMM = 0x3; /* "write-mode" for the internal instruction memory */
80 80
81 src = (u32 *)(((u32)&_intv_copy_src_end - 1) & ~0xF); 81 src = (u32 *)(((u32)&_intv_copy_src_end - 1) & ~0xF);
82 dst = (u32 *)&_intv_copy_dst_start 82 dst = (u32 *)&_intv_copy_dst_start
83 + (src - (u32 *)&_intv_copy_src_start); 83 + (src - (u32 *)&_intv_copy_src_start);
84 do { 84 do {
85 u32 t0 = src[0], t1 = src[1], t2 = src[2], t3 = src[3]; 85 u32 t0 = src[0], t1 = src[1], t2 = src[2], t3 = src[3];
86 dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3; 86 dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
87 dst -= 4; 87 dst -= 4;
88 src -= 4; 88 src -= 4;
89 } while (src > (u32 *)&_intv_copy_src_start); 89 } while (src > (u32 *)&_intv_copy_src_start);
90 90
91 AS85EP1_IRAMM = 0x0; /* "read-mode" for the internal instruction memory */ 91 AS85EP1_IRAMM = 0x0; /* "read-mode" for the internal instruction memory */
92 #endif /* !CONFIG_ROM_KERNEL */ 92 #endif /* !CONFIG_ROM_KERNEL */
93 93
94 v850e_intc_disable_irqs (); 94 v850e_intc_disable_irqs ();
95 } 95 }
96 96
97 void __init mach_setup (char **cmdline) 97 void __init mach_setup (char **cmdline)
98 { 98 {
99 AS85EP1_PORT_PMC (LEDS_PORT) = 0; /* Make the LEDs port an I/O port. */ 99 AS85EP1_PORT_PMC (LEDS_PORT) = 0; /* Make the LEDs port an I/O port. */
100 AS85EP1_PORT_PM (LEDS_PORT) = 0; /* Make all the bits output pins. */ 100 AS85EP1_PORT_PM (LEDS_PORT) = 0; /* Make all the bits output pins. */
101 mach_tick = as85ep1_led_tick; 101 mach_tick = as85ep1_led_tick;
102 } 102 }
103 103
104 void __init mach_get_physical_ram (unsigned long *ram_start, 104 void __init mach_get_physical_ram (unsigned long *ram_start,
105 unsigned long *ram_len) 105 unsigned long *ram_len)
106 { 106 {
107 *ram_start = RAM_START; 107 *ram_start = RAM_START;
108 *ram_len = RAM_END - RAM_START; 108 *ram_len = RAM_END - RAM_START;
109 } 109 }
110 110
111 /* Convenience macros. */ 111 /* Convenience macros. */
112 #define SRAM_END (SRAM_ADDR + SRAM_SIZE) 112 #define SRAM_END (SRAM_ADDR + SRAM_SIZE)
113 #define SDRAM_END (SDRAM_ADDR + SDRAM_SIZE) 113 #define SDRAM_END (SDRAM_ADDR + SDRAM_SIZE)
114 114
115 void __init mach_reserve_bootmem () 115 void __init mach_reserve_bootmem ()
116 { 116 {
117 if (SDRAM_ADDR < RAM_END && SDRAM_ADDR > RAM_START) 117 if (SDRAM_ADDR < RAM_END && SDRAM_ADDR > RAM_START)
118 /* We can't use the space between SRAM and SDRAM, so 118 /* We can't use the space between SRAM and SDRAM, so
119 prevent the kernel from trying. */ 119 prevent the kernel from trying. */
120 reserve_bootmem (SRAM_END, SDRAM_ADDR - SRAM_END); 120 reserve_bootmem (SRAM_END, SDRAM_ADDR - SRAM_END);
121 } 121 }
122 122
123 void mach_gettimeofday (struct timespec *tv) 123 void mach_gettimeofday (struct timespec *tv)
124 { 124 {
125 tv->tv_sec = 0; 125 tv->tv_sec = 0;
126 tv->tv_nsec = 0; 126 tv->tv_nsec = 0;
127 } 127 }
128 128
129 void __init mach_sched_init (struct irqaction *timer_action) 129 void __init mach_sched_init (struct irqaction *timer_action)
130 { 130 {
131 /* Start hardware timer. */ 131 /* Start hardware timer. */
132 v850e_timer_d_configure (0, HZ); 132 v850e_timer_d_configure (0, HZ);
133 /* Install timer interrupt handler. */ 133 /* Install timer interrupt handler. */
134 setup_irq (IRQ_INTCMD(0), timer_action); 134 setup_irq (IRQ_INTCMD(0), timer_action);
135 } 135 }
136 136
137 static struct v850e_intc_irq_init irq_inits[] = { 137 static struct v850e_intc_irq_init irq_inits[] = {
138 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 }, 138 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 },
139 { "CCC", IRQ_INTCCC(0), IRQ_INTCCC_NUM, 1, 5 }, 139 { "CCC", IRQ_INTCCC(0), IRQ_INTCCC_NUM, 1, 5 },
140 { "CMD", IRQ_INTCMD(0), IRQ_INTCMD_NUM, 1, 5 }, 140 { "CMD", IRQ_INTCMD(0), IRQ_INTCMD_NUM, 1, 5 },
141 { "SRE", IRQ_INTSRE(0), IRQ_INTSRE_NUM, 3, 3 }, 141 { "SRE", IRQ_INTSRE(0), IRQ_INTSRE_NUM, 3, 3 },
142 { "SR", IRQ_INTSR(0), IRQ_INTSR_NUM, 3, 4 }, 142 { "SR", IRQ_INTSR(0), IRQ_INTSR_NUM, 3, 4 },
143 { "ST", IRQ_INTST(0), IRQ_INTST_NUM, 3, 5 }, 143 { "ST", IRQ_INTST(0), IRQ_INTST_NUM, 3, 5 },
144 { 0 } 144 { 0 }
145 }; 145 };
146 #define NUM_IRQ_INITS ((sizeof irq_inits / sizeof irq_inits[0]) - 1) 146 #define NUM_IRQ_INITS ((sizeof irq_inits / sizeof irq_inits[0]) - 1)
147 147
148 static struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS]; 148 static struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS];
149 149
150 void __init mach_init_irqs (void) 150 void __init mach_init_irqs (void)
151 { 151 {
152 v850e_intc_init_irq_types (irq_inits, hw_itypes); 152 v850e_intc_init_irq_types (irq_inits, hw_itypes);
153 } 153 }
154 154
155 void machine_restart (char *__unused) 155 void machine_restart (char *__unused)
156 { 156 {
157 #ifdef CONFIG_RESET_GUARD 157 #ifdef CONFIG_RESET_GUARD
158 disable_reset_guard (); 158 disable_reset_guard ();
159 #endif 159 #endif
160 asm ("jmp r0"); /* Jump to the reset vector. */ 160 asm ("jmp r0"); /* Jump to the reset vector. */
161 } 161 }
162 162
163 EXPORT_SYMBOL(machine_restart);
164
165 void machine_halt (void) 163 void machine_halt (void)
166 { 164 {
167 #ifdef CONFIG_RESET_GUARD 165 #ifdef CONFIG_RESET_GUARD
168 disable_reset_guard (); 166 disable_reset_guard ();
169 #endif 167 #endif
170 local_irq_disable (); /* Ignore all interrupts. */ 168 local_irq_disable (); /* Ignore all interrupts. */
171 AS85EP1_PORT_IO (LEDS_PORT) = 0xAA; /* Note that we halted. */ 169 AS85EP1_PORT_IO (LEDS_PORT) = 0xAA; /* Note that we halted. */
172 for (;;) 170 for (;;)
173 asm ("halt; nop; nop; nop; nop; nop"); 171 asm ("halt; nop; nop; nop; nop; nop");
174 } 172 }
175 173
176 EXPORT_SYMBOL(machine_halt);
177
178 void machine_power_off (void) 174 void machine_power_off (void)
179 { 175 {
180 machine_halt (); 176 machine_halt ();
181 } 177 }
182
183 EXPORT_SYMBOL(machine_power_off);
184 178
185 /* Called before configuring an on-chip UART. */ 179 /* Called before configuring an on-chip UART. */
186 void as85ep1_uart_pre_configure (unsigned chan, unsigned cflags, unsigned baud) 180 void as85ep1_uart_pre_configure (unsigned chan, unsigned cflags, unsigned baud)
187 { 181 {
188 /* Make the shared uart/port pins be uart pins. */ 182 /* Make the shared uart/port pins be uart pins. */
189 AS85EP1_PORT_PMC(3) |= (0x5 << chan); 183 AS85EP1_PORT_PMC(3) |= (0x5 << chan);
190 184
191 /* The AS85EP1 connects some general-purpose I/O pins on the CPU to 185 /* The AS85EP1 connects some general-purpose I/O pins on the CPU to
192 the RTS/CTS lines of UART 1's serial connection. I/O pins P53 186 the RTS/CTS lines of UART 1's serial connection. I/O pins P53
193 and P54 are RTS and CTS respectively. */ 187 and P54 are RTS and CTS respectively. */
194 if (chan == 1) { 188 if (chan == 1) {
195 /* Put P53 & P54 in I/O port mode. */ 189 /* Put P53 & P54 in I/O port mode. */
196 AS85EP1_PORT_PMC(5) &= ~0x18; 190 AS85EP1_PORT_PMC(5) &= ~0x18;
197 /* Make P53 an output, and P54 an input. */ 191 /* Make P53 an output, and P54 an input. */
198 AS85EP1_PORT_PM(5) |= 0x10; 192 AS85EP1_PORT_PM(5) |= 0x10;
199 } 193 }
200 } 194 }
201 195
202 /* Minimum and maximum bounds for the moving upper LED boundary in the 196 /* Minimum and maximum bounds for the moving upper LED boundary in the
203 clock tick display. */ 197 clock tick display. */
204 #define MIN_MAX_POS 0 198 #define MIN_MAX_POS 0
205 #define MAX_MAX_POS 7 199 #define MAX_MAX_POS 7
206 200
207 /* There are MAX_MAX_POS^2 - MIN_MAX_POS^2 cycles in the animation, so if 201 /* There are MAX_MAX_POS^2 - MIN_MAX_POS^2 cycles in the animation, so if
208 we pick 6 and 0 as above, we get 49 cycles, which is when divided into 202 we pick 6 and 0 as above, we get 49 cycles, which is when divided into
209 the standard 100 value for HZ, gives us an almost 1s total time. */ 203 the standard 100 value for HZ, gives us an almost 1s total time. */
210 #define TICKS_PER_FRAME \ 204 #define TICKS_PER_FRAME \
211 (HZ / (MAX_MAX_POS * MAX_MAX_POS - MIN_MAX_POS * MIN_MAX_POS)) 205 (HZ / (MAX_MAX_POS * MAX_MAX_POS - MIN_MAX_POS * MIN_MAX_POS))
212 206
213 static void as85ep1_led_tick () 207 static void as85ep1_led_tick ()
214 { 208 {
215 static unsigned counter = 0; 209 static unsigned counter = 0;
216 210
217 if (++counter == TICKS_PER_FRAME) { 211 if (++counter == TICKS_PER_FRAME) {
218 static int pos = 0, max_pos = MAX_MAX_POS, dir = 1; 212 static int pos = 0, max_pos = MAX_MAX_POS, dir = 1;
219 213
220 if (dir > 0 && pos == max_pos) { 214 if (dir > 0 && pos == max_pos) {
221 dir = -1; 215 dir = -1;
222 if (max_pos == MIN_MAX_POS) 216 if (max_pos == MIN_MAX_POS)
223 max_pos = MAX_MAX_POS; 217 max_pos = MAX_MAX_POS;
224 else 218 else
225 max_pos--; 219 max_pos--;
226 } else { 220 } else {
227 if (dir < 0 && pos == 0) 221 if (dir < 0 && pos == 0)
228 dir = 1; 222 dir = 1;
229 223
230 if (pos + dir <= max_pos) { 224 if (pos + dir <= max_pos) {
231 /* Each bit of port 0 has a LED. */ 225 /* Each bit of port 0 has a LED. */
232 set_bit (pos, &AS85EP1_PORT_IO(LEDS_PORT)); 226 set_bit (pos, &AS85EP1_PORT_IO(LEDS_PORT));
233 pos += dir; 227 pos += dir;
234 clear_bit (pos, &AS85EP1_PORT_IO(LEDS_PORT)); 228 clear_bit (pos, &AS85EP1_PORT_IO(LEDS_PORT));
235 } 229 }
236 } 230 }
237 231
238 counter = 0; 232 counter = 0;
239 } 233 }
240 } 234 }
241 235
arch/v850/kernel/fpga85e2c.c
1 /* 1 /*
2 * arch/v850/kernel/fpga85e2c.h -- Machine-dependent defs for 2 * arch/v850/kernel/fpga85e2c.h -- Machine-dependent defs for
3 * FPGA implementation of V850E2/NA85E2C 3 * FPGA implementation of V850E2/NA85E2C
4 * 4 *
5 * Copyright (C) 2002,03 NEC Electronics Corporation 5 * Copyright (C) 2002,03 NEC Electronics Corporation
6 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org> 6 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General 8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this 9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details. 10 * archive for more details.
11 * 11 *
12 * Written by Miles Bader <miles@gnu.org> 12 * Written by Miles Bader <miles@gnu.org>
13 */ 13 */
14 14
15 #include <linux/config.h> 15 #include <linux/config.h>
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/init.h> 18 #include <linux/init.h>
19 #include <linux/mm.h> 19 #include <linux/mm.h>
20 #include <linux/swap.h> 20 #include <linux/swap.h>
21 #include <linux/bootmem.h> 21 #include <linux/bootmem.h>
22 #include <linux/irq.h> 22 #include <linux/irq.h>
23 #include <linux/bitops.h> 23 #include <linux/bitops.h>
24 24
25 #include <asm/atomic.h> 25 #include <asm/atomic.h>
26 #include <asm/page.h> 26 #include <asm/page.h>
27 #include <asm/machdep.h> 27 #include <asm/machdep.h>
28 28
29 #include "mach.h" 29 #include "mach.h"
30 30
31 extern void memcons_setup (void); 31 extern void memcons_setup (void);
32 32
33 33
34 #define REG_DUMP_ADDR 0x220000 34 #define REG_DUMP_ADDR 0x220000
35 35
36 36
37 extern struct irqaction reg_snap_action; /* fwd decl */ 37 extern struct irqaction reg_snap_action; /* fwd decl */
38 38
39 39
40 void __init mach_early_init (void) 40 void __init mach_early_init (void)
41 { 41 {
42 int i; 42 int i;
43 const u32 *src; 43 const u32 *src;
44 register u32 *dst asm ("ep"); 44 register u32 *dst asm ("ep");
45 extern u32 _intv_end, _intv_load_start; 45 extern u32 _intv_end, _intv_load_start;
46 46
47 /* Set bus sizes: CS0 32-bit, CS1 16-bit, CS7 8-bit, 47 /* Set bus sizes: CS0 32-bit, CS1 16-bit, CS7 8-bit,
48 everything else 32-bit. */ 48 everything else 32-bit. */
49 V850E2_BSC = 0x2AA6; 49 V850E2_BSC = 0x2AA6;
50 for (i = 2; i <= 6; i++) 50 for (i = 2; i <= 6; i++)
51 CSDEV(i) = 0; /* 32 bit */ 51 CSDEV(i) = 0; /* 32 bit */
52 52
53 /* Ensure that the simulator halts on a panic, instead of going 53 /* Ensure that the simulator halts on a panic, instead of going
54 into an infinite loop inside the panic function. */ 54 into an infinite loop inside the panic function. */
55 panic_timeout = -1; 55 panic_timeout = -1;
56 56
57 /* Move the interrupt vectors into their real location. Note that 57 /* Move the interrupt vectors into their real location. Note that
58 any relocations there are relative to the real location, so we 58 any relocations there are relative to the real location, so we
59 don't have to fix anything up. We use a loop instead of calling 59 don't have to fix anything up. We use a loop instead of calling
60 memcpy to keep this a leaf function (to avoid a function 60 memcpy to keep this a leaf function (to avoid a function
61 prologue being generated). */ 61 prologue being generated). */
62 dst = 0x10; /* &_intv_start + 0x10. */ 62 dst = 0x10; /* &_intv_start + 0x10. */
63 src = &_intv_load_start; 63 src = &_intv_load_start;
64 do { 64 do {
65 u32 t0 = src[0], t1 = src[1], t2 = src[2], t3 = src[3]; 65 u32 t0 = src[0], t1 = src[1], t2 = src[2], t3 = src[3];
66 u32 t4 = src[4], t5 = src[5], t6 = src[6], t7 = src[7]; 66 u32 t4 = src[4], t5 = src[5], t6 = src[6], t7 = src[7];
67 dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3; 67 dst[0] = t0; dst[1] = t1; dst[2] = t2; dst[3] = t3;
68 dst[4] = t4; dst[5] = t5; dst[6] = t6; dst[7] = t7; 68 dst[4] = t4; dst[5] = t5; dst[6] = t6; dst[7] = t7;
69 dst += 8; 69 dst += 8;
70 src += 8; 70 src += 8;
71 } while (dst < &_intv_end); 71 } while (dst < &_intv_end);
72 } 72 }
73 73
74 void __init mach_setup (char **cmdline) 74 void __init mach_setup (char **cmdline)
75 { 75 {
76 memcons_setup (); 76 memcons_setup ();
77 77
78 /* Setup up NMI0 to copy the registers to a known memory location. 78 /* Setup up NMI0 to copy the registers to a known memory location.
79 The FGPA board has a button that produces NMI0 when pressed, so 79 The FGPA board has a button that produces NMI0 when pressed, so
80 this allows us to push the button, and then look at memory to see 80 this allows us to push the button, and then look at memory to see
81 what's in the registers (there's no other way to easily do so). 81 what's in the registers (there's no other way to easily do so).
82 We have to use `setup_irq' instead of `request_irq' because it's 82 We have to use `setup_irq' instead of `request_irq' because it's
83 still too early to do memory allocation. */ 83 still too early to do memory allocation. */
84 setup_irq (IRQ_NMI (0), &reg_snap_action); 84 setup_irq (IRQ_NMI (0), &reg_snap_action);
85 } 85 }
86 86
87 void mach_get_physical_ram (unsigned long *ram_start, unsigned long *ram_len) 87 void mach_get_physical_ram (unsigned long *ram_start, unsigned long *ram_len)
88 { 88 {
89 *ram_start = ERAM_ADDR; 89 *ram_start = ERAM_ADDR;
90 *ram_len = ERAM_SIZE; 90 *ram_len = ERAM_SIZE;
91 } 91 }
92 92
93 void __init mach_sched_init (struct irqaction *timer_action) 93 void __init mach_sched_init (struct irqaction *timer_action)
94 { 94 {
95 /* Setup up the timer interrupt. The FPGA peripheral control 95 /* Setup up the timer interrupt. The FPGA peripheral control
96 registers _only_ work with single-bit writes (set1/clr1)! */ 96 registers _only_ work with single-bit writes (set1/clr1)! */
97 __clear_bit (RPU_GTMC_CE_BIT, &RPU_GTMC); 97 __clear_bit (RPU_GTMC_CE_BIT, &RPU_GTMC);
98 __clear_bit (RPU_GTMC_CLK_BIT, &RPU_GTMC); 98 __clear_bit (RPU_GTMC_CLK_BIT, &RPU_GTMC);
99 __set_bit (RPU_GTMC_CE_BIT, &RPU_GTMC); 99 __set_bit (RPU_GTMC_CE_BIT, &RPU_GTMC);
100 100
101 /* We use the first RPU interrupt, which occurs every 8.192ms. */ 101 /* We use the first RPU interrupt, which occurs every 8.192ms. */
102 setup_irq (IRQ_RPU (0), timer_action); 102 setup_irq (IRQ_RPU (0), timer_action);
103 } 103 }
104 104
105 105
106 void mach_gettimeofday (struct timespec *tv) 106 void mach_gettimeofday (struct timespec *tv)
107 { 107 {
108 tv->tv_sec = 0; 108 tv->tv_sec = 0;
109 tv->tv_nsec = 0; 109 tv->tv_nsec = 0;
110 } 110 }
111 111
112 void machine_halt (void) __attribute__ ((noreturn)); 112 void machine_halt (void) __attribute__ ((noreturn));
113 void machine_halt (void) 113 void machine_halt (void)
114 { 114 {
115 for (;;) { 115 for (;;) {
116 DWC(0) = 0x7777; 116 DWC(0) = 0x7777;
117 DWC(1) = 0x7777; 117 DWC(1) = 0x7777;
118 ASC = 0xffff; 118 ASC = 0xffff;
119 FLGREG(0) = 1; /* Halt immediately. */ 119 FLGREG(0) = 1; /* Halt immediately. */
120 asm ("di; halt; nop; nop; nop; nop; nop"); 120 asm ("di; halt; nop; nop; nop; nop; nop");
121 } 121 }
122 } 122 }
123 123
124 EXPORT_SYMBOL(machine_halt);
125
126 void machine_restart (char *__unused) 124 void machine_restart (char *__unused)
127 { 125 {
128 machine_halt (); 126 machine_halt ();
129 } 127 }
130 128
131 EXPORT_SYMBOL(machine_restart);
132
133 void machine_power_off (void) 129 void machine_power_off (void)
134 { 130 {
135 machine_halt (); 131 machine_halt ();
136 } 132 }
137
138 EXPORT_SYMBOL(machine_power_off);
139 133
140 134
141 /* Interrupts */ 135 /* Interrupts */
142 136
143 struct v850e_intc_irq_init irq_inits[] = { 137 struct v850e_intc_irq_init irq_inits[] = {
144 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 }, 138 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 },
145 { "RPU", IRQ_RPU(0), IRQ_RPU_NUM, 1, 6 }, 139 { "RPU", IRQ_RPU(0), IRQ_RPU_NUM, 1, 6 },
146 { 0 } 140 { 0 }
147 }; 141 };
148 #define NUM_IRQ_INITS ((sizeof irq_inits / sizeof irq_inits[0]) - 1) 142 #define NUM_IRQ_INITS ((sizeof irq_inits / sizeof irq_inits[0]) - 1)
149 143
150 struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS]; 144 struct hw_interrupt_type hw_itypes[NUM_IRQ_INITS];
151 145
152 /* Initialize interrupts. */ 146 /* Initialize interrupts. */
153 void __init mach_init_irqs (void) 147 void __init mach_init_irqs (void)
154 { 148 {
155 v850e_intc_init_irq_types (irq_inits, hw_itypes); 149 v850e_intc_init_irq_types (irq_inits, hw_itypes);
156 } 150 }
157 151
158 152
159 /* An interrupt handler that copies the registers to a known memory location, 153 /* An interrupt handler that copies the registers to a known memory location,
160 for debugging purposes. */ 154 for debugging purposes. */
161 155
162 static void make_reg_snap (int irq, void *dummy, struct pt_regs *regs) 156 static void make_reg_snap (int irq, void *dummy, struct pt_regs *regs)
163 { 157 {
164 (*(unsigned *)REG_DUMP_ADDR)++; 158 (*(unsigned *)REG_DUMP_ADDR)++;
165 (*(struct pt_regs *)(REG_DUMP_ADDR + sizeof (unsigned))) = *regs; 159 (*(struct pt_regs *)(REG_DUMP_ADDR + sizeof (unsigned))) = *regs;
166 } 160 }
167 161
168 static int reg_snap_dev_id; 162 static int reg_snap_dev_id;
169 static struct irqaction reg_snap_action = { 163 static struct irqaction reg_snap_action = {
170 make_reg_snap, 0, CPU_MASK_NONE, "reg_snap", &reg_snap_dev_id, 0 164 make_reg_snap, 0, CPU_MASK_NONE, "reg_snap", &reg_snap_dev_id, 0
171 }; 165 };
172 166
arch/v850/kernel/rte_cb.c
1 /* 1 /*
2 * include/asm-v850/rte_cb.c -- Midas lab RTE-CB series of evaluation boards 2 * include/asm-v850/rte_cb.c -- Midas lab RTE-CB series of evaluation boards
3 * 3 *
4 * Copyright (C) 2001,02,03 NEC Electronics Corporation 4 * Copyright (C) 2001,02,03 NEC Electronics Corporation
5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org> 5 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General 7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this 8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details. 9 * archive for more details.
10 * 10 *
11 * Written by Miles Bader <miles@gnu.org> 11 * Written by Miles Bader <miles@gnu.org>
12 */ 12 */
13 13
14 #include <linux/config.h> 14 #include <linux/config.h>
15 #include <linux/init.h> 15 #include <linux/init.h>
16 #include <linux/irq.h> 16 #include <linux/irq.h>
17 #include <linux/fs.h> 17 #include <linux/fs.h>
18 #include <linux/module.h> 18 #include <linux/module.h>
19 19
20 #include <asm/machdep.h> 20 #include <asm/machdep.h>
21 #include <asm/v850e_uart.h> 21 #include <asm/v850e_uart.h>
22 22
23 #include "mach.h" 23 #include "mach.h"
24 24
25 static void led_tick (void); 25 static void led_tick (void);
26 26
27 /* LED access routines. */ 27 /* LED access routines. */
28 extern unsigned read_leds (int pos, char *buf, int len); 28 extern unsigned read_leds (int pos, char *buf, int len);
29 extern unsigned write_leds (int pos, const char *buf, int len); 29 extern unsigned write_leds (int pos, const char *buf, int len);
30 30
31 #ifdef CONFIG_RTE_CB_MULTI 31 #ifdef CONFIG_RTE_CB_MULTI
32 extern void multi_init (void); 32 extern void multi_init (void);
33 #endif 33 #endif
34 34
35 35
36 void __init rte_cb_early_init (void) 36 void __init rte_cb_early_init (void)
37 { 37 {
38 v850e_intc_disable_irqs (); 38 v850e_intc_disable_irqs ();
39 39
40 #ifdef CONFIG_RTE_CB_MULTI 40 #ifdef CONFIG_RTE_CB_MULTI
41 multi_init (); 41 multi_init ();
42 #endif 42 #endif
43 } 43 }
44 44
45 void __init mach_setup (char **cmdline) 45 void __init mach_setup (char **cmdline)
46 { 46 {
47 #ifdef CONFIG_RTE_MB_A_PCI 47 #ifdef CONFIG_RTE_MB_A_PCI
48 /* Probe for Mother-A, and print a message if we find it. */ 48 /* Probe for Mother-A, and print a message if we find it. */
49 *(volatile unsigned long *)MB_A_SRAM_ADDR = 0xDEADBEEF; 49 *(volatile unsigned long *)MB_A_SRAM_ADDR = 0xDEADBEEF;
50 if (*(volatile unsigned long *)MB_A_SRAM_ADDR == 0xDEADBEEF) { 50 if (*(volatile unsigned long *)MB_A_SRAM_ADDR == 0xDEADBEEF) {
51 *(volatile unsigned long *)MB_A_SRAM_ADDR = 0x12345678; 51 *(volatile unsigned long *)MB_A_SRAM_ADDR = 0x12345678;
52 if (*(volatile unsigned long *)MB_A_SRAM_ADDR == 0x12345678) 52 if (*(volatile unsigned long *)MB_A_SRAM_ADDR == 0x12345678)
53 printk (KERN_INFO 53 printk (KERN_INFO
54 " NEC SolutionGear/Midas lab" 54 " NEC SolutionGear/Midas lab"
55 " RTE-MOTHER-A motherboard\n"); 55 " RTE-MOTHER-A motherboard\n");
56 } 56 }
57 #endif /* CONFIG_RTE_MB_A_PCI */ 57 #endif /* CONFIG_RTE_MB_A_PCI */
58 58
59 mach_tick = led_tick; 59 mach_tick = led_tick;
60 } 60 }
61 61
62 void machine_restart (char *__unused) 62 void machine_restart (char *__unused)
63 { 63 {
64 #ifdef CONFIG_RESET_GUARD 64 #ifdef CONFIG_RESET_GUARD
65 disable_reset_guard (); 65 disable_reset_guard ();
66 #endif 66 #endif
67 asm ("jmp r0"); /* Jump to the reset vector. */ 67 asm ("jmp r0"); /* Jump to the reset vector. */
68 } 68 }
69 69
70 EXPORT_SYMBOL(machine_restart);
71
72 /* This says `HALt.' in LEDese. */ 70 /* This says `HALt.' in LEDese. */
73 static unsigned char halt_leds_msg[] = { 0x76, 0x77, 0x38, 0xF8 }; 71 static unsigned char halt_leds_msg[] = { 0x76, 0x77, 0x38, 0xF8 };
74 72
75 void machine_halt (void) 73 void machine_halt (void)
76 { 74 {
77 #ifdef CONFIG_RESET_GUARD 75 #ifdef CONFIG_RESET_GUARD
78 disable_reset_guard (); 76 disable_reset_guard ();
79 #endif 77 #endif
80 78
81 /* Ignore all interrupts. */ 79 /* Ignore all interrupts. */
82 local_irq_disable (); 80 local_irq_disable ();
83 81
84 /* Write a little message. */ 82 /* Write a little message. */
85 write_leds (0, halt_leds_msg, sizeof halt_leds_msg); 83 write_leds (0, halt_leds_msg, sizeof halt_leds_msg);
86 84
87 /* Really halt. */ 85 /* Really halt. */
88 for (;;) 86 for (;;)
89 asm ("halt; nop; nop; nop; nop; nop"); 87 asm ("halt; nop; nop; nop; nop; nop");
90 } 88 }
91 89
92 EXPORT_SYMBOL(machine_halt);
93
94 void machine_power_off (void) 90 void machine_power_off (void)
95 { 91 {
96 machine_halt (); 92 machine_halt ();
97 } 93 }
98
99 EXPORT_SYMBOL(machine_power_off);
100 94
101 95
102 /* Animated LED display for timer tick. */ 96 /* Animated LED display for timer tick. */
103 97
104 #define TICK_UPD_FREQ 6 98 #define TICK_UPD_FREQ 6
105 static int tick_frames[][10] = { 99 static int tick_frames[][10] = {
106 { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, -1 }, 100 { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, -1 },
107 { 0x63, 0x5c, -1 }, 101 { 0x63, 0x5c, -1 },
108 { 0x5c, 0x00, -1 }, 102 { 0x5c, 0x00, -1 },
109 { 0x63, 0x00, -1 }, 103 { 0x63, 0x00, -1 },
110 { -1 } 104 { -1 }
111 }; 105 };
112 106
113 static void led_tick () 107 static void led_tick ()
114 { 108 {
115 static unsigned counter = 0; 109 static unsigned counter = 0;
116 110
117 if (++counter == (HZ / TICK_UPD_FREQ)) { 111 if (++counter == (HZ / TICK_UPD_FREQ)) {
118 /* Which frame we're currently displaying for each digit. */ 112 /* Which frame we're currently displaying for each digit. */
119 static unsigned frame_nums[LED_NUM_DIGITS] = { 0 }; 113 static unsigned frame_nums[LED_NUM_DIGITS] = { 0 };
120 /* Display image. */ 114 /* Display image. */
121 static unsigned char image[LED_NUM_DIGITS] = { 0 }; 115 static unsigned char image[LED_NUM_DIGITS] = { 0 };
122 unsigned char prev_image[LED_NUM_DIGITS]; 116 unsigned char prev_image[LED_NUM_DIGITS];
123 int write_to_leds = 1; /* true if we should actually display */ 117 int write_to_leds = 1; /* true if we should actually display */
124 int digit; 118 int digit;
125 119
126 /* We check to see if the physical LEDs contains what we last 120 /* We check to see if the physical LEDs contains what we last
127 wrote to them; if not, we suppress display (this is so that 121 wrote to them; if not, we suppress display (this is so that
128 users can write to the LEDs, and not have their output 122 users can write to the LEDs, and not have their output
129 overwritten). As a special case, we start writing again if 123 overwritten). As a special case, we start writing again if
130 all the LEDs are blank, or our display image is all zeros 124 all the LEDs are blank, or our display image is all zeros
131 (indicating that this is the initial update, when the actual 125 (indicating that this is the initial update, when the actual
132 LEDs might contain random data). */ 126 LEDs might contain random data). */
133 read_leds (0, prev_image, LED_NUM_DIGITS); 127 read_leds (0, prev_image, LED_NUM_DIGITS);
134 for (digit = 0; digit < LED_NUM_DIGITS; digit++) 128 for (digit = 0; digit < LED_NUM_DIGITS; digit++)
135 if (image[digit] != prev_image[digit] 129 if (image[digit] != prev_image[digit]
136 && image[digit] && prev_image[digit]) 130 && image[digit] && prev_image[digit])
137 { 131 {
138 write_to_leds = 0; 132 write_to_leds = 0;
139 break; 133 break;
140 } 134 }
141 135
142 /* Update display image. */ 136 /* Update display image. */
143 for (digit = 0; 137 for (digit = 0;
144 digit < LED_NUM_DIGITS && tick_frames[digit][0] >= 0; 138 digit < LED_NUM_DIGITS && tick_frames[digit][0] >= 0;
145 digit++) 139 digit++)
146 { 140 {
147 int frame = tick_frames[digit][frame_nums[digit]]; 141 int frame = tick_frames[digit][frame_nums[digit]];
148 if (frame < 0) { 142 if (frame < 0) {
149 image[digit] = tick_frames[digit][0]; 143 image[digit] = tick_frames[digit][0];
150 frame_nums[digit] = 1; 144 frame_nums[digit] = 1;
151 } else { 145 } else {
152 image[digit] = frame; 146 image[digit] = frame;
153 frame_nums[digit]++; 147 frame_nums[digit]++;
154 break; 148 break;
155 } 149 }
156 } 150 }
157 151
158 if (write_to_leds) 152 if (write_to_leds)
159 /* Write the display image to the physical LEDs. */ 153 /* Write the display image to the physical LEDs. */
160 write_leds (0, image, LED_NUM_DIGITS); 154 write_leds (0, image, LED_NUM_DIGITS);
161 155
162 counter = 0; 156 counter = 0;
163 } 157 }
164 } 158 }
165 159
166 160
167 /* Mother-A interrupts. */ 161 /* Mother-A interrupts. */
168 162
169 #ifdef CONFIG_RTE_GBUS_INT 163 #ifdef CONFIG_RTE_GBUS_INT
170 164
171 #define L GBUS_INT_PRIORITY_LOW 165 #define L GBUS_INT_PRIORITY_LOW
172 #define M GBUS_INT_PRIORITY_MEDIUM 166 #define M GBUS_INT_PRIORITY_MEDIUM
173 #define H GBUS_INT_PRIORITY_HIGH 167 #define H GBUS_INT_PRIORITY_HIGH
174 168
175 static struct gbus_int_irq_init gbus_irq_inits[] = { 169 static struct gbus_int_irq_init gbus_irq_inits[] = {
176 #ifdef CONFIG_RTE_MB_A_PCI 170 #ifdef CONFIG_RTE_MB_A_PCI
177 { "MB_A_LAN", IRQ_MB_A_LAN, 1, 1, L }, 171 { "MB_A_LAN", IRQ_MB_A_LAN, 1, 1, L },
178 { "MB_A_PCI1", IRQ_MB_A_PCI1(0), IRQ_MB_A_PCI1_NUM, 1, L }, 172 { "MB_A_PCI1", IRQ_MB_A_PCI1(0), IRQ_MB_A_PCI1_NUM, 1, L },
179 { "MB_A_PCI2", IRQ_MB_A_PCI2(0), IRQ_MB_A_PCI2_NUM, 1, L }, 173 { "MB_A_PCI2", IRQ_MB_A_PCI2(0), IRQ_MB_A_PCI2_NUM, 1, L },
180 { "MB_A_EXT", IRQ_MB_A_EXT(0), IRQ_MB_A_EXT_NUM, 1, L }, 174 { "MB_A_EXT", IRQ_MB_A_EXT(0), IRQ_MB_A_EXT_NUM, 1, L },
181 { "MB_A_USB_OC",IRQ_MB_A_USB_OC(0), IRQ_MB_A_USB_OC_NUM, 1, L }, 175 { "MB_A_USB_OC",IRQ_MB_A_USB_OC(0), IRQ_MB_A_USB_OC_NUM, 1, L },
182 { "MB_A_PCMCIA_OC",IRQ_MB_A_PCMCIA_OC, 1, 1, L }, 176 { "MB_A_PCMCIA_OC",IRQ_MB_A_PCMCIA_OC, 1, 1, L },
183 #endif 177 #endif
184 { 0 } 178 { 0 }
185 }; 179 };
186 #define NUM_GBUS_IRQ_INITS \ 180 #define NUM_GBUS_IRQ_INITS \
187 ((sizeof gbus_irq_inits / sizeof gbus_irq_inits[0]) - 1) 181 ((sizeof gbus_irq_inits / sizeof gbus_irq_inits[0]) - 1)
188 182
189 static struct hw_interrupt_type gbus_hw_itypes[NUM_GBUS_IRQ_INITS]; 183 static struct hw_interrupt_type gbus_hw_itypes[NUM_GBUS_IRQ_INITS];
190 184
191 #endif /* CONFIG_RTE_GBUS_INT */ 185 #endif /* CONFIG_RTE_GBUS_INT */
192 186
193 187
194 void __init rte_cb_init_irqs (void) 188 void __init rte_cb_init_irqs (void)
195 { 189 {
196 #ifdef CONFIG_RTE_GBUS_INT 190 #ifdef CONFIG_RTE_GBUS_INT
197 gbus_int_init_irqs (); 191 gbus_int_init_irqs ();
198 gbus_int_init_irq_types (gbus_irq_inits, gbus_hw_itypes); 192 gbus_int_init_irq_types (gbus_irq_inits, gbus_hw_itypes);
199 #endif /* CONFIG_RTE_GBUS_INT */ 193 #endif /* CONFIG_RTE_GBUS_INT */
200 } 194 }
201 195
arch/v850/kernel/sim.c
1 /* 1 /*
2 * arch/v850/kernel/sim.c -- Machine-specific stuff for GDB v850e simulator 2 * arch/v850/kernel/sim.c -- Machine-specific stuff for GDB v850e simulator
3 * 3 *
4 * Copyright (C) 2001,02 NEC Corporation 4 * Copyright (C) 2001,02 NEC Corporation
5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org> 5 * Copyright (C) 2001,02 Miles Bader <miles@gnu.org>
6 * 6 *
7 * This file is subject to the terms and conditions of the GNU General 7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this 8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details. 9 * archive for more details.
10 * 10 *
11 * Written by Miles Bader <miles@gnu.org> 11 * Written by Miles Bader <miles@gnu.org>
12 */ 12 */
13 13
14 #include <linux/config.h> 14 #include <linux/config.h>
15 #include <linux/kernel.h> 15 #include <linux/kernel.h>
16 #include <linux/module.h> 16 #include <linux/module.h>
17 #include <linux/init.h> 17 #include <linux/init.h>
18 #include <linux/mm.h> 18 #include <linux/mm.h>
19 #include <linux/swap.h> 19 #include <linux/swap.h>
20 #include <linux/bootmem.h> 20 #include <linux/bootmem.h>
21 #include <linux/irq.h> 21 #include <linux/irq.h>
22 22
23 #include <asm/atomic.h> 23 #include <asm/atomic.h>
24 #include <asm/page.h> 24 #include <asm/page.h>
25 #include <asm/machdep.h> 25 #include <asm/machdep.h>
26 #include <asm/simsyscall.h> 26 #include <asm/simsyscall.h>
27 27
28 #include "mach.h" 28 #include "mach.h"
29 29
30 /* The name of a file containing the root filesystem. */ 30 /* The name of a file containing the root filesystem. */
31 #define ROOT_FS "rootfs.image" 31 #define ROOT_FS "rootfs.image"
32 32
33 extern void simcons_setup (void); 33 extern void simcons_setup (void);
34 extern void simcons_poll_ttys (void); 34 extern void simcons_poll_ttys (void);
35 extern void set_mem_root (void *addr, size_t len, char *cmd_line); 35 extern void set_mem_root (void *addr, size_t len, char *cmd_line);
36 36
37 static int read_file (const char *name, 37 static int read_file (const char *name,
38 unsigned long *addr, unsigned long *len, 38 unsigned long *addr, unsigned long *len,
39 const char **err); 39 const char **err);
40 40
41 void __init mach_setup (char **cmdline) 41 void __init mach_setup (char **cmdline)
42 { 42 {
43 const char *err; 43 const char *err;
44 unsigned long root_dev_addr, root_dev_len; 44 unsigned long root_dev_addr, root_dev_len;
45 45
46 simcons_setup (); 46 simcons_setup ();
47 47
48 printk (KERN_INFO "Reading root filesystem: %s", ROOT_FS); 48 printk (KERN_INFO "Reading root filesystem: %s", ROOT_FS);
49 49
50 if (read_file (ROOT_FS, &root_dev_addr, &root_dev_len, &err)) { 50 if (read_file (ROOT_FS, &root_dev_addr, &root_dev_len, &err)) {
51 printk (" (size %luK)\n", root_dev_len / 1024); 51 printk (" (size %luK)\n", root_dev_len / 1024);
52 set_mem_root ((void *)root_dev_addr, (size_t)root_dev_len, 52 set_mem_root ((void *)root_dev_addr, (size_t)root_dev_len,
53 *cmdline); 53 *cmdline);
54 } else 54 } else
55 printk ("...%s failed!\n", err); 55 printk ("...%s failed!\n", err);
56 } 56 }
57 57
58 void mach_get_physical_ram (unsigned long *ram_start, unsigned long *ram_len) 58 void mach_get_physical_ram (unsigned long *ram_start, unsigned long *ram_len)
59 { 59 {
60 *ram_start = RAM_ADDR; 60 *ram_start = RAM_ADDR;
61 *ram_len = RAM_SIZE; 61 *ram_len = RAM_SIZE;
62 } 62 }
63 63
64 void __init mach_sched_init (struct irqaction *timer_action) 64 void __init mach_sched_init (struct irqaction *timer_action)
65 { 65 {
66 /* ...do magic timer initialization?... */ 66 /* ...do magic timer initialization?... */
67 mach_tick = simcons_poll_ttys; 67 mach_tick = simcons_poll_ttys;
68 setup_irq (0, timer_action); 68 setup_irq (0, timer_action);
69 } 69 }
70 70
71 71
72 static void irq_nop (unsigned irq) { } 72 static void irq_nop (unsigned irq) { }
73 static unsigned irq_zero (unsigned irq) { return 0; } 73 static unsigned irq_zero (unsigned irq) { return 0; }
74 74
75 static struct hw_interrupt_type sim_irq_type = { 75 static struct hw_interrupt_type sim_irq_type = {
76 "IRQ", 76 "IRQ",
77 irq_zero, /* startup */ 77 irq_zero, /* startup */
78 irq_nop, /* shutdown */ 78 irq_nop, /* shutdown */
79 irq_nop, /* enable */ 79 irq_nop, /* enable */
80 irq_nop, /* disable */ 80 irq_nop, /* disable */
81 irq_nop, /* ack */ 81 irq_nop, /* ack */
82 irq_nop, /* end */ 82 irq_nop, /* end */
83 }; 83 };
84 84
85 void __init mach_init_irqs (void) 85 void __init mach_init_irqs (void)
86 { 86 {
87 init_irq_handlers (0, NUM_MACH_IRQS, 1, &sim_irq_type); 87 init_irq_handlers (0, NUM_MACH_IRQS, 1, &sim_irq_type);
88 } 88 }
89 89
90 90
91 void mach_gettimeofday (struct timespec *tv) 91 void mach_gettimeofday (struct timespec *tv)
92 { 92 {
93 long timeval[2], timezone[2]; 93 long timeval[2], timezone[2];
94 int rval = V850_SIM_SYSCALL (gettimeofday, timeval, timezone); 94 int rval = V850_SIM_SYSCALL (gettimeofday, timeval, timezone);
95 if (rval == 0) { 95 if (rval == 0) {
96 tv->tv_sec = timeval[0]; 96 tv->tv_sec = timeval[0];
97 tv->tv_nsec = timeval[1] * 1000; 97 tv->tv_nsec = timeval[1] * 1000;
98 } 98 }
99 } 99 }
100 100
101 void machine_restart (char *__unused) 101 void machine_restart (char *__unused)
102 { 102 {
103 V850_SIM_SYSCALL (write, 1, "RESTART\n", 8); 103 V850_SIM_SYSCALL (write, 1, "RESTART\n", 8);
104 V850_SIM_SYSCALL (exit, 0); 104 V850_SIM_SYSCALL (exit, 0);
105 } 105 }
106 106
107 EXPORT_SYMBOL(machine_restart);
108
109 void machine_halt (void) 107 void machine_halt (void)
110 { 108 {
111 V850_SIM_SYSCALL (write, 1, "HALT\n", 5); 109 V850_SIM_SYSCALL (write, 1, "HALT\n", 5);
112 V850_SIM_SYSCALL (exit, 0); 110 V850_SIM_SYSCALL (exit, 0);
113 } 111 }
114 112
115 EXPORT_SYMBOL(machine_halt);
116
117 void machine_power_off (void) 113 void machine_power_off (void)
118 { 114 {
119 V850_SIM_SYSCALL (write, 1, "POWER OFF\n", 10); 115 V850_SIM_SYSCALL (write, 1, "POWER OFF\n", 10);
120 V850_SIM_SYSCALL (exit, 0); 116 V850_SIM_SYSCALL (exit, 0);
121 } 117 }
122
123 EXPORT_SYMBOL(machine_power_off);
124 118
125 119
126 /* Load data from a file called NAME into ram. The address and length 120 /* Load data from a file called NAME into ram. The address and length
127 of the data image are returned in ADDR and LEN. */ 121 of the data image are returned in ADDR and LEN. */
128 static int __init 122 static int __init
129 read_file (const char *name, 123 read_file (const char *name,
130 unsigned long *addr, unsigned long *len, 124 unsigned long *addr, unsigned long *len,
131 const char **err) 125 const char **err)
132 { 126 {
133 int rval, fd; 127 int rval, fd;
134 unsigned long cur, left; 128 unsigned long cur, left;
135 /* Note this is not a normal stat buffer, it's an ad-hoc 129 /* Note this is not a normal stat buffer, it's an ad-hoc
136 structure defined by the simulator. */ 130 structure defined by the simulator. */
137 unsigned long stat_buf[10]; 131 unsigned long stat_buf[10];
138 132
139 /* Stat the file to find out the length. */ 133 /* Stat the file to find out the length. */
140 rval = V850_SIM_SYSCALL (stat, name, stat_buf); 134 rval = V850_SIM_SYSCALL (stat, name, stat_buf);
141 if (rval < 0) { 135 if (rval < 0) {
142 if (err) *err = "stat"; 136 if (err) *err = "stat";
143 return 0; 137 return 0;
144 } 138 }
145 *len = stat_buf[4]; 139 *len = stat_buf[4];
146 140
147 /* Open the file; `0' is O_RDONLY. */ 141 /* Open the file; `0' is O_RDONLY. */
148 fd = V850_SIM_SYSCALL (open, name, 0); 142 fd = V850_SIM_SYSCALL (open, name, 0);
149 if (fd < 0) { 143 if (fd < 0) {
150 if (err) *err = "open"; 144 if (err) *err = "open";
151 return 0; 145 return 0;
152 } 146 }
153 147
154 *addr = (unsigned long)alloc_bootmem(*len); 148 *addr = (unsigned long)alloc_bootmem(*len);
155 if (! *addr) { 149 if (! *addr) {
156 V850_SIM_SYSCALL (close, fd); 150 V850_SIM_SYSCALL (close, fd);
157 if (err) *err = "alloc_bootmem"; 151 if (err) *err = "alloc_bootmem";
158 return 0; 152 return 0;
159 } 153 }
160 154
161 cur = *addr; 155 cur = *addr;
162 left = *len; 156 left = *len;
163 while (left > 0) { 157 while (left > 0) {
164 int chunk = V850_SIM_SYSCALL (read, fd, cur, left); 158 int chunk = V850_SIM_SYSCALL (read, fd, cur, left);
165 if (chunk <= 0) 159 if (chunk <= 0)
166 break; 160 break;
167 cur += chunk; 161 cur += chunk;
168 left -= chunk; 162 left -= chunk;
169 } 163 }
170 V850_SIM_SYSCALL (close, fd); 164 V850_SIM_SYSCALL (close, fd);
171 if (left > 0) { 165 if (left > 0) {
172 /* Some read failed. */ 166 /* Some read failed. */
173 free_bootmem (*addr, *len); 167 free_bootmem (*addr, *len);
174 if (err) *err = "read"; 168 if (err) *err = "read";
175 return 0; 169 return 0;
176 } 170 }
177 171
178 return 1; 172 return 1;
179 } 173 }
180 174
arch/v850/kernel/sim85e2.c
1 /* 1 /*
2 * arch/v850/kernel/sim85e2.c -- Machine-specific stuff for 2 * arch/v850/kernel/sim85e2.c -- Machine-specific stuff for
3 * V850E2 RTL simulator 3 * V850E2 RTL simulator
4 * 4 *
5 * Copyright (C) 2002,03 NEC Electronics Corporation 5 * Copyright (C) 2002,03 NEC Electronics Corporation
6 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org> 6 * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
7 * 7 *
8 * This file is subject to the terms and conditions of the GNU General 8 * This file is subject to the terms and conditions of the GNU General
9 * Public License. See the file COPYING in the main directory of this 9 * Public License. See the file COPYING in the main directory of this
10 * archive for more details. 10 * archive for more details.
11 * 11 *
12 * Written by Miles Bader <miles@gnu.org> 12 * Written by Miles Bader <miles@gnu.org>
13 */ 13 */
14 14
15 #include <linux/config.h> 15 #include <linux/config.h>
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/init.h> 18 #include <linux/init.h>
19 #include <linux/mm.h> 19 #include <linux/mm.h>
20 #include <linux/swap.h> 20 #include <linux/swap.h>
21 #include <linux/bootmem.h> 21 #include <linux/bootmem.h>
22 #include <linux/irq.h> 22 #include <linux/irq.h>
23 23
24 #include <asm/atomic.h> 24 #include <asm/atomic.h>
25 #include <asm/page.h> 25 #include <asm/page.h>
26 #include <asm/machdep.h> 26 #include <asm/machdep.h>
27 27
28 #include "mach.h" 28 #include "mach.h"
29 29
30 30
31 /* There are 4 possible areas we can use: 31 /* There are 4 possible areas we can use:
32 32
33 IRAM (1MB) is fast for instruction fetches, but slow for data 33 IRAM (1MB) is fast for instruction fetches, but slow for data
34 DRAM (1020KB) is fast for data, but slow for instructions 34 DRAM (1020KB) is fast for data, but slow for instructions
35 ERAM is cached, so should be fast for both insns and data 35 ERAM is cached, so should be fast for both insns and data
36 SDRAM is external DRAM, similar to ERAM 36 SDRAM is external DRAM, similar to ERAM
37 */ 37 */
38 38
39 #define INIT_MEMC_FOR_SDRAM 39 #define INIT_MEMC_FOR_SDRAM
40 #define USE_SDRAM_AREA 40 #define USE_SDRAM_AREA
41 #define KERNEL_IN_SDRAM_AREA 41 #define KERNEL_IN_SDRAM_AREA
42 42
43 #define DCACHE_MODE V850E2_CACHE_BTSC_DCM_WT 43 #define DCACHE_MODE V850E2_CACHE_BTSC_DCM_WT
44 /*#define DCACHE_MODE V850E2_CACHE_BTSC_DCM_WB_ALLOC*/ 44 /*#define DCACHE_MODE V850E2_CACHE_BTSC_DCM_WB_ALLOC*/
45 45
46 #ifdef USE_SDRAM_AREA 46 #ifdef USE_SDRAM_AREA
47 #define RAM_START SDRAM_ADDR 47 #define RAM_START SDRAM_ADDR
48 #define RAM_END (SDRAM_ADDR + SDRAM_SIZE) 48 #define RAM_END (SDRAM_ADDR + SDRAM_SIZE)
49 #else 49 #else
50 /* When we use DRAM, we need to account for the fact that the end of it is 50 /* When we use DRAM, we need to account for the fact that the end of it is
51 used for R0_RAM. */ 51 used for R0_RAM. */
52 #define RAM_START DRAM_ADDR 52 #define RAM_START DRAM_ADDR
53 #define RAM_END R0_RAM_ADDR 53 #define RAM_END R0_RAM_ADDR
54 #endif 54 #endif
55 55
56 56
57 extern void memcons_setup (void); 57 extern void memcons_setup (void);
58 58
59 59
60 #ifdef KERNEL_IN_SDRAM_AREA 60 #ifdef KERNEL_IN_SDRAM_AREA
61 #define EARLY_INIT_SECTION_ATTR __attribute__ ((section (".early.text"))) 61 #define EARLY_INIT_SECTION_ATTR __attribute__ ((section (".early.text")))
62 #else 62 #else
63 #define EARLY_INIT_SECTION_ATTR __init 63 #define EARLY_INIT_SECTION_ATTR __init
64 #endif 64 #endif
65 65
66 void EARLY_INIT_SECTION_ATTR mach_early_init (void) 66 void EARLY_INIT_SECTION_ATTR mach_early_init (void)
67 { 67 {
68 /* The sim85e2 simulator tracks `undefined' values, so to make 68 /* The sim85e2 simulator tracks `undefined' values, so to make
69 debugging easier, we begin by zeroing out all otherwise 69 debugging easier, we begin by zeroing out all otherwise
70 undefined registers. This is not strictly necessary. 70 undefined registers. This is not strictly necessary.
71 71
72 The registers we zero are: 72 The registers we zero are:
73 Every GPR except: 73 Every GPR except:
74 stack-pointer (r3) 74 stack-pointer (r3)
75 task-pointer (r16) 75 task-pointer (r16)
76 our return addr (r31) 76 our return addr (r31)
77 Every system register (SPR) that we know about except for 77 Every system register (SPR) that we know about except for
78 the PSW (SPR 5), which we zero except for the 78 the PSW (SPR 5), which we zero except for the
79 disable-interrupts bit. 79 disable-interrupts bit.
80 */ 80 */
81 81
82 /* GPRs */ 82 /* GPRs */
83 asm volatile (" mov r0, r1 ; mov r0, r2 "); 83 asm volatile (" mov r0, r1 ; mov r0, r2 ");
84 asm volatile ("mov r0, r4 ; mov r0, r5 ; mov r0, r6 ; mov r0, r7 "); 84 asm volatile ("mov r0, r4 ; mov r0, r5 ; mov r0, r6 ; mov r0, r7 ");
85 asm volatile ("mov r0, r8 ; mov r0, r9 ; mov r0, r10; mov r0, r11"); 85 asm volatile ("mov r0, r8 ; mov r0, r9 ; mov r0, r10; mov r0, r11");
86 asm volatile ("mov r0, r12; mov r0, r13; mov r0, r14; mov r0, r15"); 86 asm volatile ("mov r0, r12; mov r0, r13; mov r0, r14; mov r0, r15");
87 asm volatile (" mov r0, r17; mov r0, r18; mov r0, r19"); 87 asm volatile (" mov r0, r17; mov r0, r18; mov r0, r19");
88 asm volatile ("mov r0, r20; mov r0, r21; mov r0, r22; mov r0, r23"); 88 asm volatile ("mov r0, r20; mov r0, r21; mov r0, r22; mov r0, r23");
89 asm volatile ("mov r0, r24; mov r0, r25; mov r0, r26; mov r0, r27"); 89 asm volatile ("mov r0, r24; mov r0, r25; mov r0, r26; mov r0, r27");
90 asm volatile ("mov r0, r28; mov r0, r29; mov r0, r30"); 90 asm volatile ("mov r0, r28; mov r0, r29; mov r0, r30");
91 91
92 /* SPRs */ 92 /* SPRs */
93 asm volatile ("ldsr r0, 0; ldsr r0, 1; ldsr r0, 2; ldsr r0, 3"); 93 asm volatile ("ldsr r0, 0; ldsr r0, 1; ldsr r0, 2; ldsr r0, 3");
94 asm volatile ("ldsr r0, 4"); 94 asm volatile ("ldsr r0, 4");
95 asm volatile ("addi 0x20, r0, r1; ldsr r1, 5"); /* PSW */ 95 asm volatile ("addi 0x20, r0, r1; ldsr r1, 5"); /* PSW */
96 asm volatile ("ldsr r0, 16; ldsr r0, 17; ldsr r0, 18; ldsr r0, 19"); 96 asm volatile ("ldsr r0, 16; ldsr r0, 17; ldsr r0, 18; ldsr r0, 19");
97 asm volatile ("ldsr r0, 20"); 97 asm volatile ("ldsr r0, 20");
98 98
99 99
100 #ifdef INIT_MEMC_FOR_SDRAM 100 #ifdef INIT_MEMC_FOR_SDRAM
101 /* Settings for SDRAM controller. */ 101 /* Settings for SDRAM controller. */
102 V850E2_VSWC = 0x0042; 102 V850E2_VSWC = 0x0042;
103 V850E2_BSC = 0x9286; 103 V850E2_BSC = 0x9286;
104 V850E2_BCT(0) = 0xb000; /* was: 0 */ 104 V850E2_BCT(0) = 0xb000; /* was: 0 */
105 V850E2_BCT(1) = 0x000b; 105 V850E2_BCT(1) = 0x000b;
106 V850E2_ASC = 0; 106 V850E2_ASC = 0;
107 V850E2_LBS = 0xa9aa; /* was: 0xaaaa */ 107 V850E2_LBS = 0xa9aa; /* was: 0xaaaa */
108 V850E2_LBC(0) = 0; 108 V850E2_LBC(0) = 0;
109 V850E2_LBC(1) = 0; /* was: 0x3 */ 109 V850E2_LBC(1) = 0; /* was: 0x3 */
110 V850E2_BCC = 0; 110 V850E2_BCC = 0;
111 V850E2_RFS(4) = 0x800a; /* was: 0xf109 */ 111 V850E2_RFS(4) = 0x800a; /* was: 0xf109 */
112 V850E2_SCR(4) = 0x2091; /* was: 0x20a1 */ 112 V850E2_SCR(4) = 0x2091; /* was: 0x20a1 */
113 V850E2_RFS(3) = 0x800c; 113 V850E2_RFS(3) = 0x800c;
114 V850E2_SCR(3) = 0x20a1; 114 V850E2_SCR(3) = 0x20a1;
115 V850E2_DWC(0) = 0; 115 V850E2_DWC(0) = 0;
116 V850E2_DWC(1) = 0; 116 V850E2_DWC(1) = 0;
117 #endif 117 #endif
118 118
119 #if 0 119 #if 0
120 #ifdef CONFIG_V850E2_SIM85E2S 120 #ifdef CONFIG_V850E2_SIM85E2S
121 /* Turn on the caches. */ 121 /* Turn on the caches. */
122 V850E2_CACHE_BTSC = V850E2_CACHE_BTSC_ICM | DCACHE_MODE; 122 V850E2_CACHE_BTSC = V850E2_CACHE_BTSC_ICM | DCACHE_MODE;
123 V850E2_BHC = 0x1010; 123 V850E2_BHC = 0x1010;
124 #elif CONFIG_V850E2_SIM85E2C 124 #elif CONFIG_V850E2_SIM85E2C
125 V850E2_CACHE_BTSC |= (V850E2_CACHE_BTSC_ICM | V850E2_CACHE_BTSC_DCM0); 125 V850E2_CACHE_BTSC |= (V850E2_CACHE_BTSC_ICM | V850E2_CACHE_BTSC_DCM0);
126 V850E2_BUSM_BHC = 0xFFFF; 126 V850E2_BUSM_BHC = 0xFFFF;
127 #endif 127 #endif
128 #else 128 #else
129 V850E2_BHC = 0; 129 V850E2_BHC = 0;
130 #endif 130 #endif
131 131
132 /* Don't stop the simulator at `halt' instructions. */ 132 /* Don't stop the simulator at `halt' instructions. */
133 SIM85E2_NOTHAL = 1; 133 SIM85E2_NOTHAL = 1;
134 134
135 /* Ensure that the simulator halts on a panic, instead of going 135 /* Ensure that the simulator halts on a panic, instead of going
136 into an infinite loop inside the panic function. */ 136 into an infinite loop inside the panic function. */
137 panic_timeout = -1; 137 panic_timeout = -1;
138 } 138 }
139 139
140 void __init mach_setup (char **cmdline) 140 void __init mach_setup (char **cmdline)
141 { 141 {
142 memcons_setup (); 142 memcons_setup ();
143 } 143 }
144 144
145 void mach_get_physical_ram (unsigned long *ram_start, unsigned long *ram_len) 145 void mach_get_physical_ram (unsigned long *ram_start, unsigned long *ram_len)
146 { 146 {
147 *ram_start = RAM_START; 147 *ram_start = RAM_START;
148 *ram_len = RAM_END - RAM_START; 148 *ram_len = RAM_END - RAM_START;
149 } 149 }
150 150
151 void __init mach_sched_init (struct irqaction *timer_action) 151 void __init mach_sched_init (struct irqaction *timer_action)
152 { 152 {
153 /* The simulator actually cycles through all interrupts 153 /* The simulator actually cycles through all interrupts
154 periodically. We just pay attention to IRQ0, which gives us 154 periodically. We just pay attention to IRQ0, which gives us
155 1/64 the rate of the periodic interrupts. */ 155 1/64 the rate of the periodic interrupts. */
156 setup_irq (0, timer_action); 156 setup_irq (0, timer_action);
157 } 157 }
158 158
159 void mach_gettimeofday (struct timespec *tv) 159 void mach_gettimeofday (struct timespec *tv)
160 { 160 {
161 tv->tv_sec = 0; 161 tv->tv_sec = 0;
162 tv->tv_nsec = 0; 162 tv->tv_nsec = 0;
163 } 163 }
164 164
165 /* Interrupts */ 165 /* Interrupts */
166 166
167 struct v850e_intc_irq_init irq_inits[] = { 167 struct v850e_intc_irq_init irq_inits[] = {
168 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 }, 168 { "IRQ", 0, NUM_MACH_IRQS, 1, 7 },
169 { 0 } 169 { 0 }
170 }; 170 };
171 struct hw_interrupt_type hw_itypes[1]; 171 struct hw_interrupt_type hw_itypes[1];
172 172
173 /* Initialize interrupts. */ 173 /* Initialize interrupts. */
174 void __init mach_init_irqs (void) 174 void __init mach_init_irqs (void)
175 { 175 {
176 v850e_intc_init_irq_types (irq_inits, hw_itypes); 176 v850e_intc_init_irq_types (irq_inits, hw_itypes);
177 } 177 }
178 178
179 179
180 void machine_halt (void) __attribute__ ((noreturn)); 180 void machine_halt (void) __attribute__ ((noreturn));
181 void machine_halt (void) 181 void machine_halt (void)
182 { 182 {
183 SIM85E2_SIMFIN = 0; /* Halt immediately. */ 183 SIM85E2_SIMFIN = 0; /* Halt immediately. */
184 for (;;) {} 184 for (;;) {}
185 } 185 }
186 186
187 EXPORT_SYMBOL(machine_halt);
188
189 void machine_restart (char *__unused) 187 void machine_restart (char *__unused)
190 { 188 {
191 machine_halt (); 189 machine_halt ();
192 } 190 }
193 191
194 EXPORT_SYMBOL(machine_restart);
195
196 void machine_power_off (void) 192 void machine_power_off (void)
197 { 193 {
198 machine_halt (); 194 machine_halt ();
199 } 195 }
200
201 EXPORT_SYMBOL(machine_power_off);
202 196
arch/x86_64/kernel/reboot.c
1 /* Various gunk just to reboot the machine. */ 1 /* Various gunk just to reboot the machine. */
2 #include <linux/module.h> 2 #include <linux/module.h>
3 #include <linux/reboot.h> 3 #include <linux/reboot.h>
4 #include <linux/init.h> 4 #include <linux/init.h>
5 #include <linux/smp.h> 5 #include <linux/smp.h>
6 #include <linux/kernel.h> 6 #include <linux/kernel.h>
7 #include <linux/ctype.h> 7 #include <linux/ctype.h>
8 #include <linux/string.h> 8 #include <linux/string.h>
9 #include <asm/io.h> 9 #include <asm/io.h>
10 #include <asm/kdebug.h> 10 #include <asm/kdebug.h>
11 #include <asm/delay.h> 11 #include <asm/delay.h>
12 #include <asm/hw_irq.h> 12 #include <asm/hw_irq.h>
13 #include <asm/system.h> 13 #include <asm/system.h>
14 #include <asm/pgtable.h> 14 #include <asm/pgtable.h>
15 #include <asm/tlbflush.h> 15 #include <asm/tlbflush.h>
16 #include <asm/apic.h> 16 #include <asm/apic.h>
17 17
18 /* 18 /*
19 * Power off function, if any 19 * Power off function, if any
20 */ 20 */
21 void (*pm_power_off)(void); 21 void (*pm_power_off)(void);
22 22
23 static long no_idt[3]; 23 static long no_idt[3];
24 static enum { 24 static enum {
25 BOOT_TRIPLE = 't', 25 BOOT_TRIPLE = 't',
26 BOOT_KBD = 'k' 26 BOOT_KBD = 'k'
27 } reboot_type = BOOT_KBD; 27 } reboot_type = BOOT_KBD;
28 static int reboot_mode = 0; 28 static int reboot_mode = 0;
29 int reboot_force; 29 int reboot_force;
30 30
31 /* reboot=t[riple] | k[bd] [, [w]arm | [c]old] 31 /* reboot=t[riple] | k[bd] [, [w]arm | [c]old]
32 warm Don't set the cold reboot flag 32 warm Don't set the cold reboot flag
33 cold Set the cold reboot flag 33 cold Set the cold reboot flag
34 triple Force a triple fault (init) 34 triple Force a triple fault (init)
35 kbd Use the keyboard controller. cold reset (default) 35 kbd Use the keyboard controller. cold reset (default)
36 force Avoid anything that could hang. 36 force Avoid anything that could hang.
37 */ 37 */
38 static int __init reboot_setup(char *str) 38 static int __init reboot_setup(char *str)
39 { 39 {
40 for (;;) { 40 for (;;) {
41 switch (*str) { 41 switch (*str) {
42 case 'w': 42 case 'w':
43 reboot_mode = 0x1234; 43 reboot_mode = 0x1234;
44 break; 44 break;
45 45
46 case 'c': 46 case 'c':
47 reboot_mode = 0; 47 reboot_mode = 0;
48 break; 48 break;
49 49
50 case 't': 50 case 't':
51 case 'b': 51 case 'b':
52 case 'k': 52 case 'k':
53 reboot_type = *str; 53 reboot_type = *str;
54 break; 54 break;
55 case 'f': 55 case 'f':
56 reboot_force = 1; 56 reboot_force = 1;
57 break; 57 break;
58 } 58 }
59 if((str = strchr(str,',')) != NULL) 59 if((str = strchr(str,',')) != NULL)
60 str++; 60 str++;
61 else 61 else
62 break; 62 break;
63 } 63 }
64 return 1; 64 return 1;
65 } 65 }
66 66
67 __setup("reboot=", reboot_setup); 67 __setup("reboot=", reboot_setup);
68 68
/* Spin (bounded) until the keyboard controller's input buffer is empty,
 * i.e. status port 0x64 bit 1 clears, so it can accept a new command. */
static inline void kb_wait(void)
{
	int tries = 0x10000;

	while (tries--) {
		if (!(inb_p(0x64) & 0x02))
			break;
	}
}
77 77
/* Quiesce the machine ahead of a reboot: move to the boot CPU, stop all
 * other CPUs, then tear down the local and I/O APICs.  The statement
 * order here matters — do not reorder. */
void machine_shutdown(void)
{
	/* Stop the cpus and apics */
#ifdef CONFIG_SMP
	int reboot_cpu_id;

	/* The boot cpu is always logical cpu 0 */
	reboot_cpu_id = 0;

	/* Make certain the cpu I'm about to reboot on is online */
	if (!cpu_isset(reboot_cpu_id, cpu_online_map)) {
		reboot_cpu_id = smp_processor_id();
	}

	/* Make certain I only run on the appropriate processor */
	set_cpus_allowed(current, cpumask_of_cpu(reboot_cpu_id));

	/* O.K Now that I'm on the appropriate processor,
	 * stop all of the others.
	 */
	smp_send_stop();
#endif

	local_irq_disable();

	/* NOTE(review): only the UP build disables the local APIC here;
	 * presumably the SMP stop path covers the other CPUs — confirm. */
#ifndef CONFIG_SMP
	disable_local_APIC();
#endif

	disable_IO_APIC();

	local_irq_enable();
}
111 111
112 void machine_restart(char * __unused) 112 void machine_restart(char * __unused)
113 { 113 {
114 int i; 114 int i;
115 115
116 printk("machine restart\n"); 116 printk("machine restart\n");
117 117
118 machine_shutdown(); 118 machine_shutdown();
119 119
120 if (!reboot_force) { 120 if (!reboot_force) {
121 local_irq_disable(); 121 local_irq_disable();
122 #ifndef CONFIG_SMP 122 #ifndef CONFIG_SMP
123 disable_local_APIC(); 123 disable_local_APIC();
124 #endif 124 #endif
125 disable_IO_APIC(); 125 disable_IO_APIC();
126 local_irq_enable(); 126 local_irq_enable();
127 } 127 }
128 128
129 /* Tell the BIOS if we want cold or warm reboot */ 129 /* Tell the BIOS if we want cold or warm reboot */
130 *((unsigned short *)__va(0x472)) = reboot_mode; 130 *((unsigned short *)__va(0x472)) = reboot_mode;
131 131
132 for (;;) { 132 for (;;) {
133 /* Could also try the reset bit in the Hammer NB */ 133 /* Could also try the reset bit in the Hammer NB */
134 switch (reboot_type) { 134 switch (reboot_type) {
135 case BOOT_KBD: 135 case BOOT_KBD:
136 for (i=0; i<100; i++) { 136 for (i=0; i<100; i++) {
137 kb_wait(); 137 kb_wait();
138 udelay(50); 138 udelay(50);
139 outb(0xfe,0x64); /* pulse reset low */ 139 outb(0xfe,0x64); /* pulse reset low */
140 udelay(50); 140 udelay(50);
141 } 141 }
142 142
143 case BOOT_TRIPLE: 143 case BOOT_TRIPLE:
144 __asm__ __volatile__("lidt (%0)": :"r" (&no_idt)); 144 __asm__ __volatile__("lidt (%0)": :"r" (&no_idt));
145 __asm__ __volatile__("int3"); 145 __asm__ __volatile__("int3");
146 146
147 reboot_type = BOOT_KBD; 147 reboot_type = BOOT_KBD;
148 break; 148 break;
149 } 149 }
150 } 150 }
151 } 151 }
152 152
153 EXPORT_SYMBOL(machine_restart);
154
/*
 * Halt the machine.  Intentionally empty on x86-64 — there is no
 * architecture-level halt action beyond what the generic halt path
 * already performs.
 *
 * Not exported to modules: modules should call kernel_halt() instead
 * of this machine-specific hook.
 */
void machine_halt(void)
{
}
160
161 void machine_power_off(void) 157 void machine_power_off(void)
162 { 158 {
163 if (pm_power_off) 159 if (pm_power_off)
164 pm_power_off(); 160 pm_power_off();
165 } 161 }
166
167 EXPORT_SYMBOL(machine_power_off);
168 162