Commit 19f9a34f87c48bbd270d617d1c986d0c23866a1a
Parent: 8c12b5dc13
Exists in master and in 7 other branches
sh: Initial vsyscall page support.
This implements initial support for the vsyscall page on SH. For the moment it is left configurable, since the nommu targets have to be supported from the same code base. At present it is only hooked up for the signal trampoline return, with more to be added later, once uClibc catches up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
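The hook itself is the restorer selection in setup_frame()/setup_rt_frame() below: when no SA_RESTORER stub is supplied and the process has a vsyscall page mapped, the signal handler's return address is pointed at the fixed trampoline instead of at code generated on the stack. A minimal sketch of that logic, assuming the VDSO_SYM() helper and the __kernel_sigreturn symbol exported by the new vsyscall code (not a standalone program, just the decision as the diff adds it):

    /* Restorer selection as added in setup_frame() (sketch):
     * prefer a caller-supplied restorer, then the vsyscall-page
     * trampoline, and only fall back to synthesizing the trap
     * sequence on the user stack. */
    if (ka->sa.sa_flags & SA_RESTORER) {
            regs->pr = (unsigned long) ka->sa.sa_restorer;  /* libc-provided stub */
    #ifdef CONFIG_VSYSCALL
    } else if (likely(current->mm->context.vdso)) {
            regs->pr = VDSO_SYM(&__kernel_sigreturn);       /* fixed trampoline */
    #endif
    } else {
            regs->pr = (unsigned long) frame->retcode;      /* on-stack trampoline */
    }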
Showing 19 changed files with 473 additions and 17 deletions
- arch/sh/kernel/Makefile
- arch/sh/kernel/process.c
- arch/sh/kernel/signal.c
- arch/sh/kernel/vsyscall/Makefile
- arch/sh/kernel/vsyscall/vsyscall-note.S
- arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
- arch/sh/kernel/vsyscall/vsyscall-syscall.S
- arch/sh/kernel/vsyscall/vsyscall-trapa.S
- arch/sh/kernel/vsyscall/vsyscall.c
- arch/sh/kernel/vsyscall/vsyscall.lds.S
- arch/sh/mm/Kconfig
- arch/sh/mm/init.c
- arch/sh/mm/tlb-flush.c
- include/asm-sh/auxvec.h
- include/asm-sh/elf.h
- include/asm-sh/mmu.h
- include/asm-sh/mmu_context.h
- include/asm-sh/page.h
- include/asm-sh/processor.h
arch/sh/kernel/Makefile
 #
 # Makefile for the Linux/SuperH kernel.
 #

 extra-y := head.o init_task.o vmlinux.lds

 obj-y := process.o signal.o entry.o traps.o irq.o \
 	ptrace.o setup.o time.o sys_sh.o semaphore.o \
 	io.o io_generic.o sh_ksyms.o syscalls.o

 obj-y				+= cpu/ timers/
+obj-$(CONFIG_VSYSCALL)		+= vsyscall/

 obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_CF_ENABLER)	+= cf-enabler.o
 obj-$(CONFIG_SH_STANDARD_BIOS)	+= sh_bios.o
 obj-$(CONFIG_SH_KGDB)		+= kgdb_stub.o kgdb_jmp.o
 obj-$(CONFIG_SH_CPU_FREQ)	+= cpufreq.o
 obj-$(CONFIG_MODULES)		+= module.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_APM)		+= apm.o
 obj-$(CONFIG_PM)		+= pm.o
arch/sh/kernel/process.c
 /* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $
  *
  * linux/arch/sh/kernel/process.c
  *
  * Copyright (C) 1995  Linus Torvalds
  *
  * SuperH version:  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
  */

 /*
  * This file handles the architecture-dependent parts of process handling..
  */

 #include <linux/module.h>
 #include <linux/unistd.h>
 #include <linux/mm.h>
 #include <linux/elfcore.h>
 #include <linux/a.out.h>
 #include <linux/slab.h>
 #include <linux/pm.h>
 #include <linux/ptrace.h>
 #include <linux/kallsyms.h>
 #include <linux/kexec.h>

 #include <asm/io.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/elf.h>
 #include <asm/ubc.h>

 static int hlt_counter=0;

 int ubc_usercnt = 0;

 #define HARD_IDLE_TIMEOUT (HZ / 3)

 void (*pm_idle)(void);

 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);

 void disable_hlt(void)
 {
 	hlt_counter++;
 }

 EXPORT_SYMBOL(disable_hlt);

 void enable_hlt(void)
 {
 	hlt_counter--;
 }

 EXPORT_SYMBOL(enable_hlt);

 void default_idle(void)
 {
 	if (!hlt_counter)
 		cpu_sleep();
 	else
 		cpu_relax();
 }

 void cpu_idle(void)
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
 		void (*idle)(void) = pm_idle;

 		if (!idle)
 			idle = default_idle;

 		while (!need_resched())
 			idle();

 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
 	}
 }

 void machine_restart(char * __unused)
 {
 	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
 	asm volatile("ldc %0, sr\n\t"
 		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
 }

 void machine_halt(void)
 {
 	local_irq_disable();

 	while (1)
 		cpu_sleep();
 }

 void machine_power_off(void)
 {
 	if (pm_power_off)
 		pm_power_off();
 }

 void show_regs(struct pt_regs * regs)
 {
 	printk("\n");
 	printk("Pid : %d, Comm: %20s\n", current->pid, current->comm);
 	print_symbol("PC is at %s\n", regs->pc);
 	printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
 	       regs->pc, regs->regs[15], regs->sr);
 #ifdef CONFIG_MMU
 	printk("TEA : %08x ", ctrl_inl(MMU_TEA));
 #else
 	printk(" ");
 #endif
 	printk("%s\n", print_tainted());

 	printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
 	       regs->regs[0],regs->regs[1],
 	       regs->regs[2],regs->regs[3]);
 	printk("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
 	       regs->regs[4],regs->regs[5],
 	       regs->regs[6],regs->regs[7]);
 	printk("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
 	       regs->regs[8],regs->regs[9],
 	       regs->regs[10],regs->regs[11]);
 	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
 	       regs->regs[12],regs->regs[13],
 	       regs->regs[14]);
 	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
 	       regs->mach, regs->macl, regs->gbr, regs->pr);

 	/*
 	 * If we're in kernel mode, dump the stack too..
 	 */
 	if (!user_mode(regs)) {
 		extern void show_task(unsigned long *sp);
 		unsigned long sp = regs->regs[15];

 		show_task((unsigned long *)sp);
 	}
 }

 /*
  * Create a kernel thread
  */

 /*
  * This is the mechanism for creating a new kernel thread.
  *
  */
 extern void kernel_thread_helper(void);
 __asm__(".align 5\n"
 	"kernel_thread_helper:\n\t"
 	"jsr @r5\n\t"
 	" nop\n\t"
 	"mov.l 1f, r1\n\t"
 	"jsr @r1\n\t"
 	" mov r0, r4\n\t"
 	".align 2\n\t"
 	"1:.long do_exit");

 int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 {	/* Don't use this in BL=1(cli). Or else, CPU resets! */
 	struct pt_regs regs;

 	memset(&regs, 0, sizeof(regs));
 	regs.regs[4] = (unsigned long) arg;
 	regs.regs[5] = (unsigned long) fn;

 	regs.pc = (unsigned long) kernel_thread_helper;
 	regs.sr = (1 << 30);

 	/* Ok, create the new process.. */
 	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
 }

 /*
  * Free current thread data structures etc..
  */
 void exit_thread(void)
 {
 	if (current->thread.ubc_pc) {
 		current->thread.ubc_pc = 0;
 		ubc_usercnt -= 1;
 	}
 }

 void flush_thread(void)
 {
 #if defined(CONFIG_SH_FPU)
 	struct task_struct *tsk = current;
 	/* Forget lazy FPU state */
 	clear_fpu(tsk, task_pt_regs(tsk));
 	clear_used_math();
 #endif
 }

 void release_thread(struct task_struct *dead_task)
 {
 	/* do nothing */
 }

 /* Fill in the fpu structure for a core dump.. */
 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 {
 	int fpvalid = 0;

 #if defined(CONFIG_SH_FPU)
 	struct task_struct *tsk = current;

 	fpvalid = !!tsk_used_math(tsk);
 	if (fpvalid) {
 		unlazy_fpu(tsk, regs);
 		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
 	}
 #endif

 	return fpvalid;
 }

 /*
  * Capture the user space registers if the task is not running (in user space)
  */
 int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
 	struct pt_regs ptregs;

 	ptregs = *task_pt_regs(tsk);
 	elf_core_copy_regs(regs, &ptregs);

 	return 1;
 }

 int
 dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
 {
 	int fpvalid = 0;

 #if defined(CONFIG_SH_FPU)
 	fpvalid = !!tsk_used_math(tsk);
 	if (fpvalid) {
 		unlazy_fpu(tsk, task_pt_regs(tsk));
 		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
 	}
 #endif

 	return fpvalid;
 }

 asmlinkage void ret_from_fork(void);

 int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 		unsigned long unused,
 		struct task_struct *p, struct pt_regs *regs)
 {
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs;
 #if defined(CONFIG_SH_FPU)
 	struct task_struct *tsk = current;

 	unlazy_fpu(tsk, regs);
 	p->thread.fpu = tsk->thread.fpu;
 	copy_to_stopped_child_used_math(p);
 #endif

 	childregs = task_pt_regs(p);
 	*childregs = *regs;

 	if (user_mode(regs)) {
 		childregs->regs[15] = usp;
 		ti->addr_limit = USER_DS;
 	} else {
 		childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
 		ti->addr_limit = KERNEL_DS;
 	}
 	if (clone_flags & CLONE_SETTLS) {
 		childregs->gbr = childregs->regs[0];
 	}
 	childregs->regs[0] = 0; /* Set return value for child */

 	p->thread.sp = (unsigned long) childregs;
 	p->thread.pc = (unsigned long) ret_from_fork;

 	p->thread.ubc_pc = 0;

 	return 0;
 }

 /* Tracing by user break controller. */
 static void
 ubc_set_tracing(int asid, unsigned long pc)
 {
 	ctrl_outl(pc, UBC_BARA);

 #ifdef CONFIG_MMU
 	/* We don't have any ASID settings for the SH-2! */
 	if (cpu_data->type != CPU_SH7604)
 		ctrl_outb(asid, UBC_BASRA);
 #endif

 	ctrl_outl(0, UBC_BAMRA);

 	if (cpu_data->type == CPU_SH7729 || cpu_data->type == CPU_SH7710) {
 		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
 		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
 	} else {
 		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
 		ctrl_outw(BRCR_PCBA, UBC_BRCR);
 	}
 }

 /*
  * switch_to(x,y) should switch tasks from x to y.
  *
  */
 struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
 {
 #if defined(CONFIG_SH_FPU)
 	unlazy_fpu(prev, task_pt_regs(prev));
 #endif

 #ifdef CONFIG_PREEMPT
 	{
 		unsigned long flags;
 		struct pt_regs *regs;

 		local_irq_save(flags);
 		regs = task_pt_regs(prev);
 		if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
 			int offset = (int)regs->regs[15];

 			/* Reset stack pointer: clear critical region mark */
 			regs->regs[15] = regs->regs[1];
 			if (regs->pc < regs->regs[0])
 				/* Go to rewind point */
 				regs->pc = regs->regs[0] + offset;
 		}
 		local_irq_restore(flags);
 	}
 #endif

 #ifdef CONFIG_MMU
 	/*
 	 * Restore the kernel mode register
 	 * k7 (r7_bank1)
 	 */
 	asm volatile("ldc %0, r7_bank"
 		     : /* no output */
 		     : "r" (task_thread_info(next)));
 #endif

 	/* If no tasks are using the UBC, we're done */
 	if (ubc_usercnt == 0)
 		/* If no tasks are using the UBC, we're done */;
 	else if (next->thread.ubc_pc && next->mm) {
 		int asid = 0;
 #ifdef CONFIG_MMU
-		asid |= next->mm->context & MMU_CONTEXT_ASID_MASK;
+		asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK;
 #endif
 		ubc_set_tracing(asid, next->thread.ubc_pc);
 	} else {
 		ctrl_outw(0, UBC_BBRA);
 		ctrl_outw(0, UBC_BBRB);
 	}

 	return prev;
 }

 asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
 			unsigned long r6, unsigned long r7,
 			struct pt_regs regs)
 {
 #ifdef CONFIG_MMU
 	return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
 #else
 	/* fork almost works, enough to trick you into looking elsewhere :-( */
 	return -EINVAL;
 #endif
 }

 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
 			 unsigned long parent_tidptr,
 			 unsigned long child_tidptr,
 			 struct pt_regs regs)
 {
 	if (!newsp)
 		newsp = regs.regs[15];
 	return do_fork(clone_flags, newsp, &regs, 0,
 		       (int __user *)parent_tidptr, (int __user *)child_tidptr);
 }

 /*
  * This is trivial, and on the face of it looks like it
  * could equally well be done in user mode.
  *
  * Not so, for quite unobvious reasons - register pressure.
  * In user mode vfork() cannot have a stack frame, and if
  * done by calling the "clone()" system call directly, you
  * do not have enough call-clobbered registers to hold all
  * the information you need.
  */
 asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
 			 unsigned long r6, unsigned long r7,
 			 struct pt_regs regs)
 {
 	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
 		       0, NULL, NULL);
 }

 /*
  * sys_execve() executes a new program.
  */
 asmlinkage int sys_execve(char *ufilename, char **uargv,
 			  char **uenvp, unsigned long r7,
 			  struct pt_regs regs)
 {
 	int error;
 	char *filename;

 	filename = getname((char __user *)ufilename);
 	error = PTR_ERR(filename);
 	if (IS_ERR(filename))
 		goto out;

 	error = do_execve(filename,
 			  (char __user * __user *)uargv,
 			  (char __user * __user *)uenvp,
 			  &regs);
 	if (error == 0) {
 		task_lock(current);
 		current->ptrace &= ~PT_DTRACE;
 		task_unlock(current);
 	}
 	putname(filename);
 out:
 	return error;
 }

 unsigned long get_wchan(struct task_struct *p)
 {
 	unsigned long schedule_frame;
 	unsigned long pc;

 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;

 	/*
 	 * The same comment as on the Alpha applies here, too ...
 	 */
 	pc = thread_saved_pc(p);
 	if (in_sched_functions(pc)) {
 		schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
 		return (unsigned long)((unsigned long *)schedule_frame)[1];
 	}
 	return pc;
 }

 asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
 				 unsigned long r6, unsigned long r7,
 				 struct pt_regs regs)
 {
 	/* Clear tracing. */
 	ctrl_outw(0, UBC_BBRA);
 	ctrl_outw(0, UBC_BBRB);
 	current->thread.ubc_pc = 0;
 	ubc_usercnt -= 1;

 	force_sig(SIGTRAP, current);
 }

 asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
 					  unsigned long r6, unsigned long r7,
 					  struct pt_regs regs)
 {
 	regs.pc -= 2;
 	force_sig(SIGTRAP, current);
 }
arch/sh/kernel/signal.c
1 | /* | 1 | /* |
2 | * linux/arch/sh/kernel/signal.c | 2 | * linux/arch/sh/kernel/signal.c |
3 | * | 3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | 4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | * | 5 | * |
6 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson | 6 | * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson |
7 | * | 7 | * |
8 | * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima | 8 | * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima |
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | |||
12 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
13 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
14 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
15 | #include <linux/smp_lock.h> | 14 | #include <linux/smp_lock.h> |
16 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
17 | #include <linux/signal.h> | 16 | #include <linux/signal.h> |
18 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
19 | #include <linux/wait.h> | 18 | #include <linux/wait.h> |
20 | #include <linux/ptrace.h> | 19 | #include <linux/ptrace.h> |
21 | #include <linux/unistd.h> | 20 | #include <linux/unistd.h> |
22 | #include <linux/stddef.h> | 21 | #include <linux/stddef.h> |
23 | #include <linux/tty.h> | 22 | #include <linux/tty.h> |
23 | #include <linux/elf.h> | ||
24 | #include <linux/personality.h> | 24 | #include <linux/personality.h> |
25 | #include <linux/binfmts.h> | 25 | #include <linux/binfmts.h> |
26 | 26 | ||
27 | #include <asm/ucontext.h> | 27 | #include <asm/ucontext.h> |
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/pgtable.h> | 29 | #include <asm/pgtable.h> |
30 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
31 | 31 | ||
32 | #undef DEBUG | ||
33 | |||
34 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 32 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
35 | 33 | ||
36 | /* | 34 | /* |
37 | * Atomically swap in the new signal mask, and wait for a signal. | 35 | * Atomically swap in the new signal mask, and wait for a signal. |
38 | */ | 36 | */ |
39 | asmlinkage int | 37 | asmlinkage int |
40 | sys_sigsuspend(old_sigset_t mask, | 38 | sys_sigsuspend(old_sigset_t mask, |
41 | unsigned long r5, unsigned long r6, unsigned long r7, | 39 | unsigned long r5, unsigned long r6, unsigned long r7, |
42 | struct pt_regs regs) | 40 | struct pt_regs regs) |
43 | { | 41 | { |
44 | mask &= _BLOCKABLE; | 42 | mask &= _BLOCKABLE; |
45 | spin_lock_irq(¤t->sighand->siglock); | 43 | spin_lock_irq(¤t->sighand->siglock); |
46 | current->saved_sigmask = current->blocked; | 44 | current->saved_sigmask = current->blocked; |
47 | siginitset(¤t->blocked, mask); | 45 | siginitset(¤t->blocked, mask); |
48 | recalc_sigpending(); | 46 | recalc_sigpending(); |
49 | spin_unlock_irq(¤t->sighand->siglock); | 47 | spin_unlock_irq(¤t->sighand->siglock); |
50 | 48 | ||
51 | current->state = TASK_INTERRUPTIBLE; | 49 | current->state = TASK_INTERRUPTIBLE; |
52 | schedule(); | 50 | schedule(); |
53 | set_thread_flag(TIF_RESTORE_SIGMASK); | 51 | set_thread_flag(TIF_RESTORE_SIGMASK); |
54 | return -ERESTARTNOHAND; | 52 | return -ERESTARTNOHAND; |
55 | } | 53 | } |
56 | 54 | ||
57 | asmlinkage int | 55 | asmlinkage int |
58 | sys_sigaction(int sig, const struct old_sigaction __user *act, | 56 | sys_sigaction(int sig, const struct old_sigaction __user *act, |
59 | struct old_sigaction __user *oact) | 57 | struct old_sigaction __user *oact) |
60 | { | 58 | { |
61 | struct k_sigaction new_ka, old_ka; | 59 | struct k_sigaction new_ka, old_ka; |
62 | int ret; | 60 | int ret; |
63 | 61 | ||
64 | if (act) { | 62 | if (act) { |
65 | old_sigset_t mask; | 63 | old_sigset_t mask; |
66 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | 64 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || |
67 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || | 65 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || |
68 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) | 66 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) |
69 | return -EFAULT; | 67 | return -EFAULT; |
70 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); | 68 | __get_user(new_ka.sa.sa_flags, &act->sa_flags); |
71 | __get_user(mask, &act->sa_mask); | 69 | __get_user(mask, &act->sa_mask); |
72 | siginitset(&new_ka.sa.sa_mask, mask); | 70 | siginitset(&new_ka.sa.sa_mask, mask); |
73 | } | 71 | } |
74 | 72 | ||
75 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | 73 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); |
76 | 74 | ||
77 | if (!ret && oact) { | 75 | if (!ret && oact) { |
78 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | 76 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || |
79 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || | 77 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || |
80 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) | 78 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) |
81 | return -EFAULT; | 79 | return -EFAULT; |
82 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | 80 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags); |
83 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); | 81 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); |
84 | } | 82 | } |
85 | 83 | ||
86 | return ret; | 84 | return ret; |
87 | } | 85 | } |
88 | 86 | ||
89 | asmlinkage int | 87 | asmlinkage int |
90 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | 88 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
91 | unsigned long r6, unsigned long r7, | 89 | unsigned long r6, unsigned long r7, |
92 | struct pt_regs regs) | 90 | struct pt_regs regs) |
93 | { | 91 | { |
94 | return do_sigaltstack(uss, uoss, regs.regs[15]); | 92 | return do_sigaltstack(uss, uoss, regs.regs[15]); |
95 | } | 93 | } |
96 | 94 | ||
97 | 95 | ||
98 | /* | 96 | /* |
99 | * Do a signal return; undo the signal stack. | 97 | * Do a signal return; undo the signal stack. |
100 | */ | 98 | */ |
101 | 99 | ||
102 | #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ | 100 | #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ |
103 | #define TRAP16 0xc310 /* Syscall w/no args (NR in R3) */ | 101 | #define TRAP16 0xc310 /* Syscall w/no args (NR in R3) */ |
104 | #define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */ | 102 | #define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */ |
105 | 103 | ||
106 | struct sigframe | 104 | struct sigframe |
107 | { | 105 | { |
108 | struct sigcontext sc; | 106 | struct sigcontext sc; |
109 | unsigned long extramask[_NSIG_WORDS-1]; | 107 | unsigned long extramask[_NSIG_WORDS-1]; |
110 | u16 retcode[8]; | 108 | u16 retcode[8]; |
111 | }; | 109 | }; |
112 | 110 | ||
113 | struct rt_sigframe | 111 | struct rt_sigframe |
114 | { | 112 | { |
115 | struct siginfo info; | 113 | struct siginfo info; |
116 | struct ucontext uc; | 114 | struct ucontext uc; |
117 | u16 retcode[8]; | 115 | u16 retcode[8]; |
118 | }; | 116 | }; |
119 | 117 | ||
120 | #ifdef CONFIG_SH_FPU | 118 | #ifdef CONFIG_SH_FPU |
121 | static inline int restore_sigcontext_fpu(struct sigcontext __user *sc) | 119 | static inline int restore_sigcontext_fpu(struct sigcontext __user *sc) |
122 | { | 120 | { |
123 | struct task_struct *tsk = current; | 121 | struct task_struct *tsk = current; |
124 | 122 | ||
125 | if (!(cpu_data->flags & CPU_HAS_FPU)) | 123 | if (!(cpu_data->flags & CPU_HAS_FPU)) |
126 | return 0; | 124 | return 0; |
127 | 125 | ||
128 | set_used_math(); | 126 | set_used_math(); |
129 | return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0], | 127 | return __copy_from_user(&tsk->thread.fpu.hard, &sc->sc_fpregs[0], |
130 | sizeof(long)*(16*2+2)); | 128 | sizeof(long)*(16*2+2)); |
131 | } | 129 | } |
132 | 130 | ||
133 | static inline int save_sigcontext_fpu(struct sigcontext __user *sc, | 131 | static inline int save_sigcontext_fpu(struct sigcontext __user *sc, |
134 | struct pt_regs *regs) | 132 | struct pt_regs *regs) |
135 | { | 133 | { |
136 | struct task_struct *tsk = current; | 134 | struct task_struct *tsk = current; |
137 | 135 | ||
138 | if (!(cpu_data->flags & CPU_HAS_FPU)) | 136 | if (!(cpu_data->flags & CPU_HAS_FPU)) |
139 | return 0; | 137 | return 0; |
140 | 138 | ||
141 | if (!used_math()) { | 139 | if (!used_math()) { |
142 | __put_user(0, &sc->sc_ownedfp); | 140 | __put_user(0, &sc->sc_ownedfp); |
143 | return 0; | 141 | return 0; |
144 | } | 142 | } |
145 | 143 | ||
146 | __put_user(1, &sc->sc_ownedfp); | 144 | __put_user(1, &sc->sc_ownedfp); |
147 | 145 | ||
148 | /* This will cause a "finit" to be triggered by the next | 146 | /* This will cause a "finit" to be triggered by the next |
149 | attempted FPU operation by the 'current' process. | 147 | attempted FPU operation by the 'current' process. |
150 | */ | 148 | */ |
151 | clear_used_math(); | 149 | clear_used_math(); |
152 | 150 | ||
153 | unlazy_fpu(tsk, regs); | 151 | unlazy_fpu(tsk, regs); |
154 | return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard, | 152 | return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.fpu.hard, |
155 | sizeof(long)*(16*2+2)); | 153 | sizeof(long)*(16*2+2)); |
156 | } | 154 | } |
157 | #endif /* CONFIG_SH_FPU */ | 155 | #endif /* CONFIG_SH_FPU */ |
158 | 156 | ||
159 | static int | 157 | static int |
160 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p) | 158 | restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p) |
161 | { | 159 | { |
162 | unsigned int err = 0; | 160 | unsigned int err = 0; |
163 | 161 | ||
164 | #define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) | 162 | #define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) |
165 | COPY(regs[1]); | 163 | COPY(regs[1]); |
166 | COPY(regs[2]); COPY(regs[3]); | 164 | COPY(regs[2]); COPY(regs[3]); |
167 | COPY(regs[4]); COPY(regs[5]); | 165 | COPY(regs[4]); COPY(regs[5]); |
168 | COPY(regs[6]); COPY(regs[7]); | 166 | COPY(regs[6]); COPY(regs[7]); |
169 | COPY(regs[8]); COPY(regs[9]); | 167 | COPY(regs[8]); COPY(regs[9]); |
170 | COPY(regs[10]); COPY(regs[11]); | 168 | COPY(regs[10]); COPY(regs[11]); |
171 | COPY(regs[12]); COPY(regs[13]); | 169 | COPY(regs[12]); COPY(regs[13]); |
172 | COPY(regs[14]); COPY(regs[15]); | 170 | COPY(regs[14]); COPY(regs[15]); |
173 | COPY(gbr); COPY(mach); | 171 | COPY(gbr); COPY(mach); |
174 | COPY(macl); COPY(pr); | 172 | COPY(macl); COPY(pr); |
175 | COPY(sr); COPY(pc); | 173 | COPY(sr); COPY(pc); |
176 | #undef COPY | 174 | #undef COPY |
177 | 175 | ||
178 | #ifdef CONFIG_SH_FPU | 176 | #ifdef CONFIG_SH_FPU |
179 | if (cpu_data->flags & CPU_HAS_FPU) { | 177 | if (cpu_data->flags & CPU_HAS_FPU) { |
180 | int owned_fp; | 178 | int owned_fp; |
181 | struct task_struct *tsk = current; | 179 | struct task_struct *tsk = current; |
182 | 180 | ||
183 | regs->sr |= SR_FD; /* Release FPU */ | 181 | regs->sr |= SR_FD; /* Release FPU */ |
184 | clear_fpu(tsk, regs); | 182 | clear_fpu(tsk, regs); |
185 | clear_used_math(); | 183 | clear_used_math(); |
186 | __get_user (owned_fp, &sc->sc_ownedfp); | 184 | __get_user (owned_fp, &sc->sc_ownedfp); |
187 | if (owned_fp) | 185 | if (owned_fp) |
188 | err |= restore_sigcontext_fpu(sc); | 186 | err |= restore_sigcontext_fpu(sc); |
189 | } | 187 | } |
190 | #endif | 188 | #endif |
191 | 189 | ||
192 | regs->tra = -1; /* disable syscall checks */ | 190 | regs->tra = -1; /* disable syscall checks */ |
193 | err |= __get_user(*r0_p, &sc->sc_regs[0]); | 191 | err |= __get_user(*r0_p, &sc->sc_regs[0]); |
194 | return err; | 192 | return err; |
195 | } | 193 | } |
196 | 194 | ||
197 | asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, | 195 | asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, |
198 | unsigned long r6, unsigned long r7, | 196 | unsigned long r6, unsigned long r7, |
199 | struct pt_regs regs) | 197 | struct pt_regs regs) |
200 | { | 198 | { |
201 | struct sigframe __user *frame = (struct sigframe __user *)regs.regs[15]; | 199 | struct sigframe __user *frame = (struct sigframe __user *)regs.regs[15]; |
202 | sigset_t set; | 200 | sigset_t set; |
203 | int r0; | 201 | int r0; |
204 | 202 | ||
205 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 203 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
206 | goto badframe; | 204 | goto badframe; |
207 | 205 | ||
208 | if (__get_user(set.sig[0], &frame->sc.oldmask) | 206 | if (__get_user(set.sig[0], &frame->sc.oldmask) |
209 | || (_NSIG_WORDS > 1 | 207 | || (_NSIG_WORDS > 1 |
210 | && __copy_from_user(&set.sig[1], &frame->extramask, | 208 | && __copy_from_user(&set.sig[1], &frame->extramask, |
211 | sizeof(frame->extramask)))) | 209 | sizeof(frame->extramask)))) |
212 | goto badframe; | 210 | goto badframe; |
213 | 211 | ||
214 | sigdelsetmask(&set, ~_BLOCKABLE); | 212 | sigdelsetmask(&set, ~_BLOCKABLE); |
215 | 213 | ||
216 | spin_lock_irq(¤t->sighand->siglock); | 214 | spin_lock_irq(¤t->sighand->siglock); |
217 | current->blocked = set; | 215 | current->blocked = set; |
218 | recalc_sigpending(); | 216 | recalc_sigpending(); |
219 | spin_unlock_irq(¤t->sighand->siglock); | 217 | spin_unlock_irq(¤t->sighand->siglock); |
220 | 218 | ||
221 | if (restore_sigcontext(®s, &frame->sc, &r0)) | 219 | if (restore_sigcontext(®s, &frame->sc, &r0)) |
222 | goto badframe; | 220 | goto badframe; |
223 | return r0; | 221 | return r0; |
224 | 222 | ||
225 | badframe: | 223 | badframe: |
226 | force_sig(SIGSEGV, current); | 224 | force_sig(SIGSEGV, current); |
227 | return 0; | 225 | return 0; |
228 | } | 226 | } |
229 | 227 | ||
230 | asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, | 228 | asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, |
231 | unsigned long r6, unsigned long r7, | 229 | unsigned long r6, unsigned long r7, |
232 | struct pt_regs regs) | 230 | struct pt_regs regs) |
233 | { | 231 | { |
234 | struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.regs[15]; | 232 | struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.regs[15]; |
235 | sigset_t set; | 233 | sigset_t set; |
236 | stack_t st; | 234 | stack_t st; |
237 | int r0; | 235 | int r0; |
238 | 236 | ||
239 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | 237 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) |
240 | goto badframe; | 238 | goto badframe; |
241 | 239 | ||
242 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | 240 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) |
243 | goto badframe; | 241 | goto badframe; |
244 | 242 | ||
245 | sigdelsetmask(&set, ~_BLOCKABLE); | 243 | sigdelsetmask(&set, ~_BLOCKABLE); |
246 | spin_lock_irq(¤t->sighand->siglock); | 244 | spin_lock_irq(¤t->sighand->siglock); |
247 | current->blocked = set; | 245 | current->blocked = set; |
248 | recalc_sigpending(); | 246 | recalc_sigpending(); |
249 | spin_unlock_irq(¤t->sighand->siglock); | 247 | spin_unlock_irq(¤t->sighand->siglock); |
250 | 248 | ||
251 | if (restore_sigcontext(®s, &frame->uc.uc_mcontext, &r0)) | 249 | if (restore_sigcontext(®s, &frame->uc.uc_mcontext, &r0)) |
252 | goto badframe; | 250 | goto badframe; |
253 | 251 | ||
254 | if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) | 252 | if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) |
255 | goto badframe; | 253 | goto badframe; |
256 | /* It is more difficult to avoid calling this function than to | 254 | /* It is more difficult to avoid calling this function than to |
257 | call it and ignore errors. */ | 255 | call it and ignore errors. */ |
258 | do_sigaltstack(&st, NULL, regs.regs[15]); | 256 | do_sigaltstack(&st, NULL, regs.regs[15]); |
259 | 257 | ||
260 | return r0; | 258 | return r0; |
261 | 259 | ||
262 | badframe: | 260 | badframe: |
263 | force_sig(SIGSEGV, current); | 261 | force_sig(SIGSEGV, current); |
264 | return 0; | 262 | return 0; |
265 | } | 263 | } |
266 | 264 | ||
267 | /* | 265 | /* |
268 | * Set up a signal frame. | 266 | * Set up a signal frame. |
269 | */ | 267 | */ |
270 | 268 | ||
271 | static int | 269 | static int |
272 | setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | 270 | setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, |
273 | unsigned long mask) | 271 | unsigned long mask) |
274 | { | 272 | { |
275 | int err = 0; | 273 | int err = 0; |
276 | 274 | ||
277 | #define COPY(x) err |= __put_user(regs->x, &sc->sc_##x) | 275 | #define COPY(x) err |= __put_user(regs->x, &sc->sc_##x) |
278 | COPY(regs[0]); COPY(regs[1]); | 276 | COPY(regs[0]); COPY(regs[1]); |
279 | COPY(regs[2]); COPY(regs[3]); | 277 | COPY(regs[2]); COPY(regs[3]); |
280 | COPY(regs[4]); COPY(regs[5]); | 278 | COPY(regs[4]); COPY(regs[5]); |
281 | COPY(regs[6]); COPY(regs[7]); | 279 | COPY(regs[6]); COPY(regs[7]); |
282 | COPY(regs[8]); COPY(regs[9]); | 280 | COPY(regs[8]); COPY(regs[9]); |
283 | COPY(regs[10]); COPY(regs[11]); | 281 | COPY(regs[10]); COPY(regs[11]); |
284 | COPY(regs[12]); COPY(regs[13]); | 282 | COPY(regs[12]); COPY(regs[13]); |
285 | COPY(regs[14]); COPY(regs[15]); | 283 | COPY(regs[14]); COPY(regs[15]); |
286 | COPY(gbr); COPY(mach); | 284 | COPY(gbr); COPY(mach); |
287 | COPY(macl); COPY(pr); | 285 | COPY(macl); COPY(pr); |
288 | COPY(sr); COPY(pc); | 286 | COPY(sr); COPY(pc); |
289 | #undef COPY | 287 | #undef COPY |
290 | 288 | ||
291 | #ifdef CONFIG_SH_FPU | 289 | #ifdef CONFIG_SH_FPU |
292 | err |= save_sigcontext_fpu(sc, regs); | 290 | err |= save_sigcontext_fpu(sc, regs); |
293 | #endif | 291 | #endif |
294 | 292 | ||
295 | /* non-iBCS2 extensions.. */ | 293 | /* non-iBCS2 extensions.. */ |
296 | err |= __put_user(mask, &sc->oldmask); | 294 | err |= __put_user(mask, &sc->oldmask); |
297 | 295 | ||
298 | return err; | 296 | return err; |
299 | } | 297 | } |
300 | 298 | ||
301 | /* | 299 | /* |
302 | * Determine which stack to use.. | 300 | * Determine which stack to use.. |
303 | */ | 301 | */ |
304 | static inline void __user * | 302 | static inline void __user * |
305 | get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) | 303 | get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) |
306 | { | 304 | { |
307 | if (ka->sa.sa_flags & SA_ONSTACK) { | 305 | if (ka->sa.sa_flags & SA_ONSTACK) { |
308 | if (sas_ss_flags(sp) == 0) | 306 | if (sas_ss_flags(sp) == 0) |
309 | sp = current->sas_ss_sp + current->sas_ss_size; | 307 | sp = current->sas_ss_sp + current->sas_ss_size; |
310 | } | 308 | } |
311 | 309 | ||
312 | return (void __user *)((sp - frame_size) & -8ul); | 310 | return (void __user *)((sp - frame_size) & -8ul); |
313 | } | 311 | } |
314 | 312 | ||
313 | /* These symbols are defined with the addresses in the vsyscall page. | ||
314 | See vsyscall-trapa.S. */ | ||
315 | extern void __user __kernel_sigreturn; | ||
316 | extern void __user __kernel_rt_sigreturn; | ||
317 | |||
315 | static int setup_frame(int sig, struct k_sigaction *ka, | 318 | static int setup_frame(int sig, struct k_sigaction *ka, |
316 | sigset_t *set, struct pt_regs *regs) | 319 | sigset_t *set, struct pt_regs *regs) |
317 | { | 320 | { |
318 | struct sigframe __user *frame; | 321 | struct sigframe __user *frame; |
319 | int err = 0; | 322 | int err = 0; |
320 | int signal; | 323 | int signal; |
321 | 324 | ||
322 | frame = get_sigframe(ka, regs->regs[15], sizeof(*frame)); | 325 | frame = get_sigframe(ka, regs->regs[15], sizeof(*frame)); |
323 | 326 | ||
324 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 327 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
325 | goto give_sigsegv; | 328 | goto give_sigsegv; |
326 | 329 | ||
327 | signal = current_thread_info()->exec_domain | 330 | signal = current_thread_info()->exec_domain |
328 | && current_thread_info()->exec_domain->signal_invmap | 331 | && current_thread_info()->exec_domain->signal_invmap |
329 | && sig < 32 | 332 | && sig < 32 |
330 | ? current_thread_info()->exec_domain->signal_invmap[sig] | 333 | ? current_thread_info()->exec_domain->signal_invmap[sig] |
331 | : sig; | 334 | : sig; |
332 | 335 | ||
333 | err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); | 336 | err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); |
334 | 337 | ||
335 | if (_NSIG_WORDS > 1) | 338 | if (_NSIG_WORDS > 1) |
336 | err |= __copy_to_user(frame->extramask, &set->sig[1], | 339 | err |= __copy_to_user(frame->extramask, &set->sig[1], |
337 | sizeof(frame->extramask)); | 340 | sizeof(frame->extramask)); |
338 | 341 | ||
339 | /* Set up to return from userspace. If provided, use a stub | 342 | /* Set up to return from userspace. If provided, use a stub |
340 | already in userspace. */ | 343 | already in userspace. */ |
341 | if (ka->sa.sa_flags & SA_RESTORER) { | 344 | if (ka->sa.sa_flags & SA_RESTORER) { |
342 | regs->pr = (unsigned long) ka->sa.sa_restorer; | 345 | regs->pr = (unsigned long) ka->sa.sa_restorer; |
346 | #ifdef CONFIG_VSYSCALL | ||
347 | } else if (likely(current->mm->context.vdso)) { | ||
348 | regs->pr = VDSO_SYM(&__kernel_sigreturn); | ||
349 | #endif | ||
343 | } else { | 350 | } else { |
344 | /* Generate return code (system call to sigreturn) */ | 351 | /* Generate return code (system call to sigreturn) */ |
345 | err |= __put_user(MOVW(7), &frame->retcode[0]); | 352 | err |= __put_user(MOVW(7), &frame->retcode[0]); |
346 | err |= __put_user(TRAP16, &frame->retcode[1]); | 353 | err |= __put_user(TRAP16, &frame->retcode[1]); |
347 | err |= __put_user(OR_R0_R0, &frame->retcode[2]); | 354 | err |= __put_user(OR_R0_R0, &frame->retcode[2]); |
348 | err |= __put_user(OR_R0_R0, &frame->retcode[3]); | 355 | err |= __put_user(OR_R0_R0, &frame->retcode[3]); |
349 | err |= __put_user(OR_R0_R0, &frame->retcode[4]); | 356 | err |= __put_user(OR_R0_R0, &frame->retcode[4]); |
350 | err |= __put_user(OR_R0_R0, &frame->retcode[5]); | 357 | err |= __put_user(OR_R0_R0, &frame->retcode[5]); |
351 | err |= __put_user(OR_R0_R0, &frame->retcode[6]); | 358 | err |= __put_user(OR_R0_R0, &frame->retcode[6]); |
352 | err |= __put_user((__NR_sigreturn), &frame->retcode[7]); | 359 | err |= __put_user((__NR_sigreturn), &frame->retcode[7]); |
353 | regs->pr = (unsigned long) frame->retcode; | 360 | regs->pr = (unsigned long) frame->retcode; |
354 | } | 361 | } |
355 | 362 | ||
356 | if (err) | 363 | if (err) |
357 | goto give_sigsegv; | 364 | goto give_sigsegv; |
358 | 365 | ||
359 | /* Set up registers for signal handler */ | 366 | /* Set up registers for signal handler */ |
360 | regs->regs[15] = (unsigned long) frame; | 367 | regs->regs[15] = (unsigned long) frame; |
361 | regs->regs[4] = signal; /* Arg for signal handler */ | 368 | regs->regs[4] = signal; /* Arg for signal handler */ |
362 | regs->regs[5] = 0; | 369 | regs->regs[5] = 0; |
363 | regs->regs[6] = (unsigned long) &frame->sc; | 370 | regs->regs[6] = (unsigned long) &frame->sc; |
364 | regs->pc = (unsigned long) ka->sa.sa_handler; | 371 | regs->pc = (unsigned long) ka->sa.sa_handler; |
365 | 372 | ||
366 | set_fs(USER_DS); | 373 | set_fs(USER_DS); |
367 | 374 | ||
368 | pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", | 375 | pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", |
369 | current->comm, current->pid, frame, regs->pc, regs->pr); | 376 | current->comm, current->pid, frame, regs->pc, regs->pr); |
370 | 377 | ||
371 | flush_cache_sigtramp(regs->pr); | 378 | flush_cache_sigtramp(regs->pr); |
372 | 379 | ||
373 | if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode)) | 380 | if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode)) |
374 | flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES); | 381 | flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES); |
375 | 382 | ||
376 | return 0; | 383 | return 0; |
377 | 384 | ||
378 | give_sigsegv: | 385 | give_sigsegv: |
379 | force_sigsegv(sig, current); | 386 | force_sigsegv(sig, current); |
380 | return -EFAULT; | 387 | return -EFAULT; |
381 | } | 388 | } |
382 | 389 | ||
383 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | 390 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, |
384 | sigset_t *set, struct pt_regs *regs) | 391 | sigset_t *set, struct pt_regs *regs) |
385 | { | 392 | { |
386 | struct rt_sigframe __user *frame; | 393 | struct rt_sigframe __user *frame; |
387 | int err = 0; | 394 | int err = 0; |
388 | int signal; | 395 | int signal; |
389 | 396 | ||
390 | frame = get_sigframe(ka, regs->regs[15], sizeof(*frame)); | 397 | frame = get_sigframe(ka, regs->regs[15], sizeof(*frame)); |
391 | 398 | ||
392 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | 399 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) |
393 | goto give_sigsegv; | 400 | goto give_sigsegv; |
394 | 401 | ||
395 | signal = current_thread_info()->exec_domain | 402 | signal = current_thread_info()->exec_domain |
396 | && current_thread_info()->exec_domain->signal_invmap | 403 | && current_thread_info()->exec_domain->signal_invmap |
397 | && sig < 32 | 404 | && sig < 32 |
398 | ? current_thread_info()->exec_domain->signal_invmap[sig] | 405 | ? current_thread_info()->exec_domain->signal_invmap[sig] |
399 | : sig; | 406 | : sig; |
400 | 407 | ||
401 | err |= copy_siginfo_to_user(&frame->info, info); | 408 | err |= copy_siginfo_to_user(&frame->info, info); |
402 | 409 | ||
403 | /* Create the ucontext. */ | 410 | /* Create the ucontext. */ |
404 | err |= __put_user(0, &frame->uc.uc_flags); | 411 | err |= __put_user(0, &frame->uc.uc_flags); |
405 | err |= __put_user(0, &frame->uc.uc_link); | 412 | err |= __put_user(0, &frame->uc.uc_link); |
406 | err |= __put_user((void *)current->sas_ss_sp, | 413 | err |= __put_user((void *)current->sas_ss_sp, |
407 | &frame->uc.uc_stack.ss_sp); | 414 | &frame->uc.uc_stack.ss_sp); |
408 | err |= __put_user(sas_ss_flags(regs->regs[15]), | 415 | err |= __put_user(sas_ss_flags(regs->regs[15]), |
409 | &frame->uc.uc_stack.ss_flags); | 416 | &frame->uc.uc_stack.ss_flags); |
410 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | 417 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); |
411 | err |= setup_sigcontext(&frame->uc.uc_mcontext, | 418 | err |= setup_sigcontext(&frame->uc.uc_mcontext, |
412 | regs, set->sig[0]); | 419 | regs, set->sig[0]); |
413 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 420 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
414 | 421 | ||
415 | /* Set up to return from userspace. If provided, use a stub | 422 | /* Set up to return from userspace. If provided, use a stub |
416 | already in userspace. */ | 423 | already in userspace. */ |
417 | if (ka->sa.sa_flags & SA_RESTORER) { | 424 | if (ka->sa.sa_flags & SA_RESTORER) { |
418 | regs->pr = (unsigned long) ka->sa.sa_restorer; | 425 | regs->pr = (unsigned long) ka->sa.sa_restorer; |
426 | #ifdef CONFIG_VSYSCALL | ||
427 | } else if (likely(current->mm->context.vdso)) { | ||
428 | regs->pr = VDSO_SYM(&__kernel_rt_sigreturn); | ||
429 | #endif | ||
419 | } else { | 430 | } else { |
420 | /* Generate return code (system call to rt_sigreturn) */ | 431 | /* Generate return code (system call to rt_sigreturn) */ |
421 | err |= __put_user(MOVW(7), &frame->retcode[0]); | 432 | err |= __put_user(MOVW(7), &frame->retcode[0]); |
422 | err |= __put_user(TRAP16, &frame->retcode[1]); | 433 | err |= __put_user(TRAP16, &frame->retcode[1]); |
423 | err |= __put_user(OR_R0_R0, &frame->retcode[2]); | 434 | err |= __put_user(OR_R0_R0, &frame->retcode[2]); |
424 | err |= __put_user(OR_R0_R0, &frame->retcode[3]); | 435 | err |= __put_user(OR_R0_R0, &frame->retcode[3]); |
425 | err |= __put_user(OR_R0_R0, &frame->retcode[4]); | 436 | err |= __put_user(OR_R0_R0, &frame->retcode[4]); |
426 | err |= __put_user(OR_R0_R0, &frame->retcode[5]); | 437 | err |= __put_user(OR_R0_R0, &frame->retcode[5]); |
427 | err |= __put_user(OR_R0_R0, &frame->retcode[6]); | 438 | err |= __put_user(OR_R0_R0, &frame->retcode[6]); |
428 | err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]); | 439 | err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]); |
429 | regs->pr = (unsigned long) frame->retcode; | 440 | regs->pr = (unsigned long) frame->retcode; |
430 | } | 441 | } |
431 | 442 | ||
432 | if (err) | 443 | if (err) |
433 | goto give_sigsegv; | 444 | goto give_sigsegv; |
434 | 445 | ||
435 | /* Set up registers for signal handler */ | 446 | /* Set up registers for signal handler */ |
436 | regs->regs[15] = (unsigned long) frame; | 447 | regs->regs[15] = (unsigned long) frame; |
437 | regs->regs[4] = signal; /* Arg for signal handler */ | 448 | regs->regs[4] = signal; /* Arg for signal handler */ |
438 | regs->regs[5] = (unsigned long) &frame->info; | 449 | regs->regs[5] = (unsigned long) &frame->info; |
439 | regs->regs[6] = (unsigned long) &frame->uc; | 450 | regs->regs[6] = (unsigned long) &frame->uc; |
440 | regs->pc = (unsigned long) ka->sa.sa_handler; | 451 | regs->pc = (unsigned long) ka->sa.sa_handler; |
441 | 452 | ||
442 | set_fs(USER_DS); | 453 | set_fs(USER_DS); |
443 | 454 | ||
444 | pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", | 455 | pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", |
445 | current->comm, current->pid, frame, regs->pc, regs->pr); | 456 | current->comm, current->pid, frame, regs->pc, regs->pr); |
446 | 457 | ||
447 | flush_cache_sigtramp(regs->pr); | 458 | flush_cache_sigtramp(regs->pr); |
448 | 459 | ||
449 | if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode)) | 460 | if ((-regs->pr & (L1_CACHE_BYTES-1)) < sizeof(frame->retcode)) |
450 | flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES); | 461 | flush_cache_sigtramp(regs->pr + L1_CACHE_BYTES); |
451 | 462 | ||
452 | return 0; | 463 | return 0; |
453 | 464 | ||
454 | give_sigsegv: | 465 | give_sigsegv: |
455 | force_sigsegv(sig, current); | 466 | force_sigsegv(sig, current); |
456 | return -EFAULT; | 467 | return -EFAULT; |
457 | } | 468 | } |
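
The VDSO_SYM() helper in the fallback above is supplied by the asm-sh headers touched by this commit (those hunks are not shown here). A minimal sketch of its likely shape, assuming the DSO's symbols resolve to their in-page offsets courtesy of the ld -R mirror object arranged in the vsyscall Makefile further down:

	/* Sketch only; the authoritative definitions live in the asm-sh headers
	 * touched by this commit. The DSO is linked at a zero base, so
	 * &__kernel_rt_sigreturn (made visible to kernel code by linking
	 * against vsyscall-syms.o) is just the trampoline's offset within the
	 * page; adding the per-mm mapping base yields its user virtual address.
	 */
	#define VDSO_BASE	((unsigned long)current->mm->context.vdso)
	#define VDSO_SYM(x)	(VDSO_BASE + (unsigned long)(x))
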
458 | 469 | ||
459 | /* | 470 | /* |
460 | * OK, we're invoking a handler | 471 | * OK, we're invoking a handler |
461 | */ | 472 | */ |
462 | 473 | ||
463 | static int | 474 | static int |
464 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, | 475 | handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, |
465 | sigset_t *oldset, struct pt_regs *regs) | 476 | sigset_t *oldset, struct pt_regs *regs) |
466 | { | 477 | { |
467 | int ret; | 478 | int ret; |
468 | 479 | ||
469 | /* Are we from a system call? */ | 480 | /* Are we from a system call? */ |
470 | if (regs->tra >= 0) { | 481 | if (regs->tra >= 0) { |
471 | /* If so, check system call restarting.. */ | 482 | /* If so, check system call restarting.. */ |
472 | switch (regs->regs[0]) { | 483 | switch (regs->regs[0]) { |
473 | case -ERESTARTNOHAND: | 484 | case -ERESTARTNOHAND: |
474 | regs->regs[0] = -EINTR; | 485 | regs->regs[0] = -EINTR; |
475 | break; | 486 | break; |
476 | 487 | ||
477 | case -ERESTARTSYS: | 488 | case -ERESTARTSYS: |
478 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 489 | if (!(ka->sa.sa_flags & SA_RESTART)) { |
479 | regs->regs[0] = -EINTR; | 490 | regs->regs[0] = -EINTR; |
480 | break; | 491 | break; |
481 | } | 492 | } |
482 | /* fallthrough */ | 493 | /* fallthrough */ |
483 | case -ERESTARTNOINTR: | 494 | case -ERESTARTNOINTR: |
484 | regs->pc -= 2; | 495 | regs->pc -= 2; |
485 | } | 496 | } |
486 | } else { | 497 | } else { |
487 | /* gUSA handling */ | 498 | /* gUSA handling */ |
488 | #ifdef CONFIG_PREEMPT | 499 | #ifdef CONFIG_PREEMPT |
489 | unsigned long flags; | 500 | unsigned long flags; |
490 | 501 | ||
491 | local_irq_save(flags); | 502 | local_irq_save(flags); |
492 | #endif | 503 | #endif |
493 | if (regs->regs[15] >= 0xc0000000) { | 504 | if (regs->regs[15] >= 0xc0000000) { |
494 | int offset = (int)regs->regs[15]; | 505 | int offset = (int)regs->regs[15]; |
495 | 506 | ||
496 | /* Reset stack pointer: clear critical region mark */ | 507 | /* Reset stack pointer: clear critical region mark */ |
497 | regs->regs[15] = regs->regs[1]; | 508 | regs->regs[15] = regs->regs[1]; |
498 | if (regs->pc < regs->regs[0]) | 509 | if (regs->pc < regs->regs[0]) |
499 | /* Go to rewind point #1 */ | 510 | /* Go to rewind point #1 */ |
500 | regs->pc = regs->regs[0] + offset - 2; | 511 | regs->pc = regs->regs[0] + offset - 2; |
501 | } | 512 | } |
502 | #ifdef CONFIG_PREEMPT | 513 | #ifdef CONFIG_PREEMPT |
503 | local_irq_restore(flags); | 514 | local_irq_restore(flags); |
504 | #endif | 515 | #endif |
505 | } | 516 | } |
506 | 517 | ||
507 | /* Set up the stack frame */ | 518 | /* Set up the stack frame */ |
508 | if (ka->sa.sa_flags & SA_SIGINFO) | 519 | if (ka->sa.sa_flags & SA_SIGINFO) |
509 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | 520 | ret = setup_rt_frame(sig, ka, info, oldset, regs); |
510 | else | 521 | else |
511 | ret = setup_frame(sig, ka, oldset, regs); | 522 | ret = setup_frame(sig, ka, oldset, regs); |
512 | 523 | ||
513 | if (ka->sa.sa_flags & SA_ONESHOT) | 524 | if (ka->sa.sa_flags & SA_ONESHOT) |
514 | ka->sa.sa_handler = SIG_DFL; | 525 | ka->sa.sa_handler = SIG_DFL; |
515 | 526 | ||
516 | if (ret == 0) { | 527 | if (ret == 0) { |
517 | spin_lock_irq(¤t->sighand->siglock); | 528 | spin_lock_irq(¤t->sighand->siglock); |
518 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | 529 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); |
519 | if (!(ka->sa.sa_flags & SA_NODEFER)) | 530 | if (!(ka->sa.sa_flags & SA_NODEFER)) |
520 | sigaddset(¤t->blocked,sig); | 531 | sigaddset(¤t->blocked,sig); |
521 | recalc_sigpending(); | 532 | recalc_sigpending(); |
522 | spin_unlock_irq(¤t->sighand->siglock); | 533 | spin_unlock_irq(¤t->sighand->siglock); |
523 | } | 534 | } |
524 | 535 | ||
525 | return ret; | 536 | return ret; |
526 | } | 537 | } |
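
A note on the gUSA branch above, since the register convention is otherwise opaque: SH userspace implements small atomic sequences ("gUSA") by stashing the real stack pointer in r1, pointing r0 at the end of the sequence, and loading r15 with the negated byte size of the critical section, which is why a "stack pointer" at or above 0xc0000000 marks a sequence in progress. A rough, hypothetical sketch of the userspace side (real libc sequences differ in detail):

	/* Illustrative gUSA atomic increment, mirroring the register roles the
	 * handler checks: r0 = rewind point, r1 = saved SP, r15 = -(section size).
	 */
	static inline void gusa_atomic_inc(int *v)
	{
		int tmp;

		__asm__ __volatile__(
			"	.align 2\n"
			"	mova	1f, r0\n"	/* r0: address of the end label */
			"	mov	r15, r1\n"	/* r1: save the real stack pointer */
			"	mov	#-6, r15\n"	/* r15: mark a 6-byte critical section */
			"	mov.l	@%1, %0\n"	/* critical section: load... */
			"	add	#1, %0\n"	/* ...modify... */
			"	mov.l	%0, @%1\n"	/* ...and store */
			"1:	mov	r1, r15\n"	/* restore SP, leaving the region */
			: "=&r" (tmp)
			: "r" (v)
			: "r0", "r1", "memory");
	}

If a signal arrives mid-sequence, the handler restores r15 from r1 and, when the PC has not yet passed the end label held in r0, rewinds it to regs[0] + offset - 2, i.e. back to the r15 marker instruction, so the whole load/modify/store re-executes from scratch.
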
527 | 538 | ||
528 | /* | 539 | /* |
529 | * Note that 'init' is a special process: it doesn't get signals it doesn't | 540 | * Note that 'init' is a special process: it doesn't get signals it doesn't |
530 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | 541 | * want to handle. Thus you cannot kill init even with a SIGKILL even by |
531 | * mistake. | 542 | * mistake. |
532 | * | 543 | * |
533 | * Note that we go through the signals twice: once to check the signals that | 544 | * Note that we go through the signals twice: once to check the signals that |
534 | * the kernel can handle, and then we build all the user-level signal handling | 545 | * the kernel can handle, and then we build all the user-level signal handling |
535 | * stack-frames in one go after that. | 546 | * stack-frames in one go after that. |
536 | */ | 547 | */ |
537 | static void do_signal(struct pt_regs *regs, unsigned int save_r0) | 548 | static void do_signal(struct pt_regs *regs, unsigned int save_r0) |
538 | { | 549 | { |
539 | siginfo_t info; | 550 | siginfo_t info; |
540 | int signr; | 551 | int signr; |
541 | struct k_sigaction ka; | 552 | struct k_sigaction ka; |
542 | sigset_t *oldset; | 553 | sigset_t *oldset; |
543 | 554 | ||
544 | /* | 555 | /* |
545 | * We want the common case to go fast, which | 556 | * We want the common case to go fast, which |
546 | * is why we may in certain cases get here from | 557 | * is why we may in certain cases get here from |
547 | * kernel mode. Just return without doing anything | 558 | * kernel mode. Just return without doing anything |
548 | * if so. | 559 | * if so. |
549 | */ | 560 | */ |
550 | if (!user_mode(regs)) | 561 | if (!user_mode(regs)) |
551 | return; | 562 | return; |
552 | 563 | ||
553 | if (try_to_freeze()) | 564 | if (try_to_freeze()) |
554 | goto no_signal; | 565 | goto no_signal; |
555 | 566 | ||
556 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 567 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
557 | oldset = ¤t->saved_sigmask; | 568 | oldset = ¤t->saved_sigmask; |
558 | else | 569 | else |
559 | oldset = ¤t->blocked; | 570 | oldset = ¤t->blocked; |
560 | 571 | ||
561 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | 572 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
562 | if (signr > 0) { | 573 | if (signr > 0) { |
563 | /* Whee! Actually deliver the signal. */ | 574 | /* Whee! Actually deliver the signal. */ |
564 | if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { | 575 | if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { |
565 | /* a signal was successfully delivered; the saved | 576 | /* a signal was successfully delivered; the saved |
566 | * sigmask will have been stored in the signal frame, | 577 | * sigmask will have been stored in the signal frame, |
567 | * and will be restored by sigreturn, so we can simply | 578 | * and will be restored by sigreturn, so we can simply |
568 | * clear the TIF_RESTORE_SIGMASK flag */ | 579 | * clear the TIF_RESTORE_SIGMASK flag */ |
569 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 580 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
570 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 581 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
571 | } | 582 | } |
572 | } | 583 | } |
573 | 584 | ||
574 | no_signal: | 585 | no_signal: |
575 | /* Did we come from a system call? */ | 586 | /* Did we come from a system call? */ |
576 | if (regs->tra >= 0) { | 587 | if (regs->tra >= 0) { |
577 | /* Restart the system call - no handlers present */ | 588 | /* Restart the system call - no handlers present */ |
578 | if (regs->regs[0] == -ERESTARTNOHAND || | 589 | if (regs->regs[0] == -ERESTARTNOHAND || |
579 | regs->regs[0] == -ERESTARTSYS || | 590 | regs->regs[0] == -ERESTARTSYS || |
580 | regs->regs[0] == -ERESTARTNOINTR) { | 591 | regs->regs[0] == -ERESTARTNOINTR) { |
581 | regs->regs[0] = save_r0; | 592 | regs->regs[0] = save_r0; |
582 | regs->pc -= 2; | 593 | regs->pc -= 2; |
583 | } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) { | 594 | } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) { |
584 | regs->pc -= 2; | 595 | regs->pc -= 2; |
585 | regs->regs[3] = __NR_restart_syscall; | 596 | regs->regs[3] = __NR_restart_syscall; |
586 | } | 597 | } |
587 | } | 598 | } |
588 | 599 | ||
589 | /* if there's no signal to deliver, we just put the saved sigmask | 600 | /* if there's no signal to deliver, we just put the saved sigmask |
590 | * back */ | 601 | * back */ |
591 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | 602 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { |
592 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 603 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
593 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 604 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); |
594 | } | 605 | } |
595 | } | 606 | } |
596 | 607 | ||
597 | asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, | 608 | asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, |
598 | __u32 thread_info_flags) | 609 | __u32 thread_info_flags) |
599 | { | 610 | { |
600 | /* deal with pending signal delivery */ | 611 | /* deal with pending signal delivery */ |
601 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) | 612 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) |
arch/sh/kernel/vsyscall/Makefile
File was created | 1 | obj-y += vsyscall.o vsyscall-syscall.o | |
2 | |||
3 | $(obj)/vsyscall-syscall.o: \ | ||
4 | $(foreach F,trapa,$(obj)/vsyscall-$F.so) | ||
5 | |||
6 | # Teach kbuild about targets | ||
7 | targets += $(foreach F,trapa,vsyscall-$F.o vsyscall-$F.so) | ||
8 | targets += vsyscall-note.o vsyscall.lds | ||
9 | |||
10 | # The DSO images are built using a special linker script | ||
11 | quiet_cmd_syscall = SYSCALL $@ | ||
12 | cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \ | ||
13 | -Wl,-T,$(filter-out FORCE,$^) -o $@ | ||
14 | |||
15 | export CPPFLAGS_vsyscall.lds += -P -C -Ush | ||
16 | |||
17 | vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \ | ||
18 | $(call ld-option, -Wl$(comma)--hash-style=sysv) | ||
19 | |||
20 | SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags) | ||
21 | |||
22 | $(obj)/vsyscall-trapa.so: \ | ||
23 | $(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE | ||
24 | $(call if_changed,syscall) | ||
25 | |||
26 | # We also create a special relocatable object that should mirror the symbol | ||
27 | # table and layout of the linked DSO. With ld -R we can then refer to | ||
28 | # these symbols in the kernel code rather than hand-coded addresses. | ||
29 | extra-y += vsyscall-syms.o | ||
30 | $(obj)/built-in.o: $(obj)/vsyscall-syms.o | ||
31 | $(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o | ||
32 | |||
33 | SYSCFLAGS_vsyscall-syms.o = -r | ||
34 | $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \ | ||
35 | $(obj)/vsyscall-trapa.o $(obj)/vsyscall-note.o FORCE | ||
36 | $(call if_changed,syscall) | ||
37 |
arch/sh/kernel/vsyscall/vsyscall-note.S
File was created | 1 | /* | |
2 | * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. | ||
3 | * Here we can supply some information useful to userland. | ||
4 | */ | ||
5 | |||
6 | #include <linux/uts.h> | ||
7 | #include <linux/version.h> | ||
8 | |||
9 | #define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \ | ||
10 | .section name, flags; \ | ||
11 | .balign 4; \ | ||
12 | .long 1f - 0f; /* name length */ \ | ||
13 | .long 3f - 2f; /* data length */ \ | ||
14 | .long type; /* note type */ \ | ||
15 | 0: .asciz vendor; /* vendor name */ \ | ||
16 | 1: .balign 4; \ | ||
17 | 2: | ||
18 | |||
19 | #define ASM_ELF_NOTE_END \ | ||
20 | 3: .balign 4; /* pad out section */ \ | ||
21 | .previous | ||
22 | |||
23 | ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0) | ||
24 | .long LINUX_VERSION_CODE | ||
25 | ASM_ELF_NOTE_END | ||
26 |
arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
File was created | 1 | #include <asm/unistd.h> | |
2 | |||
3 | .text | ||
4 | .balign 32 | ||
5 | .globl __kernel_sigreturn | ||
6 | .type __kernel_sigreturn,@function | ||
7 | __kernel_sigreturn: | ||
8 | .LSTART_sigreturn: | ||
9 | mov.w 1f, r3 | ||
10 | trapa #0x10 | ||
11 | or r0, r0 | ||
12 | or r0, r0 | ||
13 | or r0, r0 | ||
14 | or r0, r0 | ||
15 | or r0, r0 | ||
16 | |||
17 | 1: .short __NR_sigreturn | ||
18 | .LEND_sigreturn: | ||
19 | .size __kernel_sigreturn,.-.LSTART_sigreturn | ||
20 | |||
21 | .balign 32 | ||
22 | .globl __kernel_rt_sigreturn | ||
23 | .type __kernel_rt_sigreturn,@function | ||
24 | __kernel_rt_sigreturn: | ||
25 | .LSTART_rt_sigreturn: | ||
26 | mov.w 1f, r3 | ||
27 | trapa #0x10 | ||
28 | or r0, r0 | ||
29 | or r0, r0 | ||
30 | or r0, r0 | ||
31 | or r0, r0 | ||
32 | or r0, r0 | ||
33 | |||
34 | 1: .short __NR_rt_sigreturn | ||
35 | .LEND_rt_sigreturn: | ||
36 | .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn | ||
37 | |||
38 | .section .eh_frame,"a",@progbits | ||
39 | .previous | ||
40 |
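
These stubs are the vDSO counterpart of the retcode sequence that setup_frame()/setup_rt_frame() otherwise synthesize on the user stack. The opcode helpers referenced there (MOVW, TRAP16, OR_R0_R0) are defined near the top of arch/sh/kernel/signal.c, outside the hunk shown above; presumably along these lines:

	/* Sketch of the retcode opcode constants used in signal.c; the
	 * authoritative defines live at the top of that file.
	 */
	#define MOVW(n)	 (0x9300 | ((n) - 2))	/* mov.w @(disp,PC),r3: loads retcode[n] */
	#define TRAP16	 0xc310			/* trapa #0x10, the same trap these stubs use */
	#define OR_R0_R0 0x200b			/* or r0,r0: no-op padding after the trap */
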
arch/sh/kernel/vsyscall/vsyscall-syscall.S
File was created | 1 | #include <linux/init.h> | |
2 | |||
3 | __INITDATA | ||
4 | |||
5 | .globl vsyscall_trapa_start, vsyscall_trapa_end | ||
6 | vsyscall_trapa_start: | ||
7 | .incbin "arch/sh/kernel/vsyscall/vsyscall-trapa.so" | ||
8 | vsyscall_trapa_end: | ||
9 | |||
10 | __FINIT | ||
11 |
arch/sh/kernel/vsyscall/vsyscall-trapa.S
File was created | 1 | .text | |
2 | .globl __kernel_vsyscall | ||
3 | .type __kernel_vsyscall,@function | ||
4 | __kernel_vsyscall: | ||
5 | .LSTART_vsyscall: | ||
6 | /* XXX: We'll have to do something here once we opt to use the vDSO | ||
7 | * page for something other than the signal trampoline.. as well as | ||
8 | * fill out .eh_frame -- PFM. */ | ||
9 | .LEND_vsyscall: | ||
10 | .size __kernel_vsyscall,.-.LSTART_vsyscall | ||
11 | .previous | ||
12 | |||
13 | .section .eh_frame,"a",@progbits | ||
14 | .LCIE: | ||
15 | .ualong .LCIE_end - .LCIE_start | ||
16 | .LCIE_start: | ||
17 | .ualong 0 /* CIE ID */ | ||
18 | .byte 0x1 /* Version number */ | ||
19 | .string "zRS" /* NUL-terminated augmentation string */ | ||
20 | .uleb128 0x1 /* Code alignment factor */ | ||
21 | .sleb128 -4 /* Data alignment factor */ | ||
22 | .byte 0x11 /* Return address register column */ | ||
23 | /* Augmentation length and data (none) */ | ||
24 | .byte 0xc /* DW_CFA_def_cfa */ | ||
25 | .uleb128 0xf /* r15 */ | ||
26 | .uleb128 0x0 /* offset 0 */ | ||
27 | |||
28 | .align 2 | ||
29 | .LCIE_end: | ||
30 | |||
31 | .ualong .LFDE_end-.LFDE_start /* Length FDE */ | ||
32 | .LFDE_start: | ||
33 | .ualong .LCIE /* CIE pointer */ | ||
34 | .ualong .LSTART_vsyscall-. /* start address */ | ||
35 | .ualong .LEND_vsyscall-.LSTART_vsyscall | ||
36 | .uleb128 0 | ||
37 | .align 2 | ||
38 | .LFDE_end: | ||
39 | .previous | ||
40 | |||
41 | /* Get the common code for the sigreturn entry points */ | ||
42 | #include "vsyscall-sigreturn.S" | ||
43 |
arch/sh/kernel/vsyscall/vsyscall.c
File was created | 1 | /* | |
2 | * arch/sh/kernel/vsyscall/vsyscall.c | ||
3 | * | ||
4 | * Copyright (C) 2006 Paul Mundt | ||
5 | * | ||
6 | * vDSO randomization | ||
7 | * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar | ||
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/gfp.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/elf.h> | ||
20 | |||
21 | /* | ||
22 | * Should the kernel map a VDSO page into processes and pass its | ||
23 | * address down to glibc upon exec()? | ||
24 | */ | ||
25 | unsigned int __read_mostly vdso_enabled = 1; | ||
26 | EXPORT_SYMBOL_GPL(vdso_enabled); | ||
27 | |||
28 | static int __init vdso_setup(char *s) | ||
29 | { | ||
30 | vdso_enabled = simple_strtoul(s, NULL, 0); | ||
31 | return 1; | ||
32 | } | ||
33 | __setup("vdso=", vdso_setup); | ||
34 | |||
35 | /* | ||
36 | * These symbols are defined in vsyscall-syscall.S to mark the bounds | ||
37 | * of the ELF DSO image included therein. | ||
38 | */ | ||
39 | extern const char vsyscall_trapa_start, vsyscall_trapa_end; | ||
40 | static void *syscall_page; | ||
41 | |||
42 | int __init vsyscall_init(void) | ||
43 | { | ||
44 | syscall_page = (void *)get_zeroed_page(GFP_ATOMIC); | ||
45 | |||
46 | /* | ||
47 | * XXX: Map this page to a fixmap entry if we get around | ||
48 | * to adding the page to ELF core dumps | ||
49 | */ | ||
50 | |||
51 | memcpy(syscall_page, | ||
52 | &vsyscall_trapa_start, | ||
53 | &vsyscall_trapa_end - &vsyscall_trapa_start); | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | static struct page *syscall_vma_nopage(struct vm_area_struct *vma, | ||
59 | unsigned long address, int *type) | ||
60 | { | ||
61 | unsigned long offset = address - vma->vm_start; | ||
62 | struct page *page; | ||
63 | |||
64 | if (address < vma->vm_start || address >= vma->vm_end) | ||
65 | return NOPAGE_SIGBUS; | ||
66 | |||
67 | page = virt_to_page(syscall_page + offset); | ||
68 | |||
69 | get_page(page); | ||
70 | |||
71 | return page; | ||
72 | } | ||
73 | |||
74 | /* Prevent VMA merging */ | ||
75 | static void syscall_vma_close(struct vm_area_struct *vma) | ||
76 | { | ||
77 | } | ||
78 | |||
79 | static struct vm_operations_struct syscall_vm_ops = { | ||
80 | .nopage = syscall_vma_nopage, | ||
81 | .close = syscall_vma_close, | ||
82 | }; | ||
83 | |||
84 | /* Set up a VMA at program startup for the vsyscall page */ | ||
85 | int arch_setup_additional_pages(struct linux_binprm *bprm, | ||
86 | int executable_stack) | ||
87 | { | ||
88 | struct vm_area_struct *vma; | ||
89 | struct mm_struct *mm = current->mm; | ||
90 | unsigned long addr; | ||
91 | int ret; | ||
92 | |||
93 | down_write(&mm->mmap_sem); | ||
94 | addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); | ||
95 | if (IS_ERR_VALUE(addr)) { | ||
96 | ret = addr; | ||
97 | goto up_fail; | ||
98 | } | ||
99 | |||
100 | vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL); | ||
101 | if (!vma) { | ||
102 | ret = -ENOMEM; | ||
103 | goto up_fail; | ||
104 | } | ||
105 | |||
106 | vma->vm_start = addr; | ||
107 | vma->vm_end = addr + PAGE_SIZE; | ||
108 | /* MAYWRITE to allow gdb to COW and set breakpoints */ | ||
109 | vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE; | ||
110 | vma->vm_flags |= mm->def_flags; | ||
111 | vma->vm_page_prot = protection_map[vma->vm_flags & 7]; | ||
112 | vma->vm_ops = &syscall_vm_ops; | ||
113 | vma->vm_mm = mm; | ||
114 | |||
115 | ret = insert_vm_struct(mm, vma); | ||
116 | if (unlikely(ret)) { | ||
117 | kmem_cache_free(vm_area_cachep, vma); | ||
118 | goto up_fail; | ||
119 | } | ||
120 | |||
121 | current->mm->context.vdso = (void *)addr; | ||
122 | |||
123 | mm->total_vm++; | ||
124 | up_fail: | ||
125 | up_write(&mm->mmap_sem); | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | const char *arch_vma_name(struct vm_area_struct *vma) | ||
130 | { | ||
131 | if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) | ||
132 | return "[vdso]"; | ||
133 | |||
134 | return NULL; | ||
135 | } | ||
136 | |||
137 | struct vm_area_struct *get_gate_vma(struct task_struct *task) | ||
138 | { | ||
139 | return NULL; | ||
140 | } | ||
141 | |||
142 | int in_gate_area(struct task_struct *task, unsigned long address) | ||
143 | { | ||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | int in_gate_area_no_task(unsigned long address) | ||
148 | { | ||
149 | return 0; | ||
150 | } | ||
151 |
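
With the page mapped by arch_setup_additional_pages(), the base address is handed down to userspace through the ELF auxiliary vector (see the include/asm-sh/elf.h hunk of this commit, not shown here). A minimal userspace sketch of consuming it, assuming the base is exported as AT_SYSINFO_EHDR as on i386; getauxval() is a later glibc convenience, used here for brevity:

	/* Userspace sketch: locate the vDSO mapping handed down at exec() time. */
	#include <stdio.h>
	#include <elf.h>
	#include <sys/auxv.h>

	int main(void)
	{
		unsigned long base = getauxval(AT_SYSINFO_EHDR);

		if (!base) {
			puts("no vDSO (nommu kernel, vdso=0, or pre-vsyscall kernel)");
			return 1;
		}

		/* The page begins with an ordinary ELF header; entry points such
		 * as __kernel_rt_sigreturn are found via its dynamic symbol table. */
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)base;
		printf("vDSO at %#lx, %u program headers\n", base,
		       (unsigned int)ehdr->e_phnum);
		return 0;
	}
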
arch/sh/kernel/vsyscall/vsyscall.lds.S
File was created | 1 | /* | |
2 | * Linker script for vsyscall DSO. The vsyscall page is an ELF shared | ||
3 | * object with a single read-only segment (fitting in one page), linked | ||
4 | * at a zero base and mapped per-process. This script controls its layout. | ||
5 | */ | ||
6 | #include <asm/asm-offsets.h> | ||
7 | |||
8 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
9 | OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux") | ||
10 | #else | ||
11 | OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux") | ||
12 | #endif | ||
13 | OUTPUT_ARCH(sh) | ||
14 | |||
15 | /* The ELF entry point can be used to set the AT_SYSINFO value. */ | ||
16 | ENTRY(__kernel_vsyscall); | ||
17 | |||
18 | SECTIONS | ||
19 | { | ||
20 | . = SIZEOF_HEADERS; | ||
21 | |||
22 | .hash : { *(.hash) } :text | ||
23 | .gnu.hash : { *(.gnu.hash) } | ||
24 | .dynsym : { *(.dynsym) } | ||
25 | .dynstr : { *(.dynstr) } | ||
26 | .gnu.version : { *(.gnu.version) } | ||
27 | .gnu.version_d : { *(.gnu.version_d) } | ||
28 | .gnu.version_r : { *(.gnu.version_r) } | ||
29 | |||
30 | /* This linker script is used both with -r and with -shared. | ||
31 | For the layouts to match, we need to skip more than enough | ||
32 | space for the dynamic symbol table et al. If this amount | ||
33 | is insufficient, ld -shared will barf. Just increase it here. */ | ||
34 | . = 0x400; | ||
35 | |||
36 | .text : { *(.text) } :text =0x90909090 | ||
37 | .note : { *(.note.*) } :text :note | ||
38 | .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr | ||
39 | .eh_frame : { KEEP (*(.eh_frame)) } :text | ||
40 | .dynamic : { *(.dynamic) } :text :dynamic | ||
41 | .useless : { | ||
42 | *(.got.plt) *(.got) | ||
43 | *(.data .data.* .gnu.linkonce.d.*) | ||
44 | *(.dynbss) | ||
45 | *(.bss .bss.* .gnu.linkonce.b.*) | ||
46 | } :text | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * We must supply the ELF program headers explicitly to get just one | ||
51 | * PT_LOAD segment, and set the flags explicitly to make segments read-only. | ||
52 | */ | ||
53 | PHDRS | ||
54 | { | ||
55 | text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ | ||
56 | dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ | ||
57 | note PT_NOTE FLAGS(4); /* PF_R */ | ||
58 | eh_frame_hdr 0x6474e550; /* PT_GNU_EH_FRAME, but ld doesn't match the name */ | ||
59 | } | ||
60 | |||
61 | /* | ||
62 | * This controls what symbols we export from the DSO. | ||
63 | */ | ||
64 | VERSION | ||
65 | { | ||
66 | LINUX_2.6 { | ||
67 | global: | ||
68 | __kernel_vsyscall; | ||
69 | __kernel_sigreturn; | ||
70 | __kernel_rt_sigreturn; | ||
71 | |||
72 | local: *; | ||
73 | }; | ||
74 | } | ||
75 |
arch/sh/mm/Kconfig
1 | menu "Processor selection" | 1 | menu "Processor selection" |
2 | 2 | ||
3 | # | 3 | # |
4 | # Processor families | 4 | # Processor families |
5 | # | 5 | # |
6 | config CPU_SH2 | 6 | config CPU_SH2 |
7 | bool | 7 | bool |
8 | select SH_WRITETHROUGH | 8 | select SH_WRITETHROUGH |
9 | 9 | ||
10 | config CPU_SH3 | 10 | config CPU_SH3 |
11 | bool | 11 | bool |
12 | select CPU_HAS_INTEVT | 12 | select CPU_HAS_INTEVT |
13 | select CPU_HAS_SR_RB | 13 | select CPU_HAS_SR_RB |
14 | 14 | ||
15 | config CPU_SH4 | 15 | config CPU_SH4 |
16 | bool | 16 | bool |
17 | select CPU_HAS_INTEVT | 17 | select CPU_HAS_INTEVT |
18 | select CPU_HAS_SR_RB | 18 | select CPU_HAS_SR_RB |
19 | 19 | ||
20 | config CPU_SH4A | 20 | config CPU_SH4A |
21 | bool | 21 | bool |
22 | select CPU_SH4 | 22 | select CPU_SH4 |
23 | 23 | ||
24 | config CPU_SH4AL_DSP | 24 | config CPU_SH4AL_DSP |
25 | bool | 25 | bool |
26 | select CPU_SH4A | 26 | select CPU_SH4A |
27 | 27 | ||
28 | config CPU_SUBTYPE_ST40 | 28 | config CPU_SUBTYPE_ST40 |
29 | bool | 29 | bool |
30 | select CPU_SH4 | 30 | select CPU_SH4 |
31 | select CPU_HAS_INTC2_IRQ | 31 | select CPU_HAS_INTC2_IRQ |
32 | 32 | ||
33 | # | 33 | # |
34 | # Processor subtypes | 34 | # Processor subtypes |
35 | # | 35 | # |
36 | 36 | ||
37 | comment "SH-2 Processor Support" | 37 | comment "SH-2 Processor Support" |
38 | 38 | ||
39 | config CPU_SUBTYPE_SH7604 | 39 | config CPU_SUBTYPE_SH7604 |
40 | bool "Support SH7604 processor" | 40 | bool "Support SH7604 processor" |
41 | select CPU_SH2 | 41 | select CPU_SH2 |
42 | 42 | ||
43 | comment "SH-3 Processor Support" | 43 | comment "SH-3 Processor Support" |
44 | 44 | ||
45 | config CPU_SUBTYPE_SH7300 | 45 | config CPU_SUBTYPE_SH7300 |
46 | bool "Support SH7300 processor" | 46 | bool "Support SH7300 processor" |
47 | select CPU_SH3 | 47 | select CPU_SH3 |
48 | 48 | ||
49 | config CPU_SUBTYPE_SH7705 | 49 | config CPU_SUBTYPE_SH7705 |
50 | bool "Support SH7705 processor" | 50 | bool "Support SH7705 processor" |
51 | select CPU_SH3 | 51 | select CPU_SH3 |
52 | select CPU_HAS_PINT_IRQ | 52 | select CPU_HAS_PINT_IRQ |
53 | 53 | ||
54 | config CPU_SUBTYPE_SH7706 | 54 | config CPU_SUBTYPE_SH7706 |
55 | bool "Support SH7706 processor" | 55 | bool "Support SH7706 processor" |
56 | select CPU_SH3 | 56 | select CPU_SH3 |
57 | help | 57 | help |
58 | Select SH7706 if you have a 133 MHz SH-3 HD6417706 CPU. | 58 | Select SH7706 if you have a 133 MHz SH-3 HD6417706 CPU. |
59 | 59 | ||
60 | config CPU_SUBTYPE_SH7707 | 60 | config CPU_SUBTYPE_SH7707 |
61 | bool "Support SH7707 processor" | 61 | bool "Support SH7707 processor" |
62 | select CPU_SH3 | 62 | select CPU_SH3 |
63 | select CPU_HAS_PINT_IRQ | 63 | select CPU_HAS_PINT_IRQ |
64 | help | 64 | help |
65 | Select SH7707 if you have a 60 MHz SH-3 HD6417707 CPU. | 65 | Select SH7707 if you have a 60 MHz SH-3 HD6417707 CPU. |
66 | 66 | ||
67 | config CPU_SUBTYPE_SH7708 | 67 | config CPU_SUBTYPE_SH7708 |
68 | bool "Support SH7708 processor" | 68 | bool "Support SH7708 processor" |
69 | select CPU_SH3 | 69 | select CPU_SH3 |
70 | help | 70 | help |
71 | Select SH7708 if you have a 60 MHz SH-3 HD6417708S or | 71 | Select SH7708 if you have a 60 MHz SH-3 HD6417708S or |
72 | if you have a 100 MHz SH-3 HD6417708R CPU. | 72 | if you have a 100 MHz SH-3 HD6417708R CPU. |
73 | 73 | ||
74 | config CPU_SUBTYPE_SH7709 | 74 | config CPU_SUBTYPE_SH7709 |
75 | bool "Support SH7709 processor" | 75 | bool "Support SH7709 processor" |
76 | select CPU_SH3 | 76 | select CPU_SH3 |
77 | select CPU_HAS_PINT_IRQ | 77 | select CPU_HAS_PINT_IRQ |
78 | help | 78 | help |
79 | Select SH7709 if you have an 80 MHz SH-3 HD6417709 CPU. | 79 | Select SH7709 if you have an 80 MHz SH-3 HD6417709 CPU. |
80 | 80 | ||
81 | config CPU_SUBTYPE_SH7710 | 81 | config CPU_SUBTYPE_SH7710 |
82 | bool "Support SH7710 processor" | 82 | bool "Support SH7710 processor" |
83 | select CPU_SH3 | 83 | select CPU_SH3 |
84 | help | 84 | help |
85 | Select SH7710 if you have a SH3-DSP SH7710 CPU. | 85 | Select SH7710 if you have a SH3-DSP SH7710 CPU. |
86 | 86 | ||
87 | comment "SH-4 Processor Support" | 87 | comment "SH-4 Processor Support" |
88 | 88 | ||
89 | config CPU_SUBTYPE_SH7750 | 89 | config CPU_SUBTYPE_SH7750 |
90 | bool "Support SH7750 processor" | 90 | bool "Support SH7750 processor" |
91 | select CPU_SH4 | 91 | select CPU_SH4 |
92 | help | 92 | help |
93 | Select SH7750 if you have a 200 MHz SH-4 HD6417750 CPU. | 93 | Select SH7750 if you have a 200 MHz SH-4 HD6417750 CPU. |
94 | 94 | ||
95 | config CPU_SUBTYPE_SH7091 | 95 | config CPU_SUBTYPE_SH7091 |
96 | bool "Support SH7091 processor" | 96 | bool "Support SH7091 processor" |
97 | select CPU_SH4 | 97 | select CPU_SH4 |
98 | select CPU_SUBTYPE_SH7750 | 98 | select CPU_SUBTYPE_SH7750 |
99 | help | 99 | help |
100 | Select SH7091 if you have an SH-4 based Sega device (such as | 100 | Select SH7091 if you have an SH-4 based Sega device (such as |
101 | the Dreamcast, Naomi, and Naomi 2). | 101 | the Dreamcast, Naomi, and Naomi 2). |
102 | 102 | ||
103 | config CPU_SUBTYPE_SH7750R | 103 | config CPU_SUBTYPE_SH7750R |
104 | bool "Support SH7750R processor" | 104 | bool "Support SH7750R processor" |
105 | select CPU_SH4 | 105 | select CPU_SH4 |
106 | select CPU_SUBTYPE_SH7750 | 106 | select CPU_SUBTYPE_SH7750 |
107 | 107 | ||
108 | config CPU_SUBTYPE_SH7750S | 108 | config CPU_SUBTYPE_SH7750S |
109 | bool "Support SH7750S processor" | 109 | bool "Support SH7750S processor" |
110 | select CPU_SH4 | 110 | select CPU_SH4 |
111 | select CPU_SUBTYPE_SH7750 | 111 | select CPU_SUBTYPE_SH7750 |
112 | 112 | ||
113 | config CPU_SUBTYPE_SH7751 | 113 | config CPU_SUBTYPE_SH7751 |
114 | bool "Support SH7751 processor" | 114 | bool "Support SH7751 processor" |
115 | select CPU_SH4 | 115 | select CPU_SH4 |
116 | help | 116 | help |
117 | Select SH7751 if you have a 166 MHz SH-4 HD6417751 CPU, | 117 | Select SH7751 if you have a 166 MHz SH-4 HD6417751 CPU, |
118 | or if you have a HD6417751R CPU. | 118 | or if you have a HD6417751R CPU. |
119 | 119 | ||
120 | config CPU_SUBTYPE_SH7751R | 120 | config CPU_SUBTYPE_SH7751R |
121 | bool "Support SH7751R processor" | 121 | bool "Support SH7751R processor" |
122 | select CPU_SH4 | 122 | select CPU_SH4 |
123 | select CPU_SUBTYPE_SH7751 | 123 | select CPU_SUBTYPE_SH7751 |
124 | 124 | ||
125 | config CPU_SUBTYPE_SH7760 | 125 | config CPU_SUBTYPE_SH7760 |
126 | bool "Support SH7760 processor" | 126 | bool "Support SH7760 processor" |
127 | select CPU_SH4 | 127 | select CPU_SH4 |
128 | select CPU_HAS_INTC2_IRQ | 128 | select CPU_HAS_INTC2_IRQ |
129 | 129 | ||
130 | config CPU_SUBTYPE_SH4_202 | 130 | config CPU_SUBTYPE_SH4_202 |
131 | bool "Support SH4-202 processor" | 131 | bool "Support SH4-202 processor" |
132 | select CPU_SH4 | 132 | select CPU_SH4 |
133 | 133 | ||
134 | comment "ST40 Processor Support" | 134 | comment "ST40 Processor Support" |
135 | 135 | ||
136 | config CPU_SUBTYPE_ST40STB1 | 136 | config CPU_SUBTYPE_ST40STB1 |
137 | bool "Support ST40STB1/ST40RA processors" | 137 | bool "Support ST40STB1/ST40RA processors" |
138 | select CPU_SUBTYPE_ST40 | 138 | select CPU_SUBTYPE_ST40 |
139 | help | 139 | help |
140 | Select ST40STB1 if you have a ST40RA CPU. | 140 | Select ST40STB1 if you have a ST40RA CPU. |
141 | This was previously called the ST40STB1, hence the option name. | 141 | This was previously called the ST40STB1, hence the option name. |
142 | 142 | ||
143 | config CPU_SUBTYPE_ST40GX1 | 143 | config CPU_SUBTYPE_ST40GX1 |
144 | bool "Support ST40GX1 processor" | 144 | bool "Support ST40GX1 processor" |
145 | select CPU_SUBTYPE_ST40 | 145 | select CPU_SUBTYPE_ST40 |
146 | help | 146 | help |
147 | Select ST40GX1 if you have a ST40GX1 CPU. | 147 | Select ST40GX1 if you have a ST40GX1 CPU. |
148 | 148 | ||
149 | comment "SH-4A Processor Support" | 149 | comment "SH-4A Processor Support" |
150 | 150 | ||
151 | config CPU_SUBTYPE_SH7770 | 151 | config CPU_SUBTYPE_SH7770 |
152 | bool "Support SH7770 processor" | 152 | bool "Support SH7770 processor" |
153 | select CPU_SH4A | 153 | select CPU_SH4A |
154 | 154 | ||
155 | config CPU_SUBTYPE_SH7780 | 155 | config CPU_SUBTYPE_SH7780 |
156 | bool "Support SH7780 processor" | 156 | bool "Support SH7780 processor" |
157 | select CPU_SH4A | 157 | select CPU_SH4A |
158 | select CPU_HAS_INTC2_IRQ | 158 | select CPU_HAS_INTC2_IRQ |
159 | 159 | ||
160 | comment "SH4AL-DSP Processor Support" | 160 | comment "SH4AL-DSP Processor Support" |
161 | 161 | ||
162 | config CPU_SUBTYPE_SH73180 | 162 | config CPU_SUBTYPE_SH73180 |
163 | bool "Support SH73180 processor" | 163 | bool "Support SH73180 processor" |
164 | select CPU_SH4AL_DSP | 164 | select CPU_SH4AL_DSP |
165 | 165 | ||
166 | config CPU_SUBTYPE_SH7343 | 166 | config CPU_SUBTYPE_SH7343 |
167 | bool "Support SH7343 processor" | 167 | bool "Support SH7343 processor" |
168 | select CPU_SH4AL_DSP | 168 | select CPU_SH4AL_DSP |
169 | 169 | ||
170 | endmenu | 170 | endmenu |
171 | 171 | ||
172 | menu "Memory management options" | 172 | menu "Memory management options" |
173 | 173 | ||
174 | config MMU | 174 | config MMU |
175 | bool "Support for memory management hardware" | 175 | bool "Support for memory management hardware" |
176 | depends on !CPU_SH2 | 176 | depends on !CPU_SH2 |
177 | default y | 177 | default y |
178 | help | 178 | help |
179 | Some SH processors (such as SH-2/SH-2A) lack an MMU. In order to | 179 | Some SH processors (such as SH-2/SH-2A) lack an MMU. In order to |
180 | boot on these systems, this option must not be set. | 180 | boot on these systems, this option must not be set. |
181 | 181 | ||
182 | On other systems (such as the SH-3 and 4) where an MMU exists, | 182 | On other systems (such as the SH-3 and 4) where an MMU exists, |
183 | turning this off will boot the kernel on these machines with the | 183 | turning this off will boot the kernel on these machines with the |
184 | MMU implicitly switched off. | 184 | MMU implicitly switched off. |
185 | 185 | ||
186 | config PAGE_OFFSET | 186 | config PAGE_OFFSET |
187 | hex | 187 | hex |
188 | default "0x80000000" if MMU | 188 | default "0x80000000" if MMU |
189 | default "0x00000000" | 189 | default "0x00000000" |
190 | 190 | ||
191 | config MEMORY_START | 191 | config MEMORY_START |
192 | hex "Physical memory start address" | 192 | hex "Physical memory start address" |
193 | default "0x08000000" | 193 | default "0x08000000" |
194 | ---help--- | 194 | ---help--- |
195 | Computers built with Hitachi SuperH processors always | 195 | Computers built with Hitachi SuperH processors always |
196 | map the ROM starting at address zero. But the processor | 196 | map the ROM starting at address zero. But the processor |
197 | does not specify the range that RAM takes. | 197 | does not specify the range that RAM takes. |
198 | 198 | ||
199 | The physical memory (RAM) start address will be automatically | 199 | The physical memory (RAM) start address will be automatically |
200 | set to 08000000. Other platforms, such as the Solution Engine | 200 | set to 08000000. Other platforms, such as the Solution Engine |
201 | boards, typically map RAM at 0C000000. | 201 | boards, typically map RAM at 0C000000. |
202 | 202 | ||
203 | Tweak this only when porting to a new machine which does not | 203 | Tweak this only when porting to a new machine which does not |
204 | already have a defconfig. Changing it from the known correct | 204 | already have a defconfig. Changing it from the known correct |
205 | value on any of the known systems will only lead to disaster. | 205 | value on any of the known systems will only lead to disaster. |
206 | 206 | ||
207 | config MEMORY_SIZE | 207 | config MEMORY_SIZE |
208 | hex "Physical memory size" | 208 | hex "Physical memory size" |
209 | default "0x00400000" | 209 | default "0x00400000" |
210 | help | 210 | help |
211 | This sets the default memory size assumed by your SH kernel. It can | 211 | This sets the default memory size assumed by your SH kernel. It can |
212 | be overridden as normal by the 'mem=' argument on the kernel command | 212 | be overridden as normal by the 'mem=' argument on the kernel command |
213 | line. If unsure, consult your board specifications or just leave it | 213 | line. If unsure, consult your board specifications or just leave it |
214 | as 0x00400000 which was the default value before this became | 214 | as 0x00400000 which was the default value before this became |
215 | configurable. | 215 | configurable. |
216 | 216 | ||
217 | config 32BIT | 217 | config 32BIT |
218 | bool "Support 32-bit physical addressing through PMB" | 218 | bool "Support 32-bit physical addressing through PMB" |
219 | depends on CPU_SH4A && MMU | 219 | depends on CPU_SH4A && MMU |
220 | default y | 220 | default y |
221 | help | 221 | help |
222 | If you say Y here, physical addressing will be extended to | 222 | If you say Y here, physical addressing will be extended to |
223 | 32-bits through the SH-4A PMB. If this is not set, legacy | 223 | 32-bits through the SH-4A PMB. If this is not set, legacy |
224 | 29-bit physical addressing will be used. | 224 | 29-bit physical addressing will be used. |
225 | 225 | ||
226 | config VSYSCALL | ||
227 | bool "Support vsyscall page" | ||
228 | depends on MMU | ||
229 | default y | ||
230 | help | ||
231 | This will enable support for the kernel mapping a vDSO page | ||
232 | in process space, and subsequently handing down the entry point | ||
233 | to the libc through the ELF auxiliary vector. | ||
234 | |||
235 | From the kernel side this is used for the signal trampoline. | ||
236 | For systems with an MMU that can afford to give up a page, | ||
237 | say Y (the default value). | ||
238 | |||
226 | choice | 239 | choice |
227 | prompt "HugeTLB page size" | 240 | prompt "HugeTLB page size" |
228 | depends on HUGETLB_PAGE && CPU_SH4 && MMU | 241 | depends on HUGETLB_PAGE && CPU_SH4 && MMU |
229 | default HUGETLB_PAGE_SIZE_64K | 242 | default HUGETLB_PAGE_SIZE_64K |
230 | 243 | ||
231 | config HUGETLB_PAGE_SIZE_64K | 244 | config HUGETLB_PAGE_SIZE_64K |
232 | bool "64K" | 245 | bool "64K" |
233 | 246 | ||
234 | config HUGETLB_PAGE_SIZE_1MB | 247 | config HUGETLB_PAGE_SIZE_1MB |
235 | bool "1MB" | 248 | bool "1MB" |
236 | 249 | ||
237 | endchoice | 250 | endchoice |
238 | 251 | ||
239 | source "mm/Kconfig" | 252 | source "mm/Kconfig" |
240 | 253 | ||
241 | endmenu | 254 | endmenu |
242 | 255 | ||
243 | menu "Cache configuration" | 256 | menu "Cache configuration" |
244 | 257 | ||
245 | config SH7705_CACHE_32KB | 258 | config SH7705_CACHE_32KB |
246 | bool "Enable 32KB cache size for SH7705" | 259 | bool "Enable 32KB cache size for SH7705" |
247 | depends on CPU_SUBTYPE_SH7705 | 260 | depends on CPU_SUBTYPE_SH7705 |
248 | default y | 261 | default y |
249 | 262 | ||
250 | config SH_DIRECT_MAPPED | 263 | config SH_DIRECT_MAPPED |
251 | bool "Use direct-mapped caching" | 264 | bool "Use direct-mapped caching" |
252 | default n | 265 | default n |
253 | help | 266 | help |
254 | Selecting this option will configure the caches to be direct-mapped, | 267 | Selecting this option will configure the caches to be direct-mapped, |
255 | even if the cache supports a 2 or 4-way mode. This is useful primarily | 268 | even if the cache supports a 2 or 4-way mode. This is useful primarily |
256 | for debugging on platforms with 2 and 4-way caches (SH7750R/SH7751R, | 269 | for debugging on platforms with 2 and 4-way caches (SH7750R/SH7751R, |
257 | SH4-202, SH4-501, etc.) | 270 | SH4-202, SH4-501, etc.) |
258 | 271 | ||
259 | Turn this option off for platforms that do not have a direct-mapped | 272 | Turn this option off for platforms that do not have a direct-mapped |
260 | cache, or where you have no need to run the caches in such a configuration. | 273 | cache, or where you have no need to run the caches in such a configuration. |
261 | 274 | ||
262 | config SH_WRITETHROUGH | 275 | config SH_WRITETHROUGH |
263 | bool "Use write-through caching" | 276 | bool "Use write-through caching" |
264 | default y if CPU_SH2 | 277 | default y if CPU_SH2 |
265 | help | 278 | help |
266 | Selecting this option will configure the caches in write-through | 279 | Selecting this option will configure the caches in write-through |
267 | mode, as opposed to the default write-back configuration. | 280 | mode, as opposed to the default write-back configuration. |
268 | 281 | ||
269 | Since there are still some aliasing issues on SH-4, this option will | 282 | Since there are still some aliasing issues on SH-4, this option will |
270 | unfortunately still require the majority of flushing functions to | 283 | unfortunately still require the majority of flushing functions to |
271 | be implemented to deal with aliasing. | 284 | be implemented to deal with aliasing. |
272 | 285 | ||
273 | If unsure, say N. | 286 | If unsure, say N. |
274 | 287 | ||
275 | config SH_OCRAM | 288 | config SH_OCRAM |
276 | bool "Operand Cache RAM (OCRAM) support" | 289 | bool "Operand Cache RAM (OCRAM) support" |
277 | help | 290 | help |
278 | Selecting this option will automatically halve the number of | 291 | Selecting this option will automatically halve the number of |
279 | sets in the dcache, which in turn exposes a memory range. | 292 | sets in the dcache, which in turn exposes a memory range. |
280 | 293 | ||
281 | The addresses for the OC RAM base will vary according to the | 294 | The addresses for the OC RAM base will vary according to the |
282 | processor version. Consult vendor documentation for specifics. | 295 | processor version. Consult vendor documentation for specifics. |
283 | 296 | ||
284 | If unsure, say N. | 297 | If unsure, say N. |
285 | 298 | ||
286 | endmenu | 299 | endmenu |
287 | 300 |
arch/sh/mm/init.c
1 | /* $Id: init.c,v 1.19 2004/02/21 04:42:16 kkojima Exp $ | 1 | /* $Id: init.c,v 1.19 2004/02/21 04:42:16 kkojima Exp $ |
2 | * | 2 | * |
3 | * linux/arch/sh/mm/init.c | 3 | * linux/arch/sh/mm/init.c |
4 | * | 4 | * |
5 | * Copyright (C) 1999 Niibe Yutaka | 5 | * Copyright (C) 1999 Niibe Yutaka |
6 | * Copyright (C) 2002, 2004 Paul Mundt | 6 | * Copyright (C) 2002, 2004 Paul Mundt |
7 | * | 7 | * |
8 | * Based on linux/arch/i386/mm/init.c: | 8 | * Based on linux/arch/i386/mm/init.c: |
9 | * Copyright (C) 1995 Linus Torvalds | 9 | * Copyright (C) 1995 Linus Torvalds |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/signal.h> | 12 | #include <linux/signal.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | #include <linux/ptrace.h> | 18 | #include <linux/ptrace.h> |
19 | #include <linux/mman.h> | 19 | #include <linux/mman.h> |
20 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
21 | #include <linux/swap.h> | 21 | #include <linux/swap.h> |
22 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/highmem.h> | 24 | #include <linux/highmem.h> |
25 | #include <linux/bootmem.h> | 25 | #include <linux/bootmem.h> |
26 | #include <linux/pagemap.h> | 26 | #include <linux/pagemap.h> |
27 | #include <linux/proc_fs.h> | 27 | #include <linux/proc_fs.h> |
28 | #include <asm/processor.h> | 28 | #include <asm/processor.h> |
29 | #include <asm/system.h> | 29 | #include <asm/system.h> |
30 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
31 | #include <asm/pgtable.h> | 31 | #include <asm/pgtable.h> |
32 | #include <asm/pgalloc.h> | 32 | #include <asm/pgalloc.h> |
33 | #include <asm/mmu_context.h> | 33 | #include <asm/mmu_context.h> |
34 | #include <asm/io.h> | 34 | #include <asm/io.h> |
35 | #include <asm/tlb.h> | 35 | #include <asm/tlb.h> |
36 | #include <asm/cacheflush.h> | 36 | #include <asm/cacheflush.h> |
37 | #include <asm/cache.h> | 37 | #include <asm/cache.h> |
38 | 38 | ||
39 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 39 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
40 | pgd_t swapper_pg_dir[PTRS_PER_PGD]; | 40 | pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * Cache of MMU context last used. | 43 | * Cache of MMU context last used. |
44 | */ | 44 | */ |
45 | unsigned long mmu_context_cache = NO_CONTEXT; | 45 | unsigned long mmu_context_cache = NO_CONTEXT; |
46 | 46 | ||
47 | #ifdef CONFIG_MMU | 47 | #ifdef CONFIG_MMU |
48 | /* It'd be good if these lines were in the standard header file. */ | 48 | /* It'd be good if these lines were in the standard header file. */ |
49 | #define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT) | 49 | #define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT) |
50 | #define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn) | 50 | #define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn) |
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | void (*copy_page)(void *from, void *to); | 53 | void (*copy_page)(void *from, void *to); |
54 | void (*clear_page)(void *to); | 54 | void (*clear_page)(void *to); |
55 | 55 | ||
56 | void show_mem(void) | 56 | void show_mem(void) |
57 | { | 57 | { |
58 | int i, total = 0, reserved = 0; | 58 | int i, total = 0, reserved = 0; |
59 | int shared = 0, cached = 0; | 59 | int shared = 0, cached = 0; |
60 | 60 | ||
61 | printk("Mem-info:\n"); | 61 | printk("Mem-info:\n"); |
62 | show_free_areas(); | 62 | show_free_areas(); |
63 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | 63 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); |
64 | i = max_mapnr; | 64 | i = max_mapnr; |
65 | while (i-- > 0) { | 65 | while (i-- > 0) { |
66 | total++; | 66 | total++; |
67 | if (PageReserved(mem_map+i)) | 67 | if (PageReserved(mem_map+i)) |
68 | reserved++; | 68 | reserved++; |
69 | else if (PageSwapCache(mem_map+i)) | 69 | else if (PageSwapCache(mem_map+i)) |
70 | cached++; | 70 | cached++; |
71 | else if (page_count(mem_map+i)) | 71 | else if (page_count(mem_map+i)) |
72 | shared += page_count(mem_map+i) - 1; | 72 | shared += page_count(mem_map+i) - 1; |
73 | } | 73 | } |
74 | printk("%d pages of RAM\n",total); | 74 | printk("%d pages of RAM\n",total); |
75 | printk("%d reserved pages\n",reserved); | 75 | printk("%d reserved pages\n",reserved); |
76 | printk("%d pages shared\n",shared); | 76 | printk("%d pages shared\n",shared); |
77 | printk("%d pages swap cached\n",cached); | 77 | printk("%d pages swap cached\n",cached); |
78 | } | 78 | } |
79 | 79 | ||
80 | static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | 80 | static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) |
81 | { | 81 | { |
82 | pgd_t *pgd; | 82 | pgd_t *pgd; |
83 | pud_t *pud; | 83 | pud_t *pud; |
84 | pmd_t *pmd; | 84 | pmd_t *pmd; |
85 | pte_t *pte; | 85 | pte_t *pte; |
86 | 86 | ||
87 | pgd = swapper_pg_dir + pgd_index(addr); | 87 | pgd = swapper_pg_dir + pgd_index(addr); |
88 | if (pgd_none(*pgd)) { | 88 | if (pgd_none(*pgd)) { |
89 | pgd_ERROR(*pgd); | 89 | pgd_ERROR(*pgd); |
90 | return; | 90 | return; |
91 | } | 91 | } |
92 | 92 | ||
93 | pud = pud_offset(pgd, addr); | 93 | pud = pud_offset(pgd, addr); |
94 | if (pud_none(*pud)) { | 94 | if (pud_none(*pud)) { |
95 | pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); | 95 | pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); |
96 | set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER)); | 96 | set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER)); |
97 | if (pmd != pmd_offset(pud, 0)) { | 97 | if (pmd != pmd_offset(pud, 0)) { |
98 | pud_ERROR(*pud); | 98 | pud_ERROR(*pud); |
99 | return; | 99 | return; |
100 | } | 100 | } |
101 | } | 101 | } |
102 | 102 | ||
103 | pmd = pmd_offset(pud, addr); | 103 | pmd = pmd_offset(pud, addr); |
104 | if (pmd_none(*pmd)) { | 104 | if (pmd_none(*pmd)) { |
105 | pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); | 105 | pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); |
106 | set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER)); | 106 | set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER)); |
107 | if (pte != pte_offset_kernel(pmd, 0)) { | 107 | if (pte != pte_offset_kernel(pmd, 0)) { |
108 | pmd_ERROR(*pmd); | 108 | pmd_ERROR(*pmd); |
109 | return; | 109 | return; |
110 | } | 110 | } |
111 | } | 111 | } |
112 | 112 | ||
113 | pte = pte_offset_kernel(pmd, addr); | 113 | pte = pte_offset_kernel(pmd, addr); |
114 | if (!pte_none(*pte)) { | 114 | if (!pte_none(*pte)) { |
115 | pte_ERROR(*pte); | 115 | pte_ERROR(*pte); |
116 | return; | 116 | return; |
117 | } | 117 | } |
118 | 118 | ||
119 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); | 119 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); |
120 | 120 | ||
121 | __flush_tlb_page(get_asid(), addr); | 121 | __flush_tlb_page(get_asid(), addr); |
122 | } | 122 | } |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * As a performance optimization, other platforms preserve the fixmap mapping | 125 | * As a performance optimization, other platforms preserve the fixmap mapping |
126 | * across a context switch; we don't presently do this, but this could be done | 126 | * across a context switch; we don't presently do this, but this could be done |
127 | * in a similar fashion to the wired TLB interface that sh64 uses (by way | 127 | * in a similar fashion to the wired TLB interface that sh64 uses (by way |
128 | * of the memory mapped UTLB configuration) -- this unfortunately forces us to | 128 | * of the memory mapped UTLB configuration) -- this unfortunately forces us to |
129 | * give up a TLB entry for each mapping we want to preserve. While this may be | 129 | * give up a TLB entry for each mapping we want to preserve. While this may be |
130 | * viable for a small number of fixmaps, it's not particularly useful for | 130 | * viable for a small number of fixmaps, it's not particularly useful for |
131 | * everything and needs to be carefully evaluated. (ie, we may want this for | 131 | * everything and needs to be carefully evaluated. (ie, we may want this for |
132 | * the vsyscall page). | 132 | * the vsyscall page). |
133 | * | 133 | * |
134 | * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass | 134 | * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can pass |
135 | * in at __set_fixmap() time to determine the appropriate behavior to follow. | 135 | * in at __set_fixmap() time to determine the appropriate behavior to follow. |
136 | * | 136 | * |
137 | * -- PFM. | 137 | * -- PFM. |
138 | */ | 138 | */ |
139 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) | 139 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) |
140 | { | 140 | { |
141 | unsigned long address = __fix_to_virt(idx); | 141 | unsigned long address = __fix_to_virt(idx); |
142 | 142 | ||
143 | if (idx >= __end_of_fixed_addresses) { | 143 | if (idx >= __end_of_fixed_addresses) { |
144 | BUG(); | 144 | BUG(); |
145 | return; | 145 | return; |
146 | } | 146 | } |
147 | 147 | ||
148 | set_pte_phys(address, phys, prot); | 148 | set_pte_phys(address, phys, prot); |
149 | } | 149 | } |
150 | 150 | ||
151 | /* References to section boundaries */ | 151 | /* References to section boundaries */ |
152 | 152 | ||
153 | extern char _text, _etext, _edata, __bss_start, _end; | 153 | extern char _text, _etext, _edata, __bss_start, _end; |
154 | extern char __init_begin, __init_end; | 154 | extern char __init_begin, __init_end; |
155 | 155 | ||
156 | /* | 156 | /* |
157 | * paging_init() sets up the page tables | 157 | * paging_init() sets up the page tables |
158 | * | 158 | * |
159 | * This routine also unmaps the page at virtual kernel address 0, so | 159 | * This routine also unmaps the page at virtual kernel address 0, so |
160 | * that we can trap those pesky NULL-reference errors in the kernel. | 160 | * that we can trap those pesky NULL-reference errors in the kernel. |
161 | */ | 161 | */ |
162 | void __init paging_init(void) | 162 | void __init paging_init(void) |
163 | { | 163 | { |
164 | unsigned long zones_size[MAX_NR_ZONES] = { 0, }; | 164 | unsigned long zones_size[MAX_NR_ZONES] = { 0, }; |
165 | 165 | ||
166 | /* | 166 | /* |
167 | * Set up some defaults for the zone sizes; these should be safe | 167 | * Set up some defaults for the zone sizes; these should be safe |
168 | * regardless of discontiguous memory or MMU settings. | 168 | * regardless of discontiguous memory or MMU settings. |
169 | */ | 169 | */ |
170 | zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT; | 170 | zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT; |
171 | zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT; | 171 | zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT; |
172 | #ifdef CONFIG_HIGHMEM | 172 | #ifdef CONFIG_HIGHMEM |
173 | zones_size[ZONE_HIGHMEM] = 0 >> PAGE_SHIFT; | 173 | zones_size[ZONE_HIGHMEM] = 0 >> PAGE_SHIFT; |
174 | #endif | 174 | #endif |
175 | 175 | ||
176 | #ifdef CONFIG_MMU | 176 | #ifdef CONFIG_MMU |
177 | /* | 177 | /* |
178 | * If we have an MMU, and want to be using it .. we need to adjust | 178 | * If we have an MMU, and want to be using it .. we need to adjust |
179 | * the zone sizes accordingly, in addition to turning it on. | 179 | * the zone sizes accordingly, in addition to turning it on. |
180 | */ | 180 | */ |
181 | { | 181 | { |
182 | unsigned long max_dma, low, start_pfn; | 182 | unsigned long max_dma, low, start_pfn; |
183 | pgd_t *pg_dir; | 183 | pgd_t *pg_dir; |
184 | int i; | 184 | int i; |
185 | 185 | ||
186 | /* We don't need a kernel mapping, as the hardware supports that. */ | 186 | /* We don't need a kernel mapping, as the hardware supports that. */ |
187 | pg_dir = swapper_pg_dir; | 187 | pg_dir = swapper_pg_dir; |
188 | 188 | ||
189 | for (i = 0; i < PTRS_PER_PGD; i++) | 189 | for (i = 0; i < PTRS_PER_PGD; i++) |
190 | pgd_val(pg_dir[i]) = 0; | 190 | pgd_val(pg_dir[i]) = 0; |
191 | 191 | ||
192 | /* Turn on the MMU */ | 192 | /* Turn on the MMU */ |
193 | enable_mmu(); | 193 | enable_mmu(); |
194 | 194 | ||
195 | /* Fixup the zone sizes */ | 195 | /* Fixup the zone sizes */ |
196 | start_pfn = START_PFN; | 196 | start_pfn = START_PFN; |
197 | max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; | 197 | max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT; |
198 | low = MAX_LOW_PFN; | 198 | low = MAX_LOW_PFN; |
199 | 199 | ||
200 | if (low < max_dma) { | 200 | if (low < max_dma) { |
201 | zones_size[ZONE_DMA] = low - start_pfn; | 201 | zones_size[ZONE_DMA] = low - start_pfn; |
202 | zones_size[ZONE_NORMAL] = 0; | 202 | zones_size[ZONE_NORMAL] = 0; |
203 | } else { | 203 | } else { |
204 | zones_size[ZONE_DMA] = max_dma - start_pfn; | 204 | zones_size[ZONE_DMA] = max_dma - start_pfn; |
205 | zones_size[ZONE_NORMAL] = low - max_dma; | 205 | zones_size[ZONE_NORMAL] = low - max_dma; |
206 | } | 206 | } |
207 | } | 207 | } |
208 | 208 | ||
209 | #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4) | 209 | #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4) |
210 | /* | 210 | /* |
211 | * If we don't have CONFIG_MMU set and the processor in question | 211 | * If we don't have CONFIG_MMU set and the processor in question |
212 | * still has an MMU, care needs to be taken to make sure it doesn't | 212 | * still has an MMU, care needs to be taken to make sure it doesn't |
213 | * stay on.. Since the boot loader could have potentially already | 213 | * stay on.. Since the boot loader could have potentially already |
214 | * turned it on, and we clearly don't want it, we simply turn it off. | 214 | * turned it on, and we clearly don't want it, we simply turn it off. |
215 | * | 215 | * |
216 | * We don't need to do anything special for the zone sizes, since the | 216 | * We don't need to do anything special for the zone sizes, since the |
217 | * default values that were already configured up above should be | 217 | * default values that were already configured up above should be |
218 | * satisfactory. | 218 | * satisfactory. |
219 | */ | 219 | */ |
220 | disable_mmu(); | 220 | disable_mmu(); |
221 | #endif | 221 | #endif |
222 | NODE_DATA(0)->node_mem_map = NULL; | 222 | NODE_DATA(0)->node_mem_map = NULL; |
223 | free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0); | 223 | free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0); |
224 | } | 224 | } |
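
To make the zone split in the CONFIG_MMU branch above concrete, here is the same computation as a standalone sketch. The PFN values are made up purely for illustration; the real ones come from the board's memory map and MAX_DMA_ADDRESS.

#include <stdio.h>

#define START_PFN	0x1000UL	/* illustrative: first usable PFN */
#define MAX_DMA_PFN	0x5000UL	/* illustrative: DMA-reachable limit */
#define MAX_LOW_PFN	0x4000UL	/* illustrative: top of low memory */

int main(void)
{
	unsigned long zone_dma, zone_normal;

	if (MAX_LOW_PFN < MAX_DMA_PFN) {
		/* All low memory is DMA-capable: one big ZONE_DMA. */
		zone_dma = MAX_LOW_PFN - START_PFN;
		zone_normal = 0;
	} else {
		/* Split at the DMA boundary. */
		zone_dma = MAX_DMA_PFN - START_PFN;
		zone_normal = MAX_LOW_PFN - MAX_DMA_PFN;
	}
	printf("ZONE_DMA %lu pages, ZONE_NORMAL %lu pages\n",
	       zone_dma, zone_normal);
	return 0;
}

With these numbers the first branch is taken, so all of low memory lands in ZONE_DMA, which is exactly what paging_init() does when MAX_LOW_PFN sits below the DMA limit.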
225 | 225 | ||
226 | static struct kcore_list kcore_mem, kcore_vmalloc; | 226 | static struct kcore_list kcore_mem, kcore_vmalloc; |
227 | 227 | ||
228 | void __init mem_init(void) | 228 | void __init mem_init(void) |
229 | { | 229 | { |
230 | extern unsigned long empty_zero_page[1024]; | 230 | extern unsigned long empty_zero_page[1024]; |
231 | int codesize, reservedpages, datasize, initsize; | 231 | int codesize, reservedpages, datasize, initsize; |
232 | int tmp; | 232 | int tmp; |
233 | extern unsigned long memory_start; | 233 | extern unsigned long memory_start; |
234 | 234 | ||
235 | #ifdef CONFIG_MMU | 235 | #ifdef CONFIG_MMU |
236 | high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE); | 236 | high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE); |
237 | #else | 237 | #else |
238 | extern unsigned long memory_end; | 238 | extern unsigned long memory_end; |
239 | 239 | ||
240 | high_memory = (void *)(memory_end & PAGE_MASK); | 240 | high_memory = (void *)(memory_end & PAGE_MASK); |
241 | #endif | 241 | #endif |
242 | 242 | ||
243 | max_mapnr = num_physpages = MAP_NR(high_memory) - MAP_NR(memory_start); | 243 | max_mapnr = num_physpages = MAP_NR(high_memory) - MAP_NR(memory_start); |
244 | 244 | ||
245 | /* clear the zero-page */ | 245 | /* clear the zero-page */ |
246 | memset(empty_zero_page, 0, PAGE_SIZE); | 246 | memset(empty_zero_page, 0, PAGE_SIZE); |
247 | __flush_wback_region(empty_zero_page, PAGE_SIZE); | 247 | __flush_wback_region(empty_zero_page, PAGE_SIZE); |
248 | 248 | ||
249 | /* | 249 | /* |
250 | * Set up wrappers for copy/clear_page(); these will get overridden | 250 | * Set up wrappers for copy/clear_page(); these will get overridden |
251 | * later in the boot process if a better method is available. | 251 | * later in the boot process if a better method is available. |
252 | */ | 252 | */ |
253 | #ifdef CONFIG_MMU | 253 | #ifdef CONFIG_MMU |
254 | copy_page = copy_page_slow; | 254 | copy_page = copy_page_slow; |
255 | clear_page = clear_page_slow; | 255 | clear_page = clear_page_slow; |
256 | #else | 256 | #else |
257 | copy_page = copy_page_nommu; | 257 | copy_page = copy_page_nommu; |
258 | clear_page = clear_page_nommu; | 258 | clear_page = clear_page_nommu; |
259 | #endif | 259 | #endif |
260 | 260 | ||
261 | /* this will put all low memory onto the freelists */ | 261 | /* this will put all low memory onto the freelists */ |
262 | totalram_pages += free_all_bootmem_node(NODE_DATA(0)); | 262 | totalram_pages += free_all_bootmem_node(NODE_DATA(0)); |
263 | reservedpages = 0; | 263 | reservedpages = 0; |
264 | for (tmp = 0; tmp < num_physpages; tmp++) | 264 | for (tmp = 0; tmp < num_physpages; tmp++) |
265 | /* | 265 | /* |
266 | * Only count reserved RAM pages | 266 | * Only count reserved RAM pages |
267 | */ | 267 | */ |
268 | if (PageReserved(mem_map+tmp)) | 268 | if (PageReserved(mem_map+tmp)) |
269 | reservedpages++; | 269 | reservedpages++; |
270 | 270 | ||
271 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | 271 | codesize = (unsigned long) &_etext - (unsigned long) &_text; |
272 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | 272 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; |
273 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | 273 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; |
274 | 274 | ||
275 | kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); | 275 | kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT); |
276 | kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, | 276 | kclist_add(&kcore_vmalloc, (void *)VMALLOC_START, |
277 | VMALLOC_END - VMALLOC_START); | 277 | VMALLOC_END - VMALLOC_START); |
278 | 278 | ||
279 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " | 279 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " |
280 | "%dk reserved, %dk data, %dk init)\n", | 280 | "%dk reserved, %dk data, %dk init)\n", |
281 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | 281 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), |
282 | max_mapnr << (PAGE_SHIFT-10), | 282 | max_mapnr << (PAGE_SHIFT-10), |
283 | codesize >> 10, | 283 | codesize >> 10, |
284 | reservedpages << (PAGE_SHIFT-10), | 284 | reservedpages << (PAGE_SHIFT-10), |
285 | datasize >> 10, | 285 | datasize >> 10, |
286 | initsize >> 10); | 286 | initsize >> 10); |
287 | 287 | ||
288 | p3_cache_init(); | 288 | p3_cache_init(); |
289 | |||
290 | /* Initialize the vDSO */ | ||
291 | vsyscall_init(); | ||
289 | } | 292 | } |
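
The vsyscall_init() call added to mem_init() above lands in the new arch/sh/kernel/vsyscall/vsyscall.c, which this excerpt does not show. A rough sketch of the usual shape of such an init routine follows, assuming the vDSO image is linked into the kernel between two linker symbols; the symbol names and GFP flag below are assumptions for illustration, not the file's actual contents.

/* Hedged sketch -- see arch/sh/kernel/vsyscall/vsyscall.c for the
 * real routine; vsyscall_start/vsyscall_end are hypothetical names. */
#include <linux/mm.h>
#include <linux/string.h>

extern const char vsyscall_start[], vsyscall_end[];	/* assumed linker symbols */
static void *syscall_page;

int __init vsyscall_init(void)
{
	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!syscall_page)
		return -ENOMEM;

	/*
	 * Copy the prebuilt vDSO image into the page that
	 * arch_setup_additional_pages() will later map into each
	 * new process at exec time.
	 */
	memcpy(syscall_page, vsyscall_start, vsyscall_end - vsyscall_start);
	return 0;
}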
290 | 293 | ||
291 | void free_initmem(void) | 294 | void free_initmem(void) |
292 | { | 295 | { |
293 | unsigned long addr; | 296 | unsigned long addr; |
294 | 297 | ||
295 | addr = (unsigned long)(&__init_begin); | 298 | addr = (unsigned long)(&__init_begin); |
296 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | 299 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { |
297 | ClearPageReserved(virt_to_page(addr)); | 300 | ClearPageReserved(virt_to_page(addr)); |
298 | init_page_count(virt_to_page(addr)); | 301 | init_page_count(virt_to_page(addr)); |
299 | free_page(addr); | 302 | free_page(addr); |
300 | totalram_pages++; | 303 | totalram_pages++; |
301 | } | 304 | } |
302 | printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10); | 305 | printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10); |
303 | } | 306 | } |
304 | 307 | ||
305 | #ifdef CONFIG_BLK_DEV_INITRD | 308 | #ifdef CONFIG_BLK_DEV_INITRD |
306 | void free_initrd_mem(unsigned long start, unsigned long end) | 309 | void free_initrd_mem(unsigned long start, unsigned long end) |
307 | { | 310 | { |
308 | unsigned long p; | 311 | unsigned long p; |
309 | for (p = start; p < end; p += PAGE_SIZE) { | 312 | for (p = start; p < end; p += PAGE_SIZE) { |
310 | ClearPageReserved(virt_to_page(p)); | 313 | ClearPageReserved(virt_to_page(p)); |
311 | init_page_count(virt_to_page(p)); | 314 | init_page_count(virt_to_page(p)); |
312 | free_page(p); | 315 | free_page(p); |
313 | totalram_pages++; | 316 | totalram_pages++; |
314 | } | 317 | } |
315 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | 318 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); |
316 | } | 319 | } |
317 | #endif | 320 | #endif |
318 | 321 | ||
319 | 322 |
arch/sh/mm/tlb-flush.c
1 | /* | 1 | /* |
2 | * TLB flushing operations for SH with an MMU. | 2 | * TLB flushing operations for SH with an MMU. |
3 | * | 3 | * |
4 | * Copyright (C) 1999 Niibe Yutaka | 4 | * Copyright (C) 1999 Niibe Yutaka |
5 | * Copyright (C) 2003 Paul Mundt | 5 | * Copyright (C) 2003 Paul Mundt |
6 | * | 6 | * |
7 | * This file is subject to the terms and conditions of the GNU General Public | 7 | * This file is subject to the terms and conditions of the GNU General Public |
8 | * License. See the file "COPYING" in the main directory of this archive | 8 | * License. See the file "COPYING" in the main directory of this archive |
9 | * for more details. | 9 | * for more details. |
10 | */ | 10 | */ |
11 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
12 | #include <asm/mmu_context.h> | 12 | #include <asm/mmu_context.h> |
13 | #include <asm/tlbflush.h> | 13 | #include <asm/tlbflush.h> |
14 | 14 | ||
15 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | 15 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
16 | { | 16 | { |
17 | if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) { | 17 | if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) { |
18 | unsigned long flags; | 18 | unsigned long flags; |
19 | unsigned long asid; | 19 | unsigned long asid; |
20 | unsigned long saved_asid = MMU_NO_ASID; | 20 | unsigned long saved_asid = MMU_NO_ASID; |
21 | 21 | ||
22 | asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK; | 22 | asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK; |
23 | page &= PAGE_MASK; | 23 | page &= PAGE_MASK; |
24 | 24 | ||
25 | local_irq_save(flags); | 25 | local_irq_save(flags); |
26 | if (vma->vm_mm != current->mm) { | 26 | if (vma->vm_mm != current->mm) { |
27 | saved_asid = get_asid(); | 27 | saved_asid = get_asid(); |
28 | set_asid(asid); | 28 | set_asid(asid); |
29 | } | 29 | } |
30 | __flush_tlb_page(asid, page); | 30 | __flush_tlb_page(asid, page); |
31 | if (saved_asid != MMU_NO_ASID) | 31 | if (saved_asid != MMU_NO_ASID) |
32 | set_asid(saved_asid); | 32 | set_asid(saved_asid); |
33 | local_irq_restore(flags); | 33 | local_irq_restore(flags); |
34 | } | 34 | } |
35 | } | 35 | } |
36 | 36 | ||
37 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | 37 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, |
38 | unsigned long end) | 38 | unsigned long end) |
39 | { | 39 | { |
40 | struct mm_struct *mm = vma->vm_mm; | 40 | struct mm_struct *mm = vma->vm_mm; |
41 | 41 | ||
42 | if (mm->context != NO_CONTEXT) { | 42 | if (mm->context.id != NO_CONTEXT) { |
43 | unsigned long flags; | 43 | unsigned long flags; |
44 | int size; | 44 | int size; |
45 | 45 | ||
46 | local_irq_save(flags); | 46 | local_irq_save(flags); |
47 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 47 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
48 | if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */ | 48 | if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */ |
49 | mm->context = NO_CONTEXT; | 49 | mm->context.id = NO_CONTEXT; |
50 | if (mm == current->mm) | 50 | if (mm == current->mm) |
51 | activate_context(mm); | 51 | activate_context(mm); |
52 | } else { | 52 | } else { |
53 | unsigned long asid = mm->context&MMU_CONTEXT_ASID_MASK; | 53 | unsigned long asid; |
54 | unsigned long saved_asid = MMU_NO_ASID; | 54 | unsigned long saved_asid = MMU_NO_ASID; |
55 | 55 | ||
56 | asid = mm->context.id & MMU_CONTEXT_ASID_MASK; | ||
56 | start &= PAGE_MASK; | 57 | start &= PAGE_MASK; |
57 | end += (PAGE_SIZE - 1); | 58 | end += (PAGE_SIZE - 1); |
58 | end &= PAGE_MASK; | 59 | end &= PAGE_MASK; |
59 | if (mm != current->mm) { | 60 | if (mm != current->mm) { |
60 | saved_asid = get_asid(); | 61 | saved_asid = get_asid(); |
61 | set_asid(asid); | 62 | set_asid(asid); |
62 | } | 63 | } |
63 | while (start < end) { | 64 | while (start < end) { |
64 | __flush_tlb_page(asid, start); | 65 | __flush_tlb_page(asid, start); |
65 | start += PAGE_SIZE; | 66 | start += PAGE_SIZE; |
66 | } | 67 | } |
67 | if (saved_asid != MMU_NO_ASID) | 68 | if (saved_asid != MMU_NO_ASID) |
68 | set_asid(saved_asid); | 69 | set_asid(saved_asid); |
69 | } | 70 | } |
70 | local_irq_restore(flags); | 71 | local_irq_restore(flags); |
71 | } | 72 | } |
72 | } | 73 | } |
73 | 74 | ||
74 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | 75 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) |
75 | { | 76 | { |
76 | unsigned long flags; | 77 | unsigned long flags; |
77 | int size; | 78 | int size; |
78 | 79 | ||
79 | local_irq_save(flags); | 80 | local_irq_save(flags); |
80 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 81 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
81 | if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */ | 82 | if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */ |
82 | flush_tlb_all(); | 83 | flush_tlb_all(); |
83 | } else { | 84 | } else { |
84 | unsigned long asid = init_mm.context&MMU_CONTEXT_ASID_MASK; | 85 | unsigned long asid; |
85 | unsigned long saved_asid = get_asid(); | 86 | unsigned long saved_asid = get_asid(); |
86 | 87 | ||
88 | asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK; | ||
87 | start &= PAGE_MASK; | 89 | start &= PAGE_MASK; |
88 | end += (PAGE_SIZE - 1); | 90 | end += (PAGE_SIZE - 1); |
89 | end &= PAGE_MASK; | 91 | end &= PAGE_MASK; |
90 | set_asid(asid); | 92 | set_asid(asid); |
91 | while (start < end) { | 93 | while (start < end) { |
92 | __flush_tlb_page(asid, start); | 94 | __flush_tlb_page(asid, start); |
93 | start += PAGE_SIZE; | 95 | start += PAGE_SIZE; |
94 | } | 96 | } |
95 | set_asid(saved_asid); | 97 | set_asid(saved_asid); |
96 | } | 98 | } |
97 | local_irq_restore(flags); | 99 | local_irq_restore(flags); |
98 | } | 100 | } |
99 | 101 | ||
100 | void flush_tlb_mm(struct mm_struct *mm) | 102 | void flush_tlb_mm(struct mm_struct *mm) |
101 | { | 103 | { |
102 | /* Invalidate all TLB entries of this process. */ | 104 | /* Invalidate all TLB entries of this process. */ |
103 | /* Instead of invalidating each entry, we get a new MMU context. */ | 105 | /* Instead of invalidating each entry, we get a new MMU context. */ |
104 | if (mm->context != NO_CONTEXT) { | 106 | if (mm->context.id != NO_CONTEXT) { |
105 | unsigned long flags; | 107 | unsigned long flags; |
106 | 108 | ||
107 | local_irq_save(flags); | 109 | local_irq_save(flags); |
108 | mm->context = NO_CONTEXT; | 110 | mm->context.id = NO_CONTEXT; |
109 | if (mm == current->mm) | 111 | if (mm == current->mm) |
110 | activate_context(mm); | 112 | activate_context(mm); |
111 | local_irq_restore(flags); | 113 | local_irq_restore(flags); |
112 | } | 114 | } |
113 | } | 115 | } |
114 | 116 | ||
115 | void flush_tlb_all(void) | 117 | void flush_tlb_all(void) |
116 | { | 118 | { |
117 | unsigned long flags, status; | 119 | unsigned long flags, status; |
118 | 120 | ||
119 | /* | 121 | /* |
120 | * Flush the entire TLB. | 122 | * Flush the entire TLB. |
121 | * | 123 | * |
122 | * Write to the MMU control register's bit: | 124 | * Write to the MMU control register's bit: |
123 | * TF-bit for SH-3, TI-bit for SH-4. | 125 | * TF-bit for SH-3, TI-bit for SH-4. |
124 | * It's the same position, bit #2. | 126 | * It's the same position, bit #2. |
125 | */ | 127 | */ |
126 | local_irq_save(flags); | 128 | local_irq_save(flags); |
127 | status = ctrl_inl(MMUCR); | 129 | status = ctrl_inl(MMUCR); |
128 | status |= 0x04; | 130 | status |= 0x04; |
129 | ctrl_outl(status, MMUCR); | 131 | ctrl_outl(status, MMUCR); |
130 | ctrl_barrier(); | 132 | ctrl_barrier(); |
131 | local_irq_restore(flags); | 133 | local_irq_restore(flags); |
132 | } | 134 | } |
133 | 135 |
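
Worth noting in flush_tlb_range() and flush_tlb_kernel_range() above is the MMU_NTLB_ENTRIES/4 cutoff: past a quarter of the TLB, per-page invalidation costs more than dropping the whole context (or the whole TLB) and refilling on demand. The decision in isolation, with an entry count assumed here purely for the example:

#define MMU_NTLB_ENTRIES	64	/* assumed for illustration */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)

/* Nonzero when a ranged flush should fall back to a full invalidation. */
static int flush_range_too_big(unsigned long start, unsigned long end)
{
	unsigned long pages = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;

	return pages > (MMU_NTLB_ENTRIES / 4);	/* > 16 pages: give up */
}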
include/asm-sh/auxvec.h
1 | #ifndef __ASM_SH_AUXVEC_H | 1 | #ifndef __ASM_SH_AUXVEC_H |
2 | #define __ASM_SH_AUXVEC_H | 2 | #define __ASM_SH_AUXVEC_H |
3 | 3 | ||
4 | /* | ||
5 | * Architecture-neutral AT_ values in 0-17, leave some room | ||
6 | * for more of them. | ||
7 | */ | ||
8 | |||
9 | #ifdef CONFIG_VSYSCALL | ||
10 | /* | ||
11 | * Only define this in the vsyscall case; the entry point to | ||
12 | * the vsyscall page gets placed here. Otherwise the kernel | ||
13 | * would attempt to build a gate VMA that we don't care about.. | ||
14 | */ | ||
15 | #define AT_SYSINFO_EHDR 33 | ||
16 | #endif | ||
17 | |||
4 | #endif /* __ASM_SH_AUXVEC_H */ | 18 | #endif /* __ASM_SH_AUXVEC_H */ |
5 | 19 |
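
On the userspace side, AT_SYSINFO_EHDR is how a process discovers where the kernel mapped its vDSO. A minimal sketch of walking the auxiliary vector past environ; glibc's getauxval() does not exist yet at this point, so the manual walk is the usual approach:

#include <elf.h>
#include <stdio.h>

extern char **environ;

int main(void)
{
	char **p = environ;
	Elf32_auxv_t *auxv;

	while (*p)	/* the auxv sits just past the env strings */
		p++;
	for (auxv = (Elf32_auxv_t *)(p + 1); auxv->a_type != AT_NULL; auxv++)
		if (auxv->a_type == AT_SYSINFO_EHDR)
			printf("vDSO mapped at %#lx\n",
			       (unsigned long)auxv->a_un.a_val);
	return 0;
}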
include/asm-sh/elf.h
1 | #ifndef __ASM_SH_ELF_H | 1 | #ifndef __ASM_SH_ELF_H |
2 | #define __ASM_SH_ELF_H | 2 | #define __ASM_SH_ELF_H |
3 | 3 | ||
4 | #include <asm/processor.h> | 4 | #include <asm/processor.h> |
5 | #include <asm/auxvec.h> | 5 | #include <asm/auxvec.h> |
6 | #include <asm/ptrace.h> | 6 | #include <asm/ptrace.h> |
7 | #include <asm/user.h> | 7 | #include <asm/user.h> |
8 | 8 | ||
9 | /* SH relocation types */ | 9 | /* SH relocation types */ |
10 | #define R_SH_NONE 0 | 10 | #define R_SH_NONE 0 |
11 | #define R_SH_DIR32 1 | 11 | #define R_SH_DIR32 1 |
12 | #define R_SH_REL32 2 | 12 | #define R_SH_REL32 2 |
13 | #define R_SH_DIR8WPN 3 | 13 | #define R_SH_DIR8WPN 3 |
14 | #define R_SH_IND12W 4 | 14 | #define R_SH_IND12W 4 |
15 | #define R_SH_DIR8WPL 5 | 15 | #define R_SH_DIR8WPL 5 |
16 | #define R_SH_DIR8WPZ 6 | 16 | #define R_SH_DIR8WPZ 6 |
17 | #define R_SH_DIR8BP 7 | 17 | #define R_SH_DIR8BP 7 |
18 | #define R_SH_DIR8W 8 | 18 | #define R_SH_DIR8W 8 |
19 | #define R_SH_DIR8L 9 | 19 | #define R_SH_DIR8L 9 |
20 | #define R_SH_SWITCH16 25 | 20 | #define R_SH_SWITCH16 25 |
21 | #define R_SH_SWITCH32 26 | 21 | #define R_SH_SWITCH32 26 |
22 | #define R_SH_USES 27 | 22 | #define R_SH_USES 27 |
23 | #define R_SH_COUNT 28 | 23 | #define R_SH_COUNT 28 |
24 | #define R_SH_ALIGN 29 | 24 | #define R_SH_ALIGN 29 |
25 | #define R_SH_CODE 30 | 25 | #define R_SH_CODE 30 |
26 | #define R_SH_DATA 31 | 26 | #define R_SH_DATA 31 |
27 | #define R_SH_LABEL 32 | 27 | #define R_SH_LABEL 32 |
28 | #define R_SH_SWITCH8 33 | 28 | #define R_SH_SWITCH8 33 |
29 | #define R_SH_GNU_VTINHERIT 34 | 29 | #define R_SH_GNU_VTINHERIT 34 |
30 | #define R_SH_GNU_VTENTRY 35 | 30 | #define R_SH_GNU_VTENTRY 35 |
31 | #define R_SH_TLS_GD_32 144 | 31 | #define R_SH_TLS_GD_32 144 |
32 | #define R_SH_TLS_LD_32 145 | 32 | #define R_SH_TLS_LD_32 145 |
33 | #define R_SH_TLS_LDO_32 146 | 33 | #define R_SH_TLS_LDO_32 146 |
34 | #define R_SH_TLS_IE_32 147 | 34 | #define R_SH_TLS_IE_32 147 |
35 | #define R_SH_TLS_LE_32 148 | 35 | #define R_SH_TLS_LE_32 148 |
36 | #define R_SH_TLS_DTPMOD32 149 | 36 | #define R_SH_TLS_DTPMOD32 149 |
37 | #define R_SH_TLS_DTPOFF32 150 | 37 | #define R_SH_TLS_DTPOFF32 150 |
38 | #define R_SH_TLS_TPOFF32 151 | 38 | #define R_SH_TLS_TPOFF32 151 |
39 | #define R_SH_GOT32 160 | 39 | #define R_SH_GOT32 160 |
40 | #define R_SH_PLT32 161 | 40 | #define R_SH_PLT32 161 |
41 | #define R_SH_COPY 162 | 41 | #define R_SH_COPY 162 |
42 | #define R_SH_GLOB_DAT 163 | 42 | #define R_SH_GLOB_DAT 163 |
43 | #define R_SH_JMP_SLOT 164 | 43 | #define R_SH_JMP_SLOT 164 |
44 | #define R_SH_RELATIVE 165 | 44 | #define R_SH_RELATIVE 165 |
45 | #define R_SH_GOTOFF 166 | 45 | #define R_SH_GOTOFF 166 |
46 | #define R_SH_GOTPC 167 | 46 | #define R_SH_GOTPC 167 |
47 | /* Keep this the last entry. */ | 47 | /* Keep this the last entry. */ |
48 | #define R_SH_NUM 256 | 48 | #define R_SH_NUM 256 |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * ELF register definitions.. | 51 | * ELF register definitions.. |
52 | */ | 52 | */ |
53 | 53 | ||
54 | typedef unsigned long elf_greg_t; | 54 | typedef unsigned long elf_greg_t; |
55 | 55 | ||
56 | #define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) | 56 | #define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) |
57 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | 57 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; |
58 | 58 | ||
59 | typedef struct user_fpu_struct elf_fpregset_t; | 59 | typedef struct user_fpu_struct elf_fpregset_t; |
60 | 60 | ||
61 | /* | 61 | /* |
62 | * This is used to ensure we don't load something for the wrong architecture. | 62 | * This is used to ensure we don't load something for the wrong architecture. |
63 | */ | 63 | */ |
64 | #define elf_check_arch(x) ( (x)->e_machine == EM_SH ) | 64 | #define elf_check_arch(x) ( (x)->e_machine == EM_SH ) |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * These are used to set parameters in the core dumps. | 67 | * These are used to set parameters in the core dumps. |
68 | */ | 68 | */ |
69 | #define ELF_CLASS ELFCLASS32 | 69 | #define ELF_CLASS ELFCLASS32 |
70 | #ifdef __LITTLE_ENDIAN__ | 70 | #ifdef __LITTLE_ENDIAN__ |
71 | #define ELF_DATA ELFDATA2LSB | 71 | #define ELF_DATA ELFDATA2LSB |
72 | #else | 72 | #else |
73 | #define ELF_DATA ELFDATA2MSB | 73 | #define ELF_DATA ELFDATA2MSB |
74 | #endif | 74 | #endif |
75 | #define ELF_ARCH EM_SH | 75 | #define ELF_ARCH EM_SH |
76 | 76 | ||
77 | #define USE_ELF_CORE_DUMP | 77 | #define USE_ELF_CORE_DUMP |
78 | #define ELF_EXEC_PAGESIZE 4096 | 78 | #define ELF_EXEC_PAGESIZE 4096 |
79 | 79 | ||
80 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | 80 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical |
81 | use of this is to invoke "./ld.so someprog" to test out a new version of | 81 | use of this is to invoke "./ld.so someprog" to test out a new version of |
82 | the loader. We need to make sure that it is out of the way of the program | 82 | the loader. We need to make sure that it is out of the way of the program |
83 | that it will "exec", and that there is sufficient room for the brk. */ | 83 | that it will "exec", and that there is sufficient room for the brk. */ |
84 | 84 | ||
85 | #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) | 85 | #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) |
86 | 86 | ||
87 | 87 | ||
88 | #define ELF_CORE_COPY_REGS(_dest,_regs) \ | 88 | #define ELF_CORE_COPY_REGS(_dest,_regs) \ |
89 | memcpy((char *) &_dest, (char *) _regs, \ | 89 | memcpy((char *) &_dest, (char *) _regs, \ |
90 | sizeof(struct pt_regs)); | 90 | sizeof(struct pt_regs)); |
91 | 91 | ||
92 | /* This yields a mask that user programs can use to figure out what | 92 | /* This yields a mask that user programs can use to figure out what |
93 | instruction set this CPU supports. This could be done in user space, | 93 | instruction set this CPU supports. This could be done in user space, |
94 | but it's not easy, and we've already done it here. */ | 94 | but it's not easy, and we've already done it here. */ |
95 | 95 | ||
96 | #define ELF_HWCAP (boot_cpu_data.flags) | 96 | #define ELF_HWCAP (boot_cpu_data.flags) |
97 | 97 | ||
98 | /* This yields a string that ld.so will use to load implementation | 98 | /* This yields a string that ld.so will use to load implementation |
99 | specific libraries for optimization. This is more specific in | 99 | specific libraries for optimization. This is more specific in |
100 | intent than poking at uname or /proc/cpuinfo. | 100 | intent than poking at uname or /proc/cpuinfo. |
101 | 101 | ||
102 | For the moment, we have only optimizations for the Intel generations, | 102 | For the moment, we have only optimizations for the Intel generations, |
103 | but that could change... */ | 103 | but that could change... */ |
104 | 104 | ||
105 | #define ELF_PLATFORM (NULL) | 105 | #define ELF_PLATFORM (NULL) |
106 | 106 | ||
107 | #define ELF_PLAT_INIT(_r, load_addr) \ | 107 | #define ELF_PLAT_INIT(_r, load_addr) \ |
108 | do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \ | 108 | do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \ |
109 | _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \ | 109 | _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \ |
110 | _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \ | 110 | _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \ |
111 | _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; \ | 111 | _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; \ |
112 | _r->sr = SR_FD; } while (0) | 112 | _r->sr = SR_FD; } while (0) |
113 | 113 | ||
114 | #ifdef __KERNEL__ | 114 | #ifdef __KERNEL__ |
115 | #define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT) | 115 | #define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT) |
116 | struct task_struct; | 116 | struct task_struct; |
117 | extern int dump_task_regs (struct task_struct *, elf_gregset_t *); | 117 | extern int dump_task_regs (struct task_struct *, elf_gregset_t *); |
118 | extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); | 118 | extern int dump_task_fpu (struct task_struct *, elf_fpregset_t *); |
119 | 119 | ||
120 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) | 120 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) |
121 | #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) | 121 | #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs) |
122 | #endif | 122 | #endif |
123 | 123 | ||
124 | #ifdef CONFIG_VSYSCALL | ||
125 | /* vDSO has arch_setup_additional_pages */ | ||
126 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES | ||
127 | struct linux_binprm; | ||
128 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | ||
129 | int executable_stack); | ||
130 | |||
131 | extern unsigned int vdso_enabled; | ||
132 | extern void __kernel_vsyscall; | ||
133 | |||
134 | #define VDSO_BASE ((unsigned long)current->mm->context.vdso) | ||
135 | #define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x)) | ||
136 | |||
137 | #define ARCH_DLINFO \ | ||
138 | do { \ | ||
139 | if (vdso_enabled) \ | ||
140 | NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \ | ||
141 | } while (0) | ||
142 | #endif /* CONFIG_VSYSCALL */ | ||
143 | |||
124 | #endif /* __ASM_SH_ELF_H */ | 144 | #endif /* __ASM_SH_ELF_H */ |
125 | 145 |
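
ARCH_DLINFO runs from the generic ELF loader's auxv setup, and VDSO_SYM() converts an offset within the vDSO image into its address in the current process. Since the commit hooks up the signal trampoline return first, the typical consumer looks roughly like the sketch below; the __kernel_sigreturn symbol name is an assumption (the actual stub lives in vsyscall-sigreturn.S), and struct pt_regs and vdso_enabled come from asm/ptrace.h and the declarations above.

/* Hedged sketch of the signal-delivery side; see
 * arch/sh/kernel/signal.c for the real code. */
extern void __kernel_sigreturn;	/* assumed offset within the vDSO image */

static void setup_sig_return(struct pt_regs *regs)
{
	/*
	 * pr is the SH link register: when the handler returns, it
	 * lands on the vDSO stub, which issues the sigreturn trap.
	 */
	if (vdso_enabled)
		regs->pr = VDSO_SYM(&__kernel_sigreturn);
}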
include/asm-sh/mmu.h
1 | #ifndef __MMU_H | 1 | #ifndef __MMU_H |
2 | #define __MMU_H | 2 | #define __MMU_H |
3 | 3 | ||
4 | #if !defined(CONFIG_MMU) | 4 | #if !defined(CONFIG_MMU) |
5 | 5 | ||
6 | typedef struct { | 6 | typedef struct { |
7 | struct vm_list_struct *vmlist; | 7 | struct vm_list_struct *vmlist; |
8 | unsigned long end_brk; | 8 | unsigned long end_brk; |
9 | } mm_context_t; | 9 | } mm_context_t; |
10 | 10 | ||
11 | #else | 11 | #else |
12 | 12 | ||
13 | /* Default "unsigned long" context */ | 13 | /* Default "unsigned long" context */ |
14 | typedef unsigned long mm_context_t; | 14 | typedef unsigned long mm_context_id_t; |
15 | |||
16 | typedef struct { | ||
17 | mm_context_id_t id; | ||
18 | void *vdso; | ||
19 | } mm_context_t; | ||
15 | 20 | ||
16 | #endif /* CONFIG_MMU */ | 21 | #endif /* CONFIG_MMU */ |
17 | 22 | ||
18 | /* | 23 | /* |
19 | * Privileged Space Mapping Buffer (PMB) definitions | 24 | * Privileged Space Mapping Buffer (PMB) definitions |
20 | */ | 25 | */ |
21 | #define PMB_PASCR 0xff000070 | 26 | #define PMB_PASCR 0xff000070 |
22 | #define PMB_IRMCR 0xff000078 | 27 | #define PMB_IRMCR 0xff000078 |
23 | 28 | ||
24 | #define PMB_ADDR 0xf6100000 | 29 | #define PMB_ADDR 0xf6100000 |
25 | #define PMB_DATA 0xf7100000 | 30 | #define PMB_DATA 0xf7100000 |
26 | #define PMB_ENTRY_MAX 16 | 31 | #define PMB_ENTRY_MAX 16 |
27 | #define PMB_E_MASK 0x0000000f | 32 | #define PMB_E_MASK 0x0000000f |
28 | #define PMB_E_SHIFT 8 | 33 | #define PMB_E_SHIFT 8 |
29 | 34 | ||
30 | #define PMB_SZ_16M 0x00000000 | 35 | #define PMB_SZ_16M 0x00000000 |
31 | #define PMB_SZ_64M 0x00000010 | 36 | #define PMB_SZ_64M 0x00000010 |
32 | #define PMB_SZ_128M 0x00000080 | 37 | #define PMB_SZ_128M 0x00000080 |
33 | #define PMB_SZ_512M 0x00000090 | 38 | #define PMB_SZ_512M 0x00000090 |
34 | #define PMB_SZ_MASK PMB_SZ_512M | 39 | #define PMB_SZ_MASK PMB_SZ_512M |
35 | #define PMB_C 0x00000008 | 40 | #define PMB_C 0x00000008 |
36 | #define PMB_WT 0x00000001 | 41 | #define PMB_WT 0x00000001 |
37 | #define PMB_UB 0x00000200 | 42 | #define PMB_UB 0x00000200 |
38 | #define PMB_V 0x00000100 | 43 | #define PMB_V 0x00000100 |
39 | 44 | ||
40 | #define PMB_NO_ENTRY (-1) | 45 | #define PMB_NO_ENTRY (-1) |
41 | 46 | ||
42 | struct pmb_entry; | 47 | struct pmb_entry; |
43 | 48 | ||
44 | struct pmb_entry { | 49 | struct pmb_entry { |
45 | unsigned long vpn; | 50 | unsigned long vpn; |
46 | unsigned long ppn; | 51 | unsigned long ppn; |
47 | unsigned long flags; | 52 | unsigned long flags; |
48 | 53 | ||
49 | /* | 54 | /* |
50 | * 0 .. NR_PMB_ENTRIES for specific entry selection, or | 55 | * 0 .. NR_PMB_ENTRIES for specific entry selection, or |
51 | * PMB_NO_ENTRY to search for a free one | 56 | * PMB_NO_ENTRY to search for a free one |
52 | */ | 57 | */ |
53 | int entry; | 58 | int entry; |
54 | 59 | ||
55 | struct pmb_entry *next; | 60 | struct pmb_entry *next; |
56 | /* Adjacent entry link for contiguous multi-entry mappings */ | 61 | /* Adjacent entry link for contiguous multi-entry mappings */ |
57 | struct pmb_entry *link; | 62 | struct pmb_entry *link; |
58 | }; | 63 | }; |
59 | 64 | ||
60 | /* arch/sh/mm/pmb.c */ | 65 | /* arch/sh/mm/pmb.c */ |
61 | int __set_pmb_entry(unsigned long vpn, unsigned long ppn, | 66 | int __set_pmb_entry(unsigned long vpn, unsigned long ppn, |
62 | unsigned long flags, int *entry); | 67 | unsigned long flags, int *entry); |
63 | int set_pmb_entry(struct pmb_entry *pmbe); | 68 | int set_pmb_entry(struct pmb_entry *pmbe); |
64 | void clear_pmb_entry(struct pmb_entry *pmbe); | 69 | void clear_pmb_entry(struct pmb_entry *pmbe); |
65 | struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, | 70 | struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, |
66 | unsigned long flags); | 71 | unsigned long flags); |
67 | void pmb_free(struct pmb_entry *pmbe); | 72 | void pmb_free(struct pmb_entry *pmbe); |
68 | long pmb_remap(unsigned long virt, unsigned long phys, | 73 | long pmb_remap(unsigned long virt, unsigned long phys, |
69 | unsigned long size, unsigned long flags); | 74 | unsigned long size, unsigned long flags); |
70 | void pmb_unmap(unsigned long addr); | 75 | void pmb_unmap(unsigned long addr); |
71 | 76 | ||
72 | #endif /* __MMU_H */ | 77 | #endif /* __MMU_H */ |
73 | 78 | ||
74 | 79 |
include/asm-sh/mmu_context.h
1 | /* | 1 | /* |
2 | * Copyright (C) 1999 Niibe Yutaka | 2 | * Copyright (C) 1999 Niibe Yutaka |
3 | * Copyright (C) 2003 Paul Mundt | 3 | * Copyright (C) 2003 Paul Mundt |
4 | * | 4 | * |
5 | * ASID handling idea taken from MIPS implementation. | 5 | * ASID handling idea taken from MIPS implementation. |
6 | */ | 6 | */ |
7 | #ifndef __ASM_SH_MMU_CONTEXT_H | 7 | #ifndef __ASM_SH_MMU_CONTEXT_H |
8 | #define __ASM_SH_MMU_CONTEXT_H | 8 | #define __ASM_SH_MMU_CONTEXT_H |
9 | #ifdef __KERNEL__ | 9 | #ifdef __KERNEL__ |
10 | 10 | ||
11 | #include <asm/cpu/mmu_context.h> | 11 | #include <asm/cpu/mmu_context.h> |
12 | #include <asm/tlbflush.h> | 12 | #include <asm/tlbflush.h> |
13 | #include <asm/pgalloc.h> | 13 | #include <asm/pgalloc.h> |
14 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
15 | #include <asm/io.h> | 15 | #include <asm/io.h> |
16 | 16 | ||
17 | /* | 17 | /* |
18 | * The MMU "context" consists of two things: | 18 | * The MMU "context" consists of two things: |
19 | * (a) TLB cache version (or round, cycle, whatever expression you like) | 19 | * (a) TLB cache version (or round, cycle, whatever expression you like) |
20 | * (b) ASID (Address Space IDentifier) | 20 | * (b) ASID (Address Space IDentifier) |
21 | */ | 21 | */ |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * Cache of MMU context last used. | 24 | * Cache of MMU context last used. |
25 | */ | 25 | */ |
26 | extern unsigned long mmu_context_cache; | 26 | extern unsigned long mmu_context_cache; |
27 | 27 | ||
28 | #define MMU_CONTEXT_ASID_MASK 0x000000ff | 28 | #define MMU_CONTEXT_ASID_MASK 0x000000ff |
29 | #define MMU_CONTEXT_VERSION_MASK 0xffffff00 | 29 | #define MMU_CONTEXT_VERSION_MASK 0xffffff00 |
30 | #define MMU_CONTEXT_FIRST_VERSION 0x00000100 | 30 | #define MMU_CONTEXT_FIRST_VERSION 0x00000100 |
31 | #define NO_CONTEXT 0 | 31 | #define NO_CONTEXT 0 |
32 | 32 | ||
33 | /* ASID is an 8-bit value, so it can't be 0x100 */ | 33 | /* ASID is an 8-bit value, so it can't be 0x100 */ |
34 | #define MMU_NO_ASID 0x100 | 34 | #define MMU_NO_ASID 0x100 |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * Virtual Page Number mask | 37 | * Virtual Page Number mask |
38 | */ | 38 | */ |
39 | #define MMU_VPN_MASK 0xfffff000 | 39 | #define MMU_VPN_MASK 0xfffff000 |
40 | 40 | ||
41 | #ifdef CONFIG_MMU | 41 | #ifdef CONFIG_MMU |
42 | /* | 42 | /* |
43 | * Get MMU context if needed. | 43 | * Get MMU context if needed. |
44 | */ | 44 | */ |
45 | static __inline__ void | 45 | static __inline__ void |
46 | get_mmu_context(struct mm_struct *mm) | 46 | get_mmu_context(struct mm_struct *mm) |
47 | { | 47 | { |
48 | extern void flush_tlb_all(void); | 48 | extern void flush_tlb_all(void); |
49 | unsigned long mc = mmu_context_cache; | 49 | unsigned long mc = mmu_context_cache; |
50 | 50 | ||
51 | /* Check if we have an old version of the context. */ | 51 | /* Check if we have an old version of the context. */ |
52 | if (((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0) | 52 | if (((mm->context.id ^ mc) & MMU_CONTEXT_VERSION_MASK) == 0) |
53 | /* It's up to date, do nothing */ | 53 | /* It's up to date, do nothing */ |
54 | return; | 54 | return; |
55 | 55 | ||
56 | /* It's old, we need to get new context with new version. */ | 56 | /* It's old, we need to get new context with new version. */ |
57 | mc = ++mmu_context_cache; | 57 | mc = ++mmu_context_cache; |
58 | if (!(mc & MMU_CONTEXT_ASID_MASK)) { | 58 | if (!(mc & MMU_CONTEXT_ASID_MASK)) { |
59 | /* | 59 | /* |
60 | * We have exhausted the ASIDs of this version. | 60 | * We have exhausted the ASIDs of this version. |
61 | * Flush the entire TLB and start a new cycle. | 61 | * Flush the entire TLB and start a new cycle. |
62 | */ | 62 | */ |
63 | flush_tlb_all(); | 63 | flush_tlb_all(); |
64 | /* | 64 | /* |
65 | * Fix the version; note that we avoid version #0 | 65 | * Fix the version; note that we avoid version #0 |
66 | * to distinguish it from NO_CONTEXT. | 66 | * to distinguish it from NO_CONTEXT. |
67 | */ | 67 | */ |
68 | if (!mc) | 68 | if (!mc) |
69 | mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION; | 69 | mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION; |
70 | } | 70 | } |
71 | mm->context = mc; | 71 | mm->context.id = mc; |
72 | } | 72 | } |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * Initialize the context related info for a new mm_struct | 75 | * Initialize the context related info for a new mm_struct |
76 | * instance. | 76 | * instance. |
77 | */ | 77 | */ |
78 | static __inline__ int init_new_context(struct task_struct *tsk, | 78 | static __inline__ int init_new_context(struct task_struct *tsk, |
79 | struct mm_struct *mm) | 79 | struct mm_struct *mm) |
80 | { | 80 | { |
81 | mm->context = NO_CONTEXT; | 81 | mm->context.id = NO_CONTEXT; |
82 | 82 | ||
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | /* | 86 | /* |
87 | * Destroy context related info for an mm_struct that is about | 87 | * Destroy context related info for an mm_struct that is about |
88 | * to be put to rest. | 88 | * to be put to rest. |
89 | */ | 89 | */ |
90 | static __inline__ void destroy_context(struct mm_struct *mm) | 90 | static __inline__ void destroy_context(struct mm_struct *mm) |
91 | { | 91 | { |
92 | /* Do nothing */ | 92 | /* Do nothing */ |
93 | } | 93 | } |
94 | 94 | ||
95 | static __inline__ void set_asid(unsigned long asid) | 95 | static __inline__ void set_asid(unsigned long asid) |
96 | { | 96 | { |
97 | unsigned long __dummy; | 97 | unsigned long __dummy; |
98 | 98 | ||
99 | __asm__ __volatile__ ("mov.l %2, %0\n\t" | 99 | __asm__ __volatile__ ("mov.l %2, %0\n\t" |
100 | "and %3, %0\n\t" | 100 | "and %3, %0\n\t" |
101 | "or %1, %0\n\t" | 101 | "or %1, %0\n\t" |
102 | "mov.l %0, %2" | 102 | "mov.l %0, %2" |
103 | : "=&r" (__dummy) | 103 | : "=&r" (__dummy) |
104 | : "r" (asid), "m" (__m(MMU_PTEH)), | 104 | : "r" (asid), "m" (__m(MMU_PTEH)), |
105 | "r" (0xffffff00)); | 105 | "r" (0xffffff00)); |
106 | } | 106 | } |
107 | 107 | ||
108 | static __inline__ unsigned long get_asid(void) | 108 | static __inline__ unsigned long get_asid(void) |
109 | { | 109 | { |
110 | unsigned long asid; | 110 | unsigned long asid; |
111 | 111 | ||
112 | __asm__ __volatile__ ("mov.l %1, %0" | 112 | __asm__ __volatile__ ("mov.l %1, %0" |
113 | : "=r" (asid) | 113 | : "=r" (asid) |
114 | : "m" (__m(MMU_PTEH))); | 114 | : "m" (__m(MMU_PTEH))); |
115 | asid &= MMU_CONTEXT_ASID_MASK; | 115 | asid &= MMU_CONTEXT_ASID_MASK; |
116 | return asid; | 116 | return asid; |
117 | } | 117 | } |
118 | 118 | ||
119 | /* | 119 | /* |
120 | * After we have set current->mm to a new value, this activates | 120 | * After we have set current->mm to a new value, this activates |
121 | * the context for the new mm so we see the new mappings. | 121 | * the context for the new mm so we see the new mappings. |
122 | */ | 122 | */ |
123 | static __inline__ void activate_context(struct mm_struct *mm) | 123 | static __inline__ void activate_context(struct mm_struct *mm) |
124 | { | 124 | { |
125 | get_mmu_context(mm); | 125 | get_mmu_context(mm); |
126 | set_asid(mm->context & MMU_CONTEXT_ASID_MASK); | 126 | set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK); |
127 | } | 127 | } |
128 | 128 | ||
129 | /* MMU_TTB can be used for optimizing the fault handling. | 129 | /* MMU_TTB can be used for optimizing the fault handling. |
130 | (Currently not used) */ | 130 | (Currently not used) */ |
131 | static __inline__ void switch_mm(struct mm_struct *prev, | 131 | static __inline__ void switch_mm(struct mm_struct *prev, |
132 | struct mm_struct *next, | 132 | struct mm_struct *next, |
133 | struct task_struct *tsk) | 133 | struct task_struct *tsk) |
134 | { | 134 | { |
135 | if (likely(prev != next)) { | 135 | if (likely(prev != next)) { |
136 | unsigned long __pgdir = (unsigned long)next->pgd; | 136 | unsigned long __pgdir = (unsigned long)next->pgd; |
137 | 137 | ||
138 | __asm__ __volatile__("mov.l %0, %1" | 138 | __asm__ __volatile__("mov.l %0, %1" |
139 | : /* no output */ | 139 | : /* no output */ |
140 | : "r" (__pgdir), "m" (__m(MMU_TTB))); | 140 | : "r" (__pgdir), "m" (__m(MMU_TTB))); |
141 | activate_context(next); | 141 | activate_context(next); |
142 | } | 142 | } |
143 | } | 143 | } |
144 | 144 | ||
145 | #define deactivate_mm(tsk,mm) do { } while (0) | 145 | #define deactivate_mm(tsk,mm) do { } while (0) |
146 | 146 | ||
147 | #define activate_mm(prev, next) \ | 147 | #define activate_mm(prev, next) \ |
148 | switch_mm((prev),(next),NULL) | 148 | switch_mm((prev),(next),NULL) |
149 | 149 | ||
150 | static __inline__ void | 150 | static __inline__ void |
151 | enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 151 | enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
152 | { | 152 | { |
153 | } | 153 | } |
154 | #else /* !CONFIG_MMU */ | 154 | #else /* !CONFIG_MMU */ |
155 | #define get_mmu_context(mm) do { } while (0) | 155 | #define get_mmu_context(mm) do { } while (0) |
156 | #define init_new_context(tsk,mm) (0) | 156 | #define init_new_context(tsk,mm) (0) |
157 | #define destroy_context(mm) do { } while (0) | 157 | #define destroy_context(mm) do { } while (0) |
158 | #define set_asid(asid) do { } while (0) | 158 | #define set_asid(asid) do { } while (0) |
159 | #define get_asid() (0) | 159 | #define get_asid() (0) |
160 | #define activate_context(mm) do { } while (0) | 160 | #define activate_context(mm) do { } while (0) |
161 | #define switch_mm(prev,next,tsk) do { } while (0) | 161 | #define switch_mm(prev,next,tsk) do { } while (0) |
162 | #define deactivate_mm(tsk,mm) do { } while (0) | 162 | #define deactivate_mm(tsk,mm) do { } while (0) |
163 | #define activate_mm(prev,next) do { } while (0) | 163 | #define activate_mm(prev,next) do { } while (0) |
164 | #define enter_lazy_tlb(mm,tsk) do { } while (0) | 164 | #define enter_lazy_tlb(mm,tsk) do { } while (0) |
165 | #endif /* CONFIG_MMU */ | 165 | #endif /* CONFIG_MMU */ |
166 | 166 | ||
167 | #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4) | 167 | #if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4) |
168 | /* | 168 | /* |
169 | * If this processor has an MMU, we need methods to turn it off/on .. | 169 | * If this processor has an MMU, we need methods to turn it off/on .. |
170 | * paging_init() will also have to be updated for the processor in | 170 | * paging_init() will also have to be updated for the processor in |
171 | * question. | 171 | * question. |
172 | */ | 172 | */ |
173 | static inline void enable_mmu(void) | 173 | static inline void enable_mmu(void) |
174 | { | 174 | { |
175 | /* Enable MMU */ | 175 | /* Enable MMU */ |
176 | ctrl_outl(MMU_CONTROL_INIT, MMUCR); | 176 | ctrl_outl(MMU_CONTROL_INIT, MMUCR); |
177 | ctrl_barrier(); | 177 | ctrl_barrier(); |
178 | 178 | ||
179 | if (mmu_context_cache == NO_CONTEXT) | 179 | if (mmu_context_cache == NO_CONTEXT) |
180 | mmu_context_cache = MMU_CONTEXT_FIRST_VERSION; | 180 | mmu_context_cache = MMU_CONTEXT_FIRST_VERSION; |
181 | 181 | ||
182 | set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK); | 182 | set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK); |
183 | } | 183 | } |
184 | 184 | ||
185 | static inline void disable_mmu(void) | 185 | static inline void disable_mmu(void) |
186 | { | 186 | { |
187 | unsigned long cr; | 187 | unsigned long cr; |
188 | 188 | ||
189 | cr = ctrl_inl(MMUCR); | 189 | cr = ctrl_inl(MMUCR); |
190 | cr &= ~MMU_CONTROL_INIT; | 190 | cr &= ~MMU_CONTROL_INIT; |
191 | ctrl_outl(cr, MMUCR); | 191 | ctrl_outl(cr, MMUCR); |
192 | 192 | ||
193 | ctrl_barrier(); | 193 | ctrl_barrier(); |
194 | } | 194 | } |
195 | #else | 195 | #else |
196 | /* | 196 | /* |
197 | * MMU control handlers for processors lacking memory | 197 | * MMU control handlers for processors lacking memory |
198 | * management hardware. | 198 | * management hardware. |
199 | */ | 199 | */ |
200 | #define enable_mmu() do { BUG(); } while (0) | 200 | #define enable_mmu() do { BUG(); } while (0) |
201 | #define disable_mmu() do { BUG(); } while (0) | 201 | #define disable_mmu() do { BUG(); } while (0) |
202 | #endif | 202 | #endif |
203 | 203 | ||
204 | #endif /* __KERNEL__ */ | 204 | #endif /* __KERNEL__ */ |
205 | #endif /* __ASM_SH_MMU_CONTEXT_H */ | 205 | #endif /* __ASM_SH_MMU_CONTEXT_H */ |
206 | 206 |
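
The context value packs an 8-bit ASID under a 24-bit version (generation) counter, so one TLB flush per 256 allocations replaces per-mm flushing. The wrap-around arithmetic in get_mmu_context() above, in isolation:

#include <stdio.h>

#define ASID_MASK	0x000000ffUL
#define VERSION_MASK	0xffffff00UL

int main(void)
{
	/* 255 contexts into version 1, the cache holds ASID 0xff. */
	unsigned long cache = 0x00000100UL | 0xff;

	cache++;	/* the next allocation overflows the ASID field ... */
	if (!(cache & ASID_MASK))
		/*
		 * ... bumping the version: every mm whose stored
		 * version no longer matches must re-allocate, and the
		 * TLB is flushed once here rather than per ASID.
		 */
		printf("new version %#lx, flush_tlb_all()\n",
		       cache & VERSION_MASK);
	return 0;
}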
include/asm-sh/page.h
1 | #ifndef __ASM_SH_PAGE_H | 1 | #ifndef __ASM_SH_PAGE_H |
2 | #define __ASM_SH_PAGE_H | 2 | #define __ASM_SH_PAGE_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright (C) 1999 Niibe Yutaka | 5 | * Copyright (C) 1999 Niibe Yutaka |
6 | */ | 6 | */ |
7 | 7 | ||
8 | /* | 8 | /* |
9 | [ P0/U0 (virtual) ] 0x00000000 <------ User space | 9 | [ P0/U0 (virtual) ] 0x00000000 <------ User space |
10 | [ P1 (fixed) cached ] 0x80000000 <------ Kernel space | 10 | [ P1 (fixed) cached ] 0x80000000 <------ Kernel space |
11 | [ P2 (fixed) non-cachable] 0xA0000000 <------ Physical access | 11 | [ P2 (fixed) non-cachable] 0xA0000000 <------ Physical access |
12 | [ P3 (virtual) cached] 0xC0000000 <------ vmalloced area | 12 | [ P3 (virtual) cached] 0xC0000000 <------ vmalloced area |
13 | [ P4 control ] 0xE0000000 | 13 | [ P4 control ] 0xE0000000 |
14 | */ | 14 | */ |
15 | 15 | ||
16 | 16 | ||
17 | /* PAGE_SHIFT determines the page size */ | 17 | /* PAGE_SHIFT determines the page size */ |
18 | #define PAGE_SHIFT 12 | 18 | #define PAGE_SHIFT 12 |
19 | 19 | ||
20 | #ifdef __ASSEMBLY__ | 20 | #ifdef __ASSEMBLY__ |
21 | #define PAGE_SIZE (1 << PAGE_SHIFT) | 21 | #define PAGE_SIZE (1 << PAGE_SHIFT) |
22 | #else | 22 | #else |
23 | #define PAGE_SIZE (1UL << PAGE_SHIFT) | 23 | #define PAGE_SIZE (1UL << PAGE_SHIFT) |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 26 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
27 | #define PTE_MASK PAGE_MASK | 27 | #define PTE_MASK PAGE_MASK |
28 | 28 | ||
29 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | 29 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) |
30 | #define HPAGE_SHIFT 16 | 30 | #define HPAGE_SHIFT 16 |
31 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) | 31 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) |
32 | #define HPAGE_SHIFT 20 | 32 | #define HPAGE_SHIFT 20 |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | #ifdef CONFIG_HUGETLB_PAGE | 35 | #ifdef CONFIG_HUGETLB_PAGE |
36 | #define HPAGE_SIZE (1UL << HPAGE_SHIFT) | 36 | #define HPAGE_SIZE (1UL << HPAGE_SHIFT) |
37 | #define HPAGE_MASK (~(HPAGE_SIZE-1)) | 37 | #define HPAGE_MASK (~(HPAGE_SIZE-1)) |
38 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT) | 38 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT) |
39 | #endif | 39 | #endif |
40 | 40 | ||
41 | #ifdef __KERNEL__ | 41 | #ifdef __KERNEL__ |
42 | #ifndef __ASSEMBLY__ | 42 | #ifndef __ASSEMBLY__ |
43 | 43 | ||
44 | extern void (*clear_page)(void *to); | 44 | extern void (*clear_page)(void *to); |
45 | extern void (*copy_page)(void *to, void *from); | 45 | extern void (*copy_page)(void *to, void *from); |
46 | 46 | ||
47 | #ifdef CONFIG_MMU | 47 | #ifdef CONFIG_MMU |
48 | extern void clear_page_slow(void *to); | 48 | extern void clear_page_slow(void *to); |
49 | extern void copy_page_slow(void *to, void *from); | 49 | extern void copy_page_slow(void *to, void *from); |
50 | #else | 50 | #else |
51 | extern void clear_page_nommu(void *to); | 51 | extern void clear_page_nommu(void *to); |
52 | extern void copy_page_nommu(void *to, void *from); | 52 | extern void copy_page_nommu(void *to, void *from); |
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | #if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \ | 55 | #if defined(CONFIG_MMU) && (defined(CONFIG_CPU_SH4) || \ |
56 | defined(CONFIG_SH7705_CACHE_32KB)) | 56 | defined(CONFIG_SH7705_CACHE_32KB)) |
57 | struct page; | 57 | struct page; |
58 | extern void clear_user_page(void *to, unsigned long address, struct page *pg); | 58 | extern void clear_user_page(void *to, unsigned long address, struct page *pg); |
59 | extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg); | 59 | extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg); |
60 | extern void __clear_user_page(void *to, void *orig_to); | 60 | extern void __clear_user_page(void *to, void *orig_to); |
61 | extern void __copy_user_page(void *to, void *from, void *orig_to); | 61 | extern void __copy_user_page(void *to, void *from, void *orig_to); |
62 | #elif defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH3) || !defined(CONFIG_MMU) | 62 | #elif defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH3) || !defined(CONFIG_MMU) |
63 | #define clear_user_page(page, vaddr, pg) clear_page(page) | 63 | #define clear_user_page(page, vaddr, pg) clear_page(page) |
64 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) | 64 | #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) |
65 | #endif | 65 | #endif |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * These are used to make use of C type-checking.. | 68 | * These are used to make use of C type-checking.. |
69 | */ | 69 | */ |
70 | typedef struct { unsigned long pte; } pte_t; | 70 | typedef struct { unsigned long pte; } pte_t; |
71 | typedef struct { unsigned long pgd; } pgd_t; | 71 | typedef struct { unsigned long pgd; } pgd_t; |
72 | typedef struct { unsigned long pgprot; } pgprot_t; | 72 | typedef struct { unsigned long pgprot; } pgprot_t; |
73 | 73 | ||
74 | #define pte_val(x) ((x).pte) | 74 | #define pte_val(x) ((x).pte) |
75 | #define pgd_val(x) ((x).pgd) | 75 | #define pgd_val(x) ((x).pgd) |
76 | #define pgprot_val(x) ((x).pgprot) | 76 | #define pgprot_val(x) ((x).pgprot) |
77 | 77 | ||
78 | #define __pte(x) ((pte_t) { (x) } ) | 78 | #define __pte(x) ((pte_t) { (x) } ) |
79 | #define __pgd(x) ((pgd_t) { (x) } ) | 79 | #define __pgd(x) ((pgd_t) { (x) } ) |
80 | #define __pgprot(x) ((pgprot_t) { (x) } ) | 80 | #define __pgprot(x) ((pgprot_t) { (x) } ) |
81 | 81 | ||
82 | #endif /* !__ASSEMBLY__ */ | 82 | #endif /* !__ASSEMBLY__ */ |
83 | 83 | ||
84 | /* to align the pointer to the (next) page boundary */ | 84 | /* to align the pointer to the (next) page boundary */ |
85 | #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) | 85 | #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) |
86 | 86 | ||
87 | /* | 87 | /* |
88 | * IF YOU CHANGE THIS, PLEASE ALSO CHANGE | 88 | * IF YOU CHANGE THIS, PLEASE ALSO CHANGE |
89 | * | 89 | * |
90 | * arch/sh/kernel/vmlinux.lds.S | 90 | * arch/sh/kernel/vmlinux.lds.S |
91 | * | 91 | * |
92 | * which has the same constant encoded.. | 92 | * which has the same constant encoded.. |
93 | */ | 93 | */ |
94 | 94 | ||
95 | #define __MEMORY_START CONFIG_MEMORY_START | 95 | #define __MEMORY_START CONFIG_MEMORY_START |
96 | #define __MEMORY_SIZE CONFIG_MEMORY_SIZE | 96 | #define __MEMORY_SIZE CONFIG_MEMORY_SIZE |
97 | 97 | ||
98 | #define PAGE_OFFSET CONFIG_PAGE_OFFSET | 98 | #define PAGE_OFFSET CONFIG_PAGE_OFFSET |
99 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) | 99 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) |
100 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | 100 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) |
101 | 101 | ||
102 | #define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT) | 102 | #define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT) |
103 | 103 | ||
104 | #define phys_to_page(phys) (mem_map + (((phys)-__MEMORY_START) >> PAGE_SHIFT)) | 104 | #define phys_to_page(phys) (mem_map + (((phys)-__MEMORY_START) >> PAGE_SHIFT)) |
105 | #define page_to_phys(page) (((page - mem_map) << PAGE_SHIFT) + __MEMORY_START) | 105 | #define page_to_phys(page) (((page - mem_map) << PAGE_SHIFT) + __MEMORY_START) |
106 | 106 | ||
107 | /* PFN start number, because of __MEMORY_START */ | 107 | /* PFN start number, because of __MEMORY_START */ |
108 | #define PFN_START (__MEMORY_START >> PAGE_SHIFT) | 108 | #define PFN_START (__MEMORY_START >> PAGE_SHIFT) |
109 | #define ARCH_PFN_OFFSET (PFN_START) | 109 | #define ARCH_PFN_OFFSET (PFN_START) |
110 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 110 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
111 | #define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr) | 111 | #define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr) |
112 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | 112 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
113 | 113 | ||
114 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | 114 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ |
115 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | 115 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
116 | 116 | ||
117 | #include <asm-generic/memory_model.h> | 117 | #include <asm-generic/memory_model.h> |
118 | #include <asm-generic/page.h> | 118 | #include <asm-generic/page.h> |
119 | 119 | ||
120 | /* vDSO support */ | ||
121 | #ifdef CONFIG_VSYSCALL | ||
122 | #define __HAVE_ARCH_GATE_AREA | ||
123 | #endif | ||
124 | |||
120 | #endif /* __KERNEL__ */ | 125 | #endif /* __KERNEL__ */ |
121 | #endif /* __ASM_SH_PAGE_H */ | 126 | #endif /* __ASM_SH_PAGE_H */ |
122 | 127 |
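
Defining __HAVE_ARCH_GATE_AREA tells the core VM that the architecture supplies its own gate-VMA helpers for the vsyscall page; these feed /proc/<pid>/maps and core dumps. The real versions live in arch/sh/kernel/vsyscall/vsyscall.c, and since the vDSO here is mapped as an ordinary per-process VMA, a plausible shape is simply:

/* Hedged sketch -- the real helpers are in
 * arch/sh/kernel/vsyscall/vsyscall.c. */
struct vm_area_struct *get_gate_vma(struct task_struct *task)
{
	return NULL;	/* no fixed gate VMA: the vDSO is a normal mapping */
}

int in_gate_area(struct task_struct *task, unsigned long address)
{
	return 0;
}

int in_gate_area_no_task(unsigned long address)
{
	return 0;
}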
include/asm-sh/processor.h
1 | /* | 1 | /* |
2 | * include/asm-sh/processor.h | 2 | * include/asm-sh/processor.h |
3 | * | 3 | * |
4 | * Copyright (C) 1999, 2000 Niibe Yutaka | 4 | * Copyright (C) 1999, 2000 Niibe Yutaka |
5 | * Copyright (C) 2002, 2003 Paul Mundt | 5 | * Copyright (C) 2002, 2003 Paul Mundt |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef __ASM_SH_PROCESSOR_H | 8 | #ifndef __ASM_SH_PROCESSOR_H |
9 | #define __ASM_SH_PROCESSOR_H | 9 | #define __ASM_SH_PROCESSOR_H |
10 | #ifdef __KERNEL__ | 10 | #ifdef __KERNEL__ |
11 | 11 | ||
12 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
13 | #include <asm/page.h> | 13 | #include <asm/page.h> |
14 | #include <asm/types.h> | 14 | #include <asm/types.h> |
15 | #include <asm/cache.h> | 15 | #include <asm/cache.h> |
16 | #include <asm/ptrace.h> | 16 | #include <asm/ptrace.h> |
17 | #include <asm/cpu-features.h> | 17 | #include <asm/cpu-features.h> |
18 | 18 | ||
19 | /* | 19 | /* |
20 | * Default implementation of macro that returns current | 20 | * Default implementation of macro that returns current |
21 | * instruction pointer ("program counter"). | 21 | * instruction pointer ("program counter"). |
22 | */ | 22 | */ |
23 | #define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n1:":"=z" (pc)); pc; }) | 23 | #define current_text_addr() ({ void *pc; __asm__("mova 1f, %0\n1:":"=z" (pc)); pc; }) |
24 | 24 | ||
25 | /* Core Processor Version Register */ | 25 | /* Core Processor Version Register */ |
26 | #define CCN_PVR 0xff000030 | 26 | #define CCN_PVR 0xff000030 |
27 | #define CCN_CVR 0xff000040 | 27 | #define CCN_CVR 0xff000040 |
28 | #define CCN_PRR 0xff000044 | 28 | #define CCN_PRR 0xff000044 |
29 | 29 | ||
30 | /* | 30 | /* |
31 | * CPU type and hardware bug flags. Kept separately for each CPU. | 31 | * CPU type and hardware bug flags. Kept separately for each CPU. |
32 | * | 32 | * |
33 | * Each one of these also needs a CONFIG_CPU_SUBTYPE_xxx entry | 33 | * Each one of these also needs a CONFIG_CPU_SUBTYPE_xxx entry |
34 | * in arch/sh/mm/Kconfig, as well as an entry in arch/sh/kernel/setup.c | 34 | * in arch/sh/mm/Kconfig, as well as an entry in arch/sh/kernel/setup.c |
35 | * for parsing the subtype in get_cpu_subtype(). | 35 | * for parsing the subtype in get_cpu_subtype(). |
36 | */ | 36 | */ |
37 | enum cpu_type { | 37 | enum cpu_type { |
38 | /* SH-2 types */ | 38 | /* SH-2 types */ |
39 | CPU_SH7604, | 39 | CPU_SH7604, |
40 | 40 | ||
41 | /* SH-3 types */ | 41 | /* SH-3 types */ |
42 | CPU_SH7705, CPU_SH7706, CPU_SH7707, | 42 | CPU_SH7705, CPU_SH7706, CPU_SH7707, |
43 | CPU_SH7708, CPU_SH7708S, CPU_SH7708R, | 43 | CPU_SH7708, CPU_SH7708S, CPU_SH7708R, |
44 | CPU_SH7709, CPU_SH7709A, CPU_SH7710, | 44 | CPU_SH7709, CPU_SH7709A, CPU_SH7710, |
45 | CPU_SH7729, CPU_SH7300, | 45 | CPU_SH7729, CPU_SH7300, |
46 | 46 | ||
47 | /* SH-4 types */ | 47 | /* SH-4 types */ |
48 | CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R, | 48 | CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R, |
49 | CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501, | 49 | CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501, |
50 | CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781, | 50 | CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781, |
51 | 51 | ||
52 | /* Unknown subtype */ | 52 | /* Unknown subtype */ |
53 | CPU_SH_NONE | 53 | CPU_SH_NONE |
54 | }; | 54 | }; |
55 | 55 | ||
56 | struct sh_cpuinfo { | 56 | struct sh_cpuinfo { |
57 | unsigned int type; | 57 | unsigned int type; |
58 | unsigned long loops_per_jiffy; | 58 | unsigned long loops_per_jiffy; |
59 | 59 | ||
60 | struct cache_info icache; /* Primary I-cache */ | 60 | struct cache_info icache; /* Primary I-cache */ |
61 | struct cache_info dcache; /* Primary D-cache */ | 61 | struct cache_info dcache; /* Primary D-cache */ |
62 | struct cache_info scache; /* Secondary cache */ | 62 | struct cache_info scache; /* Secondary cache */ |
63 | 63 | ||
64 | unsigned long flags; | 64 | unsigned long flags; |
65 | } __attribute__ ((aligned(SMP_CACHE_BYTES))); | 65 | } __attribute__ ((aligned(SMP_CACHE_BYTES))); |
66 | 66 | ||
67 | extern struct sh_cpuinfo boot_cpu_data; | 67 | extern struct sh_cpuinfo boot_cpu_data; |
68 | 68 | ||
69 | #ifdef CONFIG_SMP | 69 | #ifdef CONFIG_SMP |
70 | extern struct sh_cpuinfo cpu_data[]; | 70 | extern struct sh_cpuinfo cpu_data[]; |
71 | #define current_cpu_data cpu_data[smp_processor_id()] | 71 | #define current_cpu_data cpu_data[smp_processor_id()] |
72 | #else | 72 | #else |
73 | #define cpu_data (&boot_cpu_data) | 73 | #define cpu_data (&boot_cpu_data) |
74 | #define current_cpu_data boot_cpu_data | 74 | #define current_cpu_data boot_cpu_data |
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * User space process size: 2GB. | 78 | * User space process size: 2GB. |
79 | * | 79 | * |
80 | * Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff | 80 | * Since SH7709 and SH7750 have "area 7", we can't use 0x7c000000--0x7fffffff |
81 | */ | 81 | */ |
82 | #define TASK_SIZE 0x7c000000UL | 82 | #define TASK_SIZE 0x7c000000UL |
83 | 83 | ||
84 | /* This decides where the kernel will search for a free chunk of vm | 84 | /* This decides where the kernel will search for a free chunk of vm |
85 | * space during mmap's. | 85 | * space during mmap's. |
86 | */ | 86 | */ |
87 | #define TASK_UNMAPPED_BASE (TASK_SIZE / 3) | 87 | #define TASK_UNMAPPED_BASE (TASK_SIZE / 3) |
88 | 88 | ||
89 | /* | 89 | /* |
90 | * Bit of SR register | 90 | * Bit of SR register |
91 | * | 91 | * |
92 | * FD-bit: | 92 | * FD-bit: |
93 | * When it's set, it means the processor doesn't have the right to use the FPU, | 93 | * When it's set, it means the processor doesn't have the right to use the FPU, |
94 | * and executing a floating-point operation results in an exception. | 94 | * and executing a floating-point operation results in an exception. |
95 | * | 95 | * |
96 | * IMASK-bit: | 96 | * IMASK-bit: |
97 | * Interrupt level mask | 97 | * Interrupt level mask |
98 | */ | 98 | */ |
99 | #define SR_FD 0x00008000 | 99 | #define SR_FD 0x00008000 |
100 | #define SR_DSP 0x00001000 | 100 | #define SR_DSP 0x00001000 |
101 | #define SR_IMASK 0x000000f0 | 101 | #define SR_IMASK 0x000000f0 |
102 | 102 | ||
103 | /* | 103 | /* |
104 | * FPU structure and data | 104 | * FPU structure and data |
105 | */ | 105 | */ |
106 | 106 | ||
107 | struct sh_fpu_hard_struct { | 107 | struct sh_fpu_hard_struct { |
108 | unsigned long fp_regs[16]; | 108 | unsigned long fp_regs[16]; |
109 | unsigned long xfp_regs[16]; | 109 | unsigned long xfp_regs[16]; |
110 | unsigned long fpscr; | 110 | unsigned long fpscr; |
111 | unsigned long fpul; | 111 | unsigned long fpul; |
112 | 112 | ||
113 | long status; /* software status information */ | 113 | long status; /* software status information */ |
114 | }; | 114 | }; |
115 | 115 | ||
116 | /* Dummy fpu emulator */ | 116 | /* Dummy fpu emulator */ |
117 | struct sh_fpu_soft_struct { | 117 | struct sh_fpu_soft_struct { |
118 | unsigned long fp_regs[16]; | 118 | unsigned long fp_regs[16]; |
119 | unsigned long xfp_regs[16]; | 119 | unsigned long xfp_regs[16]; |
120 | unsigned long fpscr; | 120 | unsigned long fpscr; |
121 | unsigned long fpul; | 121 | unsigned long fpul; |
122 | 122 | ||
123 | unsigned char lookahead; | 123 | unsigned char lookahead; |
124 | unsigned long entry_pc; | 124 | unsigned long entry_pc; |
125 | }; | 125 | }; |
126 | 126 | ||
127 | union sh_fpu_union { | 127 | union sh_fpu_union { |
128 | struct sh_fpu_hard_struct hard; | 128 | struct sh_fpu_hard_struct hard; |
129 | struct sh_fpu_soft_struct soft; | 129 | struct sh_fpu_soft_struct soft; |
130 | }; | 130 | }; |
131 | 131 | ||
132 | struct thread_struct { | 132 | struct thread_struct { |
133 | unsigned long sp; | 133 | unsigned long sp; |
134 | unsigned long pc; | 134 | unsigned long pc; |
135 | 135 | ||
136 | unsigned long trap_no, error_code; | 136 | unsigned long trap_no, error_code; |
137 | unsigned long address; | 137 | unsigned long address; |
138 | /* Hardware debugging registers may come here */ | 138 | /* Hardware debugging registers may come here */ |
139 | unsigned long ubc_pc; | 139 | unsigned long ubc_pc; |
140 | 140 | ||
141 | /* floating point info */ | 141 | /* floating point info */ |
142 | union sh_fpu_union fpu; | 142 | union sh_fpu_union fpu; |
143 | }; | 143 | }; |
144 | 144 | ||
145 | typedef struct { | 145 | typedef struct { |
146 | unsigned long seg; | 146 | unsigned long seg; |
147 | } mm_segment_t; | 147 | } mm_segment_t; |
148 | 148 | ||
149 | /* Count of active tasks with UBC settings */ | 149 | /* Count of active tasks with UBC settings */ |
150 | extern int ubc_usercnt; | 150 | extern int ubc_usercnt; |
151 | 151 | ||
152 | #define INIT_THREAD { \ | 152 | #define INIT_THREAD { \ |
153 | sizeof(init_stack) + (long) &init_stack, /* sp */ \ | 153 | sizeof(init_stack) + (long) &init_stack, /* sp */ \ |
154 | 0, /* pc */ \ | 154 | 0, /* pc */ \ |
155 | 0, 0, \ | 155 | 0, 0, \ |
156 | 0, \ | 156 | 0, \ |
157 | 0, \ | 157 | 0, \ |
158 | {{{0,}},} /* fpu state */ \ | 158 | {{{0,}},} /* fpu state */ \ |
159 | } | 159 | } |
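The positional initializer above is easy to misread. For reference only, the same values spelled with C99 designated initializers, so it is clear which thread_struct field each value lands in (sketch, not part of the header):

    #define INIT_THREAD_SKETCH {                                  \
        .sp         = sizeof(init_stack) + (long) &init_stack,    \
        .pc         = 0,                                          \
        .trap_no    = 0,                                          \
        .error_code = 0,                                          \
        .address    = 0,                                          \
        .ubc_pc     = 0,                                          \
        .fpu        = {{{0,}},}                                   \
    }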
160 | 160 | ||
161 | /* | 161 | /* |
162 | * Do necessary setup to start up a newly executed thread. | 162 | * Do necessary setup to start up a newly executed thread. |
163 | */ | 163 | */ |
164 | #define start_thread(regs, new_pc, new_sp) do { \ | 164 | #define start_thread(regs, new_pc, new_sp) do { \ |
165 | set_fs(USER_DS); \ | 165 | set_fs(USER_DS); \ |
166 | regs->pr = 0; \ | 166 | regs->pr = 0; \ |
167 | regs->sr = SR_FD; /* User mode. */ \ | 167 | regs->sr = SR_FD; /* User mode. */ \ |
168 | regs->pc = new_pc; \ | 168 | regs->pc = new_pc; \ |
169 | regs->regs[15] = new_sp; } while (0) | 169 | regs->regs[15] = new_sp; } while (0) |
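For context, the main caller of start_thread() is the generic ELF loader, which invokes it once the binary is mapped. Roughly the call shape (the names on the caller's side are cited from memory and should be treated as an assumption):

    /* Illustrative only: how fs/binfmt_elf.c enters user space, at the
     * ELF entry point, with the fresh stack and the FPU disabled. */
    start_thread(regs, elf_entry, bprm->p);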
170 | 170 | ||
171 | /* Forward declaration, a strange C thing */ | 171 | /* Forward declaration, a strange C thing */ |
172 | struct task_struct; | 172 | struct task_struct; |
173 | struct mm_struct; | 173 | struct mm_struct; |
174 | 174 | ||
175 | /* Free all resources held by a thread. */ | 175 | /* Free all resources held by a thread. */ |
176 | extern void release_thread(struct task_struct *); | 176 | extern void release_thread(struct task_struct *); |
177 | 177 | ||
178 | /* Prepare to copy thread state - unlazy all lazy state */ | 178 | /* Prepare to copy thread state - unlazy all lazy state */ |
179 | #define prepare_to_copy(tsk) do { } while (0) | 179 | #define prepare_to_copy(tsk) do { } while (0) |
180 | 180 | ||
181 | /* | 181 | /* |
182 | * create a kernel thread without removing it from tasklists | 182 | * create a kernel thread without removing it from tasklists |
183 | */ | 183 | */ |
184 | extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | 184 | extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); |
185 | 185 | ||
186 | /* Copy and release all segment info associated with a VM */ | 186 | /* Copy and release all segment info associated with a VM */ |
187 | #define copy_segments(p, mm) do { } while(0) | 187 | #define copy_segments(p, mm) do { } while(0) |
188 | #define release_segments(mm) do { } while(0) | 188 | #define release_segments(mm) do { } while(0) |
189 | 189 | ||
190 | /* | 190 | /* |
191 | * FPU lazy state save handling. | 191 | * FPU lazy state save handling. |
192 | */ | 192 | */ |
193 | 193 | ||
194 | static __inline__ void disable_fpu(void) | 194 | static __inline__ void disable_fpu(void) |
195 | { | 195 | { |
196 | unsigned long __dummy; | 196 | unsigned long __dummy; |
197 | 197 | ||
198 | /* Set FD flag in SR */ | 198 | /* Set FD flag in SR */ |
199 | __asm__ __volatile__("stc sr, %0\n\t" | 199 | __asm__ __volatile__("stc sr, %0\n\t" |
200 | "or %1, %0\n\t" | 200 | "or %1, %0\n\t" |
201 | "ldc %0, sr" | 201 | "ldc %0, sr" |
202 | : "=&r" (__dummy) | 202 | : "=&r" (__dummy) |
203 | : "r" (SR_FD)); | 203 | : "r" (SR_FD)); |
204 | } | 204 | } |
205 | 205 | ||
206 | static __inline__ void enable_fpu(void) | 206 | static __inline__ void enable_fpu(void) |
207 | { | 207 | { |
208 | unsigned long __dummy; | 208 | unsigned long __dummy; |
209 | 209 | ||
210 | /* Clear out FD flag in SR */ | 210 | /* Clear out FD flag in SR */ |
211 | __asm__ __volatile__("stc sr, %0\n\t" | 211 | __asm__ __volatile__("stc sr, %0\n\t" |
212 | "and %1, %0\n\t" | 212 | "and %1, %0\n\t" |
213 | "ldc %0, sr" | 213 | "ldc %0, sr" |
214 | : "=&r" (__dummy) | 214 | : "=&r" (__dummy) |
215 | : "r" (~SR_FD)); | 215 | : "r" (~SR_FD)); |
216 | } | 216 | } |
217 | 217 | ||
218 | static __inline__ void release_fpu(struct pt_regs *regs) | 218 | static __inline__ void release_fpu(struct pt_regs *regs) |
219 | { | 219 | { |
220 | regs->sr |= SR_FD; | 220 | regs->sr |= SR_FD; |
221 | } | 221 | } |
222 | 222 | ||
223 | static __inline__ void grab_fpu(struct pt_regs *regs) | 223 | static __inline__ void grab_fpu(struct pt_regs *regs) |
224 | { | 224 | { |
225 | regs->sr &= ~SR_FD; | 225 | regs->sr &= ~SR_FD; |
226 | } | 226 | } |
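Note the split: disable_fpu()/enable_fpu() flip the live SR, while release_fpu()/grab_fpu() edit the saved SR in pt_regs so the change takes effect when the task resumes. A sketch of the intended hand-off in an FPU-disable exception handler (handler shape and the omitted restore step are illustrative):

    /* Re-grant FPU access to a task that faulted on an FP insn, then
     * mark it as an FPU user so unlazy_fpu() will spill state later. */
    static void fpu_state_restore_sketch(struct task_struct *tsk,
                                         struct pt_regs *regs)
    {
        grab_fpu(regs);                 /* clear FD in the saved SR */
        /* reload tsk->thread.fpu into the registers (helper omitted) */
        set_tsk_thread_flag(tsk, TIF_USEDFPU);
    }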
227 | 227 | ||
228 | #ifdef CONFIG_CPU_SH4 | 228 | #ifdef CONFIG_CPU_SH4 |
229 | extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs); | 229 | extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs); |
230 | #else | 230 | #else |
231 | #define save_fpu(tsk, regs) do { } while (0) | 231 | #define save_fpu(tsk, regs) do { } while (0) |
232 | #endif | 232 | #endif |
233 | 233 | ||
234 | #define unlazy_fpu(tsk, regs) do { \ | 234 | #define unlazy_fpu(tsk, regs) do { \ |
235 | if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \ | 235 | if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \ |
236 | save_fpu(tsk, regs); \ | 236 | save_fpu(tsk, regs); \ |
237 | } \ | 237 | } \ |
238 | } while (0) | 238 | } while (0) |
239 | 239 | ||
240 | #define clear_fpu(tsk, regs) do { \ | 240 | #define clear_fpu(tsk, regs) do { \ |
241 | if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \ | 241 | if (test_tsk_thread_flag(tsk, TIF_USEDFPU)) { \ |
242 | clear_tsk_thread_flag(tsk, TIF_USEDFPU); \ | 242 | clear_tsk_thread_flag(tsk, TIF_USEDFPU); \ |
243 | release_fpu(regs); \ | 243 | release_fpu(regs); \ |
244 | } \ | 244 | } \ |
245 | } while (0) | 245 | } while (0) |
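Together these implement lazy FPU switching: only a task that actually executed FP instructions (and so set TIF_USEDFPU) pays the register-spill cost at switch time. A sketch of the switch-out path (function name is illustrative):

    /* Spill the outgoing task's live FP registers into its
     * thread_struct, but only if TIF_USEDFPU says they are dirty. */
    static void switch_out_fpu_sketch(struct task_struct *prev,
                                      struct pt_regs *regs)
    {
        unlazy_fpu(prev, regs);
    }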
246 | 246 | ||
247 | /* Double precision, NaNs kept as NaNs, round to nearest, no exceptions */ | 247 | /* Double precision, NaNs kept as NaNs, round to nearest, no exceptions */ |
248 | #define FPSCR_INIT 0x00080000 | 248 | #define FPSCR_INIT 0x00080000 |
249 | 249 | ||
250 | #define FPSCR_CAUSE_MASK 0x0001f000 /* Cause bits */ | 250 | #define FPSCR_CAUSE_MASK 0x0001f000 /* Cause bits */ |
251 | #define FPSCR_FLAG_MASK 0x0000007c /* Flag bits */ | 251 | #define FPSCR_FLAG_MASK 0x0000007c /* Flag bits */ |
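A worked decode of FPSCR_INIT, using the SH-4 bit layout (an assumption for other cores): 0x00080000 sets only bit 19, the PR (precision) bit, while RM (bits 1:0) stays 0 for round-to-nearest and the enable field (bits 11:7) stays clear, so no FP exceptions are delivered. A trivial predicate built on that:

    /* Test the PR bit of a saved fpscr value (bit 19 on SH-4). */
    static inline int fpscr_double_precision(unsigned long fpscr)
    {
        return (fpscr & 0x00080000) != 0;
    }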
252 | 252 | ||
253 | /* | 253 | /* |
254 | * Return saved PC of a blocked thread. | 254 | * Return saved PC of a blocked thread. |
255 | */ | 255 | */ |
256 | #define thread_saved_pc(tsk) (tsk->thread.pc) | 256 | #define thread_saved_pc(tsk) (tsk->thread.pc) |
257 | 257 | ||
258 | extern unsigned long get_wchan(struct task_struct *p); | 258 | extern unsigned long get_wchan(struct task_struct *p); |
259 | 259 | ||
260 | #define KSTK_EIP(tsk) ((tsk)->thread.pc) | 260 | #define KSTK_EIP(tsk) ((tsk)->thread.pc) |
261 | #define KSTK_ESP(tsk) ((tsk)->thread.sp) | 261 | #define KSTK_ESP(tsk) ((tsk)->thread.sp) |
262 | 262 | ||
263 | #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") | 263 | #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") |
264 | #define cpu_relax() barrier() | 264 | #define cpu_relax() barrier() |
265 | 265 | ||
266 | #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \ | 266 | #if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \ |
267 | defined(CONFIG_CPU_SH4) | 267 | defined(CONFIG_CPU_SH4) |
268 | #define PREFETCH_STRIDE L1_CACHE_BYTES | 268 | #define PREFETCH_STRIDE L1_CACHE_BYTES |
269 | #define ARCH_HAS_PREFETCH | 269 | #define ARCH_HAS_PREFETCH |
270 | #define ARCH_HAS_PREFETCHW | 270 | #define ARCH_HAS_PREFETCHW |
271 | static inline void prefetch(void *x) | 271 | static inline void prefetch(void *x) |
272 | { | 272 | { |
273 | __asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory"); | 273 | __asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory"); |
274 | } | 274 | } |
275 | 275 | ||
276 | #define prefetchw(x) prefetch(x) | 276 | #define prefetchw(x) prefetch(x) |
277 | #endif | 277 | #endif |
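A typical use of prefetch() is pointer chasing: hint the next node into the cache while the current one is being processed. A hypothetical list walk (the node layout is made up):

    struct pnode { struct pnode *next; int payload; };

    /* Sum a singly linked list, prefetching one node ahead; each
     * prefetch() emits a 'pref @Rn' on SH-3/SH-4 class cores. */
    static int sum_pnodes(struct pnode *n)
    {
        int sum = 0;

        while (n) {
            if (n->next)
                prefetch(n->next);
            sum += n->payload;
            n = n->next;
        }
        return sum;
    }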
278 | 278 | ||
279 | #ifdef CONFIG_VSYSCALL | ||
280 | extern int vsyscall_init(void); | ||
281 | #else | ||
282 | #define vsyscall_init() do { } while (0) | ||
283 | #endif | ||
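The empty stub exists so callers need no CONFIG_VSYSCALL guard of their own; judging by the diffstat, the real call site in this series is in arch/sh/mm/init.c (an assumption from the file list, not verified here):

    /* Unconditional call; compiles away when CONFIG_VSYSCALL is unset. */
    vsyscall_init();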
284 | |||
279 | #endif /* __KERNEL__ */ | 285 | #endif /* __KERNEL__ */ |
280 | #endif /* __ASM_SH_PROCESSOR_H */ | 286 | #endif /* __ASM_SH_PROCESSOR_H */ |
281 | 287 |