Commit 3b74d18e54e20fc1d398eb391bea5b9aed22aca5
Committed by Tony Luck
1 parent a07ee86205
Exists in master and in 7 other branches

[IA64] rename partial_page

Jens has added a partial_page thing in splice which conflicts with the
ia64 one.  Rename ia64 out of the way.  (ia64 chose poorly.)

Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>

Showing 6 changed files with 62 additions and 59 deletions
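For context, the conflict is an ordinary C identifier clash: splice and ia64 each defined a struct partial_page (and ia64 also exported partial_page_cachep), so the two definitions could not coexist in one build. A minimal sketch of the failure mode and of the fix follows; the headers and fields here are hypothetical stand-ins, not the real splice or ia64 sources:

    /* Illustrative only -- hypothetical headers, not the real splice/ia64 code. */

    /* What subsystem A (think fs/splice) defines: */
    struct partial_page {
            unsigned int offset;
            unsigned int len;
    };

    /*
     * If subsystem B (think arch/ia64) also defines "struct partial_page",
     * any translation unit that sees both headers fails with a struct
     * redefinition error, and a second global such as partial_page_cachep
     * collides at link time.  Prefixing the arch-private names, as this
     * commit does, lets both coexist:
     */
    struct ia64_partial_page {
            struct ia64_partial_page *next;
            unsigned long bitmap;
            unsigned int base;
    };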
arch/ia64/ia32/ia32_support.c
1 | /* | 1 | /* |
2 | * IA32 helper functions | 2 | * IA32 helper functions |
3 | * | 3 | * |
4 | * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> | 4 | * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> |
5 | * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com> | 5 | * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com> |
6 | * Copyright (C) 2001-2002 Hewlett-Packard Co | 6 | * Copyright (C) 2001-2002 Hewlett-Packard Co |
7 | * David Mosberger-Tang <davidm@hpl.hp.com> | 7 | * David Mosberger-Tang <davidm@hpl.hp.com> |
8 | * | 8 | * |
9 | * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 thread context | 9 | * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 thread context |
10 | * 02/19/01 D. Mosberger dropped tssd; it's not needed | 10 | * 02/19/01 D. Mosberger dropped tssd; it's not needed |
11 | * 09/14/01 D. Mosberger fixed memory management for gdt/tss page | 11 | * 09/14/01 D. Mosberger fixed memory management for gdt/tss page |
12 | * 09/29/01 D. Mosberger added ia32_load_segment_descriptors() | 12 | * 09/29/01 D. Mosberger added ia32_load_segment_descriptors() |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/personality.h> | 18 | #include <linux/personality.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | 20 | ||
21 | #include <asm/intrinsics.h> | 21 | #include <asm/intrinsics.h> |
22 | #include <asm/page.h> | 22 | #include <asm/page.h> |
23 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
24 | #include <asm/system.h> | 24 | #include <asm/system.h> |
25 | #include <asm/processor.h> | 25 | #include <asm/processor.h> |
26 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
27 | 27 | ||
28 | #include "ia32priv.h" | 28 | #include "ia32priv.h" |
29 | 29 | ||
30 | extern void die_if_kernel (char *str, struct pt_regs *regs, long err); | 30 | extern void die_if_kernel (char *str, struct pt_regs *regs, long err); |
31 | 31 | ||
32 | struct exec_domain ia32_exec_domain; | 32 | struct exec_domain ia32_exec_domain; |
33 | struct page *ia32_shared_page[NR_CPUS]; | 33 | struct page *ia32_shared_page[NR_CPUS]; |
34 | unsigned long *ia32_boot_gdt; | 34 | unsigned long *ia32_boot_gdt; |
35 | unsigned long *cpu_gdt_table[NR_CPUS]; | 35 | unsigned long *cpu_gdt_table[NR_CPUS]; |
36 | struct page *ia32_gate_page; | 36 | struct page *ia32_gate_page; |
37 | 37 | ||
38 | static unsigned long | 38 | static unsigned long |
39 | load_desc (u16 selector) | 39 | load_desc (u16 selector) |
40 | { | 40 | { |
41 | unsigned long *table, limit, index; | 41 | unsigned long *table, limit, index; |
42 | 42 | ||
43 | if (!selector) | 43 | if (!selector) |
44 | return 0; | 44 | return 0; |
45 | if (selector & IA32_SEGSEL_TI) { | 45 | if (selector & IA32_SEGSEL_TI) { |
46 | table = (unsigned long *) IA32_LDT_OFFSET; | 46 | table = (unsigned long *) IA32_LDT_OFFSET; |
47 | limit = IA32_LDT_ENTRIES; | 47 | limit = IA32_LDT_ENTRIES; |
48 | } else { | 48 | } else { |
49 | table = cpu_gdt_table[smp_processor_id()]; | 49 | table = cpu_gdt_table[smp_processor_id()]; |
50 | limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]); | 50 | limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]); |
51 | } | 51 | } |
52 | index = selector >> IA32_SEGSEL_INDEX_SHIFT; | 52 | index = selector >> IA32_SEGSEL_INDEX_SHIFT; |
53 | if (index >= limit) | 53 | if (index >= limit) |
54 | return 0; | 54 | return 0; |
55 | return IA32_SEG_UNSCRAMBLE(table[index]); | 55 | return IA32_SEG_UNSCRAMBLE(table[index]); |
56 | } | 56 | } |
57 | 57 | ||
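load_desc() above splits an IA-32 segment selector into its three fields before indexing the GDT or LDT. A small userspace sketch of that decoding, using the IA32_SEGSEL_* constants defined later in ia32priv.h; the selector value is just an example:

    #include <stdio.h>

    #define IA32_SEGSEL_RPL         (0x3 << 0)   /* requested privilege level */
    #define IA32_SEGSEL_TI          (0x1 << 2)   /* table indicator: 0 = GDT, 1 = LDT */
    #define IA32_SEGSEL_INDEX_SHIFT 3

    int main(void)
    {
            unsigned short selector = 0x2B;      /* __USER_DS from ia32priv.h */

            printf("rpl=%u table=%s index=%u\n",
                   (unsigned) (selector & IA32_SEGSEL_RPL),
                   (selector & IA32_SEGSEL_TI) ? "LDT" : "GDT",
                   (unsigned) (selector >> IA32_SEGSEL_INDEX_SHIFT));
            /* prints: rpl=3 table=GDT index=5 */
            return 0;
    }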
58 | void | 58 | void |
59 | ia32_load_segment_descriptors (struct task_struct *task) | 59 | ia32_load_segment_descriptors (struct task_struct *task) |
60 | { | 60 | { |
61 | struct pt_regs *regs = task_pt_regs(task); | 61 | struct pt_regs *regs = task_pt_regs(task); |
62 | 62 | ||
63 | /* Setup the segment descriptors */ | 63 | /* Setup the segment descriptors */ |
64 | regs->r24 = load_desc(regs->r16 >> 16); /* ESD */ | 64 | regs->r24 = load_desc(regs->r16 >> 16); /* ESD */ |
65 | regs->r27 = load_desc(regs->r16 >> 0); /* DSD */ | 65 | regs->r27 = load_desc(regs->r16 >> 0); /* DSD */ |
66 | regs->r28 = load_desc(regs->r16 >> 32); /* FSD */ | 66 | regs->r28 = load_desc(regs->r16 >> 32); /* FSD */ |
67 | regs->r29 = load_desc(regs->r16 >> 48); /* GSD */ | 67 | regs->r29 = load_desc(regs->r16 >> 48); /* GSD */ |
68 | regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */ | 68 | regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */ |
69 | regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */ | 69 | regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */ |
70 | } | 70 | } |
71 | 71 | ||
72 | int | 72 | int |
73 | ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs) | 73 | ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs) |
74 | { | 74 | { |
75 | struct desc_struct *desc; | 75 | struct desc_struct *desc; |
76 | struct ia32_user_desc info; | 76 | struct ia32_user_desc info; |
77 | int idx; | 77 | int idx; |
78 | 78 | ||
79 | if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info))) | 79 | if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info))) |
80 | return -EFAULT; | 80 | return -EFAULT; |
81 | if (LDT_empty(&info)) | 81 | if (LDT_empty(&info)) |
82 | return -EINVAL; | 82 | return -EINVAL; |
83 | 83 | ||
84 | idx = info.entry_number; | 84 | idx = info.entry_number; |
85 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | 85 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) |
86 | return -EINVAL; | 86 | return -EINVAL; |
87 | 87 | ||
88 | desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; | 88 | desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; |
89 | desc->a = LDT_entry_a(&info); | 89 | desc->a = LDT_entry_a(&info); |
90 | desc->b = LDT_entry_b(&info); | 90 | desc->b = LDT_entry_b(&info); |
91 | 91 | ||
92 | /* XXX: can this be done in a cleaner way ? */ | 92 | /* XXX: can this be done in a cleaner way ? */ |
93 | load_TLS(&child->thread, smp_processor_id()); | 93 | load_TLS(&child->thread, smp_processor_id()); |
94 | ia32_load_segment_descriptors(child); | 94 | ia32_load_segment_descriptors(child); |
95 | load_TLS(&current->thread, smp_processor_id()); | 95 | load_TLS(&current->thread, smp_processor_id()); |
96 | 96 | ||
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
99 | 99 | ||
100 | void | 100 | void |
101 | ia32_save_state (struct task_struct *t) | 101 | ia32_save_state (struct task_struct *t) |
102 | { | 102 | { |
103 | t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG); | 103 | t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG); |
104 | t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR); | 104 | t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR); |
105 | t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR); | 105 | t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR); |
106 | t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR); | 106 | t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR); |
107 | t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR); | 107 | t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR); |
108 | ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob); | 108 | ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob); |
109 | ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1); | 109 | ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1); |
110 | } | 110 | } |
111 | 111 | ||
112 | void | 112 | void |
113 | ia32_load_state (struct task_struct *t) | 113 | ia32_load_state (struct task_struct *t) |
114 | { | 114 | { |
115 | unsigned long eflag, fsr, fcr, fir, fdr, tssd; | 115 | unsigned long eflag, fsr, fcr, fir, fdr, tssd; |
116 | struct pt_regs *regs = task_pt_regs(t); | 116 | struct pt_regs *regs = task_pt_regs(t); |
117 | 117 | ||
118 | eflag = t->thread.eflag; | 118 | eflag = t->thread.eflag; |
119 | fsr = t->thread.fsr; | 119 | fsr = t->thread.fsr; |
120 | fcr = t->thread.fcr; | 120 | fcr = t->thread.fcr; |
121 | fir = t->thread.fir; | 121 | fir = t->thread.fir; |
122 | fdr = t->thread.fdr; | 122 | fdr = t->thread.fdr; |
123 | tssd = load_desc(_TSS); /* TSSD */ | 123 | tssd = load_desc(_TSS); /* TSSD */ |
124 | 124 | ||
125 | ia64_setreg(_IA64_REG_AR_EFLAG, eflag); | 125 | ia64_setreg(_IA64_REG_AR_EFLAG, eflag); |
126 | ia64_setreg(_IA64_REG_AR_FSR, fsr); | 126 | ia64_setreg(_IA64_REG_AR_FSR, fsr); |
127 | ia64_setreg(_IA64_REG_AR_FCR, fcr); | 127 | ia64_setreg(_IA64_REG_AR_FCR, fcr); |
128 | ia64_setreg(_IA64_REG_AR_FIR, fir); | 128 | ia64_setreg(_IA64_REG_AR_FIR, fir); |
129 | ia64_setreg(_IA64_REG_AR_FDR, fdr); | 129 | ia64_setreg(_IA64_REG_AR_FDR, fdr); |
130 | current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE); | 130 | current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE); |
131 | current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD); | 131 | current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD); |
132 | ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE); | 132 | ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE); |
133 | ia64_set_kr(IA64_KR_TSSD, tssd); | 133 | ia64_set_kr(IA64_KR_TSSD, tssd); |
134 | 134 | ||
135 | regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17; | 135 | regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17; |
136 | regs->r30 = load_desc(_LDT); /* LDTD */ | 136 | regs->r30 = load_desc(_LDT); /* LDTD */ |
137 | load_TLS(&t->thread, smp_processor_id()); | 137 | load_TLS(&t->thread, smp_processor_id()); |
138 | } | 138 | } |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * Setup IA32 GDT and TSS | 141 | * Setup IA32 GDT and TSS |
142 | */ | 142 | */ |
143 | void | 143 | void |
144 | ia32_gdt_init (void) | 144 | ia32_gdt_init (void) |
145 | { | 145 | { |
146 | int cpu = smp_processor_id(); | 146 | int cpu = smp_processor_id(); |
147 | 147 | ||
148 | ia32_shared_page[cpu] = alloc_page(GFP_KERNEL); | 148 | ia32_shared_page[cpu] = alloc_page(GFP_KERNEL); |
149 | if (!ia32_shared_page[cpu]) | 149 | if (!ia32_shared_page[cpu]) |
150 | panic("failed to allocate ia32_shared_page[%d]\n", cpu); | 150 | panic("failed to allocate ia32_shared_page[%d]\n", cpu); |
151 | 151 | ||
152 | cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]); | 152 | cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]); |
153 | 153 | ||
154 | /* Copy from the boot cpu's GDT */ | 154 | /* Copy from the boot cpu's GDT */ |
155 | memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE); | 155 | memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE); |
156 | } | 156 | } |
157 | 157 | ||
158 | 158 | ||
159 | /* | 159 | /* |
160 | * Setup IA32 GDT and TSS | 160 | * Setup IA32 GDT and TSS |
161 | */ | 161 | */ |
162 | static void | 162 | static void |
163 | ia32_boot_gdt_init (void) | 163 | ia32_boot_gdt_init (void) |
164 | { | 164 | { |
165 | unsigned long ldt_size; | 165 | unsigned long ldt_size; |
166 | 166 | ||
167 | ia32_shared_page[0] = alloc_page(GFP_KERNEL); | 167 | ia32_shared_page[0] = alloc_page(GFP_KERNEL); |
168 | if (!ia32_shared_page[0]) | 168 | if (!ia32_shared_page[0]) |
169 | panic("failed to allocate ia32_shared_page[0]\n"); | 169 | panic("failed to allocate ia32_shared_page[0]\n"); |
170 | 170 | ||
171 | ia32_boot_gdt = page_address(ia32_shared_page[0]); | 171 | ia32_boot_gdt = page_address(ia32_shared_page[0]); |
172 | cpu_gdt_table[0] = ia32_boot_gdt; | 172 | cpu_gdt_table[0] = ia32_boot_gdt; |
173 | 173 | ||
174 | /* CS descriptor in IA-32 (scrambled) format */ | 174 | /* CS descriptor in IA-32 (scrambled) format */ |
175 | ia32_boot_gdt[__USER_CS >> 3] | 175 | ia32_boot_gdt[__USER_CS >> 3] |
176 | = IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT, | 176 | = IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT, |
177 | 0xb, 1, 3, 1, 1, 1, 1); | 177 | 0xb, 1, 3, 1, 1, 1, 1); |
178 | 178 | ||
179 | /* DS descriptor in IA-32 (scrambled) format */ | 179 | /* DS descriptor in IA-32 (scrambled) format */ |
180 | ia32_boot_gdt[__USER_DS >> 3] | 180 | ia32_boot_gdt[__USER_DS >> 3] |
181 | = IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT, | 181 | = IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT, |
182 | 0x3, 1, 3, 1, 1, 1, 1); | 182 | 0x3, 1, 3, 1, 1, 1, 1); |
183 | 183 | ||
184 | ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE); | 184 | ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE); |
185 | ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235, | 185 | ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235, |
186 | 0xb, 0, 3, 1, 1, 1, 0); | 186 | 0xb, 0, 3, 1, 1, 1, 0); |
187 | ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1, | 187 | ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1, |
188 | 0x2, 0, 3, 1, 1, 1, 0); | 188 | 0x2, 0, 3, 1, 1, 1, 0); |
189 | } | 189 | } |
190 | 190 | ||
191 | static void | 191 | static void |
192 | ia32_gate_page_init(void) | 192 | ia32_gate_page_init(void) |
193 | { | 193 | { |
194 | unsigned long *sr; | 194 | unsigned long *sr; |
195 | 195 | ||
196 | ia32_gate_page = alloc_page(GFP_KERNEL); | 196 | ia32_gate_page = alloc_page(GFP_KERNEL); |
197 | sr = page_address(ia32_gate_page); | 197 | sr = page_address(ia32_gate_page); |
198 | /* This is popl %eax ; movl $,%eax ; int $0x80 */ | 198 | /* This is popl %eax ; movl $,%eax ; int $0x80 */ |
199 | *sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48); | 199 | *sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48); |
200 | 200 | ||
201 | /* This is movl $,%eax ; int $0x80 */ | 201 | /* This is movl $,%eax ; int $0x80 */ |
202 | *sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40); | 202 | *sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40); |
203 | } | 203 | } |
204 | 204 | ||
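The two stores above pack an entire IA-32 sigreturn trampoline into 64-bit words. A userspace sketch that decodes the first word, assuming a little-endian LP64 host (as on ia64 running IA-32 code); 119 is __IA32_NR_sigreturn from ia32priv.h:

    #include <stdio.h>

    int main(void)
    {
            unsigned long nr = 119;  /* __IA32_NR_sigreturn */
            unsigned long word = 0xb858 | (nr << 16) | (0x80cdUL << 48);
            unsigned char *b = (unsigned char *) &word;
            int i;

            /*
             * Expected little-endian bytes:
             *   58              popl %eax
             *   b8 77 00 00 00  movl $119, %eax
             *   cd 80           int  $0x80
             */
            for (i = 0; i < 8; i++)
                    printf("%02x ", b[i]);
            printf("\n");   /* prints: 58 b8 77 00 00 00 cd 80 */
            return 0;
    }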
205 | void | 205 | void |
206 | ia32_mem_init(void) | 206 | ia32_mem_init(void) |
207 | { | 207 | { |
208 | ia32_boot_gdt_init(); | 208 | ia32_boot_gdt_init(); |
209 | ia32_gate_page_init(); | 209 | ia32_gate_page_init(); |
210 | } | 210 | } |
211 | 211 | ||
212 | /* | 212 | /* |
213 | * Handle bad IA32 interrupt via syscall | 213 | * Handle bad IA32 interrupt via syscall |
214 | */ | 214 | */ |
215 | void | 215 | void |
216 | ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs) | 216 | ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs) |
217 | { | 217 | { |
218 | siginfo_t siginfo; | 218 | siginfo_t siginfo; |
219 | 219 | ||
220 | die_if_kernel("Bad IA-32 interrupt", regs, int_num); | 220 | die_if_kernel("Bad IA-32 interrupt", regs, int_num); |
221 | 221 | ||
222 | siginfo.si_signo = SIGTRAP; | 222 | siginfo.si_signo = SIGTRAP; |
223 | siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? */ | 223 | siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? */ |
224 | siginfo.si_flags = 0; | 224 | siginfo.si_flags = 0; |
225 | siginfo.si_isr = 0; | 225 | siginfo.si_isr = 0; |
226 | siginfo.si_addr = NULL; | 226 | siginfo.si_addr = NULL; |
227 | siginfo.si_imm = 0; | 227 | siginfo.si_imm = 0; |
228 | siginfo.si_code = TRAP_BRKPT; | 228 | siginfo.si_code = TRAP_BRKPT; |
229 | force_sig_info(SIGTRAP, &siginfo, current); | 229 | force_sig_info(SIGTRAP, &siginfo, current); |
230 | } | 230 | } |
231 | 231 | ||
232 | void | 232 | void |
233 | ia32_cpu_init (void) | 233 | ia32_cpu_init (void) |
234 | { | 234 | { |
235 | /* initialize global ia32 state - CR0 and CR4 */ | 235 | /* initialize global ia32 state - CR0 and CR4 */ |
236 | ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0)); | 236 | ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0)); |
237 | } | 237 | } |
238 | 238 | ||
239 | static int __init | 239 | static int __init |
240 | ia32_init (void) | 240 | ia32_init (void) |
241 | { | 241 | { |
242 | ia32_exec_domain.name = "Linux/x86"; | 242 | ia32_exec_domain.name = "Linux/x86"; |
243 | ia32_exec_domain.handler = NULL; | 243 | ia32_exec_domain.handler = NULL; |
244 | ia32_exec_domain.pers_low = PER_LINUX32; | 244 | ia32_exec_domain.pers_low = PER_LINUX32; |
245 | ia32_exec_domain.pers_high = PER_LINUX32; | 245 | ia32_exec_domain.pers_high = PER_LINUX32; |
246 | ia32_exec_domain.signal_map = default_exec_domain.signal_map; | 246 | ia32_exec_domain.signal_map = default_exec_domain.signal_map; |
247 | ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap; | 247 | ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap; |
248 | register_exec_domain(&ia32_exec_domain); | 248 | register_exec_domain(&ia32_exec_domain); |
249 | 249 | ||
250 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | 250 | #if PAGE_SHIFT > IA32_PAGE_SHIFT |
251 | { | 251 | { |
252 | extern struct kmem_cache *partial_page_cachep; | 252 | extern struct kmem_cache *ia64_partial_page_cachep; |
253 | 253 | ||
254 | partial_page_cachep = kmem_cache_create("partial_page_cache", | 254 | ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache", |
255 | sizeof(struct partial_page), | 255 | sizeof(struct ia64_partial_page), |
256 | 0, SLAB_PANIC, NULL); | 256 | 0, SLAB_PANIC, NULL); |
257 | } | 257 | } |
258 | #endif | 258 | #endif |
259 | return 0; | 259 | return 0; |
260 | } | 260 | } |
261 | 261 | ||
262 | __initcall(ia32_init); | 262 | __initcall(ia32_init); |
263 | 263 |
arch/ia64/ia32/ia32priv.h
1 | #ifndef _ASM_IA64_IA32_PRIV_H | 1 | #ifndef _ASM_IA64_IA32_PRIV_H |
2 | #define _ASM_IA64_IA32_PRIV_H | 2 | #define _ASM_IA64_IA32_PRIV_H |
3 | 3 | ||
4 | 4 | ||
5 | #include <asm/ia32.h> | 5 | #include <asm/ia32.h> |
6 | 6 | ||
7 | #ifdef CONFIG_IA32_SUPPORT | 7 | #ifdef CONFIG_IA32_SUPPORT |
8 | 8 | ||
9 | #include <linux/binfmts.h> | 9 | #include <linux/binfmts.h> |
10 | #include <linux/compat.h> | 10 | #include <linux/compat.h> |
11 | #include <linux/rbtree.h> | 11 | #include <linux/rbtree.h> |
12 | 12 | ||
13 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * 32 bit structures for IA32 support. | 16 | * 32 bit structures for IA32 support. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #define IA32_PAGE_SIZE (1UL << IA32_PAGE_SHIFT) | 19 | #define IA32_PAGE_SIZE (1UL << IA32_PAGE_SHIFT) |
20 | #define IA32_PAGE_MASK (~(IA32_PAGE_SIZE - 1)) | 20 | #define IA32_PAGE_MASK (~(IA32_PAGE_SIZE - 1)) |
21 | #define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK) | 21 | #define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK) |
22 | #define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */ | 22 | #define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */ |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * partially mapped pages provide precise accounting of which 4k sub pages | 25 | * partially mapped pages provide precise accounting of which 4k sub pages |
26 | * are mapped and which ones are not, thereby improving IA-32 compatibility. | 26 | * are mapped and which ones are not, thereby improving IA-32 compatibility. |
27 | */ | 27 | */ |
28 | struct partial_page { | 28 | struct ia64_partial_page { |
29 | struct partial_page *next; /* linked list, sorted by address */ | 29 | struct ia64_partial_page *next; /* linked list, sorted by address */ |
30 | struct rb_node pp_rb; | 30 | struct rb_node pp_rb; |
31 | /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64 | 31 | /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64 |
32 | * should suffice.*/ | 32 | * should suffice.*/ |
33 | unsigned long bitmap; | 33 | unsigned long bitmap; |
34 | unsigned int base; | 34 | unsigned int base; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | struct partial_page_list { | 37 | struct ia64_partial_page_list { |
38 | struct partial_page *pp_head; /* list head, points to the lowest | 38 | struct ia64_partial_page *pp_head; /* list head, points to the lowest |
39 | * addressed partial page */ | 39 | * addressed partial page */ |
40 | struct rb_root ppl_rb; | 40 | struct rb_root ppl_rb; |
41 | struct partial_page *pp_hint; /* pp_hint->next is the last | 41 | struct ia64_partial_page *pp_hint; /* pp_hint->next is the last |
42 | * accessed partial page */ | 42 | * accessed partial page */ |
43 | atomic_t pp_count; /* reference count */ | 43 | atomic_t pp_count; /* reference count */ |
44 | }; | 44 | }; |
45 | 45 | ||
46 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | 46 | #if PAGE_SHIFT > IA32_PAGE_SHIFT |
47 | struct partial_page_list* ia32_init_pp_list (void); | 47 | struct ia64_partial_page_list* ia32_init_pp_list (void); |
48 | #else | 48 | #else |
49 | # define ia32_init_pp_list() 0 | 49 | # define ia32_init_pp_list() 0 |
50 | #endif | 50 | #endif |
51 | 51 | ||
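The comment above describes one bit per 4K sub-page of a larger kernel page. A userspace sketch of that accounting, assuming a 64K kernel page (the largest "normal" ia64 page, so 16 of the 64 bitmap bits are in use); the offsets are arbitrary examples, and the kernel additionally keys these structures in an rb-tree:

    #include <stdio.h>

    #define IA32_PAGE_SHIFT   12   /* 4K IA-32 pages */
    #define KERNEL_PAGE_SHIFT 16   /* assume a 64K ia64 kernel page */
    #define SUB_PAGES         (1u << (KERNEL_PAGE_SHIFT - IA32_PAGE_SHIFT))

    int main(void)
    {
            unsigned long bitmap = 0;
            unsigned int i;

            /* pretend IA-32 code mapped 4K pages at offsets 0x1000 and
             * 0x3000 inside one 64K kernel page: */
            bitmap |= 1UL << (0x1000 >> IA32_PAGE_SHIFT);
            bitmap |= 1UL << (0x3000 >> IA32_PAGE_SHIFT);

            for (i = 0; i < SUB_PAGES; i++)
                    printf("sub-page %2u: %s\n", i,
                           (bitmap & (1UL << i)) ? "mapped" : "unmapped");
            return 0;
    }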
52 | /* sigcontext.h */ | 52 | /* sigcontext.h */ |
53 | /* | 53 | /* |
54 | * As documented in the iBCS2 standard.. | 54 | * As documented in the iBCS2 standard.. |
55 | * | 55 | * |
56 | * The first part of "struct _fpstate" is just the | 56 | * The first part of "struct _fpstate" is just the |
57 | * normal i387 hardware setup, the extra "status" | 57 | * normal i387 hardware setup, the extra "status" |
58 | * word is used to save the coprocessor status word | 58 | * word is used to save the coprocessor status word |
59 | * before entering the handler. | 59 | * before entering the handler. |
60 | */ | 60 | */ |
61 | struct _fpreg_ia32 { | 61 | struct _fpreg_ia32 { |
62 | unsigned short significand[4]; | 62 | unsigned short significand[4]; |
63 | unsigned short exponent; | 63 | unsigned short exponent; |
64 | }; | 64 | }; |
65 | 65 | ||
66 | struct _fpxreg_ia32 { | 66 | struct _fpxreg_ia32 { |
67 | unsigned short significand[4]; | 67 | unsigned short significand[4]; |
68 | unsigned short exponent; | 68 | unsigned short exponent; |
69 | unsigned short padding[3]; | 69 | unsigned short padding[3]; |
70 | }; | 70 | }; |
71 | 71 | ||
72 | struct _xmmreg_ia32 { | 72 | struct _xmmreg_ia32 { |
73 | unsigned int element[4]; | 73 | unsigned int element[4]; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | 76 | ||
77 | struct _fpstate_ia32 { | 77 | struct _fpstate_ia32 { |
78 | unsigned int cw, | 78 | unsigned int cw, |
79 | sw, | 79 | sw, |
80 | tag, | 80 | tag, |
81 | ipoff, | 81 | ipoff, |
82 | cssel, | 82 | cssel, |
83 | dataoff, | 83 | dataoff, |
84 | datasel; | 84 | datasel; |
85 | struct _fpreg_ia32 _st[8]; | 85 | struct _fpreg_ia32 _st[8]; |
86 | unsigned short status; | 86 | unsigned short status; |
87 | unsigned short magic; /* 0xffff = regular FPU data only */ | 87 | unsigned short magic; /* 0xffff = regular FPU data only */ |
88 | 88 | ||
89 | /* FXSR FPU environment */ | 89 | /* FXSR FPU environment */ |
90 | unsigned int _fxsr_env[6]; /* FXSR FPU env is ignored */ | 90 | unsigned int _fxsr_env[6]; /* FXSR FPU env is ignored */ |
91 | unsigned int mxcsr; | 91 | unsigned int mxcsr; |
92 | unsigned int reserved; | 92 | unsigned int reserved; |
93 | struct _fpxreg_ia32 _fxsr_st[8]; /* FXSR FPU reg data is ignored */ | 93 | struct _fpxreg_ia32 _fxsr_st[8]; /* FXSR FPU reg data is ignored */ |
94 | struct _xmmreg_ia32 _xmm[8]; | 94 | struct _xmmreg_ia32 _xmm[8]; |
95 | unsigned int padding[56]; | 95 | unsigned int padding[56]; |
96 | }; | 96 | }; |
97 | 97 | ||
98 | struct sigcontext_ia32 { | 98 | struct sigcontext_ia32 { |
99 | unsigned short gs, __gsh; | 99 | unsigned short gs, __gsh; |
100 | unsigned short fs, __fsh; | 100 | unsigned short fs, __fsh; |
101 | unsigned short es, __esh; | 101 | unsigned short es, __esh; |
102 | unsigned short ds, __dsh; | 102 | unsigned short ds, __dsh; |
103 | unsigned int edi; | 103 | unsigned int edi; |
104 | unsigned int esi; | 104 | unsigned int esi; |
105 | unsigned int ebp; | 105 | unsigned int ebp; |
106 | unsigned int esp; | 106 | unsigned int esp; |
107 | unsigned int ebx; | 107 | unsigned int ebx; |
108 | unsigned int edx; | 108 | unsigned int edx; |
109 | unsigned int ecx; | 109 | unsigned int ecx; |
110 | unsigned int eax; | 110 | unsigned int eax; |
111 | unsigned int trapno; | 111 | unsigned int trapno; |
112 | unsigned int err; | 112 | unsigned int err; |
113 | unsigned int eip; | 113 | unsigned int eip; |
114 | unsigned short cs, __csh; | 114 | unsigned short cs, __csh; |
115 | unsigned int eflags; | 115 | unsigned int eflags; |
116 | unsigned int esp_at_signal; | 116 | unsigned int esp_at_signal; |
117 | unsigned short ss, __ssh; | 117 | unsigned short ss, __ssh; |
118 | unsigned int fpstate; /* really (struct _fpstate_ia32 *) */ | 118 | unsigned int fpstate; /* really (struct _fpstate_ia32 *) */ |
119 | unsigned int oldmask; | 119 | unsigned int oldmask; |
120 | unsigned int cr2; | 120 | unsigned int cr2; |
121 | }; | 121 | }; |
122 | 122 | ||
123 | /* user.h */ | 123 | /* user.h */ |
124 | /* | 124 | /* |
125 | * IA32 (Pentium III/4) FXSR, SSE support | 125 | * IA32 (Pentium III/4) FXSR, SSE support |
126 | * | 126 | * |
127 | * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for | 127 | * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for |
128 | * interacting with the FXSR-format floating point environment. Floating | 128 | * interacting with the FXSR-format floating point environment. Floating |
129 | * point data can be accessed in the regular format in the usual manner, | 129 | * point data can be accessed in the regular format in the usual manner, |
130 | * and both the standard and SIMD floating point data can be accessed via | 130 | * and both the standard and SIMD floating point data can be accessed via |
131 | * the new ptrace requests. In either case, changes to the FPU environment | 131 | * the new ptrace requests. In either case, changes to the FPU environment |
132 | * will be reflected in the task's state as expected. | 132 | * will be reflected in the task's state as expected. |
133 | */ | 133 | */ |
134 | struct ia32_user_i387_struct { | 134 | struct ia32_user_i387_struct { |
135 | int cwd; | 135 | int cwd; |
136 | int swd; | 136 | int swd; |
137 | int twd; | 137 | int twd; |
138 | int fip; | 138 | int fip; |
139 | int fcs; | 139 | int fcs; |
140 | int foo; | 140 | int foo; |
141 | int fos; | 141 | int fos; |
142 | /* 8*10 bytes for each FP-reg = 80 bytes */ | 142 | /* 8*10 bytes for each FP-reg = 80 bytes */ |
143 | struct _fpreg_ia32 st_space[8]; | 143 | struct _fpreg_ia32 st_space[8]; |
144 | }; | 144 | }; |
145 | 145 | ||
146 | struct ia32_user_fxsr_struct { | 146 | struct ia32_user_fxsr_struct { |
147 | unsigned short cwd; | 147 | unsigned short cwd; |
148 | unsigned short swd; | 148 | unsigned short swd; |
149 | unsigned short twd; | 149 | unsigned short twd; |
150 | unsigned short fop; | 150 | unsigned short fop; |
151 | int fip; | 151 | int fip; |
152 | int fcs; | 152 | int fcs; |
153 | int foo; | 153 | int foo; |
154 | int fos; | 154 | int fos; |
155 | int mxcsr; | 155 | int mxcsr; |
156 | int reserved; | 156 | int reserved; |
157 | int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ | 157 | int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */ |
158 | int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ | 158 | int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */ |
159 | int padding[56]; | 159 | int padding[56]; |
160 | }; | 160 | }; |
161 | 161 | ||
162 | /* signal.h */ | 162 | /* signal.h */ |
163 | #define IA32_SET_SA_HANDLER(ka,handler,restorer) \ | 163 | #define IA32_SET_SA_HANDLER(ka,handler,restorer) \ |
164 | ((ka)->sa.sa_handler = (__sighandler_t) \ | 164 | ((ka)->sa.sa_handler = (__sighandler_t) \ |
165 | (((unsigned long)(restorer) << 32) \ | 165 | (((unsigned long)(restorer) << 32) \ |
166 | | ((handler) & 0xffffffff))) | 166 | | ((handler) & 0xffffffff))) |
167 | #define IA32_SA_HANDLER(ka) ((unsigned long) (ka)->sa.sa_handler & 0xffffffff) | 167 | #define IA32_SA_HANDLER(ka) ((unsigned long) (ka)->sa.sa_handler & 0xffffffff) |
168 | #define IA32_SA_RESTORER(ka) ((unsigned long) (ka)->sa.sa_handler >> 32) | 168 | #define IA32_SA_RESTORER(ka) ((unsigned long) (ka)->sa.sa_handler >> 32) |
169 | 169 | ||
170 | #define __IA32_NR_sigreturn 119 | 170 | #define __IA32_NR_sigreturn 119 |
171 | #define __IA32_NR_rt_sigreturn 173 | 171 | #define __IA32_NR_rt_sigreturn 173 |
172 | 172 | ||
173 | struct sigaction32 { | 173 | struct sigaction32 { |
174 | unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */ | 174 | unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */ |
175 | unsigned int sa_flags; | 175 | unsigned int sa_flags; |
176 | unsigned int sa_restorer; /* Another 32 bit pointer */ | 176 | unsigned int sa_restorer; /* Another 32 bit pointer */ |
177 | compat_sigset_t sa_mask; /* A 32 bit mask */ | 177 | compat_sigset_t sa_mask; /* A 32 bit mask */ |
178 | }; | 178 | }; |
179 | 179 | ||
180 | struct old_sigaction32 { | 180 | struct old_sigaction32 { |
181 | unsigned int sa_handler; /* Really a pointer, but need to deal | 181 | unsigned int sa_handler; /* Really a pointer, but need to deal |
182 | with 32 bits */ | 182 | with 32 bits */ |
183 | compat_old_sigset_t sa_mask; /* A 32 bit mask */ | 183 | compat_old_sigset_t sa_mask; /* A 32 bit mask */ |
184 | unsigned int sa_flags; | 184 | unsigned int sa_flags; |
185 | unsigned int sa_restorer; /* Another 32 bit pointer */ | 185 | unsigned int sa_restorer; /* Another 32 bit pointer */ |
186 | }; | 186 | }; |
187 | 187 | ||
188 | typedef struct sigaltstack_ia32 { | 188 | typedef struct sigaltstack_ia32 { |
189 | unsigned int ss_sp; | 189 | unsigned int ss_sp; |
190 | int ss_flags; | 190 | int ss_flags; |
191 | unsigned int ss_size; | 191 | unsigned int ss_size; |
192 | } stack_ia32_t; | 192 | } stack_ia32_t; |
193 | 193 | ||
194 | struct ucontext_ia32 { | 194 | struct ucontext_ia32 { |
195 | unsigned int uc_flags; | 195 | unsigned int uc_flags; |
196 | unsigned int uc_link; | 196 | unsigned int uc_link; |
197 | stack_ia32_t uc_stack; | 197 | stack_ia32_t uc_stack; |
198 | struct sigcontext_ia32 uc_mcontext; | 198 | struct sigcontext_ia32 uc_mcontext; |
199 | sigset_t uc_sigmask; /* mask last for extensibility */ | 199 | sigset_t uc_sigmask; /* mask last for extensibility */ |
200 | }; | 200 | }; |
201 | 201 | ||
202 | struct stat64 { | 202 | struct stat64 { |
203 | unsigned long long st_dev; | 203 | unsigned long long st_dev; |
204 | unsigned char __pad0[4]; | 204 | unsigned char __pad0[4]; |
205 | unsigned int __st_ino; | 205 | unsigned int __st_ino; |
206 | unsigned int st_mode; | 206 | unsigned int st_mode; |
207 | unsigned int st_nlink; | 207 | unsigned int st_nlink; |
208 | unsigned int st_uid; | 208 | unsigned int st_uid; |
209 | unsigned int st_gid; | 209 | unsigned int st_gid; |
210 | unsigned long long st_rdev; | 210 | unsigned long long st_rdev; |
211 | unsigned char __pad3[4]; | 211 | unsigned char __pad3[4]; |
212 | unsigned int st_size_lo; | 212 | unsigned int st_size_lo; |
213 | unsigned int st_size_hi; | 213 | unsigned int st_size_hi; |
214 | unsigned int st_blksize; | 214 | unsigned int st_blksize; |
215 | unsigned int st_blocks; /* Number 512-byte blocks allocated. */ | 215 | unsigned int st_blocks; /* Number 512-byte blocks allocated. */ |
216 | unsigned int __pad4; /* future possible st_blocks high bits */ | 216 | unsigned int __pad4; /* future possible st_blocks high bits */ |
217 | unsigned int st_atime; | 217 | unsigned int st_atime; |
218 | unsigned int st_atime_nsec; | 218 | unsigned int st_atime_nsec; |
219 | unsigned int st_mtime; | 219 | unsigned int st_mtime; |
220 | unsigned int st_mtime_nsec; | 220 | unsigned int st_mtime_nsec; |
221 | unsigned int st_ctime; | 221 | unsigned int st_ctime; |
222 | unsigned int st_ctime_nsec; | 222 | unsigned int st_ctime_nsec; |
223 | unsigned int st_ino_lo; | 223 | unsigned int st_ino_lo; |
224 | unsigned int st_ino_hi; | 224 | unsigned int st_ino_hi; |
225 | }; | 225 | }; |
226 | 226 | ||
227 | typedef struct compat_siginfo { | 227 | typedef struct compat_siginfo { |
228 | int si_signo; | 228 | int si_signo; |
229 | int si_errno; | 229 | int si_errno; |
230 | int si_code; | 230 | int si_code; |
231 | 231 | ||
232 | union { | 232 | union { |
233 | int _pad[((128/sizeof(int)) - 3)]; | 233 | int _pad[((128/sizeof(int)) - 3)]; |
234 | 234 | ||
235 | /* kill() */ | 235 | /* kill() */ |
236 | struct { | 236 | struct { |
237 | unsigned int _pid; /* sender's pid */ | 237 | unsigned int _pid; /* sender's pid */ |
238 | unsigned int _uid; /* sender's uid */ | 238 | unsigned int _uid; /* sender's uid */ |
239 | } _kill; | 239 | } _kill; |
240 | 240 | ||
241 | /* POSIX.1b timers */ | 241 | /* POSIX.1b timers */ |
242 | struct { | 242 | struct { |
243 | compat_timer_t _tid; /* timer id */ | 243 | compat_timer_t _tid; /* timer id */ |
244 | int _overrun; /* overrun count */ | 244 | int _overrun; /* overrun count */ |
245 | char _pad[sizeof(unsigned int) - sizeof(int)]; | 245 | char _pad[sizeof(unsigned int) - sizeof(int)]; |
246 | compat_sigval_t _sigval; /* same as below */ | 246 | compat_sigval_t _sigval; /* same as below */ |
247 | int _sys_private; /* not to be passed to user */ | 247 | int _sys_private; /* not to be passed to user */ |
248 | } _timer; | 248 | } _timer; |
249 | 249 | ||
250 | /* POSIX.1b signals */ | 250 | /* POSIX.1b signals */ |
251 | struct { | 251 | struct { |
252 | unsigned int _pid; /* sender's pid */ | 252 | unsigned int _pid; /* sender's pid */ |
253 | unsigned int _uid; /* sender's uid */ | 253 | unsigned int _uid; /* sender's uid */ |
254 | compat_sigval_t _sigval; | 254 | compat_sigval_t _sigval; |
255 | } _rt; | 255 | } _rt; |
256 | 256 | ||
257 | /* SIGCHLD */ | 257 | /* SIGCHLD */ |
258 | struct { | 258 | struct { |
259 | unsigned int _pid; /* which child */ | 259 | unsigned int _pid; /* which child */ |
260 | unsigned int _uid; /* sender's uid */ | 260 | unsigned int _uid; /* sender's uid */ |
261 | int _status; /* exit code */ | 261 | int _status; /* exit code */ |
262 | compat_clock_t _utime; | 262 | compat_clock_t _utime; |
263 | compat_clock_t _stime; | 263 | compat_clock_t _stime; |
264 | } _sigchld; | 264 | } _sigchld; |
265 | 265 | ||
266 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ | 266 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ |
267 | struct { | 267 | struct { |
268 | unsigned int _addr; /* faulting insn/memory ref. */ | 268 | unsigned int _addr; /* faulting insn/memory ref. */ |
269 | } _sigfault; | 269 | } _sigfault; |
270 | 270 | ||
271 | /* SIGPOLL */ | 271 | /* SIGPOLL */ |
272 | struct { | 272 | struct { |
273 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | 273 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ |
274 | int _fd; | 274 | int _fd; |
275 | } _sigpoll; | 275 | } _sigpoll; |
276 | } _sifields; | 276 | } _sifields; |
277 | } compat_siginfo_t; | 277 | } compat_siginfo_t; |
278 | 278 | ||
279 | struct old_linux32_dirent { | 279 | struct old_linux32_dirent { |
280 | u32 d_ino; | 280 | u32 d_ino; |
281 | u32 d_offset; | 281 | u32 d_offset; |
282 | u16 d_namlen; | 282 | u16 d_namlen; |
283 | char d_name[1]; | 283 | char d_name[1]; |
284 | }; | 284 | }; |
285 | 285 | ||
286 | /* | 286 | /* |
287 | * IA-32 ELF specific definitions for IA-64. | 287 | * IA-32 ELF specific definitions for IA-64. |
288 | */ | 288 | */ |
289 | 289 | ||
290 | #define _ASM_IA64_ELF_H /* Don't include elf.h */ | 290 | #define _ASM_IA64_ELF_H /* Don't include elf.h */ |
291 | 291 | ||
292 | #include <linux/sched.h> | 292 | #include <linux/sched.h> |
293 | #include <asm/processor.h> | 293 | #include <asm/processor.h> |
294 | 294 | ||
295 | /* | 295 | /* |
296 | * This is used to ensure we don't load something for the wrong architecture. | 296 | * This is used to ensure we don't load something for the wrong architecture. |
297 | */ | 297 | */ |
298 | #define elf_check_arch(x) ((x)->e_machine == EM_386) | 298 | #define elf_check_arch(x) ((x)->e_machine == EM_386) |
299 | 299 | ||
300 | /* | 300 | /* |
301 | * These are used to set parameters in the core dumps. | 301 | * These are used to set parameters in the core dumps. |
302 | */ | 302 | */ |
303 | #define ELF_CLASS ELFCLASS32 | 303 | #define ELF_CLASS ELFCLASS32 |
304 | #define ELF_DATA ELFDATA2LSB | 304 | #define ELF_DATA ELFDATA2LSB |
305 | #define ELF_ARCH EM_386 | 305 | #define ELF_ARCH EM_386 |
306 | 306 | ||
307 | #define IA32_STACK_TOP IA32_PAGE_OFFSET | 307 | #define IA32_STACK_TOP IA32_PAGE_OFFSET |
308 | #define IA32_GATE_OFFSET IA32_PAGE_OFFSET | 308 | #define IA32_GATE_OFFSET IA32_PAGE_OFFSET |
309 | #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE | 309 | #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE |
310 | 310 | ||
311 | /* | 311 | /* |
312 | * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can | 312 | * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can |
313 | * access them. | 313 | * access them. |
314 | */ | 314 | */ |
315 | #define IA32_GDT_OFFSET (IA32_PAGE_OFFSET + PAGE_SIZE) | 315 | #define IA32_GDT_OFFSET (IA32_PAGE_OFFSET + PAGE_SIZE) |
316 | #define IA32_TSS_OFFSET (IA32_PAGE_OFFSET + 2*PAGE_SIZE) | 316 | #define IA32_TSS_OFFSET (IA32_PAGE_OFFSET + 2*PAGE_SIZE) |
317 | #define IA32_LDT_OFFSET (IA32_PAGE_OFFSET + 3*PAGE_SIZE) | 317 | #define IA32_LDT_OFFSET (IA32_PAGE_OFFSET + 3*PAGE_SIZE) |
318 | 318 | ||
319 | #define ELF_EXEC_PAGESIZE IA32_PAGE_SIZE | 319 | #define ELF_EXEC_PAGESIZE IA32_PAGE_SIZE |
320 | 320 | ||
321 | /* | 321 | /* |
322 | * This is the location that an ET_DYN program is loaded if exec'ed. | 322 | * This is the location that an ET_DYN program is loaded if exec'ed. |
323 | * Typical use of this is to invoke "./ld.so someprog" to test out a | 323 | * Typical use of this is to invoke "./ld.so someprog" to test out a |
324 | * new version of the loader. We need to make sure that it is out of | 324 | * new version of the loader. We need to make sure that it is out of |
325 | * the way of the program that it will "exec", and that there is | 325 | * the way of the program that it will "exec", and that there is |
326 | * sufficient room for the brk. | 326 | * sufficient room for the brk. |
327 | */ | 327 | */ |
328 | #define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000) | 328 | #define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000) |
329 | 329 | ||
330 | void ia64_elf32_init(struct pt_regs *regs); | 330 | void ia64_elf32_init(struct pt_regs *regs); |
331 | #define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r) | 331 | #define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r) |
332 | 332 | ||
333 | /* This macro yields a bitmask that programs can use to figure out | 333 | /* This macro yields a bitmask that programs can use to figure out |
334 | what instruction set this CPU supports. */ | 334 | what instruction set this CPU supports. */ |
335 | #define ELF_HWCAP 0 | 335 | #define ELF_HWCAP 0 |
336 | 336 | ||
337 | /* This macro yields a string that ld.so will use to load | 337 | /* This macro yields a string that ld.so will use to load |
338 | implementation specific libraries for optimization. Not terribly | 338 | implementation specific libraries for optimization. Not terribly |
339 | relevant until we have real hardware to play with... */ | 339 | relevant until we have real hardware to play with... */ |
340 | #define ELF_PLATFORM NULL | 340 | #define ELF_PLATFORM NULL |
341 | 341 | ||
342 | #ifdef __KERNEL__ | 342 | #ifdef __KERNEL__ |
343 | # define SET_PERSONALITY(EX,IBCS2) \ | 343 | # define SET_PERSONALITY(EX,IBCS2) \ |
344 | (current->personality = (IBCS2) ? PER_SVR4 : PER_LINUX) | 344 | (current->personality = (IBCS2) ? PER_SVR4 : PER_LINUX) |
345 | #endif | 345 | #endif |
346 | 346 | ||
347 | #define IA32_EFLAG 0x200 | 347 | #define IA32_EFLAG 0x200 |
348 | 348 | ||
349 | /* | 349 | /* |
350 | * IA-32 ELF specific definitions for IA-64. | 350 | * IA-32 ELF specific definitions for IA-64. |
351 | */ | 351 | */ |
352 | 352 | ||
353 | #define __USER_CS 0x23 | 353 | #define __USER_CS 0x23 |
354 | #define __USER_DS 0x2B | 354 | #define __USER_DS 0x2B |
355 | 355 | ||
356 | /* | 356 | /* |
357 | * The per-cpu GDT has 32 entries: see <asm-i386/segment.h> | 357 | * The per-cpu GDT has 32 entries: see <asm-i386/segment.h> |
358 | */ | 358 | */ |
359 | #define GDT_ENTRIES 32 | 359 | #define GDT_ENTRIES 32 |
360 | 360 | ||
361 | #define GDT_SIZE (GDT_ENTRIES * 8) | 361 | #define GDT_SIZE (GDT_ENTRIES * 8) |
362 | 362 | ||
363 | #define TSS_ENTRY 14 | 363 | #define TSS_ENTRY 14 |
364 | #define LDT_ENTRY (TSS_ENTRY + 1) | 364 | #define LDT_ENTRY (TSS_ENTRY + 1) |
365 | 365 | ||
366 | #define IA32_SEGSEL_RPL (0x3 << 0) | 366 | #define IA32_SEGSEL_RPL (0x3 << 0) |
367 | #define IA32_SEGSEL_TI (0x1 << 2) | 367 | #define IA32_SEGSEL_TI (0x1 << 2) |
368 | #define IA32_SEGSEL_INDEX_SHIFT 3 | 368 | #define IA32_SEGSEL_INDEX_SHIFT 3 |
369 | 369 | ||
370 | #define _TSS ((unsigned long) TSS_ENTRY << IA32_SEGSEL_INDEX_SHIFT) | 370 | #define _TSS ((unsigned long) TSS_ENTRY << IA32_SEGSEL_INDEX_SHIFT) |
371 | #define _LDT ((unsigned long) LDT_ENTRY << IA32_SEGSEL_INDEX_SHIFT) | 371 | #define _LDT ((unsigned long) LDT_ENTRY << IA32_SEGSEL_INDEX_SHIFT) |
372 | 372 | ||
373 | #define IA32_SEG_BASE 16 | 373 | #define IA32_SEG_BASE 16 |
374 | #define IA32_SEG_TYPE 40 | 374 | #define IA32_SEG_TYPE 40 |
375 | #define IA32_SEG_SYS 44 | 375 | #define IA32_SEG_SYS 44 |
376 | #define IA32_SEG_DPL 45 | 376 | #define IA32_SEG_DPL 45 |
377 | #define IA32_SEG_P 47 | 377 | #define IA32_SEG_P 47 |
378 | #define IA32_SEG_HIGH_LIMIT 48 | 378 | #define IA32_SEG_HIGH_LIMIT 48 |
379 | #define IA32_SEG_AVL 52 | 379 | #define IA32_SEG_AVL 52 |
380 | #define IA32_SEG_DB 54 | 380 | #define IA32_SEG_DB 54 |
381 | #define IA32_SEG_G 55 | 381 | #define IA32_SEG_G 55 |
382 | #define IA32_SEG_HIGH_BASE 56 | 382 | #define IA32_SEG_HIGH_BASE 56 |
383 | 383 | ||
384 | #define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran) \ | 384 | #define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran) \ |
385 | (((limit) & 0xffff) \ | 385 | (((limit) & 0xffff) \ |
386 | | (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE) \ | 386 | | (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE) \ |
387 | | ((unsigned long) (segtype) << IA32_SEG_TYPE) \ | 387 | | ((unsigned long) (segtype) << IA32_SEG_TYPE) \ |
388 | | ((unsigned long) (nonsysseg) << IA32_SEG_SYS) \ | 388 | | ((unsigned long) (nonsysseg) << IA32_SEG_SYS) \ |
389 | | ((unsigned long) (dpl) << IA32_SEG_DPL) \ | 389 | | ((unsigned long) (dpl) << IA32_SEG_DPL) \ |
390 | | ((unsigned long) (segpresent) << IA32_SEG_P) \ | 390 | | ((unsigned long) (segpresent) << IA32_SEG_P) \ |
391 | | ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT) \ | 391 | | ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT) \ |
392 | | ((unsigned long) (avl) << IA32_SEG_AVL) \ | 392 | | ((unsigned long) (avl) << IA32_SEG_AVL) \ |
393 | | ((unsigned long) (segdb) << IA32_SEG_DB) \ | 393 | | ((unsigned long) (segdb) << IA32_SEG_DB) \ |
394 | | ((unsigned long) (gran) << IA32_SEG_G) \ | 394 | | ((unsigned long) (gran) << IA32_SEG_G) \ |
395 | | ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE)) | 395 | | ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE)) |
396 | 396 | ||
397 | #define SEG_LIM 32 | 397 | #define SEG_LIM 32 |
398 | #define SEG_TYPE 52 | 398 | #define SEG_TYPE 52 |
399 | #define SEG_SYS 56 | 399 | #define SEG_SYS 56 |
400 | #define SEG_DPL 57 | 400 | #define SEG_DPL 57 |
401 | #define SEG_P 59 | 401 | #define SEG_P 59 |
402 | #define SEG_AVL 60 | 402 | #define SEG_AVL 60 |
403 | #define SEG_DB 62 | 403 | #define SEG_DB 62 |
404 | #define SEG_G 63 | 404 | #define SEG_G 63 |
405 | 405 | ||
406 | /* Unscramble an IA-32 segment descriptor into the IA-64 format. */ | 406 | /* Unscramble an IA-32 segment descriptor into the IA-64 format. */ |
407 | #define IA32_SEG_UNSCRAMBLE(sd) \ | 407 | #define IA32_SEG_UNSCRAMBLE(sd) \ |
408 | ( (((sd) >> IA32_SEG_BASE) & 0xffffff) | ((((sd) >> IA32_SEG_HIGH_BASE) & 0xff) << 24) \ | 408 | ( (((sd) >> IA32_SEG_BASE) & 0xffffff) | ((((sd) >> IA32_SEG_HIGH_BASE) & 0xff) << 24) \ |
409 | | ((((sd) & 0xffff) | ((((sd) >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16)) << SEG_LIM) \ | 409 | | ((((sd) & 0xffff) | ((((sd) >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16)) << SEG_LIM) \ |
410 | | ((((sd) >> IA32_SEG_TYPE) & 0xf) << SEG_TYPE) \ | 410 | | ((((sd) >> IA32_SEG_TYPE) & 0xf) << SEG_TYPE) \ |
411 | | ((((sd) >> IA32_SEG_SYS) & 0x1) << SEG_SYS) \ | 411 | | ((((sd) >> IA32_SEG_SYS) & 0x1) << SEG_SYS) \ |
412 | | ((((sd) >> IA32_SEG_DPL) & 0x3) << SEG_DPL) \ | 412 | | ((((sd) >> IA32_SEG_DPL) & 0x3) << SEG_DPL) \ |
413 | | ((((sd) >> IA32_SEG_P) & 0x1) << SEG_P) \ | 413 | | ((((sd) >> IA32_SEG_P) & 0x1) << SEG_P) \ |
414 | | ((((sd) >> IA32_SEG_AVL) & 0x1) << SEG_AVL) \ | 414 | | ((((sd) >> IA32_SEG_AVL) & 0x1) << SEG_AVL) \ |
415 | | ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB) \ | 415 | | ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB) \ |
416 | | ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G)) | 416 | | ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G)) |
417 | 417 | ||
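IA32_SEG_DESCRIPTOR and IA32_SEG_UNSCRAMBLE above are inverse packings of the same fields. A userspace round-trip sketch for just the base and 20-bit limit, using the IA32_SEG_* bit positions from this header; note the unscramble here extracts the raw values rather than repacking them at the SEG_* positions of the ia64 format:

    #include <stdio.h>

    #define IA32_SEG_BASE        16
    #define IA32_SEG_HIGH_LIMIT  48
    #define IA32_SEG_HIGH_BASE   56

    int main(void)
    {
            unsigned long base = 0x12345678, limit = 0xfffff;  /* example values */

            /* scramble (type/dpl/present bits omitted for brevity): */
            unsigned long sd = (limit & 0xffff)
                    | ((base & 0xffffff) << IA32_SEG_BASE)
                    | (((limit >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT)
                    | (((base >> 24) & 0xff) << IA32_SEG_HIGH_BASE);

            /* unscramble: */
            unsigned long ubase = ((sd >> IA32_SEG_BASE) & 0xffffff)
                    | (((sd >> IA32_SEG_HIGH_BASE) & 0xff) << 24);
            unsigned long ulimit = (sd & 0xffff)
                    | (((sd >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16);

            printf("base  %#lx -> %#lx\n", base, ubase);    /* 0x12345678 both */
            printf("limit %#lx -> %#lx\n", limit, ulimit);  /* 0xfffff both */
            return 0;
    }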
418 | #define IA32_IOBASE 0x2000000000000000UL /* Virtual address for I/O space */ | 418 | #define IA32_IOBASE 0x2000000000000000UL /* Virtual address for I/O space */ |
419 | 419 | ||
420 | #define IA32_CR0 0x80000001 /* Enable PG and PE bits */ | 420 | #define IA32_CR0 0x80000001 /* Enable PG and PE bits */ |
421 | #define IA32_CR4 0x600 /* MMXEX and FXSR on */ | 421 | #define IA32_CR4 0x600 /* MMXEX and FXSR on */ |
422 | 422 | ||
423 | /* | 423 | /* |
424 | * IA32 floating point control registers starting values | 424 | * IA32 floating point control registers starting values |
425 | */ | 425 | */ |
426 | 426 | ||
427 | #define IA32_FSR_DEFAULT 0x55550000 /* set all tag bits */ | 427 | #define IA32_FSR_DEFAULT 0x55550000 /* set all tag bits */ |
428 | #define IA32_FCR_DEFAULT 0x17800000037fUL /* extended precision, all masks */ | 428 | #define IA32_FCR_DEFAULT 0x17800000037fUL /* extended precision, all masks */ |
429 | 429 | ||
430 | #define IA32_PTRACE_GETREGS 12 | 430 | #define IA32_PTRACE_GETREGS 12 |
431 | #define IA32_PTRACE_SETREGS 13 | 431 | #define IA32_PTRACE_SETREGS 13 |
432 | #define IA32_PTRACE_GETFPREGS 14 | 432 | #define IA32_PTRACE_GETFPREGS 14 |
433 | #define IA32_PTRACE_SETFPREGS 15 | 433 | #define IA32_PTRACE_SETFPREGS 15 |
434 | #define IA32_PTRACE_GETFPXREGS 18 | 434 | #define IA32_PTRACE_GETFPXREGS 18 |
435 | #define IA32_PTRACE_SETFPXREGS 19 | 435 | #define IA32_PTRACE_SETFPXREGS 19 |
436 | 436 | ||
437 | #define ia32_start_thread(regs,new_ip,new_sp) do { \ | 437 | #define ia32_start_thread(regs,new_ip,new_sp) do { \ |
438 | set_fs(USER_DS); \ | 438 | set_fs(USER_DS); \ |
439 | ia64_psr(regs)->cpl = 3; /* set user mode */ \ | 439 | ia64_psr(regs)->cpl = 3; /* set user mode */ \ |
440 | ia64_psr(regs)->ri = 0; /* clear return slot number */ \ | 440 | ia64_psr(regs)->ri = 0; /* clear return slot number */ \ |
441 | ia64_psr(regs)->is = 1; /* IA-32 instruction set */ \ | 441 | ia64_psr(regs)->is = 1; /* IA-32 instruction set */ \ |
442 | regs->cr_iip = new_ip; \ | 442 | regs->cr_iip = new_ip; \ |
443 | regs->ar_rsc = 0xc; /* enforced lazy mode, priv. level 3 */ \ | 443 | regs->ar_rsc = 0xc; /* enforced lazy mode, priv. level 3 */ \ |
444 | regs->ar_rnat = 0; \ | 444 | regs->ar_rnat = 0; \ |
445 | regs->loadrs = 0; \ | 445 | regs->loadrs = 0; \ |
446 | regs->r12 = new_sp; \ | 446 | regs->r12 = new_sp; \ |
447 | } while (0) | 447 | } while (0) |
448 | 448 | ||
449 | /* | 449 | /* |
450 | * Local Descriptor Table (LDT) related declarations. | 450 | * Local Descriptor Table (LDT) related declarations. |
451 | */ | 451 | */ |
452 | 452 | ||
453 | #define IA32_LDT_ENTRIES 8192 /* Maximum number of LDT entries supported. */ | 453 | #define IA32_LDT_ENTRIES 8192 /* Maximum number of LDT entries supported. */ |
454 | #define IA32_LDT_ENTRY_SIZE 8 /* The size of each LDT entry. */ | 454 | #define IA32_LDT_ENTRY_SIZE 8 /* The size of each LDT entry. */ |
455 | 455 | ||
456 | #define LDT_entry_a(info) \ | 456 | #define LDT_entry_a(info) \ |
457 | ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) | 457 | ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff)) |
458 | 458 | ||
459 | #define LDT_entry_b(info) \ | 459 | #define LDT_entry_b(info) \ |
460 | (((info)->base_addr & 0xff000000) | \ | 460 | (((info)->base_addr & 0xff000000) | \ |
461 | (((info)->base_addr & 0x00ff0000) >> 16) | \ | 461 | (((info)->base_addr & 0x00ff0000) >> 16) | \ |
462 | ((info)->limit & 0xf0000) | \ | 462 | ((info)->limit & 0xf0000) | \ |
463 | (((info)->read_exec_only ^ 1) << 9) | \ | 463 | (((info)->read_exec_only ^ 1) << 9) | \ |
464 | ((info)->contents << 10) | \ | 464 | ((info)->contents << 10) | \ |
465 | (((info)->seg_not_present ^ 1) << 15) | \ | 465 | (((info)->seg_not_present ^ 1) << 15) | \ |
466 | ((info)->seg_32bit << 22) | \ | 466 | ((info)->seg_32bit << 22) | \ |
467 | ((info)->limit_in_pages << 23) | \ | 467 | ((info)->limit_in_pages << 23) | \ |
468 | ((info)->useable << 20) | \ | 468 | ((info)->useable << 20) | \ |
469 | 0x7100) | 469 | 0x7100) |
470 | 470 | ||
471 | #define LDT_empty(info) ( \ | 471 | #define LDT_empty(info) ( \ |
472 | (info)->base_addr == 0 && \ | 472 | (info)->base_addr == 0 && \ |
473 | (info)->limit == 0 && \ | 473 | (info)->limit == 0 && \ |
474 | (info)->contents == 0 && \ | 474 | (info)->contents == 0 && \ |
475 | (info)->read_exec_only == 1 && \ | 475 | (info)->read_exec_only == 1 && \ |
476 | (info)->seg_32bit == 0 && \ | 476 | (info)->seg_32bit == 0 && \ |
477 | (info)->limit_in_pages == 0 && \ | 477 | (info)->limit_in_pages == 0 && \ |
478 | (info)->seg_not_present == 1 && \ | 478 | (info)->seg_not_present == 1 && \ |
479 | (info)->useable == 0 ) | 479 | (info)->useable == 0 ) |
480 | 480 | ||
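LDT_entry_a()/LDT_entry_b() above split a user-supplied descriptor into the low and high 32-bit words of a standard i386 descriptor, which is how ia32_clone_tls() fills the TLS slots. A userspace sketch that reuses the two macros verbatim; the field values are arbitrary examples:

    #include <stdio.h>

    struct ia32_user_desc {
            unsigned int entry_number;
            unsigned int base_addr;
            unsigned int limit;
            unsigned int seg_32bit:1;
            unsigned int contents:2;
            unsigned int read_exec_only:1;
            unsigned int limit_in_pages:1;
            unsigned int seg_not_present:1;
            unsigned int useable:1;
    };

    #define LDT_entry_a(info) \
            ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))

    #define LDT_entry_b(info) \
            (((info)->base_addr & 0xff000000) | \
            (((info)->base_addr & 0x00ff0000) >> 16) | \
            ((info)->limit & 0xf0000) | \
            (((info)->read_exec_only ^ 1) << 9) | \
            ((info)->contents << 10) | \
            (((info)->seg_not_present ^ 1) << 15) | \
            ((info)->seg_32bit << 22) | \
            ((info)->limit_in_pages << 23) | \
            ((info)->useable << 20) | \
            0x7100)

    int main(void)
    {
            struct ia32_user_desc info = {
                    .entry_number   = 6,            /* a TLS slot */
                    .base_addr      = 0x12345678,
                    .limit          = 0xfffff,
                    .seg_32bit      = 1,
                    .limit_in_pages = 1,
            };

            /* the two halves ia32_clone_tls() stores into tls_array: */
            printf("a=%08x b=%08x\n", LDT_entry_a(&info), LDT_entry_b(&info));
            return 0;
    }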
481 | static inline void | 481 | static inline void |
482 | load_TLS (struct thread_struct *t, unsigned int cpu) | 482 | load_TLS (struct thread_struct *t, unsigned int cpu) |
483 | { | 483 | { |
484 | extern unsigned long *cpu_gdt_table[NR_CPUS]; | 484 | extern unsigned long *cpu_gdt_table[NR_CPUS]; |
485 | 485 | ||
486 | memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0], sizeof(long)); | 486 | memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0], sizeof(long)); |
487 | memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1], sizeof(long)); | 487 | memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1], sizeof(long)); |
488 | memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2], sizeof(long)); | 488 | memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2], sizeof(long)); |
489 | } | 489 | } |
490 | 490 | ||
491 | struct ia32_user_desc { | 491 | struct ia32_user_desc { |
492 | unsigned int entry_number; | 492 | unsigned int entry_number; |
493 | unsigned int base_addr; | 493 | unsigned int base_addr; |
494 | unsigned int limit; | 494 | unsigned int limit; |
495 | unsigned int seg_32bit:1; | 495 | unsigned int seg_32bit:1; |
496 | unsigned int contents:2; | 496 | unsigned int contents:2; |
497 | unsigned int read_exec_only:1; | 497 | unsigned int read_exec_only:1; |
498 | unsigned int limit_in_pages:1; | 498 | unsigned int limit_in_pages:1; |
499 | unsigned int seg_not_present:1; | 499 | unsigned int seg_not_present:1; |
500 | unsigned int useable:1; | 500 | unsigned int useable:1; |
501 | }; | 501 | }; |
502 | 502 | ||
503 | struct linux_binprm; | 503 | struct linux_binprm; |
504 | 504 | ||
505 | extern void ia32_init_addr_space (struct pt_regs *regs); | 505 | extern void ia32_init_addr_space (struct pt_regs *regs); |
506 | extern int ia32_setup_arg_pages (struct linux_binprm *bprm, int exec_stack); | 506 | extern int ia32_setup_arg_pages (struct linux_binprm *bprm, int exec_stack); |
507 | extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t); | 507 | extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t); |
508 | extern void ia32_load_segment_descriptors (struct task_struct *task); | 508 | extern void ia32_load_segment_descriptors (struct task_struct *task); |
509 | 509 | ||
510 | #define ia32f2ia64f(dst,src) \ | 510 | #define ia32f2ia64f(dst,src) \ |
511 | do { \ | 511 | do { \ |
512 | ia64_ldfe(6,src); \ | 512 | ia64_ldfe(6,src); \ |
513 | ia64_stop(); \ | 513 | ia64_stop(); \ |
514 | ia64_stf_spill(dst, 6); \ | 514 | ia64_stf_spill(dst, 6); \ |
515 | } while(0) | 515 | } while(0) |
516 | 516 | ||
517 | #define ia64f2ia32f(dst,src) \ | 517 | #define ia64f2ia32f(dst,src) \ |
518 | do { \ | 518 | do { \ |
519 | ia64_ldf_fill(6, src); \ | 519 | ia64_ldf_fill(6, src); \ |
520 | ia64_stop(); \ | 520 | ia64_stop(); \ |
521 | ia64_stfe(dst, 6); \ | 521 | ia64_stfe(dst, 6); \ |
522 | } while(0) | 522 | } while(0) |
523 | 523 | ||
524 | struct user_regs_struct32 { | 524 | struct user_regs_struct32 { |
525 | __u32 ebx, ecx, edx, esi, edi, ebp, eax; | 525 | __u32 ebx, ecx, edx, esi, edi, ebp, eax; |
526 | unsigned short ds, __ds, es, __es; | 526 | unsigned short ds, __ds, es, __es; |
527 | unsigned short fs, __fs, gs, __gs; | 527 | unsigned short fs, __fs, gs, __gs; |
528 | __u32 orig_eax, eip; | 528 | __u32 orig_eax, eip; |
529 | unsigned short cs, __cs; | 529 | unsigned short cs, __cs; |
530 | __u32 eflags, esp; | 530 | __u32 eflags, esp; |
531 | unsigned short ss, __ss; | 531 | unsigned short ss, __ss; |
532 | }; | 532 | }; |
533 | 533 | ||
534 | /* Prototypes for use in elfcore32.h */ | 534 | /* Prototypes for use in elfcore32.h */ |
535 | extern int save_ia32_fpstate (struct task_struct *, struct ia32_user_i387_struct __user *); | 535 | extern int save_ia32_fpstate (struct task_struct *, struct ia32_user_i387_struct __user *); |
536 | extern int save_ia32_fpxstate (struct task_struct *, struct ia32_user_fxsr_struct __user *); | 536 | extern int save_ia32_fpxstate (struct task_struct *, struct ia32_user_fxsr_struct __user *); |
537 | 537 | ||
538 | #endif /* !CONFIG_IA32_SUPPORT */ | 538 | #endif /* !CONFIG_IA32_SUPPORT */ |
539 | 539 | ||
540 | #endif /* _ASM_IA64_IA32_PRIV_H */ | 540 | #endif /* _ASM_IA64_IA32_PRIV_H */ |
541 | 541 |
arch/ia64/ia32/sys_ia32.c
1 | /* | 1 | /* |
2 | * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c. | 2 | * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c. |
3 | * | 3 | * |
4 | * Copyright (C) 2000 VA Linux Co | 4 | * Copyright (C) 2000 VA Linux Co |
5 | * Copyright (C) 2000 Don Dugger <n0ano@valinux.com> | 5 | * Copyright (C) 2000 Don Dugger <n0ano@valinux.com> |
6 | * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> | 6 | * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com> |
7 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | 7 | * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) |
8 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) | 8 | * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu) |
9 | * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co | 9 | * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co |
10 | * David Mosberger-Tang <davidm@hpl.hp.com> | 10 | * David Mosberger-Tang <davidm@hpl.hp.com> |
11 | * Copyright (C) 2004 Gordon Jin <gordon.jin@intel.com> | 11 | * Copyright (C) 2004 Gordon Jin <gordon.jin@intel.com> |
12 | * | 12 | * |
13 | * These routines maintain argument size conversion between 32bit and 64bit | 13 | * These routines maintain argument size conversion between 32bit and 64bit |
14 | * environment. | 14 | * environment. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/syscalls.h> | 18 | #include <linux/syscalls.h> |
19 | #include <linux/sysctl.h> | 19 | #include <linux/sysctl.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
22 | #include <linux/file.h> | 22 | #include <linux/file.h> |
23 | #include <linux/signal.h> | 23 | #include <linux/signal.h> |
24 | #include <linux/resource.h> | 24 | #include <linux/resource.h> |
25 | #include <linux/times.h> | 25 | #include <linux/times.h> |
26 | #include <linux/utsname.h> | 26 | #include <linux/utsname.h> |
27 | #include <linux/smp.h> | 27 | #include <linux/smp.h> |
28 | #include <linux/smp_lock.h> | 28 | #include <linux/smp_lock.h> |
29 | #include <linux/sem.h> | 29 | #include <linux/sem.h> |
30 | #include <linux/msg.h> | 30 | #include <linux/msg.h> |
31 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
32 | #include <linux/shm.h> | 32 | #include <linux/shm.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/uio.h> | 34 | #include <linux/uio.h> |
35 | #include <linux/nfs_fs.h> | 35 | #include <linux/nfs_fs.h> |
36 | #include <linux/quota.h> | 36 | #include <linux/quota.h> |
37 | #include <linux/syscalls.h> | 37 | #include <linux/syscalls.h> |
38 | #include <linux/sunrpc/svc.h> | 38 | #include <linux/sunrpc/svc.h> |
39 | #include <linux/nfsd/nfsd.h> | 39 | #include <linux/nfsd/nfsd.h> |
40 | #include <linux/nfsd/cache.h> | 40 | #include <linux/nfsd/cache.h> |
41 | #include <linux/nfsd/xdr.h> | 41 | #include <linux/nfsd/xdr.h> |
42 | #include <linux/nfsd/syscall.h> | 42 | #include <linux/nfsd/syscall.h> |
43 | #include <linux/poll.h> | 43 | #include <linux/poll.h> |
44 | #include <linux/eventpoll.h> | 44 | #include <linux/eventpoll.h> |
45 | #include <linux/personality.h> | 45 | #include <linux/personality.h> |
46 | #include <linux/ptrace.h> | 46 | #include <linux/ptrace.h> |
47 | #include <linux/stat.h> | 47 | #include <linux/stat.h> |
48 | #include <linux/ipc.h> | 48 | #include <linux/ipc.h> |
49 | #include <linux/capability.h> | 49 | #include <linux/capability.h> |
50 | #include <linux/compat.h> | 50 | #include <linux/compat.h> |
51 | #include <linux/vfs.h> | 51 | #include <linux/vfs.h> |
52 | #include <linux/mman.h> | 52 | #include <linux/mman.h> |
53 | #include <linux/mutex.h> | 53 | #include <linux/mutex.h> |
54 | 54 | ||
55 | #include <asm/intrinsics.h> | 55 | #include <asm/intrinsics.h> |
56 | #include <asm/types.h> | 56 | #include <asm/types.h> |
57 | #include <asm/uaccess.h> | 57 | #include <asm/uaccess.h> |
58 | #include <asm/unistd.h> | 58 | #include <asm/unistd.h> |
59 | 59 | ||
60 | #include "ia32priv.h" | 60 | #include "ia32priv.h" |
61 | 61 | ||
62 | #include <net/scm.h> | 62 | #include <net/scm.h> |
63 | #include <net/sock.h> | 63 | #include <net/sock.h> |
64 | 64 | ||
65 | #define DEBUG 0 | 65 | #define DEBUG 0 |
66 | 66 | ||
67 | #if DEBUG | 67 | #if DEBUG |
68 | # define DBG(fmt...) printk(KERN_DEBUG fmt) | 68 | # define DBG(fmt...) printk(KERN_DEBUG fmt) |
69 | #else | 69 | #else |
70 | # define DBG(fmt...) | 70 | # define DBG(fmt...) |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | #define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1))) | 73 | #define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1))) |
74 | 74 | ||
75 | #define OFFSET4K(a) ((a) & 0xfff) | 75 | #define OFFSET4K(a) ((a) & 0xfff) |
76 | #define PAGE_START(addr) ((addr) & PAGE_MASK) | 76 | #define PAGE_START(addr) ((addr) & PAGE_MASK) |
77 | #define MINSIGSTKSZ_IA32 2048 | 77 | #define MINSIGSTKSZ_IA32 2048 |
78 | 78 | ||
79 | #define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid)) | 79 | #define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid)) |
80 | #define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid)) | 80 | #define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid)) |
81 | 81 | ||
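A quick userspace sketch of the page-arithmetic macros above. IA32_PAGE_SIZE is fixed at 4 KB, but the IA64 PAGE_SIZE is a kernel configuration choice; 16 KB is assumed here purely for illustration:

    #include <stdio.h>

    /* simplified stand-ins for the kernel macros above */
    #define PAGE_SIZE        16384UL               /* assumed IA64 page size */
    #define PAGE_MASK        (~(PAGE_SIZE - 1))
    #define ROUND_UP(x,a)    (((x) + ((a) - 1)) & ~((a) - 1))
    #define OFFSET4K(a)      ((a) & 0xfff)
    #define PAGE_START(addr) ((addr) & PAGE_MASK)

    int main(void)
    {
            unsigned long addr = 0x5abc;
            printf("OFFSET4K   = 0x%lx\n", OFFSET4K(addr));            /* 0xabc  */
            printf("PAGE_START = 0x%lx\n", PAGE_START(addr));          /* 0x4000 */
            printf("ROUND_UP   = 0x%lx\n", ROUND_UP(addr, PAGE_SIZE)); /* 0x8000 */
            return 0;
    }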
82 | /* | 82 | /* |
83 | * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore | 83 | * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore |
84 | * while doing so. | 84 | * while doing so. |
85 | */ | 85 | */ |
86 | /* XXX make per-mm: */ | 86 | /* XXX make per-mm: */ |
87 | static DEFINE_MUTEX(ia32_mmap_mutex); | 87 | static DEFINE_MUTEX(ia32_mmap_mutex); |
88 | 88 | ||
89 | asmlinkage long | 89 | asmlinkage long |
90 | sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp, | 90 | sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp, |
91 | struct pt_regs *regs) | 91 | struct pt_regs *regs) |
92 | { | 92 | { |
93 | long error; | 93 | long error; |
94 | char *filename; | 94 | char *filename; |
95 | unsigned long old_map_base, old_task_size, tssd; | 95 | unsigned long old_map_base, old_task_size, tssd; |
96 | 96 | ||
97 | filename = getname(name); | 97 | filename = getname(name); |
98 | error = PTR_ERR(filename); | 98 | error = PTR_ERR(filename); |
99 | if (IS_ERR(filename)) | 99 | if (IS_ERR(filename)) |
100 | return error; | 100 | return error; |
101 | 101 | ||
102 | old_map_base = current->thread.map_base; | 102 | old_map_base = current->thread.map_base; |
103 | old_task_size = current->thread.task_size; | 103 | old_task_size = current->thread.task_size; |
104 | tssd = ia64_get_kr(IA64_KR_TSSD); | 104 | tssd = ia64_get_kr(IA64_KR_TSSD); |
105 | 105 | ||
106 | /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */ | 106 | /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */ |
107 | current->thread.map_base = DEFAULT_MAP_BASE; | 107 | current->thread.map_base = DEFAULT_MAP_BASE; |
108 | current->thread.task_size = DEFAULT_TASK_SIZE; | 108 | current->thread.task_size = DEFAULT_TASK_SIZE; |
109 | ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob); | 109 | ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob); |
110 | ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1); | 110 | ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1); |
111 | 111 | ||
112 | error = compat_do_execve(filename, argv, envp, regs); | 112 | error = compat_do_execve(filename, argv, envp, regs); |
113 | putname(filename); | 113 | putname(filename); |
114 | 114 | ||
115 | if (error < 0) { | 115 | if (error < 0) { |
116 | /* oops, execve failed, switch back to old values... */ | 116 | /* oops, execve failed, switch back to old values... */ |
117 | ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE); | 117 | ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE); |
118 | ia64_set_kr(IA64_KR_TSSD, tssd); | 118 | ia64_set_kr(IA64_KR_TSSD, tssd); |
119 | current->thread.map_base = old_map_base; | 119 | current->thread.map_base = old_map_base; |
120 | current->thread.task_size = old_task_size; | 120 | current->thread.task_size = old_task_size; |
121 | } | 121 | } |
122 | 122 | ||
123 | return error; | 123 | return error; |
124 | } | 124 | } |
125 | 125 | ||
126 | int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) | 126 | int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) |
127 | { | 127 | { |
128 | compat_ino_t ino; | 128 | compat_ino_t ino; |
129 | int err; | 129 | int err; |
130 | 130 | ||
131 | if ((u64) stat->size > MAX_NON_LFS || | 131 | if ((u64) stat->size > MAX_NON_LFS || |
132 | !old_valid_dev(stat->dev) || | 132 | !old_valid_dev(stat->dev) || |
133 | !old_valid_dev(stat->rdev)) | 133 | !old_valid_dev(stat->rdev)) |
134 | return -EOVERFLOW; | 134 | return -EOVERFLOW; |
135 | 135 | ||
136 | ino = stat->ino; | 136 | ino = stat->ino; |
137 | if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino) | 137 | if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino) |
138 | return -EOVERFLOW; | 138 | return -EOVERFLOW; |
139 | 139 | ||
140 | if (clear_user(ubuf, sizeof(*ubuf))) | 140 | if (clear_user(ubuf, sizeof(*ubuf))) |
141 | return -EFAULT; | 141 | return -EFAULT; |
142 | 142 | ||
143 | err = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev); | 143 | err = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev); |
144 | err |= __put_user(ino, &ubuf->st_ino); | 144 | err |= __put_user(ino, &ubuf->st_ino); |
145 | err |= __put_user(stat->mode, &ubuf->st_mode); | 145 | err |= __put_user(stat->mode, &ubuf->st_mode); |
146 | err |= __put_user(stat->nlink, &ubuf->st_nlink); | 146 | err |= __put_user(stat->nlink, &ubuf->st_nlink); |
147 | err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid); | 147 | err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid); |
148 | err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid); | 148 | err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid); |
149 | err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev); | 149 | err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev); |
150 | err |= __put_user(stat->size, &ubuf->st_size); | 150 | err |= __put_user(stat->size, &ubuf->st_size); |
151 | err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime); | 151 | err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime); |
152 | err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec); | 152 | err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec); |
153 | err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime); | 153 | err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime); |
154 | err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec); | 154 | err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec); |
155 | err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime); | 155 | err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime); |
156 | err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec); | 156 | err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec); |
157 | err |= __put_user(stat->blksize, &ubuf->st_blksize); | 157 | err |= __put_user(stat->blksize, &ubuf->st_blksize); |
158 | err |= __put_user(stat->blocks, &ubuf->st_blocks); | 158 | err |= __put_user(stat->blocks, &ubuf->st_blocks); |
159 | return err; | 159 | return err; |
160 | } | 160 | } |
161 | 161 | ||
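cp_compat_stat() above squeezes the kernel's 64-bit stat into the old 32-bit layout, bailing out with -EOVERFLOW when a value cannot be represented, and clamping ids that do not fit the 16-bit st_uid/st_gid fields. The clamping is easy to check in plain userspace C:

    #include <stdio.h>

    #define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))

    int main(void)
    {
            /* ids that fit pass through; larger ones become the
               16-bit overflow value 65534 */
            printf("%u -> %u\n",  1000u, (unsigned) high2lowuid(1000u));  /* 1000  */
            printf("%u -> %u\n", 65535u, (unsigned) high2lowuid(65535u)); /* 65535 */
            printf("%u -> %u\n", 70000u, (unsigned) high2lowuid(70000u)); /* 65534 */
            return 0;
    }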
162 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | 162 | #if PAGE_SHIFT > IA32_PAGE_SHIFT |
163 | 163 | ||
164 | 164 | ||
165 | static int | 165 | static int |
166 | get_page_prot (struct vm_area_struct *vma, unsigned long addr) | 166 | get_page_prot (struct vm_area_struct *vma, unsigned long addr) |
167 | { | 167 | { |
168 | int prot = 0; | 168 | int prot = 0; |
169 | 169 | ||
170 | if (!vma || vma->vm_start > addr) | 170 | if (!vma || vma->vm_start > addr) |
171 | return 0; | 171 | return 0; |
172 | 172 | ||
173 | if (vma->vm_flags & VM_READ) | 173 | if (vma->vm_flags & VM_READ) |
174 | prot |= PROT_READ; | 174 | prot |= PROT_READ; |
175 | if (vma->vm_flags & VM_WRITE) | 175 | if (vma->vm_flags & VM_WRITE) |
176 | prot |= PROT_WRITE; | 176 | prot |= PROT_WRITE; |
177 | if (vma->vm_flags & VM_EXEC) | 177 | if (vma->vm_flags & VM_EXEC) |
178 | prot |= PROT_EXEC; | 178 | prot |= PROT_EXEC; |
179 | return prot; | 179 | return prot; |
180 | } | 180 | } |
181 | 181 | ||
182 | /* | 182 | /* |
183 | * Map a subpage by creating an anonymous page that contains the union of the old page and | 183 | * Map a subpage by creating an anonymous page that contains the union of the old page and |
184 | * the subpage. | 184 | * the subpage. |
185 | */ | 185 | */ |
186 | static unsigned long | 186 | static unsigned long |
187 | mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags, | 187 | mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags, |
188 | loff_t off) | 188 | loff_t off) |
189 | { | 189 | { |
190 | void *page = NULL; | 190 | void *page = NULL; |
191 | struct inode *inode; | 191 | struct inode *inode; |
192 | unsigned long ret = 0; | 192 | unsigned long ret = 0; |
193 | struct vm_area_struct *vma = find_vma(current->mm, start); | 193 | struct vm_area_struct *vma = find_vma(current->mm, start); |
194 | int old_prot = get_page_prot(vma, start); | 194 | int old_prot = get_page_prot(vma, start); |
195 | 195 | ||
196 | DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n", | 196 | DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n", |
197 | file, start, end, prot, flags, off); | 197 | file, start, end, prot, flags, off); |
198 | 198 | ||
199 | 199 | ||
200 | /* Optimize the case where the old mmap and the new mmap are both anonymous */ | 200 | /* Optimize the case where the old mmap and the new mmap are both anonymous */ |
201 | if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) { | 201 | if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) { |
202 | if (clear_user((void __user *) start, end - start)) { | 202 | if (clear_user((void __user *) start, end - start)) { |
203 | ret = -EFAULT; | 203 | ret = -EFAULT; |
204 | goto out; | 204 | goto out; |
205 | } | 205 | } |
206 | goto skip_mmap; | 206 | goto skip_mmap; |
207 | } | 207 | } |
208 | 208 | ||
209 | page = (void *) get_zeroed_page(GFP_KERNEL); | 209 | page = (void *) get_zeroed_page(GFP_KERNEL); |
210 | if (!page) | 210 | if (!page) |
211 | return -ENOMEM; | 211 | return -ENOMEM; |
212 | 212 | ||
213 | if (old_prot) | 213 | if (old_prot) |
214 | copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE); | 214 | copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE); |
215 | 215 | ||
216 | down_write(¤t->mm->mmap_sem); | 216 | down_write(¤t->mm->mmap_sem); |
217 | { | 217 | { |
218 | ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE, | 218 | ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE, |
219 | flags | MAP_FIXED | MAP_ANONYMOUS, 0); | 219 | flags | MAP_FIXED | MAP_ANONYMOUS, 0); |
220 | } | 220 | } |
221 | up_write(¤t->mm->mmap_sem); | 221 | up_write(¤t->mm->mmap_sem); |
222 | 222 | ||
223 | if (IS_ERR((void *) ret)) | 223 | if (IS_ERR((void *) ret)) |
224 | goto out; | 224 | goto out; |
225 | 225 | ||
226 | if (old_prot) { | 226 | if (old_prot) { |
227 | /* copy back the old page contents. */ | 227 | /* copy back the old page contents. */ |
228 | if (offset_in_page(start)) | 228 | if (offset_in_page(start)) |
229 | copy_to_user((void __user *) PAGE_START(start), page, | 229 | copy_to_user((void __user *) PAGE_START(start), page, |
230 | offset_in_page(start)); | 230 | offset_in_page(start)); |
231 | if (offset_in_page(end)) | 231 | if (offset_in_page(end)) |
232 | copy_to_user((void __user *) end, page + offset_in_page(end), | 232 | copy_to_user((void __user *) end, page + offset_in_page(end), |
233 | PAGE_SIZE - offset_in_page(end)); | 233 | PAGE_SIZE - offset_in_page(end)); |
234 | } | 234 | } |
235 | 235 | ||
236 | if (!(flags & MAP_ANONYMOUS)) { | 236 | if (!(flags & MAP_ANONYMOUS)) { |
237 | /* read the file contents */ | 237 | /* read the file contents */ |
238 | inode = file->f_path.dentry->d_inode; | 238 | inode = file->f_path.dentry->d_inode; |
239 | if (!inode->i_fop || !file->f_op->read | 239 | if (!inode->i_fop || !file->f_op->read |
240 | || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0)) | 240 | || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0)) |
241 | { | 241 | { |
242 | ret = -EINVAL; | 242 | ret = -EINVAL; |
243 | goto out; | 243 | goto out; |
244 | } | 244 | } |
245 | } | 245 | } |
246 | 246 | ||
247 | skip_mmap: | 247 | skip_mmap: |
248 | if (!(prot & PROT_WRITE)) | 248 | if (!(prot & PROT_WRITE)) |
249 | ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot); | 249 | ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot); |
250 | out: | 250 | out: |
251 | if (page) | 251 | if (page) |
252 | free_page((unsigned long) page); | 252 | free_page((unsigned long) page); |
253 | return ret; | 253 | return ret; |
254 | } | 254 | } |
255 | 255 | ||
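mmap_subpage() builds the "union" page described in its comment: it saves the current contents of the IA64 page, maps a fresh anonymous page over it, then copies the old head and tail back around the new subpage. The same construction on plain buffers, assuming a 16 KB IA64 page (illustrative only, no kernel calls):

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 16384u                 /* assumed IA64 page size */

    int main(void)
    {
            static char saved[PAGE_SIZE], page[PAGE_SIZE];
            unsigned int start = 0x1000, end = 0x3000;  /* new subpage range */

            memset(saved, 'o', sizeof saved);  /* old contents, as copied
                                                  out with copy_from_user() */
            memset(page, 'n', sizeof page);    /* fresh anonymous page */

            /* copy the old contents back outside [start, end): */
            memcpy(page, saved, start);                       /* head */
            memcpy(page + end, saved + end, PAGE_SIZE - end); /* tail */

            printf("%c %c %c\n", page[0], page[start], page[end]); /* o n o */
            return 0;
    }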
256 | /* SLAB cache for partial_page structures */ | 256 | /* SLAB cache for ia64_partial_page structures */ |
257 | struct kmem_cache *partial_page_cachep; | 257 | struct kmem_cache *ia64_partial_page_cachep; |
258 | 258 | ||
259 | /* | 259 | /* |
260 | * init partial_page_list. | 260 | * init ia64_partial_page_list. |
261 | * Returns NULL if the kmalloc fails. | 261 | * Returns NULL if the kmalloc fails. |
262 | */ | 262 | */ |
263 | struct partial_page_list* | 263 | struct ia64_partial_page_list* |
264 | ia32_init_pp_list(void) | 264 | ia32_init_pp_list(void) |
265 | { | 265 | { |
266 | struct partial_page_list *p; | 266 | struct ia64_partial_page_list *p; |
267 | 267 | ||
268 | if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL) | 268 | if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL) |
269 | return p; | 269 | return p; |
270 | p->pp_head = NULL; | 270 | p->pp_head = NULL; |
271 | p->ppl_rb = RB_ROOT; | 271 | p->ppl_rb = RB_ROOT; |
272 | p->pp_hint = NULL; | 272 | p->pp_hint = NULL; |
273 | atomic_set(&p->pp_count, 1); | 273 | atomic_set(&p->pp_count, 1); |
274 | return p; | 274 | return p; |
275 | } | 275 | } |
276 | 276 | ||
277 | /* | 277 | /* |
278 | * Search for the partial page with @start in partial page list @ppl. | 278 | * Search for the partial page with @start in partial page list @ppl. |
279 | * If finds the partial page, return the found partial page. | 279 | * If finds the partial page, return the found partial page. |
280 | * Else, return 0 and provide @pprev, @rb_link, @rb_parent to | 280 | * Else, return 0 and provide @pprev, @rb_link, @rb_parent to |
281 | * be used by later __ia32_insert_pp(). | 281 | * be used by later __ia32_insert_pp(). |
282 | */ | 282 | */ |
283 | static struct partial_page * | 283 | static struct ia64_partial_page * |
284 | __ia32_find_pp(struct partial_page_list *ppl, unsigned int start, | 284 | __ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start, |
285 | struct partial_page **pprev, struct rb_node ***rb_link, | 285 | struct ia64_partial_page **pprev, struct rb_node ***rb_link, |
286 | struct rb_node **rb_parent) | 286 | struct rb_node **rb_parent) |
287 | { | 287 | { |
288 | struct partial_page *pp; | 288 | struct ia64_partial_page *pp; |
289 | struct rb_node **__rb_link, *__rb_parent, *rb_prev; | 289 | struct rb_node **__rb_link, *__rb_parent, *rb_prev; |
290 | 290 | ||
291 | pp = ppl->pp_hint; | 291 | pp = ppl->pp_hint; |
292 | if (pp && pp->base == start) | 292 | if (pp && pp->base == start) |
293 | return pp; | 293 | return pp; |
294 | 294 | ||
295 | __rb_link = &ppl->ppl_rb.rb_node; | 295 | __rb_link = &ppl->ppl_rb.rb_node; |
296 | rb_prev = __rb_parent = NULL; | 296 | rb_prev = __rb_parent = NULL; |
297 | 297 | ||
298 | while (*__rb_link) { | 298 | while (*__rb_link) { |
299 | __rb_parent = *__rb_link; | 299 | __rb_parent = *__rb_link; |
300 | pp = rb_entry(__rb_parent, struct partial_page, pp_rb); | 300 | pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb); |
301 | 301 | ||
302 | if (pp->base == start) { | 302 | if (pp->base == start) { |
303 | ppl->pp_hint = pp; | 303 | ppl->pp_hint = pp; |
304 | return pp; | 304 | return pp; |
305 | } else if (pp->base < start) { | 305 | } else if (pp->base < start) { |
306 | rb_prev = __rb_parent; | 306 | rb_prev = __rb_parent; |
307 | __rb_link = &__rb_parent->rb_right; | 307 | __rb_link = &__rb_parent->rb_right; |
308 | } else { | 308 | } else { |
309 | __rb_link = &__rb_parent->rb_left; | 309 | __rb_link = &__rb_parent->rb_left; |
310 | } | 310 | } |
311 | } | 311 | } |
312 | 312 | ||
313 | *rb_link = __rb_link; | 313 | *rb_link = __rb_link; |
314 | *rb_parent = __rb_parent; | 314 | *rb_parent = __rb_parent; |
315 | *pprev = NULL; | 315 | *pprev = NULL; |
316 | if (rb_prev) | 316 | if (rb_prev) |
317 | *pprev = rb_entry(rb_prev, struct partial_page, pp_rb); | 317 | *pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb); |
318 | return NULL; | 318 | return NULL; |
319 | } | 319 | } |
320 | 320 | ||
321 | /* | 321 | /* |
322 | * insert @pp into @ppl. | 322 | * insert @pp into @ppl. |
323 | */ | 323 | */ |
324 | static void | 324 | static void |
325 | __ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp, | 325 | __ia32_insert_pp(struct ia64_partial_page_list *ppl, |
326 | struct partial_page *prev, struct rb_node **rb_link, | 326 | struct ia64_partial_page *pp, struct ia64_partial_page *prev, |
327 | struct rb_node *rb_parent) | 327 | struct rb_node **rb_link, struct rb_node *rb_parent) |
328 | { | 328 | { |
329 | /* link list */ | 329 | /* link list */ |
330 | if (prev) { | 330 | if (prev) { |
331 | pp->next = prev->next; | 331 | pp->next = prev->next; |
332 | prev->next = pp; | 332 | prev->next = pp; |
333 | } else { | 333 | } else { |
334 | ppl->pp_head = pp; | 334 | ppl->pp_head = pp; |
335 | if (rb_parent) | 335 | if (rb_parent) |
336 | pp->next = rb_entry(rb_parent, | 336 | pp->next = rb_entry(rb_parent, |
337 | struct partial_page, pp_rb); | 337 | struct ia64_partial_page, pp_rb); |
338 | else | 338 | else |
339 | pp->next = NULL; | 339 | pp->next = NULL; |
340 | } | 340 | } |
341 | 341 | ||
342 | /* link rb */ | 342 | /* link rb */ |
343 | rb_link_node(&pp->pp_rb, rb_parent, rb_link); | 343 | rb_link_node(&pp->pp_rb, rb_parent, rb_link); |
344 | rb_insert_color(&pp->pp_rb, &ppl->ppl_rb); | 344 | rb_insert_color(&pp->pp_rb, &ppl->ppl_rb); |
345 | 345 | ||
346 | ppl->pp_hint = pp; | 346 | ppl->pp_hint = pp; |
347 | } | 347 | } |
348 | 348 | ||
349 | /* | 349 | /* |
350 | * delete @pp from partial page list @ppl. | 350 | * delete @pp from partial page list @ppl. |
351 | */ | 351 | */ |
352 | static void | 352 | static void |
353 | __ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp, | 353 | __ia32_delete_pp(struct ia64_partial_page_list *ppl, |
354 | struct partial_page *prev) | 354 | struct ia64_partial_page *pp, struct ia64_partial_page *prev) |
355 | { | 355 | { |
356 | if (prev) { | 356 | if (prev) { |
357 | prev->next = pp->next; | 357 | prev->next = pp->next; |
358 | if (ppl->pp_hint == pp) | 358 | if (ppl->pp_hint == pp) |
359 | ppl->pp_hint = prev; | 359 | ppl->pp_hint = prev; |
360 | } else { | 360 | } else { |
361 | ppl->pp_head = pp->next; | 361 | ppl->pp_head = pp->next; |
362 | if (ppl->pp_hint == pp) | 362 | if (ppl->pp_hint == pp) |
363 | ppl->pp_hint = pp->next; | 363 | ppl->pp_hint = pp->next; |
364 | } | 364 | } |
365 | rb_erase(&pp->pp_rb, &ppl->ppl_rb); | 365 | rb_erase(&pp->pp_rb, &ppl->ppl_rb); |
366 | kmem_cache_free(partial_page_cachep, pp); | 366 | kmem_cache_free(ia64_partial_page_cachep, pp); |
367 | } | 367 | } |
368 | 368 | ||
369 | static struct partial_page * | 369 | static struct ia64_partial_page * |
370 | __pp_prev(struct partial_page *pp) | 370 | __pp_prev(struct ia64_partial_page *pp) |
371 | { | 371 | { |
372 | struct rb_node *prev = rb_prev(&pp->pp_rb); | 372 | struct rb_node *prev = rb_prev(&pp->pp_rb); |
373 | if (prev) | 373 | if (prev) |
374 | return rb_entry(prev, struct partial_page, pp_rb); | 374 | return rb_entry(prev, struct ia64_partial_page, pp_rb); |
375 | else | 375 | else |
376 | return NULL; | 376 | return NULL; |
377 | } | 377 | } |
378 | 378 | ||
379 | /* | 379 | /* |
380 | * Delete partial pages with address between @start and @end. | 380 | * Delete partial pages with address between @start and @end. |
381 | * @start and @end are page aligned. | 381 | * @start and @end are page aligned. |
382 | */ | 382 | */ |
383 | static void | 383 | static void |
384 | __ia32_delete_pp_range(unsigned int start, unsigned int end) | 384 | __ia32_delete_pp_range(unsigned int start, unsigned int end) |
385 | { | 385 | { |
386 | struct partial_page *pp, *prev; | 386 | struct ia64_partial_page *pp, *prev; |
387 | struct rb_node **rb_link, *rb_parent; | 387 | struct rb_node **rb_link, *rb_parent; |
388 | 388 | ||
389 | if (start >= end) | 389 | if (start >= end) |
390 | return; | 390 | return; |
391 | 391 | ||
392 | pp = __ia32_find_pp(current->thread.ppl, start, &prev, | 392 | pp = __ia32_find_pp(current->thread.ppl, start, &prev, |
393 | &rb_link, &rb_parent); | 393 | &rb_link, &rb_parent); |
394 | if (pp) | 394 | if (pp) |
395 | prev = __pp_prev(pp); | 395 | prev = __pp_prev(pp); |
396 | else { | 396 | else { |
397 | if (prev) | 397 | if (prev) |
398 | pp = prev->next; | 398 | pp = prev->next; |
399 | else | 399 | else |
400 | pp = current->thread.ppl->pp_head; | 400 | pp = current->thread.ppl->pp_head; |
401 | } | 401 | } |
402 | 402 | ||
403 | while (pp && pp->base < end) { | 403 | while (pp && pp->base < end) { |
404 | struct partial_page *tmp = pp->next; | 404 | struct ia64_partial_page *tmp = pp->next; |
405 | __ia32_delete_pp(current->thread.ppl, pp, prev); | 405 | __ia32_delete_pp(current->thread.ppl, pp, prev); |
406 | pp = tmp; | 406 | pp = tmp; |
407 | } | 407 | } |
408 | } | 408 | } |
409 | 409 | ||
410 | /* | 410 | /* |
411 | * Set the range between @start and @end in bitmap. | 411 | * Set the range between @start and @end in bitmap. |
412 | * @start and @end should be IA32 page aligned and in the same IA64 page. | 412 | * @start and @end should be IA32 page aligned and in the same IA64 page. |
413 | */ | 413 | */ |
414 | static int | 414 | static int |
415 | __ia32_set_pp(unsigned int start, unsigned int end, int flags) | 415 | __ia32_set_pp(unsigned int start, unsigned int end, int flags) |
416 | { | 416 | { |
417 | struct partial_page *pp, *prev; | 417 | struct ia64_partial_page *pp, *prev; |
418 | struct rb_node ** rb_link, *rb_parent; | 418 | struct rb_node ** rb_link, *rb_parent; |
419 | unsigned int pstart, start_bit, end_bit, i; | 419 | unsigned int pstart, start_bit, end_bit, i; |
420 | 420 | ||
421 | pstart = PAGE_START(start); | 421 | pstart = PAGE_START(start); |
422 | start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; | 422 | start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; |
423 | end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; | 423 | end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; |
424 | if (end_bit == 0) | 424 | if (end_bit == 0) |
425 | end_bit = PAGE_SIZE / IA32_PAGE_SIZE; | 425 | end_bit = PAGE_SIZE / IA32_PAGE_SIZE; |
426 | pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, | 426 | pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, |
427 | &rb_link, &rb_parent); | 427 | &rb_link, &rb_parent); |
428 | if (pp) { | 428 | if (pp) { |
429 | for (i = start_bit; i < end_bit; i++) | 429 | for (i = start_bit; i < end_bit; i++) |
430 | set_bit(i, &pp->bitmap); | 430 | set_bit(i, &pp->bitmap); |
431 | /* | 431 | /* |
432 | * Check: if this partial page has been set to a full page, | 432 | * Check: if this partial page has been set to a full page, |
433 | * then delete it. | 433 | * then delete it. |
434 | */ | 434 | */ |
435 | if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >= | 435 | if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >= |
436 | PAGE_SIZE/IA32_PAGE_SIZE) { | 436 | PAGE_SIZE/IA32_PAGE_SIZE) { |
437 | __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp)); | 437 | __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp)); |
438 | } | 438 | } |
439 | return 0; | 439 | return 0; |
440 | } | 440 | } |
441 | 441 | ||
442 | /* | 442 | /* |
443 | * MAP_FIXED may lead to overlapping mmap. | 443 | * MAP_FIXED may lead to overlapping mmap. |
444 | * In this case, the requested mmap area may already be mmaped as a full | 444 | * In this case, the requested mmap area may already be mmaped as a full |
445 | * page. So check the vma before adding a new partial page. | 445 | * page. So check the vma before adding a new partial page. |
446 | */ | 446 | */ |
447 | if (flags & MAP_FIXED) { | 447 | if (flags & MAP_FIXED) { |
448 | struct vm_area_struct *vma = find_vma(current->mm, pstart); | 448 | struct vm_area_struct *vma = find_vma(current->mm, pstart); |
449 | if (vma && vma->vm_start <= pstart) | 449 | if (vma && vma->vm_start <= pstart) |
450 | return 0; | 450 | return 0; |
451 | } | 451 | } |
452 | 452 | ||
453 | /* allocate a new partial_page */ | 453 | /* allocate a new ia64_partial_page */ |
454 | pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL); | 454 | pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL); |
455 | if (!pp) | 455 | if (!pp) |
456 | return -ENOMEM; | 456 | return -ENOMEM; |
457 | pp->base = pstart; | 457 | pp->base = pstart; |
458 | pp->bitmap = 0; | 458 | pp->bitmap = 0; |
459 | for (i=start_bit; i<end_bit; i++) | 459 | for (i=start_bit; i<end_bit; i++) |
460 | set_bit(i, &(pp->bitmap)); | 460 | set_bit(i, &(pp->bitmap)); |
461 | pp->next = NULL; | 461 | pp->next = NULL; |
462 | __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent); | 462 | __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent); |
463 | return 0; | 463 | return 0; |
464 | } | 464 | } |
465 | 465 | ||
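Each partial page carries a bitmap with one bit per 4 KB IA32 page inside the enclosing IA64 page, so with an assumed 16 KB IA64 page there are four bits. A worked example of the start_bit/end_bit arithmetic in __ia32_set_pp() (end % PAGE_SIZE == 0 is the special case the kernel rounds up to a full page's worth of bits):

    #include <stdio.h>

    #define PAGE_SIZE      16384u            /* assumed IA64 page size */
    #define IA32_PAGE_SIZE  4096u

    int main(void)
    {
            /* an IA32 mapping of [0x1000, 0x3000) inside one IA64 page */
            unsigned int start = 0x1000, end = 0x3000;
            unsigned int start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;  /* 1 */
            unsigned int end_bit   = (end   % PAGE_SIZE) / IA32_PAGE_SIZE;  /* 3 */
            unsigned long bitmap = 0;

            for (unsigned int i = start_bit; i < end_bit; i++)
                    bitmap |= 1UL << i;      /* mirrors set_bit(i, &pp->bitmap) */

            printf("bitmap = 0x%lx\n", bitmap);  /* 0x6: IA32 pages 1 and 2 */
            return 0;
    }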
466 | /* | 466 | /* |
467 | * @start and @end should be IA32 page aligned, but don't need to be in the | 467 | * @start and @end should be IA32 page aligned, but don't need to be in the |
468 | * same IA64 page. Split @start and @end to make sure they're in the same IA64 | 468 | * same IA64 page. Split @start and @end to make sure they're in the same IA64 |
469 | * page, then call __ia32_set_pp(). | 469 | * page, then call __ia32_set_pp(). |
470 | */ | 470 | */ |
471 | static void | 471 | static void |
472 | ia32_set_pp(unsigned int start, unsigned int end, int flags) | 472 | ia32_set_pp(unsigned int start, unsigned int end, int flags) |
473 | { | 473 | { |
474 | down_write(¤t->mm->mmap_sem); | 474 | down_write(¤t->mm->mmap_sem); |
475 | if (flags & MAP_FIXED) { | 475 | if (flags & MAP_FIXED) { |
476 | /* | 476 | /* |
477 | * MAP_FIXED may lead to overlapping mmap. When this happens, | 477 | * MAP_FIXED may lead to overlapping mmap. When this happens, |
478 | * mapping a range of complete IA64 pages deletes any old | 478 | * mapping a range of complete IA64 pages deletes any old |
479 | * partial pages in that range. | 479 | * partial pages in that range. |
480 | */ | 480 | */ |
481 | __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end)); | 481 | __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end)); |
482 | } | 482 | } |
483 | 483 | ||
484 | if (end < PAGE_ALIGN(start)) { | 484 | if (end < PAGE_ALIGN(start)) { |
485 | __ia32_set_pp(start, end, flags); | 485 | __ia32_set_pp(start, end, flags); |
486 | } else { | 486 | } else { |
487 | if (offset_in_page(start)) | 487 | if (offset_in_page(start)) |
488 | __ia32_set_pp(start, PAGE_ALIGN(start), flags); | 488 | __ia32_set_pp(start, PAGE_ALIGN(start), flags); |
489 | if (offset_in_page(end)) | 489 | if (offset_in_page(end)) |
490 | __ia32_set_pp(PAGE_START(end), end, flags); | 490 | __ia32_set_pp(PAGE_START(end), end, flags); |
491 | } | 491 | } |
492 | up_write(¤t->mm->mmap_sem); | 492 | up_write(¤t->mm->mmap_sem); |
493 | } | 493 | } |
494 | 494 | ||
495 | /* | 495 | /* |
496 | * Unset the range between @start and @end in bitmap. | 496 | * Unset the range between @start and @end in bitmap. |
497 | * @start and @end should be IA32 page aligned and in the same IA64 page. | 497 | * @start and @end should be IA32 page aligned and in the same IA64 page. |
498 | * After doing that, if the bitmap is 0, then free the page and return 1, | 498 | * After doing that, if the bitmap is 0, then free the page and return 1, |
499 | * otherwise return 0. | 499 | * otherwise return 0. |
500 | * If the partial page is not found in the list, then: | 500 | * If the partial page is not found in the list, then: |
501 | * if the vma exists, the full page is converted to a partial page; | 501 | * if the vma exists, the full page is converted to a partial page; |
502 | * otherwise return -ENOMEM. | 502 | * otherwise return -ENOMEM. |
503 | */ | 503 | */ |
504 | static int | 504 | static int |
505 | __ia32_unset_pp(unsigned int start, unsigned int end) | 505 | __ia32_unset_pp(unsigned int start, unsigned int end) |
506 | { | 506 | { |
507 | struct partial_page *pp, *prev; | 507 | struct ia64_partial_page *pp, *prev; |
508 | struct rb_node ** rb_link, *rb_parent; | 508 | struct rb_node ** rb_link, *rb_parent; |
509 | unsigned int pstart, start_bit, end_bit, i; | 509 | unsigned int pstart, start_bit, end_bit, i; |
510 | struct vm_area_struct *vma; | 510 | struct vm_area_struct *vma; |
511 | 511 | ||
512 | pstart = PAGE_START(start); | 512 | pstart = PAGE_START(start); |
513 | start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; | 513 | start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; |
514 | end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; | 514 | end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; |
515 | if (end_bit == 0) | 515 | if (end_bit == 0) |
516 | end_bit = PAGE_SIZE / IA32_PAGE_SIZE; | 516 | end_bit = PAGE_SIZE / IA32_PAGE_SIZE; |
517 | 517 | ||
518 | pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, | 518 | pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, |
519 | &rb_link, &rb_parent); | 519 | &rb_link, &rb_parent); |
520 | if (pp) { | 520 | if (pp) { |
521 | for (i = start_bit; i < end_bit; i++) | 521 | for (i = start_bit; i < end_bit; i++) |
522 | clear_bit(i, &pp->bitmap); | 522 | clear_bit(i, &pp->bitmap); |
523 | if (pp->bitmap == 0) { | 523 | if (pp->bitmap == 0) { |
524 | __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp)); | 524 | __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp)); |
525 | return 1; | 525 | return 1; |
526 | } | 526 | } |
527 | return 0; | 527 | return 0; |
528 | } | 528 | } |
529 | 529 | ||
530 | vma = find_vma(current->mm, pstart); | 530 | vma = find_vma(current->mm, pstart); |
531 | if (!vma || vma->vm_start > pstart) { | 531 | if (!vma || vma->vm_start > pstart) { |
532 | return -ENOMEM; | 532 | return -ENOMEM; |
533 | } | 533 | } |
534 | 534 | ||
535 | /* allocate a new partial_page */ | 535 | /* allocate a new ia64_partial_page */ |
536 | pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL); | 536 | pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL); |
537 | if (!pp) | 537 | if (!pp) |
538 | return -ENOMEM; | 538 | return -ENOMEM; |
539 | pp->base = pstart; | 539 | pp->base = pstart; |
540 | pp->bitmap = 0; | 540 | pp->bitmap = 0; |
541 | for (i = 0; i < start_bit; i++) | 541 | for (i = 0; i < start_bit; i++) |
542 | set_bit(i, &(pp->bitmap)); | 542 | set_bit(i, &(pp->bitmap)); |
543 | for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++) | 543 | for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++) |
544 | set_bit(i, &(pp->bitmap)); | 544 | set_bit(i, &(pp->bitmap)); |
545 | pp->next = NULL; | 545 | pp->next = NULL; |
546 | __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent); | 546 | __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent); |
547 | return 0; | 547 | return 0; |
548 | } | 548 | } |
549 | 549 | ||
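__ia32_unset_pp() is the inverse operation: it clears bits, and a bitmap that drops to zero means no 4 KB piece of the IA64 page is still mapped, which is the return-1 case that lets the caller munmap the whole page. Continuing the sketch above, under the same 16 KB assumption:

    #include <stdio.h>

    #define PAGE_SIZE      16384u            /* assumed IA64 page size */
    #define IA32_PAGE_SIZE  4096u

    int main(void)
    {
            unsigned long bitmap = 0x6;      /* IA32 pages 1 and 2 mapped */
            unsigned int start = 0x1000, end = 0x3000;  /* unmap both */
            unsigned int start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
            unsigned int end_bit   = (end   % PAGE_SIZE) / IA32_PAGE_SIZE;

            for (unsigned int i = start_bit; i < end_bit; i++)
                    bitmap &= ~(1UL << i);   /* mirrors clear_bit() */

            printf("bitmap = 0x%lx -> %s\n", bitmap,
                   bitmap == 0 ? "munmap whole IA64 page" : "keep partial page");
            return 0;
    }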
550 | /* | 550 | /* |
551 | * Delete pp between PAGE_ALIGN(start) and PAGE_START(end) by calling | 551 | * Delete pp between PAGE_ALIGN(start) and PAGE_START(end) by calling |
552 | * __ia32_delete_pp_range(). Unset possible partial pages by calling | 552 | * __ia32_delete_pp_range(). Unset possible partial pages by calling |
553 | * __ia32_unset_pp(). | 553 | * __ia32_unset_pp(). |
554 | * The returned value see __ia32_unset_pp(). | 554 | * The returned value see __ia32_unset_pp(). |
555 | */ | 555 | */ |
556 | static int | 556 | static int |
557 | ia32_unset_pp(unsigned int *startp, unsigned int *endp) | 557 | ia32_unset_pp(unsigned int *startp, unsigned int *endp) |
558 | { | 558 | { |
559 | unsigned int start = *startp, end = *endp; | 559 | unsigned int start = *startp, end = *endp; |
560 | int ret = 0; | 560 | int ret = 0; |
561 | 561 | ||
562 | down_write(¤t->mm->mmap_sem); | 562 | down_write(¤t->mm->mmap_sem); |
563 | 563 | ||
564 | __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end)); | 564 | __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end)); |
565 | 565 | ||
566 | if (end < PAGE_ALIGN(start)) { | 566 | if (end < PAGE_ALIGN(start)) { |
567 | ret = __ia32_unset_pp(start, end); | 567 | ret = __ia32_unset_pp(start, end); |
568 | if (ret == 1) { | 568 | if (ret == 1) { |
569 | *startp = PAGE_START(start); | 569 | *startp = PAGE_START(start); |
570 | *endp = PAGE_ALIGN(end); | 570 | *endp = PAGE_ALIGN(end); |
571 | } | 571 | } |
572 | if (ret == 0) { | 572 | if (ret == 0) { |
573 | /* to shortcut sys_munmap() in sys32_munmap() */ | 573 | /* to shortcut sys_munmap() in sys32_munmap() */ |
574 | *startp = PAGE_START(start); | 574 | *startp = PAGE_START(start); |
575 | *endp = PAGE_START(end); | 575 | *endp = PAGE_START(end); |
576 | } | 576 | } |
577 | } else { | 577 | } else { |
578 | if (offset_in_page(start)) { | 578 | if (offset_in_page(start)) { |
579 | ret = __ia32_unset_pp(start, PAGE_ALIGN(start)); | 579 | ret = __ia32_unset_pp(start, PAGE_ALIGN(start)); |
580 | if (ret == 1) | 580 | if (ret == 1) |
581 | *startp = PAGE_START(start); | 581 | *startp = PAGE_START(start); |
582 | if (ret == 0) | 582 | if (ret == 0) |
583 | *startp = PAGE_ALIGN(start); | 583 | *startp = PAGE_ALIGN(start); |
584 | if (ret < 0) | 584 | if (ret < 0) |
585 | goto out; | 585 | goto out; |
586 | } | 586 | } |
587 | if (offset_in_page(end)) { | 587 | if (offset_in_page(end)) { |
588 | ret = __ia32_unset_pp(PAGE_START(end), end); | 588 | ret = __ia32_unset_pp(PAGE_START(end), end); |
589 | if (ret == 1) | 589 | if (ret == 1) |
590 | *endp = PAGE_ALIGN(end); | 590 | *endp = PAGE_ALIGN(end); |
591 | if (ret == 0) | 591 | if (ret == 0) |
592 | *endp = PAGE_START(end); | 592 | *endp = PAGE_START(end); |
593 | } | 593 | } |
594 | } | 594 | } |
595 | 595 | ||
596 | out: | 596 | out: |
597 | up_write(¤t->mm->mmap_sem); | 597 | up_write(¤t->mm->mmap_sem); |
598 | return ret; | 598 | return ret; |
599 | } | 599 | } |
600 | 600 | ||
601 | /* | 601 | /* |
602 | * Compare the range between @start and @end with bitmap in partial page. | 602 | * Compare the range between @start and @end with bitmap in partial page. |
603 | * @start and @end should be IA32 page aligned and in the same IA64 page. | 603 | * @start and @end should be IA32 page aligned and in the same IA64 page. |
604 | */ | 604 | */ |
605 | static int | 605 | static int |
606 | __ia32_compare_pp(unsigned int start, unsigned int end) | 606 | __ia32_compare_pp(unsigned int start, unsigned int end) |
607 | { | 607 | { |
608 | struct partial_page *pp, *prev; | 608 | struct ia64_partial_page *pp, *prev; |
609 | struct rb_node ** rb_link, *rb_parent; | 609 | struct rb_node ** rb_link, *rb_parent; |
610 | unsigned int pstart, start_bit, end_bit, size; | 610 | unsigned int pstart, start_bit, end_bit, size; |
611 | unsigned int first_bit, next_zero_bit; /* the first range in bitmap */ | 611 | unsigned int first_bit, next_zero_bit; /* the first range in bitmap */ |
612 | 612 | ||
613 | pstart = PAGE_START(start); | 613 | pstart = PAGE_START(start); |
614 | 614 | ||
615 | pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, | 615 | pp = __ia32_find_pp(current->thread.ppl, pstart, &prev, |
616 | &rb_link, &rb_parent); | 616 | &rb_link, &rb_parent); |
617 | if (!pp) | 617 | if (!pp) |
618 | return 1; | 618 | return 1; |
619 | 619 | ||
620 | start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; | 620 | start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE; |
621 | end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; | 621 | end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE; |
622 | size = sizeof(pp->bitmap) * 8; | 622 | size = sizeof(pp->bitmap) * 8; |
623 | first_bit = find_first_bit(&pp->bitmap, size); | 623 | first_bit = find_first_bit(&pp->bitmap, size); |
624 | next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit); | 624 | next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit); |
625 | if ((start_bit < first_bit) || (end_bit > next_zero_bit)) { | 625 | if ((start_bit < first_bit) || (end_bit > next_zero_bit)) { |
626 | /* exceeds the first range in bitmap */ | 626 | /* exceeds the first range in bitmap */ |
627 | return -ENOMEM; | 627 | return -ENOMEM; |
628 | } else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) { | 628 | } else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) { |
629 | first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit); | 629 | first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit); |
630 | if ((next_zero_bit < first_bit) && (first_bit < size)) | 630 | if ((next_zero_bit < first_bit) && (first_bit < size)) |
631 | return 1; /* has next range */ | 631 | return 1; /* has next range */ |
632 | else | 632 | else |
633 | return 0; /* no next range */ | 633 | return 0; /* no next range */ |
634 | } else | 634 | } else |
635 | return 1; | 635 | return 1; |
636 | } | 636 | } |
637 | 637 | ||
638 | /* | 638 | /* |
639 | * @start and @end should be IA32 page aligned, but don't need to be in the | 639 | * @start and @end should be IA32 page aligned, but don't need to be in the |
640 | * same IA64 page. Split @start and @end to make sure they're in the same IA64 | 640 | * same IA64 page. Split @start and @end to make sure they're in the same IA64 |
641 | * page, then call __ia32_compare_pp(). | 641 | * page, then call __ia32_compare_pp(). |
642 | * | 642 | * |
643 | * Take this as example: the range is the 1st and 2nd 4K page. | 643 | * Take this as example: the range is the 1st and 2nd 4K page. |
644 | * Return 0 if they fit bitmap exactly, i.e. bitmap = 00000011; | 644 | * Return 0 if they fit bitmap exactly, i.e. bitmap = 00000011; |
645 | * Return 1 if the range doesn't cover whole bitmap, e.g. bitmap = 00001111; | 645 | * Return 1 if the range doesn't cover whole bitmap, e.g. bitmap = 00001111; |
646 | * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or | 646 | * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or |
647 | * bitmap = 00000101. | 647 | * bitmap = 00000101. |
648 | */ | 648 | */ |
649 | static int | 649 | static int |
650 | ia32_compare_pp(unsigned int *startp, unsigned int *endp) | 650 | ia32_compare_pp(unsigned int *startp, unsigned int *endp) |
651 | { | 651 | { |
652 | unsigned int start = *startp, end = *endp; | 652 | unsigned int start = *startp, end = *endp; |
653 | int retval = 0; | 653 | int retval = 0; |
654 | 654 | ||
655 | down_write(¤t->mm->mmap_sem); | 655 | down_write(¤t->mm->mmap_sem); |
656 | 656 | ||
657 | if (end < PAGE_ALIGN(start)) { | 657 | if (end < PAGE_ALIGN(start)) { |
658 | retval = __ia32_compare_pp(start, end); | 658 | retval = __ia32_compare_pp(start, end); |
659 | if (retval == 0) { | 659 | if (retval == 0) { |
660 | *startp = PAGE_START(start); | 660 | *startp = PAGE_START(start); |
661 | *endp = PAGE_ALIGN(end); | 661 | *endp = PAGE_ALIGN(end); |
662 | } | 662 | } |
663 | } else { | 663 | } else { |
664 | if (offset_in_page(start)) { | 664 | if (offset_in_page(start)) { |
665 | retval = __ia32_compare_pp(start, | 665 | retval = __ia32_compare_pp(start, |
666 | PAGE_ALIGN(start)); | 666 | PAGE_ALIGN(start)); |
667 | if (retval == 0) | 667 | if (retval == 0) |
668 | *startp = PAGE_START(start); | 668 | *startp = PAGE_START(start); |
669 | if (retval < 0) | 669 | if (retval < 0) |
670 | goto out; | 670 | goto out; |
671 | } | 671 | } |
672 | if (offset_in_page(end)) { | 672 | if (offset_in_page(end)) { |
673 | retval = __ia32_compare_pp(PAGE_START(end), end); | 673 | retval = __ia32_compare_pp(PAGE_START(end), end); |
674 | if (retval == 0) | 674 | if (retval == 0) |
675 | *endp = PAGE_ALIGN(end); | 675 | *endp = PAGE_ALIGN(end); |
676 | } | 676 | } |
677 | } | 677 | } |
678 | 678 | ||
679 | out: | 679 | out: |
680 | up_write(¤t->mm->mmap_sem); | 680 | up_write(¤t->mm->mmap_sem); |
681 | return retval; | 681 | return retval; |
682 | } | 682 | } |
683 | 683 | ||
684 | static void | 684 | static void |
685 | __ia32_drop_pp_list(struct partial_page_list *ppl) | 685 | __ia32_drop_pp_list(struct ia64_partial_page_list *ppl) |
686 | { | 686 | { |
687 | struct partial_page *pp = ppl->pp_head; | 687 | struct ia64_partial_page *pp = ppl->pp_head; |
688 | 688 | ||
689 | while (pp) { | 689 | while (pp) { |
690 | struct partial_page *next = pp->next; | 690 | struct ia64_partial_page *next = pp->next; |
691 | kmem_cache_free(partial_page_cachep, pp); | 691 | kmem_cache_free(ia64_partial_page_cachep, pp); |
692 | pp = next; | 692 | pp = next; |
693 | } | 693 | } |
694 | 694 | ||
695 | kfree(ppl); | 695 | kfree(ppl); |
696 | } | 696 | } |
697 | 697 | ||
698 | void | 698 | void |
699 | ia32_drop_partial_page_list(struct task_struct *task) | 699 | ia32_drop_ia64_partial_page_list(struct task_struct *task) |
700 | { | 700 | { |
701 | struct partial_page_list* ppl = task->thread.ppl; | 701 | struct ia64_partial_page_list* ppl = task->thread.ppl; |
702 | 702 | ||
703 | if (ppl && atomic_dec_and_test(&ppl->pp_count)) | 703 | if (ppl && atomic_dec_and_test(&ppl->pp_count)) |
704 | __ia32_drop_pp_list(ppl); | 704 | __ia32_drop_pp_list(ppl); |
705 | } | 705 | } |
706 | 706 | ||
707 | /* | 707 | /* |
708 | * Copy current->thread.ppl to ppl (already initialized). | 708 | * Copy current->thread.ppl to ppl (already initialized). |
709 | */ | 709 | */ |
710 | static int | 710 | static int |
711 | __ia32_copy_pp_list(struct partial_page_list *ppl) | 711 | __ia32_copy_pp_list(struct ia64_partial_page_list *ppl) |
712 | { | 712 | { |
713 | struct partial_page *pp, *tmp, *prev; | 713 | struct ia64_partial_page *pp, *tmp, *prev; |
714 | struct rb_node **rb_link, *rb_parent; | 714 | struct rb_node **rb_link, *rb_parent; |
715 | 715 | ||
716 | ppl->pp_head = NULL; | 716 | ppl->pp_head = NULL; |
717 | ppl->pp_hint = NULL; | 717 | ppl->pp_hint = NULL; |
718 | ppl->ppl_rb = RB_ROOT; | 718 | ppl->ppl_rb = RB_ROOT; |
719 | rb_link = &ppl->ppl_rb.rb_node; | 719 | rb_link = &ppl->ppl_rb.rb_node; |
720 | rb_parent = NULL; | 720 | rb_parent = NULL; |
721 | prev = NULL; | 721 | prev = NULL; |
722 | 722 | ||
723 | for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) { | 723 | for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) { |
724 | tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL); | 724 | tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL); |
725 | if (!tmp) | 725 | if (!tmp) |
726 | return -ENOMEM; | 726 | return -ENOMEM; |
727 | *tmp = *pp; | 727 | *tmp = *pp; |
728 | __ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent); | 728 | __ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent); |
729 | prev = tmp; | 729 | prev = tmp; |
730 | rb_link = &tmp->pp_rb.rb_right; | 730 | rb_link = &tmp->pp_rb.rb_right; |
731 | rb_parent = &tmp->pp_rb; | 731 | rb_parent = &tmp->pp_rb; |
732 | } | 732 | } |
733 | return 0; | 733 | return 0; |
734 | } | 734 | } |
735 | 735 | ||
736 | int | 736 | int |
737 | ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags) | 737 | ia32_copy_ia64_partial_page_list(struct task_struct *p, |
738 | unsigned long clone_flags) | ||
738 | { | 739 | { |
739 | int retval = 0; | 740 | int retval = 0; |
740 | 741 | ||
741 | if (clone_flags & CLONE_VM) { | 742 | if (clone_flags & CLONE_VM) { |
742 | atomic_inc(¤t->thread.ppl->pp_count); | 743 | atomic_inc(¤t->thread.ppl->pp_count); |
743 | p->thread.ppl = current->thread.ppl; | 744 | p->thread.ppl = current->thread.ppl; |
744 | } else { | 745 | } else { |
745 | p->thread.ppl = ia32_init_pp_list(); | 746 | p->thread.ppl = ia32_init_pp_list(); |
746 | if (!p->thread.ppl) | 747 | if (!p->thread.ppl) |
747 | return -ENOMEM; | 748 | return -ENOMEM; |
748 | down_write(¤t->mm->mmap_sem); | 749 | down_write(¤t->mm->mmap_sem); |
749 | { | 750 | { |
750 | retval = __ia32_copy_pp_list(p->thread.ppl); | 751 | retval = __ia32_copy_pp_list(p->thread.ppl); |
751 | } | 752 | } |
752 | up_write(¤t->mm->mmap_sem); | 753 | up_write(¤t->mm->mmap_sem); |
753 | } | 754 | } |
754 | 755 | ||
755 | return retval; | 756 | return retval; |
756 | } | 757 | } |
757 | 758 | ||
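The two branches above follow the usual CLONE_VM split: a thread shares the parent's partial-page list and bumps pp_count (the matching decrement is in the drop function above), while a full fork gets a deep copy with its own count of 1. A reduced model of just the refcounting, with stand-in names rather than the kernel API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdatomic.h>

    struct ppl { atomic_int pp_count; };

    static struct ppl *clone_ppl(struct ppl *parent, int clone_vm)
    {
            if (clone_vm) {                  /* CLONE_VM: share and refcount */
                    atomic_fetch_add(&parent->pp_count, 1);
                    return parent;
            }
            struct ppl *p = malloc(sizeof(*p));  /* else: fresh, deep-copied list */
            if (p)
                    atomic_init(&p->pp_count, 1);
            return p;
    }

    int main(void)
    {
            struct ppl parent = { 1 };
            struct ppl *thread = clone_ppl(&parent, 1);
            struct ppl *child  = clone_ppl(&parent, 0);

            printf("shared=%d parent_count=%d child_count=%d\n",
                   thread == &parent, atomic_load(&parent.pp_count),
                   atomic_load(&child->pp_count));  /* shared=1 2 1 */
            free(child);
            return 0;
    }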
758 | static unsigned long | 759 | static unsigned long |
759 | emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags, | 760 | emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags, |
760 | loff_t off) | 761 | loff_t off) |
761 | { | 762 | { |
762 | unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0; | 763 | unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0; |
763 | struct inode *inode; | 764 | struct inode *inode; |
764 | loff_t poff; | 765 | loff_t poff; |
765 | 766 | ||
766 | end = start + len; | 767 | end = start + len; |
767 | pstart = PAGE_START(start); | 768 | pstart = PAGE_START(start); |
768 | pend = PAGE_ALIGN(end); | 769 | pend = PAGE_ALIGN(end); |
769 | 770 | ||
770 | if (flags & MAP_FIXED) { | 771 | if (flags & MAP_FIXED) { |
771 | ia32_set_pp((unsigned int)start, (unsigned int)end, flags); | 772 | ia32_set_pp((unsigned int)start, (unsigned int)end, flags); |
772 | if (start > pstart) { | 773 | if (start > pstart) { |
773 | if (flags & MAP_SHARED) | 774 | if (flags & MAP_SHARED) |
774 | printk(KERN_INFO | 775 | printk(KERN_INFO |
775 | "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n", | 776 | "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n", |
776 | current->comm, current->pid, start); | 777 | current->comm, current->pid, start); |
777 | ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags, | 778 | ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags, |
778 | off); | 779 | off); |
779 | if (IS_ERR((void *) ret)) | 780 | if (IS_ERR((void *) ret)) |
780 | return ret; | 781 | return ret; |
781 | pstart += PAGE_SIZE; | 782 | pstart += PAGE_SIZE; |
782 | if (pstart >= pend) | 783 | if (pstart >= pend) |
783 | goto out; /* done */ | 784 | goto out; /* done */ |
784 | } | 785 | } |
785 | if (end < pend) { | 786 | if (end < pend) { |
786 | if (flags & MAP_SHARED) | 787 | if (flags & MAP_SHARED) |
787 | printk(KERN_INFO | 788 | printk(KERN_INFO |
788 | "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n", | 789 | "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n", |
789 | current->comm, current->pid, end); | 790 | current->comm, current->pid, end); |
790 | ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags, | 791 | ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags, |
791 | (off + len) - offset_in_page(end)); | 792 | (off + len) - offset_in_page(end)); |
792 | if (IS_ERR((void *) ret)) | 793 | if (IS_ERR((void *) ret)) |
793 | return ret; | 794 | return ret; |
794 | pend -= PAGE_SIZE; | 795 | pend -= PAGE_SIZE; |
795 | if (pstart >= pend) | 796 | if (pstart >= pend) |
796 | goto out; /* done */ | 797 | goto out; /* done */ |
797 | } | 798 | } |
798 | } else { | 799 | } else { |
799 | /* | 800 | /* |
800 | * If a start address was specified, use it if the entire rounded out area | 801 | * If a start address was specified, use it if the entire rounded out area |
801 | * is available. | 802 | * is available. |
802 | */ | 803 | */ |
803 | if (start && !pstart) | 804 | if (start && !pstart) |
804 | fudge = 1; /* handle case of mapping to range (0,PAGE_SIZE) */ | 805 | fudge = 1; /* handle case of mapping to range (0,PAGE_SIZE) */ |
805 | tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags); | 806 | tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags); |
806 | if (tmp != pstart) { | 807 | if (tmp != pstart) { |
807 | pstart = tmp; | 808 | pstart = tmp; |
808 | start = pstart + offset_in_page(off); /* make start congruent with off */ | 809 | start = pstart + offset_in_page(off); /* make start congruent with off */ |
809 | end = start + len; | 810 | end = start + len; |
810 | pend = PAGE_ALIGN(end); | 811 | pend = PAGE_ALIGN(end); |
811 | } | 812 | } |
812 | } | 813 | } |
813 | 814 | ||
814 | poff = off + (pstart - start); /* note: (pstart - start) may be negative */ | 815 | poff = off + (pstart - start); /* note: (pstart - start) may be negative */ |
815 | is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0); | 816 | is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0); |
816 | 817 | ||
817 | if ((flags & MAP_SHARED) && !is_congruent) | 818 | if ((flags & MAP_SHARED) && !is_congruent) |
818 | printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap " | 819 | printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap " |
819 | "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off); | 820 | "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off); |
820 | 821 | ||
821 | DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend, | 822 | DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend, |
822 | is_congruent ? "congruent" : "not congruent", poff); | 823 | is_congruent ? "congruent" : "not congruent", poff); |
823 | 824 | ||
824 | down_write(¤t->mm->mmap_sem); | 825 | down_write(¤t->mm->mmap_sem); |
825 | { | 826 | { |
826 | if (!(flags & MAP_ANONYMOUS) && is_congruent) | 827 | if (!(flags & MAP_ANONYMOUS) && is_congruent) |
827 | ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff); | 828 | ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff); |
828 | else | 829 | else |
829 | ret = do_mmap(NULL, pstart, pend - pstart, | 830 | ret = do_mmap(NULL, pstart, pend - pstart, |
830 | prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE), | 831 | prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE), |
831 | flags | MAP_FIXED | MAP_ANONYMOUS, 0); | 832 | flags | MAP_FIXED | MAP_ANONYMOUS, 0); |
832 | } | 833 | } |
833 | up_write(¤t->mm->mmap_sem); | 834 | up_write(¤t->mm->mmap_sem); |
834 | 835 | ||
835 | if (IS_ERR((void *) ret)) | 836 | if (IS_ERR((void *) ret)) |
836 | return ret; | 837 | return ret; |
837 | 838 | ||
838 | if (!is_congruent) { | 839 | if (!is_congruent) { |
839 | /* read the file contents */ | 840 | /* read the file contents */ |
840 | inode = file->f_path.dentry->d_inode; | 841 | inode = file->f_path.dentry->d_inode; |
841 | if (!inode->i_fop || !file->f_op->read | 842 | if (!inode->i_fop || !file->f_op->read |
842 | || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff) | 843 | || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff) |
843 | < 0)) | 844 | < 0)) |
844 | { | 845 | { |
845 | sys_munmap(pstart, pend - pstart); | 846 | sys_munmap(pstart, pend - pstart); |
846 | return -EINVAL; | 847 | return -EINVAL; |
847 | } | 848 | } |
848 | if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0) | 849 | if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0) |
849 | return -EINVAL; | 850 | return -EINVAL; |
850 | } | 851 | } |
851 | 852 | ||
852 | if (!(flags & MAP_FIXED)) | 853 | if (!(flags & MAP_FIXED)) |
853 | ia32_set_pp((unsigned int)start, (unsigned int)end, flags); | 854 | ia32_set_pp((unsigned int)start, (unsigned int)end, flags); |
854 | out: | 855 | out: |
855 | return start; | 856 | return start; |
856 | } | 857 | } |
857 | 858 | ||
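emulate_mmap() can only hand the request straight to do_mmap() when the mapping is "congruent": the user address and the file offset must be misaligned by the same amount within an IA64 page, so that poff comes out page aligned. Otherwise it maps anonymous memory and reads the file contents in by hand, as the !is_congruent path above does. The test, worked through with an assumed 16 KB page:

    #include <stdio.h>

    #define PAGE_SIZE 16384L                 /* assumed IA64 page size */
    #define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))
    #define PAGE_START(a)     ((a) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            long start = 0x5000;             /* 4 KB aligned, not 16 KB aligned */
            long off1 = 0x1000, off2 = 0x2000;
            long poff1 = off1 + (PAGE_START(start) - start);  /* 0x0    */
            long poff2 = off2 + (PAGE_START(start) - start);  /* 0x1000 */

            printf("off %#lx: congruent = %d\n",
                   (unsigned long) off1, offset_in_page(poff1) == 0);  /* 1 */
            printf("off %#lx: congruent = %d\n",
                   (unsigned long) off2, offset_in_page(poff2) == 0);  /* 0 */
            return 0;
    }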
858 | #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */ | 859 | #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */ |
859 | 860 | ||
860 | static inline unsigned int | 861 | static inline unsigned int |
861 | get_prot32 (unsigned int prot) | 862 | get_prot32 (unsigned int prot) |
862 | { | 863 | { |
863 | if (prot & PROT_WRITE) | 864 | if (prot & PROT_WRITE) |
864 | /* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */ | 865 | /* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */ |
865 | prot |= PROT_READ | PROT_WRITE | PROT_EXEC; | 866 | prot |= PROT_READ | PROT_WRITE | PROT_EXEC; |
866 | else if (prot & (PROT_READ | PROT_EXEC)) | 867 | else if (prot & (PROT_READ | PROT_EXEC)) |
867 | /* on x86, there is no distinction between PROT_READ and PROT_EXEC */ | 868 | /* on x86, there is no distinction between PROT_READ and PROT_EXEC */ |
868 | prot |= (PROT_READ | PROT_EXEC); | 869 | prot |= (PROT_READ | PROT_EXEC); |
869 | 870 | ||
870 | return prot; | 871 | return prot; |
871 | } | 872 | } |
872 | 873 | ||
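get_prot32() above widens the IA32 protection bits to legacy x86 semantics, where PROT_WRITE implies PROT_READ and PROT_READ implies PROT_EXEC. The function is self-contained enough to run as ordinary userspace C (PROT_* values from <sys/mman.h>):

    #include <stdio.h>
    #include <sys/mman.h>

    static unsigned int get_prot32(unsigned int prot)
    {
            if (prot & PROT_WRITE)
                    prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
            else if (prot & (PROT_READ | PROT_EXEC))
                    prot |= PROT_READ | PROT_EXEC;
            return prot;
    }

    int main(void)
    {
            printf("PROT_WRITE -> %#x\n", get_prot32(PROT_WRITE)); /* R|W|X = 0x7 */
            printf("PROT_READ  -> %#x\n", get_prot32(PROT_READ));  /* R|X   = 0x5 */
            printf("PROT_EXEC  -> %#x\n", get_prot32(PROT_EXEC));  /* R|X   = 0x5 */
            return 0;
    }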
873 | unsigned long | 874 | unsigned long |
874 | ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags, | 875 | ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags, |
875 | loff_t offset) | 876 | loff_t offset) |
876 | { | 877 | { |
877 | DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n", | 878 | DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n", |
878 | file, addr, len, prot, flags, offset); | 879 | file, addr, len, prot, flags, offset); |
879 | 880 | ||
880 | if (file && (!file->f_op || !file->f_op->mmap)) | 881 | if (file && (!file->f_op || !file->f_op->mmap)) |
881 | return -ENODEV; | 882 | return -ENODEV; |
882 | 883 | ||
883 | len = IA32_PAGE_ALIGN(len); | 884 | len = IA32_PAGE_ALIGN(len); |
884 | if (len == 0) | 885 | if (len == 0) |
885 | return addr; | 886 | return addr; |
886 | 887 | ||
887 | if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len) | 888 | if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len) |
888 | { | 889 | { |
889 | if (flags & MAP_FIXED) | 890 | if (flags & MAP_FIXED) |
890 | return -ENOMEM; | 891 | return -ENOMEM; |
891 | else | 892 | else |
892 | return -EINVAL; | 893 | return -EINVAL; |
893 | } | 894 | } |
894 | 895 | ||
895 | if (OFFSET4K(offset)) | 896 | if (OFFSET4K(offset)) |
896 | return -EINVAL; | 897 | return -EINVAL; |
897 | 898 | ||
898 | prot = get_prot32(prot); | 899 | prot = get_prot32(prot); |
899 | 900 | ||
900 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | 901 | #if PAGE_SHIFT > IA32_PAGE_SHIFT |
901 | mutex_lock(&ia32_mmap_mutex); | 902 | mutex_lock(&ia32_mmap_mutex); |
902 | { | 903 | { |
903 | addr = emulate_mmap(file, addr, len, prot, flags, offset); | 904 | addr = emulate_mmap(file, addr, len, prot, flags, offset); |
904 | } | 905 | } |
905 | mutex_unlock(&ia32_mmap_mutex); | 906 | mutex_unlock(&ia32_mmap_mutex); |
906 | #else | 907 | #else |
907 | down_write(&current->mm->mmap_sem); | 908 | down_write(&current->mm->mmap_sem); |
908 | { | 909 | { |
909 | addr = do_mmap(file, addr, len, prot, flags, offset); | 910 | addr = do_mmap(file, addr, len, prot, flags, offset); |
910 | } | 911 | } |
911 | up_write(&current->mm->mmap_sem); | 912 | up_write(&current->mm->mmap_sem); |
912 | #endif | 913 | #endif |
913 | DBG("ia32_do_mmap: returning 0x%lx\n", addr); | 914 | DBG("ia32_do_mmap: returning 0x%lx\n", addr); |
914 | return addr; | 915 | return addr; |
915 | } | 916 | } |
916 | 917 | ||
917 | /* | 918 | /* |
918 | * Linux/i386 historically couldn't handle more than 4 system call parameters, so these | 919 | * Linux/i386 historically couldn't handle more than 4 system call parameters, so these |
919 | * system calls used a memory block for parameter passing. | 920 | * system calls used a memory block for parameter passing. |
920 | */ | 921 | */ |
921 | 922 | ||
922 | struct mmap_arg_struct { | 923 | struct mmap_arg_struct { |
923 | unsigned int addr; | 924 | unsigned int addr; |
924 | unsigned int len; | 925 | unsigned int len; |
925 | unsigned int prot; | 926 | unsigned int prot; |
926 | unsigned int flags; | 927 | unsigned int flags; |
927 | unsigned int fd; | 928 | unsigned int fd; |
928 | unsigned int offset; | 929 | unsigned int offset; |
929 | }; | 930 | }; |
930 | 931 | ||
931 | asmlinkage long | 932 | asmlinkage long |
932 | sys32_mmap (struct mmap_arg_struct __user *arg) | 933 | sys32_mmap (struct mmap_arg_struct __user *arg) |
933 | { | 934 | { |
934 | struct mmap_arg_struct a; | 935 | struct mmap_arg_struct a; |
935 | struct file *file = NULL; | 936 | struct file *file = NULL; |
936 | unsigned long addr; | 937 | unsigned long addr; |
937 | int flags; | 938 | int flags; |
938 | 939 | ||
939 | if (copy_from_user(&a, arg, sizeof(a))) | 940 | if (copy_from_user(&a, arg, sizeof(a))) |
940 | return -EFAULT; | 941 | return -EFAULT; |
941 | 942 | ||
942 | if (OFFSET4K(a.offset)) | 943 | if (OFFSET4K(a.offset)) |
943 | return -EINVAL; | 944 | return -EINVAL; |
944 | 945 | ||
945 | flags = a.flags; | 946 | flags = a.flags; |
946 | 947 | ||
947 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | 948 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); |
948 | if (!(flags & MAP_ANONYMOUS)) { | 949 | if (!(flags & MAP_ANONYMOUS)) { |
949 | file = fget(a.fd); | 950 | file = fget(a.fd); |
950 | if (!file) | 951 | if (!file) |
951 | return -EBADF; | 952 | return -EBADF; |
952 | } | 953 | } |
953 | 954 | ||
954 | addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset); | 955 | addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset); |
955 | 956 | ||
956 | if (file) | 957 | if (file) |
957 | fput(file); | 958 | fput(file); |
958 | return addr; | 959 | return addr; |
959 | } | 960 | } |
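
For illustration, a caller-side sketch of the memory-block convention that sys32_mmap() decodes. The struct mirrors mmap_arg_struct above; the syscall invocation itself is omitted since it depends on the 32-bit libc vintage, so treat this as an assumption-laden sketch rather than an ABI reference:

        #include <stdint.h>
        #include <string.h>

        /* Mirrors mmap_arg_struct above: six mmap() arguments in 32-bit slots. */
        struct mmap_args32 {
                uint32_t addr, len, prot, flags, fd, offset;
        };

        /* Pack the block; a 32-bit caller would then pass its address as the
         * single argument of the old mmap system call, and sys32_mmap() above
         * copies it back out with copy_from_user(). */
        static void pack_mmap_args(struct mmap_args32 *a, uint32_t addr,
                                   uint32_t len, uint32_t prot, uint32_t flags,
                                   uint32_t fd, uint32_t offset)
        {
                memset(a, 0, sizeof(*a));
                a->addr = addr;   a->len = len;   a->prot = prot;
                a->flags = flags; a->fd = fd;     a->offset = offset;
        }

        int main(void)
        {
                struct mmap_args32 a;

                /* 0x22 = MAP_PRIVATE|MAP_ANONYMOUS, typical Linux values */
                pack_mmap_args(&a, 0, 4096, 0x3, 0x22, -1, 0);
                return a.len == 4096 ? 0 : 1;
        }
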
960 | 961 | ||
961 | asmlinkage long | 962 | asmlinkage long |
962 | sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags, | 963 | sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags, |
963 | unsigned int fd, unsigned int pgoff) | 964 | unsigned int fd, unsigned int pgoff) |
964 | { | 965 | { |
965 | struct file *file = NULL; | 966 | struct file *file = NULL; |
966 | unsigned long retval; | 967 | unsigned long retval; |
967 | 968 | ||
968 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); | 969 | flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); |
969 | if (!(flags & MAP_ANONYMOUS)) { | 970 | if (!(flags & MAP_ANONYMOUS)) { |
970 | file = fget(fd); | 971 | file = fget(fd); |
971 | if (!file) | 972 | if (!file) |
972 | return -EBADF; | 973 | return -EBADF; |
973 | } | 974 | } |
974 | 975 | ||
975 | retval = ia32_do_mmap(file, addr, len, prot, flags, | 976 | retval = ia32_do_mmap(file, addr, len, prot, flags, |
976 | (unsigned long) pgoff << IA32_PAGE_SHIFT); | 977 | (unsigned long) pgoff << IA32_PAGE_SHIFT); |
977 | 978 | ||
978 | if (file) | 979 | if (file) |
979 | fput(file); | 980 | fput(file); |
980 | return retval; | 981 | return retval; |
981 | } | 982 | } |
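
The `(unsigned long) pgoff << IA32_PAGE_SHIFT` conversion is what lets mmap2 address large files from a 32-bit register: the offset argument is counted in 4KB pages, not bytes. A minimal sketch of the arithmetic (IA32_PAGE_SHIFT assumed to be 12, as in this file):

        #include <stdio.h>
        #include <stdint.h>

        #define IA32_PAGE_SHIFT 12      /* 4KB IA32 pages, as assumed here */

        int main(void)
        {
                uint32_t pgoff = 0x00300000;    /* offset in pages, 32-bit register */
                uint64_t bytes = (uint64_t)pgoff << IA32_PAGE_SHIFT;

                /* page units stretch the 32-bit argument to a 44-bit byte offset */
                printf("pgoff %#x -> byte offset %#llx\n",
                       pgoff, (unsigned long long)bytes);
                return 0;
        }
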
982 | 983 | ||
983 | asmlinkage long | 984 | asmlinkage long |
984 | sys32_munmap (unsigned int start, unsigned int len) | 985 | sys32_munmap (unsigned int start, unsigned int len) |
985 | { | 986 | { |
986 | unsigned int end = start + len; | 987 | unsigned int end = start + len; |
987 | long ret; | 988 | long ret; |
988 | 989 | ||
989 | #if PAGE_SHIFT <= IA32_PAGE_SHIFT | 990 | #if PAGE_SHIFT <= IA32_PAGE_SHIFT |
990 | ret = sys_munmap(start, end - start); | 991 | ret = sys_munmap(start, end - start); |
991 | #else | 992 | #else |
992 | if (OFFSET4K(start)) | 993 | if (OFFSET4K(start)) |
993 | return -EINVAL; | 994 | return -EINVAL; |
994 | 995 | ||
995 | end = IA32_PAGE_ALIGN(end); | 996 | end = IA32_PAGE_ALIGN(end); |
996 | if (start >= end) | 997 | if (start >= end) |
997 | return -EINVAL; | 998 | return -EINVAL; |
998 | 999 | ||
999 | ret = ia32_unset_pp(&start, &end); | 1000 | ret = ia32_unset_pp(&start, &end); |
1000 | if (ret < 0) | 1001 | if (ret < 0) |
1001 | return ret; | 1002 | return ret; |
1002 | 1003 | ||
1003 | if (start >= end) | 1004 | if (start >= end) |
1004 | return 0; | 1005 | return 0; |
1005 | 1006 | ||
1006 | mutex_lock(&ia32_mmap_mutex); | 1007 | mutex_lock(&ia32_mmap_mutex); |
1007 | ret = sys_munmap(start, end - start); | 1008 | ret = sys_munmap(start, end - start); |
1008 | mutex_unlock(&ia32_mmap_mutex); | 1009 | mutex_unlock(&ia32_mmap_mutex); |
1009 | #endif | 1010 | #endif |
1010 | return ret; | 1011 | return ret; |
1011 | } | 1012 | } |
1012 | 1013 | ||
1013 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | 1014 | #if PAGE_SHIFT > IA32_PAGE_SHIFT |
1014 | 1015 | ||
1015 | /* | 1016 | /* |
1016 | * When mprotect()ing a partial page, we set the permission to the union of the old | 1017 | * When mprotect()ing a partial page, we set the permission to the union of the old |
1017 | * settings and the new settings. In other words, it's only possible to make access to a | 1018 | * settings and the new settings. In other words, it's only possible to make access to a |
1018 | * partial page less restrictive. | 1019 | * partial page less restrictive. |
1019 | */ | 1020 | */ |
1020 | static long | 1021 | static long |
1021 | mprotect_subpage (unsigned long address, int new_prot) | 1022 | mprotect_subpage (unsigned long address, int new_prot) |
1022 | { | 1023 | { |
1023 | int old_prot; | 1024 | int old_prot; |
1024 | struct vm_area_struct *vma; | 1025 | struct vm_area_struct *vma; |
1025 | 1026 | ||
1026 | if (new_prot == PROT_NONE) | 1027 | if (new_prot == PROT_NONE) |
1027 | return 0; /* optimize case where nothing changes... */ | 1028 | return 0; /* optimize case where nothing changes... */ |
1028 | vma = find_vma(current->mm, address); | 1029 | vma = find_vma(current->mm, address); |
1029 | old_prot = get_page_prot(vma, address); | 1030 | old_prot = get_page_prot(vma, address); |
1030 | return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot); | 1031 | return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot); |
1031 | } | 1032 | } |
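
A quick worked example of the union rule: if one 4KB sub-page of a native page is currently readable and a 32-bit task mprotect()s its neighbour writable, the whole native page ends up with the OR of the two. Userspace sketch, illustrative only:

        #include <stdio.h>
        #include <sys/mman.h>

        int main(void)
        {
                /* One half of a native page is PROT_READ; a 32-bit task asks
                 * for PROT_WRITE on the other half.  mprotect_subpage() applies
                 * the union, so the read half keeps working -- access only widens. */
                int old_prot = PROT_READ;
                int new_prot = PROT_WRITE;

                printf("page ends up with prot %#x\n", new_prot | old_prot);
                return 0;
        }
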
1032 | 1033 | ||
1033 | #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */ | 1034 | #endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */ |
1034 | 1035 | ||
1035 | asmlinkage long | 1036 | asmlinkage long |
1036 | sys32_mprotect (unsigned int start, unsigned int len, int prot) | 1037 | sys32_mprotect (unsigned int start, unsigned int len, int prot) |
1037 | { | 1038 | { |
1038 | unsigned int end = start + len; | 1039 | unsigned int end = start + len; |
1039 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | 1040 | #if PAGE_SHIFT > IA32_PAGE_SHIFT |
1040 | long retval = 0; | 1041 | long retval = 0; |
1041 | #endif | 1042 | #endif |
1042 | 1043 | ||
1043 | prot = get_prot32(prot); | 1044 | prot = get_prot32(prot); |
1044 | 1045 | ||
1045 | #if PAGE_SHIFT <= IA32_PAGE_SHIFT | 1046 | #if PAGE_SHIFT <= IA32_PAGE_SHIFT |
1046 | return sys_mprotect(start, end - start, prot); | 1047 | return sys_mprotect(start, end - start, prot); |
1047 | #else | 1048 | #else |
1048 | if (OFFSET4K(start)) | 1049 | if (OFFSET4K(start)) |
1049 | return -EINVAL; | 1050 | return -EINVAL; |
1050 | 1051 | ||
1051 | end = IA32_PAGE_ALIGN(end); | 1052 | end = IA32_PAGE_ALIGN(end); |
1052 | if (end < start) | 1053 | if (end < start) |
1053 | return -EINVAL; | 1054 | return -EINVAL; |
1054 | 1055 | ||
1055 | retval = ia32_compare_pp(&start, &end); | 1056 | retval = ia32_compare_pp(&start, &end); |
1056 | 1057 | ||
1057 | if (retval < 0) | 1058 | if (retval < 0) |
1058 | return retval; | 1059 | return retval; |
1059 | 1060 | ||
1060 | mutex_lock(&ia32_mmap_mutex); | 1061 | mutex_lock(&ia32_mmap_mutex); |
1061 | { | 1062 | { |
1062 | if (offset_in_page(start)) { | 1063 | if (offset_in_page(start)) { |
1063 | /* start address is 4KB aligned but not page aligned. */ | 1064 | /* start address is 4KB aligned but not page aligned. */ |
1064 | retval = mprotect_subpage(PAGE_START(start), prot); | 1065 | retval = mprotect_subpage(PAGE_START(start), prot); |
1065 | if (retval < 0) | 1066 | if (retval < 0) |
1066 | goto out; | 1067 | goto out; |
1067 | 1068 | ||
1068 | start = PAGE_ALIGN(start); | 1069 | start = PAGE_ALIGN(start); |
1069 | if (start >= end) | 1070 | if (start >= end) |
1070 | goto out; /* retval is already zero... */ | 1071 | goto out; /* retval is already zero... */ |
1071 | } | 1072 | } |
1072 | 1073 | ||
1073 | if (offset_in_page(end)) { | 1074 | if (offset_in_page(end)) { |
1074 | /* end address is 4KB aligned but not page aligned. */ | 1075 | /* end address is 4KB aligned but not page aligned. */ |
1075 | retval = mprotect_subpage(PAGE_START(end), prot); | 1076 | retval = mprotect_subpage(PAGE_START(end), prot); |
1076 | if (retval < 0) | 1077 | if (retval < 0) |
1077 | goto out; | 1078 | goto out; |
1078 | 1079 | ||
1079 | end = PAGE_START(end); | 1080 | end = PAGE_START(end); |
1080 | } | 1081 | } |
1081 | retval = sys_mprotect(start, end - start, prot); | 1082 | retval = sys_mprotect(start, end - start, prot); |
1082 | } | 1083 | } |
1083 | out: | 1084 | out: |
1084 | mutex_unlock(&ia32_mmap_mutex); | 1085 | mutex_unlock(&ia32_mmap_mutex); |
1085 | return retval; | 1086 | return retval; |
1086 | #endif | 1087 | #endif |
1087 | } | 1088 | } |
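
To see how the edge trimming above plays out, here is a standalone sketch of the PAGE_START/PAGE_ALIGN arithmetic with a native page size of 16KB (an assumption for the demo; ia64 supports several page sizes, while IA32 pages stay 4KB):

        #include <stdio.h>

        #define PAGE_SHIFT      14                      /* assume 16KB native pages */
        #define PAGE_SIZE       (1UL << PAGE_SHIFT)
        #define PAGE_MASK       (~(PAGE_SIZE - 1))
        #define PAGE_START(a)   ((a) & PAGE_MASK)
        #define PAGE_ALIGN(a)   (((a) + PAGE_SIZE - 1) & PAGE_MASK)

        int main(void)
        {
                /* 4KB-aligned but not 16KB-aligned, as sys32_mprotect() may see */
                unsigned long start = 0x41000;  /* inside the native page at 0x40000 */
                unsigned long end   = 0x4b000;  /* inside the native page at 0x48000 */

                printf("subpage fixups on pages %#lx and %#lx\n",
                       PAGE_START(start), PAGE_START(end));
                printf("whole-page mprotect range: %#lx-%#lx\n",
                       PAGE_ALIGN(start), PAGE_START(end));
                return 0;
        }
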
1088 | 1089 | ||
1089 | asmlinkage long | 1090 | asmlinkage long |
1090 | sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len, | 1091 | sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len, |
1091 | unsigned int flags, unsigned int new_addr) | 1092 | unsigned int flags, unsigned int new_addr) |
1092 | { | 1093 | { |
1093 | long ret; | 1094 | long ret; |
1094 | 1095 | ||
1095 | #if PAGE_SHIFT <= IA32_PAGE_SHIFT | 1096 | #if PAGE_SHIFT <= IA32_PAGE_SHIFT |
1096 | ret = sys_mremap(addr, old_len, new_len, flags, new_addr); | 1097 | ret = sys_mremap(addr, old_len, new_len, flags, new_addr); |
1097 | #else | 1098 | #else |
1098 | unsigned int old_end, new_end; | 1099 | unsigned int old_end, new_end; |
1099 | 1100 | ||
1100 | if (OFFSET4K(addr)) | 1101 | if (OFFSET4K(addr)) |
1101 | return -EINVAL; | 1102 | return -EINVAL; |
1102 | 1103 | ||
1103 | old_len = IA32_PAGE_ALIGN(old_len); | 1104 | old_len = IA32_PAGE_ALIGN(old_len); |
1104 | new_len = IA32_PAGE_ALIGN(new_len); | 1105 | new_len = IA32_PAGE_ALIGN(new_len); |
1105 | old_end = addr + old_len; | 1106 | old_end = addr + old_len; |
1106 | new_end = addr + new_len; | 1107 | new_end = addr + new_len; |
1107 | 1108 | ||
1108 | if (!new_len) | 1109 | if (!new_len) |
1109 | return -EINVAL; | 1110 | return -EINVAL; |
1110 | 1111 | ||
1111 | if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr))) | 1112 | if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr))) |
1112 | return -EINVAL; | 1113 | return -EINVAL; |
1113 | 1114 | ||
1114 | if (old_len >= new_len) { | 1115 | if (old_len >= new_len) { |
1115 | ret = sys32_munmap(addr + new_len, old_len - new_len); | 1116 | ret = sys32_munmap(addr + new_len, old_len - new_len); |
1116 | if (ret && old_len != new_len) | 1117 | if (ret && old_len != new_len) |
1117 | return ret; | 1118 | return ret; |
1118 | ret = addr; | 1119 | ret = addr; |
1119 | if (!(flags & MREMAP_FIXED) || (new_addr == addr)) | 1120 | if (!(flags & MREMAP_FIXED) || (new_addr == addr)) |
1120 | return ret; | 1121 | return ret; |
1121 | old_len = new_len; | 1122 | old_len = new_len; |
1122 | } | 1123 | } |
1123 | 1124 | ||
1124 | addr = PAGE_START(addr); | 1125 | addr = PAGE_START(addr); |
1125 | old_len = PAGE_ALIGN(old_end) - addr; | 1126 | old_len = PAGE_ALIGN(old_end) - addr; |
1126 | new_len = PAGE_ALIGN(new_end) - addr; | 1127 | new_len = PAGE_ALIGN(new_end) - addr; |
1127 | 1128 | ||
1128 | mutex_lock(&ia32_mmap_mutex); | 1129 | mutex_lock(&ia32_mmap_mutex); |
1129 | ret = sys_mremap(addr, old_len, new_len, flags, new_addr); | 1130 | ret = sys_mremap(addr, old_len, new_len, flags, new_addr); |
1130 | mutex_unlock(&ia32_mmap_mutex); | 1131 | mutex_unlock(&ia32_mmap_mutex); |
1131 | 1132 | ||
1132 | if ((ret >= 0) && (old_len < new_len)) { | 1133 | if ((ret >= 0) && (old_len < new_len)) { |
1133 | /* mremap expanded successfully */ | 1134 | /* mremap expanded successfully */ |
1134 | ia32_set_pp(old_end, new_end, flags); | 1135 | ia32_set_pp(old_end, new_end, flags); |
1135 | } | 1136 | } |
1136 | #endif | 1137 | #endif |
1137 | return ret; | 1138 | return ret; |
1138 | } | 1139 | } |
1139 | 1140 | ||
1140 | asmlinkage long | 1141 | asmlinkage long |
1141 | sys32_pipe (int __user *fd) | 1142 | sys32_pipe (int __user *fd) |
1142 | { | 1143 | { |
1143 | int retval; | 1144 | int retval; |
1144 | int fds[2]; | 1145 | int fds[2]; |
1145 | 1146 | ||
1146 | retval = do_pipe(fds); | 1147 | retval = do_pipe(fds); |
1147 | if (retval) | 1148 | if (retval) |
1148 | goto out; | 1149 | goto out; |
1149 | if (copy_to_user(fd, fds, sizeof(fds))) | 1150 | if (copy_to_user(fd, fds, sizeof(fds))) |
1150 | retval = -EFAULT; | 1151 | retval = -EFAULT; |
1151 | out: | 1152 | out: |
1152 | return retval; | 1153 | return retval; |
1153 | } | 1154 | } |
1154 | 1155 | ||
1155 | static inline long | 1156 | static inline long |
1156 | get_tv32 (struct timeval *o, struct compat_timeval __user *i) | 1157 | get_tv32 (struct timeval *o, struct compat_timeval __user *i) |
1157 | { | 1158 | { |
1158 | return (!access_ok(VERIFY_READ, i, sizeof(*i)) || | 1159 | return (!access_ok(VERIFY_READ, i, sizeof(*i)) || |
1159 | (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec))); | 1160 | (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec))); |
1160 | } | 1161 | } |
1161 | 1162 | ||
1162 | static inline long | 1163 | static inline long |
1163 | put_tv32 (struct compat_timeval __user *o, struct timeval *i) | 1164 | put_tv32 (struct compat_timeval __user *o, struct timeval *i) |
1164 | { | 1165 | { |
1165 | return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || | 1166 | return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) || |
1166 | (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec))); | 1167 | (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec))); |
1167 | } | 1168 | } |
1168 | 1169 | ||
1169 | asmlinkage unsigned long | 1170 | asmlinkage unsigned long |
1170 | sys32_alarm (unsigned int seconds) | 1171 | sys32_alarm (unsigned int seconds) |
1171 | { | 1172 | { |
1172 | return alarm_setitimer(seconds); | 1173 | return alarm_setitimer(seconds); |
1173 | } | 1174 | } |
1174 | 1175 | ||
1175 | /* Translations due to time_t size differences, which affect all | 1176 | /* Translations due to time_t size differences, which affect all |
1176 | sorts of things, like timeval and itimerval. */ | 1177 | sorts of things, like timeval and itimerval. */ |
1177 | 1178 | ||
1178 | extern struct timezone sys_tz; | 1179 | extern struct timezone sys_tz; |
1179 | 1180 | ||
1180 | asmlinkage long | 1181 | asmlinkage long |
1181 | sys32_gettimeofday (struct compat_timeval __user *tv, struct timezone __user *tz) | 1182 | sys32_gettimeofday (struct compat_timeval __user *tv, struct timezone __user *tz) |
1182 | { | 1183 | { |
1183 | if (tv) { | 1184 | if (tv) { |
1184 | struct timeval ktv; | 1185 | struct timeval ktv; |
1185 | do_gettimeofday(&ktv); | 1186 | do_gettimeofday(&ktv); |
1186 | if (put_tv32(tv, &ktv)) | 1187 | if (put_tv32(tv, &ktv)) |
1187 | return -EFAULT; | 1188 | return -EFAULT; |
1188 | } | 1189 | } |
1189 | if (tz) { | 1190 | if (tz) { |
1190 | if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) | 1191 | if (copy_to_user(tz, &sys_tz, sizeof(sys_tz))) |
1191 | return -EFAULT; | 1192 | return -EFAULT; |
1192 | } | 1193 | } |
1193 | return 0; | 1194 | return 0; |
1194 | } | 1195 | } |
1195 | 1196 | ||
1196 | asmlinkage long | 1197 | asmlinkage long |
1197 | sys32_settimeofday (struct compat_timeval __user *tv, struct timezone __user *tz) | 1198 | sys32_settimeofday (struct compat_timeval __user *tv, struct timezone __user *tz) |
1198 | { | 1199 | { |
1199 | struct timeval ktv; | 1200 | struct timeval ktv; |
1200 | struct timespec kts; | 1201 | struct timespec kts; |
1201 | struct timezone ktz; | 1202 | struct timezone ktz; |
1202 | 1203 | ||
1203 | if (tv) { | 1204 | if (tv) { |
1204 | if (get_tv32(&ktv, tv)) | 1205 | if (get_tv32(&ktv, tv)) |
1205 | return -EFAULT; | 1206 | return -EFAULT; |
1206 | kts.tv_sec = ktv.tv_sec; | 1207 | kts.tv_sec = ktv.tv_sec; |
1207 | kts.tv_nsec = ktv.tv_usec * 1000; | 1208 | kts.tv_nsec = ktv.tv_usec * 1000; |
1208 | } | 1209 | } |
1209 | if (tz) { | 1210 | if (tz) { |
1210 | if (copy_from_user(&ktz, tz, sizeof(ktz))) | 1211 | if (copy_from_user(&ktz, tz, sizeof(ktz))) |
1211 | return -EFAULT; | 1212 | return -EFAULT; |
1212 | } | 1213 | } |
1213 | 1214 | ||
1214 | return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); | 1215 | return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL); |
1215 | } | 1216 | } |
1216 | 1217 | ||
1217 | struct getdents32_callback { | 1218 | struct getdents32_callback { |
1218 | struct compat_dirent __user *current_dir; | 1219 | struct compat_dirent __user *current_dir; |
1219 | struct compat_dirent __user *previous; | 1220 | struct compat_dirent __user *previous; |
1220 | int count; | 1221 | int count; |
1221 | int error; | 1222 | int error; |
1222 | }; | 1223 | }; |
1223 | 1224 | ||
1224 | struct readdir32_callback { | 1225 | struct readdir32_callback { |
1225 | struct old_linux32_dirent __user * dirent; | 1226 | struct old_linux32_dirent __user * dirent; |
1226 | int count; | 1227 | int count; |
1227 | }; | 1228 | }; |
1228 | 1229 | ||
1229 | static int | 1230 | static int |
1230 | filldir32 (void *__buf, const char *name, int namlen, loff_t offset, u64 ino, | 1231 | filldir32 (void *__buf, const char *name, int namlen, loff_t offset, u64 ino, |
1231 | unsigned int d_type) | 1232 | unsigned int d_type) |
1232 | { | 1233 | { |
1233 | struct compat_dirent __user * dirent; | 1234 | struct compat_dirent __user * dirent; |
1234 | struct getdents32_callback * buf = (struct getdents32_callback *) __buf; | 1235 | struct getdents32_callback * buf = (struct getdents32_callback *) __buf; |
1235 | int reclen = ROUND_UP(offsetof(struct compat_dirent, d_name) + namlen + 1, 4); | 1236 | int reclen = ROUND_UP(offsetof(struct compat_dirent, d_name) + namlen + 1, 4); |
1236 | u32 d_ino; | 1237 | u32 d_ino; |
1237 | 1238 | ||
1238 | buf->error = -EINVAL; /* only used if we fail... */ | 1239 | buf->error = -EINVAL; /* only used if we fail... */ |
1239 | if (reclen > buf->count) | 1240 | if (reclen > buf->count) |
1240 | return -EINVAL; | 1241 | return -EINVAL; |
1241 | d_ino = ino; | 1242 | d_ino = ino; |
1242 | if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) | 1243 | if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) |
1243 | return -EOVERFLOW; | 1244 | return -EOVERFLOW; |
1244 | buf->error = -EFAULT; /* only used if we fail... */ | 1245 | buf->error = -EFAULT; /* only used if we fail... */ |
1245 | dirent = buf->previous; | 1246 | dirent = buf->previous; |
1246 | if (dirent) | 1247 | if (dirent) |
1247 | if (put_user(offset, &dirent->d_off)) | 1248 | if (put_user(offset, &dirent->d_off)) |
1248 | return -EFAULT; | 1249 | return -EFAULT; |
1249 | dirent = buf->current_dir; | 1250 | dirent = buf->current_dir; |
1250 | buf->previous = dirent; | 1251 | buf->previous = dirent; |
1251 | if (put_user(d_ino, &dirent->d_ino) | 1252 | if (put_user(d_ino, &dirent->d_ino) |
1252 | || put_user(reclen, &dirent->d_reclen) | 1253 | || put_user(reclen, &dirent->d_reclen) |
1253 | || copy_to_user(dirent->d_name, name, namlen) | 1254 | || copy_to_user(dirent->d_name, name, namlen) |
1254 | || put_user(0, dirent->d_name + namlen)) | 1255 | || put_user(0, dirent->d_name + namlen)) |
1255 | return -EFAULT; | 1256 | return -EFAULT; |
1256 | dirent = (struct compat_dirent __user *) ((char __user *) dirent + reclen); | 1257 | dirent = (struct compat_dirent __user *) ((char __user *) dirent + reclen); |
1257 | buf->current_dir = dirent; | 1258 | buf->current_dir = dirent; |
1258 | buf->count -= reclen; | 1259 | buf->count -= reclen; |
1259 | return 0; | 1260 | return 0; |
1260 | } | 1261 | } |
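
The reclen computation packs a variable-length name into a 4-byte-aligned record. A host-side sketch of the arithmetic, using an assumed compat_dirent layout (u32 d_ino, u32 d_off, u16 d_reclen, then d_name) purely for illustration:

        #include <stdio.h>
        #include <stddef.h>
        #include <stdint.h>

        #define ROUND_UP(x, n)  (((x) + (n) - 1) & ~((n) - 1))

        /* Assumed layout, for illustration only; the real compat_dirent
         * lives in the kernel's compat headers. */
        struct compat_dirent_demo {
                uint32_t d_ino;
                uint32_t d_off;
                uint16_t d_reclen;
                char     d_name[256];
        };

        int main(void)
        {
                int namlen = 5;         /* e.g. "hello" */
                int reclen = ROUND_UP((int)offsetof(struct compat_dirent_demo,
                                                    d_name) + namlen + 1, 4);

                /* 10 header bytes + name + NUL, rounded to a 4-byte boundary */
                printf("reclen for a %d-char name: %d\n", namlen, reclen);
                return 0;
        }
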
1261 | 1262 | ||
1262 | asmlinkage long | 1263 | asmlinkage long |
1263 | sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned int count) | 1264 | sys32_getdents (unsigned int fd, struct compat_dirent __user *dirent, unsigned int count) |
1264 | { | 1265 | { |
1265 | struct file * file; | 1266 | struct file * file; |
1266 | struct compat_dirent __user * lastdirent; | 1267 | struct compat_dirent __user * lastdirent; |
1267 | struct getdents32_callback buf; | 1268 | struct getdents32_callback buf; |
1268 | int error; | 1269 | int error; |
1269 | 1270 | ||
1270 | error = -EFAULT; | 1271 | error = -EFAULT; |
1271 | if (!access_ok(VERIFY_WRITE, dirent, count)) | 1272 | if (!access_ok(VERIFY_WRITE, dirent, count)) |
1272 | goto out; | 1273 | goto out; |
1273 | 1274 | ||
1274 | error = -EBADF; | 1275 | error = -EBADF; |
1275 | file = fget(fd); | 1276 | file = fget(fd); |
1276 | if (!file) | 1277 | if (!file) |
1277 | goto out; | 1278 | goto out; |
1278 | 1279 | ||
1279 | buf.current_dir = dirent; | 1280 | buf.current_dir = dirent; |
1280 | buf.previous = NULL; | 1281 | buf.previous = NULL; |
1281 | buf.count = count; | 1282 | buf.count = count; |
1282 | buf.error = 0; | 1283 | buf.error = 0; |
1283 | 1284 | ||
1284 | error = vfs_readdir(file, filldir32, &buf); | 1285 | error = vfs_readdir(file, filldir32, &buf); |
1285 | if (error < 0) | 1286 | if (error < 0) |
1286 | goto out_putf; | 1287 | goto out_putf; |
1287 | error = buf.error; | 1288 | error = buf.error; |
1288 | lastdirent = buf.previous; | 1289 | lastdirent = buf.previous; |
1289 | if (lastdirent) { | 1290 | if (lastdirent) { |
1290 | if (put_user(file->f_pos, &lastdirent->d_off)) | 1291 | if (put_user(file->f_pos, &lastdirent->d_off)) |
1291 | error = -EFAULT; | 1292 | error = -EFAULT; |
1292 | else | 1293 | else |
1293 | error = count - buf.count; | 1294 | error = count - buf.count; |
1294 | } | 1295 | } |
1295 | 1296 | ||
1296 | out_putf: | 1297 | out_putf: |
1297 | fput(file); | 1298 | fput(file); |
1298 | out: | 1299 | out: |
1299 | return error; | 1300 | return error; |
1300 | } | 1301 | } |
1301 | 1302 | ||
1302 | static int | 1303 | static int |
1303 | fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, u64 ino, | 1304 | fillonedir32 (void * __buf, const char * name, int namlen, loff_t offset, u64 ino, |
1304 | unsigned int d_type) | 1305 | unsigned int d_type) |
1305 | { | 1306 | { |
1306 | struct readdir32_callback * buf = (struct readdir32_callback *) __buf; | 1307 | struct readdir32_callback * buf = (struct readdir32_callback *) __buf; |
1307 | struct old_linux32_dirent __user * dirent; | 1308 | struct old_linux32_dirent __user * dirent; |
1308 | u32 d_ino; | 1309 | u32 d_ino; |
1309 | 1310 | ||
1310 | if (buf->count) | 1311 | if (buf->count) |
1311 | return -EINVAL; | 1312 | return -EINVAL; |
1312 | d_ino = ino; | 1313 | d_ino = ino; |
1313 | if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) | 1314 | if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) |
1314 | return -EOVERFLOW; | 1315 | return -EOVERFLOW; |
1315 | buf->count++; | 1316 | buf->count++; |
1316 | dirent = buf->dirent; | 1317 | dirent = buf->dirent; |
1317 | if (put_user(d_ino, &dirent->d_ino) | 1318 | if (put_user(d_ino, &dirent->d_ino) |
1318 | || put_user(offset, &dirent->d_offset) | 1319 | || put_user(offset, &dirent->d_offset) |
1319 | || put_user(namlen, &dirent->d_namlen) | 1320 | || put_user(namlen, &dirent->d_namlen) |
1320 | || copy_to_user(dirent->d_name, name, namlen) | 1321 | || copy_to_user(dirent->d_name, name, namlen) |
1321 | || put_user(0, dirent->d_name + namlen)) | 1322 | || put_user(0, dirent->d_name + namlen)) |
1322 | return -EFAULT; | 1323 | return -EFAULT; |
1323 | return 0; | 1324 | return 0; |
1324 | } | 1325 | } |
1325 | 1326 | ||
1326 | asmlinkage long | 1327 | asmlinkage long |
1327 | sys32_readdir (unsigned int fd, void __user *dirent, unsigned int count) | 1328 | sys32_readdir (unsigned int fd, void __user *dirent, unsigned int count) |
1328 | { | 1329 | { |
1329 | int error; | 1330 | int error; |
1330 | struct file * file; | 1331 | struct file * file; |
1331 | struct readdir32_callback buf; | 1332 | struct readdir32_callback buf; |
1332 | 1333 | ||
1333 | error = -EBADF; | 1334 | error = -EBADF; |
1334 | file = fget(fd); | 1335 | file = fget(fd); |
1335 | if (!file) | 1336 | if (!file) |
1336 | goto out; | 1337 | goto out; |
1337 | 1338 | ||
1338 | buf.count = 0; | 1339 | buf.count = 0; |
1339 | buf.dirent = dirent; | 1340 | buf.dirent = dirent; |
1340 | 1341 | ||
1341 | error = vfs_readdir(file, fillonedir32, &buf); | 1342 | error = vfs_readdir(file, fillonedir32, &buf); |
1342 | if (error >= 0) | 1343 | if (error >= 0) |
1343 | error = buf.count; | 1344 | error = buf.count; |
1344 | fput(file); | 1345 | fput(file); |
1345 | out: | 1346 | out: |
1346 | return error; | 1347 | return error; |
1347 | } | 1348 | } |
1348 | 1349 | ||
1349 | struct sel_arg_struct { | 1350 | struct sel_arg_struct { |
1350 | unsigned int n; | 1351 | unsigned int n; |
1351 | unsigned int inp; | 1352 | unsigned int inp; |
1352 | unsigned int outp; | 1353 | unsigned int outp; |
1353 | unsigned int exp; | 1354 | unsigned int exp; |
1354 | unsigned int tvp; | 1355 | unsigned int tvp; |
1355 | }; | 1356 | }; |
1356 | 1357 | ||
1357 | asmlinkage long | 1358 | asmlinkage long |
1358 | sys32_old_select (struct sel_arg_struct __user *arg) | 1359 | sys32_old_select (struct sel_arg_struct __user *arg) |
1359 | { | 1360 | { |
1360 | struct sel_arg_struct a; | 1361 | struct sel_arg_struct a; |
1361 | 1362 | ||
1362 | if (copy_from_user(&a, arg, sizeof(a))) | 1363 | if (copy_from_user(&a, arg, sizeof(a))) |
1363 | return -EFAULT; | 1364 | return -EFAULT; |
1364 | return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp), | 1365 | return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp), |
1365 | compat_ptr(a.exp), compat_ptr(a.tvp)); | 1366 | compat_ptr(a.exp), compat_ptr(a.tvp)); |
1366 | } | 1367 | } |
1367 | 1368 | ||
1368 | #define SEMOP 1 | 1369 | #define SEMOP 1 |
1369 | #define SEMGET 2 | 1370 | #define SEMGET 2 |
1370 | #define SEMCTL 3 | 1371 | #define SEMCTL 3 |
1371 | #define SEMTIMEDOP 4 | 1372 | #define SEMTIMEDOP 4 |
1372 | #define MSGSND 11 | 1373 | #define MSGSND 11 |
1373 | #define MSGRCV 12 | 1374 | #define MSGRCV 12 |
1374 | #define MSGGET 13 | 1375 | #define MSGGET 13 |
1375 | #define MSGCTL 14 | 1376 | #define MSGCTL 14 |
1376 | #define SHMAT 21 | 1377 | #define SHMAT 21 |
1377 | #define SHMDT 22 | 1378 | #define SHMDT 22 |
1378 | #define SHMGET 23 | 1379 | #define SHMGET 23 |
1379 | #define SHMCTL 24 | 1380 | #define SHMCTL 24 |
1380 | 1381 | ||
1381 | asmlinkage long | 1382 | asmlinkage long |
1382 | sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth) | 1383 | sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth) |
1383 | { | 1384 | { |
1384 | int version; | 1385 | int version; |
1385 | 1386 | ||
1386 | version = call >> 16; /* hack for backward compatibility */ | 1387 | version = call >> 16; /* hack for backward compatibility */ |
1387 | call &= 0xffff; | 1388 | call &= 0xffff; |
1388 | 1389 | ||
1389 | switch (call) { | 1390 | switch (call) { |
1390 | case SEMTIMEDOP: | 1391 | case SEMTIMEDOP: |
1391 | if (fifth) | 1392 | if (fifth) |
1392 | return compat_sys_semtimedop(first, compat_ptr(ptr), | 1393 | return compat_sys_semtimedop(first, compat_ptr(ptr), |
1393 | second, compat_ptr(fifth)); | 1394 | second, compat_ptr(fifth)); |
1394 | /* else fall through for normal semop() */ | 1395 | /* else fall through for normal semop() */ |
1395 | case SEMOP: | 1396 | case SEMOP: |
1396 | /* struct sembuf is the same on 32 and 64bit :)) */ | 1397 | /* struct sembuf is the same on 32 and 64bit :)) */ |
1397 | return sys_semtimedop(first, compat_ptr(ptr), second, | 1398 | return sys_semtimedop(first, compat_ptr(ptr), second, |
1398 | NULL); | 1399 | NULL); |
1399 | case SEMGET: | 1400 | case SEMGET: |
1400 | return sys_semget(first, second, third); | 1401 | return sys_semget(first, second, third); |
1401 | case SEMCTL: | 1402 | case SEMCTL: |
1402 | return compat_sys_semctl(first, second, third, compat_ptr(ptr)); | 1403 | return compat_sys_semctl(first, second, third, compat_ptr(ptr)); |
1403 | 1404 | ||
1404 | case MSGSND: | 1405 | case MSGSND: |
1405 | return compat_sys_msgsnd(first, second, third, compat_ptr(ptr)); | 1406 | return compat_sys_msgsnd(first, second, third, compat_ptr(ptr)); |
1406 | case MSGRCV: | 1407 | case MSGRCV: |
1407 | return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr)); | 1408 | return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr)); |
1408 | case MSGGET: | 1409 | case MSGGET: |
1409 | return sys_msgget((key_t) first, second); | 1410 | return sys_msgget((key_t) first, second); |
1410 | case MSGCTL: | 1411 | case MSGCTL: |
1411 | return compat_sys_msgctl(first, second, compat_ptr(ptr)); | 1412 | return compat_sys_msgctl(first, second, compat_ptr(ptr)); |
1412 | 1413 | ||
1413 | case SHMAT: | 1414 | case SHMAT: |
1414 | return compat_sys_shmat(first, second, third, version, compat_ptr(ptr)); | 1415 | return compat_sys_shmat(first, second, third, version, compat_ptr(ptr)); |
1415 | break; | 1416 | break; |
1416 | case SHMDT: | 1417 | case SHMDT: |
1417 | return sys_shmdt(compat_ptr(ptr)); | 1418 | return sys_shmdt(compat_ptr(ptr)); |
1418 | case SHMGET: | 1419 | case SHMGET: |
1419 | return sys_shmget(first, (unsigned)second, third); | 1420 | return sys_shmget(first, (unsigned)second, third); |
1420 | case SHMCTL: | 1421 | case SHMCTL: |
1421 | return compat_sys_shmctl(first, second, compat_ptr(ptr)); | 1422 | return compat_sys_shmctl(first, second, compat_ptr(ptr)); |
1422 | 1423 | ||
1423 | default: | 1424 | default: |
1424 | return -ENOSYS; | 1425 | return -ENOSYS; |
1425 | } | 1426 | } |
1426 | return -EINVAL; | 1427 | return -EINVAL; |
1427 | } | 1428 | } |
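
The call argument is a little multiplexer: the low 16 bits select the operation from the table above and the high 16 bits carry an interface version. A hypothetical caller-side sketch of the encoding, matching the shift-and-mask decode in sys32_ipc():

        #include <stdio.h>

        #define SEMOP 1         /* from the table above */

        static unsigned int encode_ipc_call(unsigned int version, unsigned int op)
        {
                return (version << 16) | (op & 0xffff);
        }

        int main(void)
        {
                unsigned int call = encode_ipc_call(0, SEMOP);

                /* sys32_ipc() undoes this with "call >> 16" and "call &= 0xffff" */
                printf("call=%#x -> version=%u, op=%u\n",
                       call, call >> 16, call & 0xffff);
                return 0;
        }
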
1428 | 1429 | ||
1429 | asmlinkage long | 1430 | asmlinkage long |
1430 | compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options, | 1431 | compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options, |
1431 | struct compat_rusage *ru); | 1432 | struct compat_rusage *ru); |
1432 | 1433 | ||
1433 | asmlinkage long | 1434 | asmlinkage long |
1434 | sys32_waitpid (int pid, unsigned int *stat_addr, int options) | 1435 | sys32_waitpid (int pid, unsigned int *stat_addr, int options) |
1435 | { | 1436 | { |
1436 | return compat_sys_wait4(pid, stat_addr, options, NULL); | 1437 | return compat_sys_wait4(pid, stat_addr, options, NULL); |
1437 | } | 1438 | } |
1438 | 1439 | ||
1439 | static unsigned int | 1440 | static unsigned int |
1440 | ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val) | 1441 | ia32_peek (struct task_struct *child, unsigned long addr, unsigned int *val) |
1441 | { | 1442 | { |
1442 | size_t copied; | 1443 | size_t copied; |
1443 | unsigned int ret; | 1444 | unsigned int ret; |
1444 | 1445 | ||
1445 | copied = access_process_vm(child, addr, val, sizeof(*val), 0); | 1446 | copied = access_process_vm(child, addr, val, sizeof(*val), 0); |
1446 | return (copied != sizeof(ret)) ? -EIO : 0; | 1447 | return (copied != sizeof(ret)) ? -EIO : 0; |
1447 | } | 1448 | } |
1448 | 1449 | ||
1449 | static unsigned int | 1450 | static unsigned int |
1450 | ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val) | 1451 | ia32_poke (struct task_struct *child, unsigned long addr, unsigned int val) |
1451 | { | 1452 | { |
1452 | 1453 | ||
1453 | if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val)) | 1454 | if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val)) |
1454 | return -EIO; | 1455 | return -EIO; |
1455 | return 0; | 1456 | return 0; |
1456 | } | 1457 | } |
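
ia32_peek()/ia32_poke() are the kernel halves of PTRACE_PEEKDATA/POKEDATA, moving one 32-bit word at a time through access_process_vm(). For flavour, a minimal userspace analogue using the ptrace() syscall itself (sketch only; attach/detach choreography is reduced to the bare minimum):

        #include <stdio.h>
        #include <errno.h>
        #include <signal.h>
        #include <unistd.h>
        #include <sys/types.h>
        #include <sys/ptrace.h>
        #include <sys/wait.h>

        static long secret = 0x1234abcd;        /* the word we peek from the child */

        int main(void)
        {
                pid_t pid = fork();

                if (pid == 0) {                 /* child: let the parent trace us */
                        ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                        raise(SIGSTOP);
                        _exit(0);
                }
                waitpid(pid, NULL, 0);          /* wait until the child stops */

                errno = 0;
                long word = ptrace(PTRACE_PEEKDATA, pid, &secret, NULL);
                if (word == -1 && errno)
                        perror("PTRACE_PEEKDATA");      /* the kernel's -EIO path */
                else
                        printf("peeked %#lx\n", word);

                ptrace(PTRACE_DETACH, pid, NULL, NULL);
                waitpid(pid, NULL, 0);
                return 0;
        }
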
1457 | 1458 | ||
1458 | /* | 1459 | /* |
1459 | * The order in which registers are stored in the ptrace regs structure | 1460 | * The order in which registers are stored in the ptrace regs structure |
1460 | */ | 1461 | */ |
1461 | #define PT_EBX 0 | 1462 | #define PT_EBX 0 |
1462 | #define PT_ECX 1 | 1463 | #define PT_ECX 1 |
1463 | #define PT_EDX 2 | 1464 | #define PT_EDX 2 |
1464 | #define PT_ESI 3 | 1465 | #define PT_ESI 3 |
1465 | #define PT_EDI 4 | 1466 | #define PT_EDI 4 |
1466 | #define PT_EBP 5 | 1467 | #define PT_EBP 5 |
1467 | #define PT_EAX 6 | 1468 | #define PT_EAX 6 |
1468 | #define PT_DS 7 | 1469 | #define PT_DS 7 |
1469 | #define PT_ES 8 | 1470 | #define PT_ES 8 |
1470 | #define PT_FS 9 | 1471 | #define PT_FS 9 |
1471 | #define PT_GS 10 | 1472 | #define PT_GS 10 |
1472 | #define PT_ORIG_EAX 11 | 1473 | #define PT_ORIG_EAX 11 |
1473 | #define PT_EIP 12 | 1474 | #define PT_EIP 12 |
1474 | #define PT_CS 13 | 1475 | #define PT_CS 13 |
1475 | #define PT_EFL 14 | 1476 | #define PT_EFL 14 |
1476 | #define PT_UESP 15 | 1477 | #define PT_UESP 15 |
1477 | #define PT_SS 16 | 1478 | #define PT_SS 16 |
1478 | 1479 | ||
1479 | static unsigned int | 1480 | static unsigned int |
1480 | getreg (struct task_struct *child, int regno) | 1481 | getreg (struct task_struct *child, int regno) |
1481 | { | 1482 | { |
1482 | struct pt_regs *child_regs; | 1483 | struct pt_regs *child_regs; |
1483 | 1484 | ||
1484 | child_regs = task_pt_regs(child); | 1485 | child_regs = task_pt_regs(child); |
1485 | switch (regno / sizeof(int)) { | 1486 | switch (regno / sizeof(int)) { |
1486 | case PT_EBX: return child_regs->r11; | 1487 | case PT_EBX: return child_regs->r11; |
1487 | case PT_ECX: return child_regs->r9; | 1488 | case PT_ECX: return child_regs->r9; |
1488 | case PT_EDX: return child_regs->r10; | 1489 | case PT_EDX: return child_regs->r10; |
1489 | case PT_ESI: return child_regs->r14; | 1490 | case PT_ESI: return child_regs->r14; |
1490 | case PT_EDI: return child_regs->r15; | 1491 | case PT_EDI: return child_regs->r15; |
1491 | case PT_EBP: return child_regs->r13; | 1492 | case PT_EBP: return child_regs->r13; |
1492 | case PT_EAX: return child_regs->r8; | 1493 | case PT_EAX: return child_regs->r8; |
1493 | case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */ | 1494 | case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */ |
1494 | case PT_EIP: return child_regs->cr_iip; | 1495 | case PT_EIP: return child_regs->cr_iip; |
1495 | case PT_UESP: return child_regs->r12; | 1496 | case PT_UESP: return child_regs->r12; |
1496 | case PT_EFL: return child->thread.eflag; | 1497 | case PT_EFL: return child->thread.eflag; |
1497 | case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS: | 1498 | case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS: |
1498 | return __USER_DS; | 1499 | return __USER_DS; |
1499 | case PT_CS: return __USER_CS; | 1500 | case PT_CS: return __USER_CS; |
1500 | default: | 1501 | default: |
1501 | printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno); | 1502 | printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno); |
1502 | break; | 1503 | break; |
1503 | } | 1504 | } |
1504 | return 0; | 1505 | return 0; |
1505 | } | 1506 | } |
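
Debuggers address the USER area in bytes, which is why getreg() divides regno by sizeof(int) before switching on the slot. A small sketch of the offsets a 32-bit debugger would pass for EAX and EIP (slot numbers taken from the PT_* table above):

        #include <stdio.h>

        /* Slot numbers from the PT_* table above */
        #define PT_EAX  6
        #define PT_EIP  12

        int main(void)
        {
                unsigned long addr_eax = PT_EAX * sizeof(int);  /* byte offset 24 */
                unsigned long addr_eip = PT_EIP * sizeof(int);  /* byte offset 48 */

                /* getreg()/putreg() recover the slot with regno / sizeof(int) */
                printf("PEEKUSR addr for EAX: %lu -> slot %lu\n",
                       addr_eax, addr_eax / sizeof(int));
                printf("PEEKUSR addr for EIP: %lu -> slot %lu\n",
                       addr_eip, addr_eip / sizeof(int));
                return 0;
        }
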
1506 | 1507 | ||
1507 | static void | 1508 | static void |
1508 | putreg (struct task_struct *child, int regno, unsigned int value) | 1509 | putreg (struct task_struct *child, int regno, unsigned int value) |
1509 | { | 1510 | { |
1510 | struct pt_regs *child_regs; | 1511 | struct pt_regs *child_regs; |
1511 | 1512 | ||
1512 | child_regs = task_pt_regs(child); | 1513 | child_regs = task_pt_regs(child); |
1513 | switch (regno / sizeof(int)) { | 1514 | switch (regno / sizeof(int)) { |
1514 | case PT_EBX: child_regs->r11 = value; break; | 1515 | case PT_EBX: child_regs->r11 = value; break; |
1515 | case PT_ECX: child_regs->r9 = value; break; | 1516 | case PT_ECX: child_regs->r9 = value; break; |
1516 | case PT_EDX: child_regs->r10 = value; break; | 1517 | case PT_EDX: child_regs->r10 = value; break; |
1517 | case PT_ESI: child_regs->r14 = value; break; | 1518 | case PT_ESI: child_regs->r14 = value; break; |
1518 | case PT_EDI: child_regs->r15 = value; break; | 1519 | case PT_EDI: child_regs->r15 = value; break; |
1519 | case PT_EBP: child_regs->r13 = value; break; | 1520 | case PT_EBP: child_regs->r13 = value; break; |
1520 | case PT_EAX: child_regs->r8 = value; break; | 1521 | case PT_EAX: child_regs->r8 = value; break; |
1521 | case PT_ORIG_EAX: child_regs->r1 = value; break; | 1522 | case PT_ORIG_EAX: child_regs->r1 = value; break; |
1522 | case PT_EIP: child_regs->cr_iip = value; break; | 1523 | case PT_EIP: child_regs->cr_iip = value; break; |
1523 | case PT_UESP: child_regs->r12 = value; break; | 1524 | case PT_UESP: child_regs->r12 = value; break; |
1524 | case PT_EFL: child->thread.eflag = value; break; | 1525 | case PT_EFL: child->thread.eflag = value; break; |
1525 | case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS: | 1526 | case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS: |
1526 | if (value != __USER_DS) | 1527 | if (value != __USER_DS) |
1527 | printk(KERN_ERR | 1528 | printk(KERN_ERR |
1528 | "ia32.putreg: attempt to set invalid segment register %d = %x\n", | 1529 | "ia32.putreg: attempt to set invalid segment register %d = %x\n", |
1529 | regno, value); | 1530 | regno, value); |
1530 | break; | 1531 | break; |
1531 | case PT_CS: | 1532 | case PT_CS: |
1532 | if (value != __USER_CS) | 1533 | if (value != __USER_CS) |
1533 | printk(KERN_ERR | 1534 | printk(KERN_ERR |
1534 | "ia32.putreg: attempt to to set invalid segment register %d = %x\n", | 1535 | "ia32.putreg: attempt to to set invalid segment register %d = %x\n", |
1535 | regno, value); | 1536 | regno, value); |
1536 | break; | 1537 | break; |
1537 | default: | 1538 | default: |
1538 | printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno); | 1539 | printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno); |
1539 | break; | 1540 | break; |
1540 | } | 1541 | } |
1541 | } | 1542 | } |
1542 | 1543 | ||
1543 | static void | 1544 | static void |
1544 | put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp, | 1545 | put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp, |
1545 | struct switch_stack *swp, int tos) | 1546 | struct switch_stack *swp, int tos) |
1546 | { | 1547 | { |
1547 | struct _fpreg_ia32 *f; | 1548 | struct _fpreg_ia32 *f; |
1548 | char buf[32]; | 1549 | char buf[32]; |
1549 | 1550 | ||
1550 | f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15); | 1551 | f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15); |
1551 | if ((regno += tos) >= 8) | 1552 | if ((regno += tos) >= 8) |
1552 | regno -= 8; | 1553 | regno -= 8; |
1553 | switch (regno) { | 1554 | switch (regno) { |
1554 | case 0: | 1555 | case 0: |
1555 | ia64f2ia32f(f, &ptp->f8); | 1556 | ia64f2ia32f(f, &ptp->f8); |
1556 | break; | 1557 | break; |
1557 | case 1: | 1558 | case 1: |
1558 | ia64f2ia32f(f, &ptp->f9); | 1559 | ia64f2ia32f(f, &ptp->f9); |
1559 | break; | 1560 | break; |
1560 | case 2: | 1561 | case 2: |
1561 | ia64f2ia32f(f, &ptp->f10); | 1562 | ia64f2ia32f(f, &ptp->f10); |
1562 | break; | 1563 | break; |
1563 | case 3: | 1564 | case 3: |
1564 | ia64f2ia32f(f, &ptp->f11); | 1565 | ia64f2ia32f(f, &ptp->f11); |
1565 | break; | 1566 | break; |
1566 | case 4: | 1567 | case 4: |
1567 | case 5: | 1568 | case 5: |
1568 | case 6: | 1569 | case 6: |
1569 | case 7: | 1570 | case 7: |
1570 | ia64f2ia32f(f, &swp->f12 + (regno - 4)); | 1571 | ia64f2ia32f(f, &swp->f12 + (regno - 4)); |
1571 | break; | 1572 | break; |
1572 | } | 1573 | } |
1573 | copy_to_user(reg, f, sizeof(*reg)); | 1574 | copy_to_user(reg, f, sizeof(*reg)); |
1574 | } | 1575 | } |
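
Two details of put_fpreg() deserve a standalone sketch: the add-then-mask trick that 16-byte-aligns a scratch buffer, and the top-of-stack rotation that maps a stack-relative ST(i) onto a physical slot (f8..f11 in pt_regs, f12..f15 in the switch stack above). Illustrative userspace code:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                char buf[32];
                /* put_fpreg()'s trick: overshoot by 15 and mask, so f is
                 * 16-byte aligned wherever buf happens to land on the stack */
                void *f = (void *)(((uintptr_t)buf + 15) & ~(uintptr_t)15);

                printf("buf=%p aligned=%p\n", (void *)buf, f);

                /* the rotation: stack-relative ST(i) to physical register */
                int tos = 6;            /* as extracted from fsr bits 11..13 */
                for (int st = 0; st < 8; st++) {
                        int regno = st + tos;
                        if (regno >= 8)
                                regno -= 8;
                        /* regno 0..3 land in pt_regs f8..f11, 4..7 in the
                         * switch stack f12..f15 */
                        printf("ST(%d) -> f%d\n", st, regno + 8);
                }
                return 0;
        }
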
1575 | 1576 | ||
1576 | static void | 1577 | static void |
1577 | get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp, | 1578 | get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp, |
1578 | struct switch_stack *swp, int tos) | 1579 | struct switch_stack *swp, int tos) |
1579 | { | 1580 | { |
1580 | 1581 | ||
1581 | if ((regno += tos) >= 8) | 1582 | if ((regno += tos) >= 8) |
1582 | regno -= 8; | 1583 | regno -= 8; |
1583 | switch (regno) { | 1584 | switch (regno) { |
1584 | case 0: | 1585 | case 0: |
1585 | copy_from_user(&ptp->f8, reg, sizeof(*reg)); | 1586 | copy_from_user(&ptp->f8, reg, sizeof(*reg)); |
1586 | break; | 1587 | break; |
1587 | case 1: | 1588 | case 1: |
1588 | copy_from_user(&ptp->f9, reg, sizeof(*reg)); | 1589 | copy_from_user(&ptp->f9, reg, sizeof(*reg)); |
1589 | break; | 1590 | break; |
1590 | case 2: | 1591 | case 2: |
1591 | copy_from_user(&ptp->f10, reg, sizeof(*reg)); | 1592 | copy_from_user(&ptp->f10, reg, sizeof(*reg)); |
1592 | break; | 1593 | break; |
1593 | case 3: | 1594 | case 3: |
1594 | copy_from_user(&ptp->f11, reg, sizeof(*reg)); | 1595 | copy_from_user(&ptp->f11, reg, sizeof(*reg)); |
1595 | break; | 1596 | break; |
1596 | case 4: | 1597 | case 4: |
1597 | case 5: | 1598 | case 5: |
1598 | case 6: | 1599 | case 6: |
1599 | case 7: | 1600 | case 7: |
1600 | copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg)); | 1601 | copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg)); |
1601 | break; | 1602 | break; |
1602 | } | 1603 | } |
1603 | return; | 1604 | return; |
1604 | } | 1605 | } |
1605 | 1606 | ||
1606 | int | 1607 | int |
1607 | save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save) | 1608 | save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save) |
1608 | { | 1609 | { |
1609 | struct switch_stack *swp; | 1610 | struct switch_stack *swp; |
1610 | struct pt_regs *ptp; | 1611 | struct pt_regs *ptp; |
1611 | int i, tos; | 1612 | int i, tos; |
1612 | 1613 | ||
1613 | if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) | 1614 | if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) |
1614 | return -EFAULT; | 1615 | return -EFAULT; |
1615 | 1616 | ||
1616 | __put_user(tsk->thread.fcr & 0xffff, &save->cwd); | 1617 | __put_user(tsk->thread.fcr & 0xffff, &save->cwd); |
1617 | __put_user(tsk->thread.fsr & 0xffff, &save->swd); | 1618 | __put_user(tsk->thread.fsr & 0xffff, &save->swd); |
1618 | __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd); | 1619 | __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd); |
1619 | __put_user(tsk->thread.fir, &save->fip); | 1620 | __put_user(tsk->thread.fir, &save->fip); |
1620 | __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs); | 1621 | __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs); |
1621 | __put_user(tsk->thread.fdr, &save->foo); | 1622 | __put_user(tsk->thread.fdr, &save->foo); |
1622 | __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos); | 1623 | __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos); |
1623 | 1624 | ||
1624 | /* | 1625 | /* |
1625 | * Stack frames start with 16 bytes of temp space | 1626 | * Stack frames start with 16 bytes of temp space |
1626 | */ | 1627 | */ |
1627 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); | 1628 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); |
1628 | ptp = task_pt_regs(tsk); | 1629 | ptp = task_pt_regs(tsk); |
1629 | tos = (tsk->thread.fsr >> 11) & 7; | 1630 | tos = (tsk->thread.fsr >> 11) & 7; |
1630 | for (i = 0; i < 8; i++) | 1631 | for (i = 0; i < 8; i++) |
1631 | put_fpreg(i, &save->st_space[i], ptp, swp, tos); | 1632 | put_fpreg(i, &save->st_space[i], ptp, swp, tos); |
1632 | return 0; | 1633 | return 0; |
1633 | } | 1634 | } |
1634 | 1635 | ||
1635 | static int | 1636 | static int |
1636 | restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save) | 1637 | restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save) |
1637 | { | 1638 | { |
1638 | struct switch_stack *swp; | 1639 | struct switch_stack *swp; |
1639 | struct pt_regs *ptp; | 1640 | struct pt_regs *ptp; |
1640 | int i, tos; | 1641 | int i, tos; |
1641 | unsigned int fsrlo, fsrhi, num32; | 1642 | unsigned int fsrlo, fsrhi, num32; |
1642 | 1643 | ||
1643 | if (!access_ok(VERIFY_READ, save, sizeof(*save))) | 1644 | if (!access_ok(VERIFY_READ, save, sizeof(*save))) |
1644 | return -EFAULT; | 1645 | return -EFAULT; |
1645 | 1646 | ||
1646 | __get_user(num32, (unsigned int __user *)&save->cwd); | 1647 | __get_user(num32, (unsigned int __user *)&save->cwd); |
1647 | tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f); | 1648 | tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f); |
1648 | __get_user(fsrlo, (unsigned int __user *)&save->swd); | 1649 | __get_user(fsrlo, (unsigned int __user *)&save->swd); |
1649 | __get_user(fsrhi, (unsigned int __user *)&save->twd); | 1650 | __get_user(fsrhi, (unsigned int __user *)&save->twd); |
1650 | num32 = (fsrhi << 16) | fsrlo; | 1651 | num32 = (fsrhi << 16) | fsrlo; |
1651 | tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32; | 1652 | tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32; |
1652 | __get_user(num32, (unsigned int __user *)&save->fip); | 1653 | __get_user(num32, (unsigned int __user *)&save->fip); |
1653 | tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32; | 1654 | tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32; |
1654 | __get_user(num32, (unsigned int __user *)&save->foo); | 1655 | __get_user(num32, (unsigned int __user *)&save->foo); |
1655 | tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32; | 1656 | tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32; |
1656 | 1657 | ||
1657 | /* | 1658 | /* |
1658 | * Stack frames start with 16 bytes of temp space | 1659 | * Stack frames start with 16 bytes of temp space |
1659 | */ | 1660 | */ |
1660 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); | 1661 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); |
1661 | ptp = task_pt_regs(tsk); | 1662 | ptp = task_pt_regs(tsk); |
1662 | tos = (tsk->thread.fsr >> 11) & 7; | 1663 | tos = (tsk->thread.fsr >> 11) & 7; |
1663 | for (i = 0; i < 8; i++) | 1664 | for (i = 0; i < 8; i++) |
1664 | get_fpreg(i, &save->st_space[i], ptp, swp, tos); | 1665 | get_fpreg(i, &save->st_space[i], ptp, swp, tos); |
1665 | return 0; | 1666 | return 0; |
1666 | } | 1667 | } |
1667 | 1668 | ||
1668 | int | 1669 | int |
1669 | save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save) | 1670 | save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save) |
1670 | { | 1671 | { |
1671 | struct switch_stack *swp; | 1672 | struct switch_stack *swp; |
1672 | struct pt_regs *ptp; | 1673 | struct pt_regs *ptp; |
1673 | int i, tos; | 1674 | int i, tos; |
1674 | unsigned long mxcsr=0; | 1675 | unsigned long mxcsr=0; |
1675 | unsigned long num128[2]; | 1676 | unsigned long num128[2]; |
1676 | 1677 | ||
1677 | if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) | 1678 | if (!access_ok(VERIFY_WRITE, save, sizeof(*save))) |
1678 | return -EFAULT; | 1679 | return -EFAULT; |
1679 | 1680 | ||
1680 | __put_user(tsk->thread.fcr & 0xffff, &save->cwd); | 1681 | __put_user(tsk->thread.fcr & 0xffff, &save->cwd); |
1681 | __put_user(tsk->thread.fsr & 0xffff, &save->swd); | 1682 | __put_user(tsk->thread.fsr & 0xffff, &save->swd); |
1682 | __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd); | 1683 | __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd); |
1683 | __put_user(tsk->thread.fir, &save->fip); | 1684 | __put_user(tsk->thread.fir, &save->fip); |
1684 | __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs); | 1685 | __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs); |
1685 | __put_user(tsk->thread.fdr, &save->foo); | 1686 | __put_user(tsk->thread.fdr, &save->foo); |
1686 | __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos); | 1687 | __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos); |
1687 | 1688 | ||
1688 | /* | 1689 | /* |
1689 | * Stack frames start with 16 bytes of temp space | 1690 | * Stack frames start with 16 bytes of temp space |
1690 | */ | 1691 | */ |
1691 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); | 1692 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); |
1692 | ptp = task_pt_regs(tsk); | 1693 | ptp = task_pt_regs(tsk); |
1693 | tos = (tsk->thread.fsr >> 11) & 7; | 1694 | tos = (tsk->thread.fsr >> 11) & 7; |
1694 | for (i = 0; i < 8; i++) | 1695 | for (i = 0; i < 8; i++) |
1695 | put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos); | 1696 | put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos); |
1696 | 1697 | ||
1697 | mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f); | 1698 | mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f); |
1698 | __put_user(mxcsr & 0xffff, &save->mxcsr); | 1699 | __put_user(mxcsr & 0xffff, &save->mxcsr); |
1699 | for (i = 0; i < 8; i++) { | 1700 | for (i = 0; i < 8; i++) { |
1700 | memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long)); | 1701 | memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long)); |
1701 | memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long)); | 1702 | memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long)); |
1702 | copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32)); | 1703 | copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32)); |
1703 | } | 1704 | } |
1704 | return 0; | 1705 | return 0; |
1705 | } | 1706 | } |
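
The MXCSR image is reassembled from two 64-bit ia64 registers: control bits live in the upper half of fcr, status bits in the upper half of fsr. A tiny sketch of the bit surgery with assumed example values:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t fcr = (uint64_t)0x1f80 << 32;  /* MXCSR control bits */
                uint64_t fsr = (uint64_t)0x0001 << 32;  /* MXCSR status bits */

                /* reassemble mxcsr exactly as save_ia32_fpxstate() does above */
                uint64_t mxcsr = ((fcr >> 32) & 0xff80) | ((fsr >> 32) & 0x3f);

                /* 0x1f81: default exception masks plus a pending IE flag */
                printf("mxcsr = %#llx\n", (unsigned long long)mxcsr);
                return 0;
        }
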
1706 | 1707 | ||
1707 | static int | 1708 | static int |
1708 | restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save) | 1709 | restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save) |
1709 | { | 1710 | { |
1710 | struct switch_stack *swp; | 1711 | struct switch_stack *swp; |
1711 | struct pt_regs *ptp; | 1712 | struct pt_regs *ptp; |
1712 | int i, tos; | 1713 | int i, tos; |
1713 | unsigned int fsrlo, fsrhi, num32; | 1714 | unsigned int fsrlo, fsrhi, num32; |
1714 | int mxcsr; | 1715 | int mxcsr; |
1715 | unsigned long num64; | 1716 | unsigned long num64; |
1716 | unsigned long num128[2]; | 1717 | unsigned long num128[2]; |
1717 | 1718 | ||
1718 | if (!access_ok(VERIFY_READ, save, sizeof(*save))) | 1719 | if (!access_ok(VERIFY_READ, save, sizeof(*save))) |
1719 | return -EFAULT; | 1720 | return -EFAULT; |
1720 | 1721 | ||
1721 | __get_user(num32, (unsigned int __user *)&save->cwd); | 1722 | __get_user(num32, (unsigned int __user *)&save->cwd); |
1722 | tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f); | 1723 | tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f); |
1723 | __get_user(fsrlo, (unsigned int __user *)&save->swd); | 1724 | __get_user(fsrlo, (unsigned int __user *)&save->swd); |
1724 | __get_user(fsrhi, (unsigned int __user *)&save->twd); | 1725 | __get_user(fsrhi, (unsigned int __user *)&save->twd); |
1725 | num32 = (fsrhi << 16) | fsrlo; | 1726 | num32 = (fsrhi << 16) | fsrlo; |
1726 | tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32; | 1727 | tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32; |
1727 | __get_user(num32, (unsigned int __user *)&save->fip); | 1728 | __get_user(num32, (unsigned int __user *)&save->fip); |
1728 | tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32; | 1729 | tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32; |
1729 | __get_user(num32, (unsigned int __user *)&save->foo); | 1730 | __get_user(num32, (unsigned int __user *)&save->foo); |
1730 | tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32; | 1731 | tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32; |
1731 | 1732 | ||
1732 | /* | 1733 | /* |
1733 | * Stack frames start with 16-bytes of temp space | 1734 | * Stack frames start with 16-bytes of temp space |
1734 | */ | 1735 | */ |
1735 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); | 1736 | swp = (struct switch_stack *)(tsk->thread.ksp + 16); |
1736 | ptp = task_pt_regs(tsk); | 1737 | ptp = task_pt_regs(tsk); |
1737 | tos = (tsk->thread.fsr >> 11) & 7; | 1738 | tos = (tsk->thread.fsr >> 11) & 7; |
1738 | for (i = 0; i < 8; i++) | 1739 | for (i = 0; i < 8; i++) |
1739 | get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos); | 1740 | get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos); |
1740 | 1741 | ||
1741 | __get_user(mxcsr, (unsigned int __user *)&save->mxcsr); | 1742 | __get_user(mxcsr, (unsigned int __user *)&save->mxcsr); |
1742 | num64 = mxcsr & 0xff10; | 1743 | num64 = mxcsr & 0xff10; |
1743 | tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32); | 1744 | tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32); |
1744 | num64 = mxcsr & 0x3f; | 1745 | num64 = mxcsr & 0x3f; |
1745 | tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32); | 1746 | tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32); |
1746 | 1747 | ||
1747 | for (i = 0; i < 8; i++) { | 1748 | for (i = 0; i < 8; i++) { |
1748 | copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32)); | 1749 | copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32)); |
1749 | memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long)); | 1750 | memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long)); |
1750 | memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long)); | 1751 | memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long)); |
1751 | } | 1752 | } |
1752 | return 0; | 1753 | return 0; |
1753 | } | 1754 | } |
1754 | 1755 | ||
1755 | asmlinkage long | 1756 | asmlinkage long |
1756 | sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data) | 1757 | sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data) |
1757 | { | 1758 | { |
1758 | struct task_struct *child; | 1759 | struct task_struct *child; |
1759 | unsigned int value, tmp; | 1760 | unsigned int value, tmp; |
1760 | long i, ret; | 1761 | long i, ret; |
1761 | 1762 | ||
1762 | lock_kernel(); | 1763 | lock_kernel(); |
1763 | if (request == PTRACE_TRACEME) { | 1764 | if (request == PTRACE_TRACEME) { |
1764 | ret = ptrace_traceme(); | 1765 | ret = ptrace_traceme(); |
1765 | goto out; | 1766 | goto out; |
1766 | } | 1767 | } |
1767 | 1768 | ||
1768 | child = ptrace_get_task_struct(pid); | 1769 | child = ptrace_get_task_struct(pid); |
1769 | if (IS_ERR(child)) { | 1770 | if (IS_ERR(child)) { |
1770 | ret = PTR_ERR(child); | 1771 | ret = PTR_ERR(child); |
1771 | goto out; | 1772 | goto out; |
1772 | } | 1773 | } |
1773 | 1774 | ||
1774 | if (request == PTRACE_ATTACH) { | 1775 | if (request == PTRACE_ATTACH) { |
1775 | ret = sys_ptrace(request, pid, addr, data); | 1776 | ret = sys_ptrace(request, pid, addr, data); |
1776 | goto out_tsk; | 1777 | goto out_tsk; |
1777 | } | 1778 | } |
1778 | 1779 | ||
1779 | ret = ptrace_check_attach(child, request == PTRACE_KILL); | 1780 | ret = ptrace_check_attach(child, request == PTRACE_KILL); |
1780 | if (ret < 0) | 1781 | if (ret < 0) |
1781 | goto out_tsk; | 1782 | goto out_tsk; |
1782 | 1783 | ||
1783 | switch (request) { | 1784 | switch (request) { |
1784 | case PTRACE_PEEKTEXT: | 1785 | case PTRACE_PEEKTEXT: |
1785 | case PTRACE_PEEKDATA: /* read word at location addr */ | 1786 | case PTRACE_PEEKDATA: /* read word at location addr */ |
1786 | ret = ia32_peek(child, addr, &value); | 1787 | ret = ia32_peek(child, addr, &value); |
1787 | if (ret == 0) | 1788 | if (ret == 0) |
1788 | ret = put_user(value, (unsigned int __user *) compat_ptr(data)); | 1789 | ret = put_user(value, (unsigned int __user *) compat_ptr(data)); |
1789 | else | 1790 | else |
1790 | ret = -EIO; | 1791 | ret = -EIO; |
1791 | goto out_tsk; | 1792 | goto out_tsk; |
1792 | 1793 | ||
1793 | case PTRACE_POKETEXT: | 1794 | case PTRACE_POKETEXT: |
1794 | case PTRACE_POKEDATA: /* write the word at location addr */ | 1795 | case PTRACE_POKEDATA: /* write the word at location addr */ |
1795 | ret = ia32_poke(child, addr, data); | 1796 | ret = ia32_poke(child, addr, data); |
1796 | goto out_tsk; | 1797 | goto out_tsk; |
1797 | 1798 | ||
1798 | case PTRACE_PEEKUSR: /* read word at addr in USER area */ | 1799 | case PTRACE_PEEKUSR: /* read word at addr in USER area */ |
1799 | ret = -EIO; | 1800 | ret = -EIO; |
1800 | if ((addr & 3) || addr > 17*sizeof(int)) | 1801 | if ((addr & 3) || addr > 17*sizeof(int)) |
1801 | break; | 1802 | break; |
1802 | 1803 | ||
1803 | tmp = getreg(child, addr); | 1804 | tmp = getreg(child, addr); |
1804 | if (!put_user(tmp, (unsigned int __user *) compat_ptr(data))) | 1805 | if (!put_user(tmp, (unsigned int __user *) compat_ptr(data))) |
1805 | ret = 0; | 1806 | ret = 0; |
1806 | break; | 1807 | break; |
1807 | 1808 | ||
1808 | case PTRACE_POKEUSR: /* write word at addr in USER area */ | 1809 | case PTRACE_POKEUSR: /* write word at addr in USER area */ |
1809 | ret = -EIO; | 1810 | ret = -EIO; |
1810 | if ((addr & 3) || addr > 17*sizeof(int)) | 1811 | if ((addr & 3) || addr > 17*sizeof(int)) |
1811 | break; | 1812 | break; |
1812 | 1813 | ||
1813 | putreg(child, addr, data); | 1814 | putreg(child, addr, data); |
1814 | ret = 0; | 1815 | ret = 0; |
1815 | break; | 1816 | break; |
1816 | 1817 | ||
1817 | case IA32_PTRACE_GETREGS: | 1818 | case IA32_PTRACE_GETREGS: |
1818 | if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) { | 1819 | if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) { |
1819 | ret = -EIO; | 1820 | ret = -EIO; |
1820 | break; | 1821 | break; |
1821 | } | 1822 | } |
1822 | for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) { | 1823 | for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) { |
1823 | put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data)); | 1824 | put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data)); |
1824 | data += sizeof(int); | 1825 | data += sizeof(int); |
1825 | } | 1826 | } |
1826 | ret = 0; | 1827 | ret = 0; |
1827 | break; | 1828 | break; |
1828 | 1829 | ||
1829 | case IA32_PTRACE_SETREGS: | 1830 | case IA32_PTRACE_SETREGS: |
1830 | if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) { | 1831 | if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) { |
1831 | ret = -EIO; | 1832 | ret = -EIO; |
1832 | break; | 1833 | break; |
1833 | } | 1834 | } |
1834 | for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) { | 1835 | for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) { |
1835 | get_user(tmp, (unsigned int __user *) compat_ptr(data)); | 1836 | get_user(tmp, (unsigned int __user *) compat_ptr(data)); |
1836 | putreg(child, i, tmp); | 1837 | putreg(child, i, tmp); |
1837 | data += sizeof(int); | 1838 | data += sizeof(int); |
1838 | } | 1839 | } |
1839 | ret = 0; | 1840 | ret = 0; |
1840 | break; | 1841 | break; |
1841 | 1842 | ||
1842 | case IA32_PTRACE_GETFPREGS: | 1843 | case IA32_PTRACE_GETFPREGS: |
1843 | ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *) | 1844 | ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *) |
1844 | compat_ptr(data)); | 1845 | compat_ptr(data)); |
1845 | break; | 1846 | break; |
1846 | 1847 | ||
1847 | case IA32_PTRACE_GETFPXREGS: | 1848 | case IA32_PTRACE_GETFPXREGS: |
1848 | ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *) | 1849 | ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *) |
1849 | compat_ptr(data)); | 1850 | compat_ptr(data)); |
1850 | break; | 1851 | break; |
1851 | 1852 | ||
1852 | case IA32_PTRACE_SETFPREGS: | 1853 | case IA32_PTRACE_SETFPREGS: |
1853 | ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *) | 1854 | ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *) |
1854 | compat_ptr(data)); | 1855 | compat_ptr(data)); |
1855 | break; | 1856 | break; |
1856 | 1857 | ||
1857 | case IA32_PTRACE_SETFPXREGS: | 1858 | case IA32_PTRACE_SETFPXREGS: |
1858 | ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *) | 1859 | ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *) |
1859 | compat_ptr(data)); | 1860 | compat_ptr(data)); |
1860 | break; | 1861 | break; |
1861 | 1862 | ||
1862 | case PTRACE_GETEVENTMSG: | 1863 | case PTRACE_GETEVENTMSG: |
1863 | ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data)); | 1864 | ret = put_user(child->ptrace_message, (unsigned int __user *) compat_ptr(data)); |
1864 | break; | 1865 | break; |
1865 | 1866 | ||
1866 | case PTRACE_SYSCALL: /* continue, stop after next syscall */ | 1867 | case PTRACE_SYSCALL: /* continue, stop after next syscall */ |
1867 | case PTRACE_CONT: /* restart after signal. */ | 1868 | case PTRACE_CONT: /* restart after signal. */ |
1868 | case PTRACE_KILL: | 1869 | case PTRACE_KILL: |
1869 | case PTRACE_SINGLESTEP: /* execute child for one instruction */ | 1870 | case PTRACE_SINGLESTEP: /* execute child for one instruction */ |
1870 | case PTRACE_DETACH: /* detach a process */ | 1871 | case PTRACE_DETACH: /* detach a process */ |
1871 | ret = sys_ptrace(request, pid, addr, data); | 1872 | ret = sys_ptrace(request, pid, addr, data); |
1872 | break; | 1873 | break; |
1873 | 1874 | ||
1874 | default: | 1875 | default: |
1875 | ret = ptrace_request(child, request, addr, data); | 1876 | ret = ptrace_request(child, request, addr, data); |
1876 | break; | 1877 | break; |
1877 | 1878 | ||
1878 | } | 1879 | } |
1879 | out_tsk: | 1880 | out_tsk: |
1880 | put_task_struct(child); | 1881 | put_task_struct(child); |
1881 | out: | 1882 | out: |
1882 | unlock_kernel(); | 1883 | unlock_kernel(); |
1883 | return ret; | 1884 | return ret; |
1884 | } | 1885 | } |
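
For reference, the PEEKUSR/POKEUSR arms above accept only word-aligned offsets within the 17-slot ia32 register area. The sketch below shows how a debugger would walk that area; it is hedged in that the offsets follow the ia32 layout checked above, and /bin/true plus the absent error handling are just placeholders:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid == 0) {
            ptrace(PTRACE_TRACEME, 0, NULL, NULL);  /* child: request tracing */
            execl("/bin/true", "true", (char *)NULL);
            _exit(1);
        }
        waitpid(pid, NULL, 0);                      /* child stops at execve */

        /* the 17 word-aligned slots allowed by the bounds check above */
        for (unsigned int off = 0; off < 17 * sizeof(int); off += sizeof(int)) {
            long val = ptrace(PTRACE_PEEKUSER, pid, (void *)(long)off, NULL);
            printf("user[%2u] = %#lx\n", off / 4, (unsigned long) val);
        }

        ptrace(PTRACE_KILL, pid, NULL, NULL);
        return 0;
    }
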
1885 | 1886 | ||
1886 | typedef struct { | 1887 | typedef struct { |
1887 | unsigned int ss_sp; | 1888 | unsigned int ss_sp; |
1888 | unsigned int ss_flags; | 1889 | unsigned int ss_flags; |
1889 | unsigned int ss_size; | 1890 | unsigned int ss_size; |
1890 | } ia32_stack_t; | 1891 | } ia32_stack_t; |
1891 | 1892 | ||
1892 | asmlinkage long | 1893 | asmlinkage long |
1893 | sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32, | 1894 | sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32, |
1894 | long arg2, long arg3, long arg4, long arg5, long arg6, | 1895 | long arg2, long arg3, long arg4, long arg5, long arg6, |
1895 | long arg7, struct pt_regs pt) | 1896 | long arg7, struct pt_regs pt) |
1896 | { | 1897 | { |
1897 | stack_t uss, uoss; | 1898 | stack_t uss, uoss; |
1898 | ia32_stack_t buf32; | 1899 | ia32_stack_t buf32; |
1899 | int ret; | 1900 | int ret; |
1900 | mm_segment_t old_fs = get_fs(); | 1901 | mm_segment_t old_fs = get_fs(); |
1901 | 1902 | ||
1902 | if (uss32) { | 1903 | if (uss32) { |
1903 | if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t))) | 1904 | if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t))) |
1904 | return -EFAULT; | 1905 | return -EFAULT; |
1905 | uss.ss_sp = (void __user *) (long) buf32.ss_sp; | 1906 | uss.ss_sp = (void __user *) (long) buf32.ss_sp; |
1906 | uss.ss_flags = buf32.ss_flags; | 1907 | uss.ss_flags = buf32.ss_flags; |
1907 | /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the | 1908 | /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the |
1908 | check and set it to the user requested value later */ | 1909 | check and set it to the user requested value later */ |
1909 | if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) { | 1910 | if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) { |
1910 | ret = -ENOMEM; | 1911 | ret = -ENOMEM; |
1911 | goto out; | 1912 | goto out; |
1912 | } | 1913 | } |
1913 | uss.ss_size = MINSIGSTKSZ; | 1914 | uss.ss_size = MINSIGSTKSZ; |
1914 | } | 1915 | } |
1915 | set_fs(KERNEL_DS); | 1916 | set_fs(KERNEL_DS); |
1916 | ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL, | 1917 | ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL, |
1917 | (stack_t __user *) &uoss, pt.r12); | 1918 | (stack_t __user *) &uoss, pt.r12); |
1918 | if (uss32) current->sas_ss_size = buf32.ss_size; | 1919 | if (uss32) current->sas_ss_size = buf32.ss_size; |
1919 | set_fs(old_fs); | 1920 | set_fs(old_fs); |
1920 | out: | 1921 | out: |
1921 | if (ret < 0) | 1922 | if (ret < 0) |
1922 | return ret; | 1923 | return ret; |
1923 | if (uoss32) { | 1924 | if (uoss32) { |
1924 | buf32.ss_sp = (long __user) uoss.ss_sp; | 1925 | buf32.ss_sp = (long __user) uoss.ss_sp; |
1925 | buf32.ss_flags = uoss.ss_flags; | 1926 | buf32.ss_flags = uoss.ss_flags; |
1926 | buf32.ss_size = uoss.ss_size; | 1927 | buf32.ss_size = uoss.ss_size; |
1927 | if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t))) | 1928 | if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t))) |
1928 | return -EFAULT; | 1929 | return -EFAULT; |
1929 | } | 1930 | } |
1930 | return ret; | 1931 | return ret; |
1931 | } | 1932 | } |
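
The MINSIGSTKSZ substitution above exists because the ia32 minimum stack size (MINSIGSTKSZ_IA32) is smaller than ia64's MINSIGSTKSZ: the wrapper feeds do_sigaltstack() a size it will accept, then stores the size the application actually requested. From the 32-bit side the dance is invisible; a plain call such as this sketch works unchanged:

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        stack_t ss;

        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;       /* anything at or above the ia32 minimum */
        ss.ss_flags = 0;
        if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) < 0) {
            perror("sigaltstack");
            return 1;
        }
        printf("alternate signal stack installed at %p\n", ss.ss_sp);
        return 0;
    }
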
1932 | 1933 | ||
1933 | asmlinkage int | 1934 | asmlinkage int |
1934 | sys32_pause (void) | 1935 | sys32_pause (void) |
1935 | { | 1936 | { |
1936 | current->state = TASK_INTERRUPTIBLE; | 1937 | current->state = TASK_INTERRUPTIBLE; |
1937 | schedule(); | 1938 | schedule(); |
1938 | return -ERESTARTNOHAND; | 1939 | return -ERESTARTNOHAND; |
1939 | } | 1940 | } |
1940 | 1941 | ||
1941 | asmlinkage int | 1942 | asmlinkage int |
1942 | sys32_msync (unsigned int start, unsigned int len, int flags) | 1943 | sys32_msync (unsigned int start, unsigned int len, int flags) |
1943 | { | 1944 | { |
1944 | unsigned int addr; | 1945 | unsigned int addr; |
1945 | 1946 | ||
1946 | if (OFFSET4K(start)) | 1947 | if (OFFSET4K(start)) |
1947 | return -EINVAL; | 1948 | return -EINVAL; |
1948 | addr = PAGE_START(start); | 1949 | addr = PAGE_START(start); |
1949 | return sys_msync(addr, len + (start - addr), flags); | 1950 | return sys_msync(addr, len + (start - addr), flags); |
1950 | } | 1951 | } |
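
sys32_msync() requires start to sit on an ia32 4K boundary, then rounds it down to the (possibly larger) ia64 page and grows len by the same amount so the end of the range is unchanged. The arithmetic in isolation, with 16K assumed as an illustrative ia64 PAGE_SIZE:

    #include <stdio.h>

    #define IA32_PAGE_SIZE 4096u
    #define IA64_PAGE_SIZE 16384u                       /* illustrative */
    #define OFFSET4K(a)    ((a) & (IA32_PAGE_SIZE - 1))
    #define PAGE_START(a)  ((a) & ~(IA64_PAGE_SIZE - 1))

    int main(void)
    {
        unsigned int start = 0x12345000, len = 100;     /* 4K- but not 16K-aligned */
        unsigned int addr;

        if (OFFSET4K(start))
            return 1;                                   /* kernel returns -EINVAL */
        addr = PAGE_START(start);
        /* end preserved: addr + new_len == start + len */
        printf("addr=%#x new_len=%u\n", addr, len + (start - addr));
        return 0;
    }
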
1951 | 1952 | ||
1952 | struct sysctl32 { | 1953 | struct sysctl32 { |
1953 | unsigned int name; | 1954 | unsigned int name; |
1954 | int nlen; | 1955 | int nlen; |
1955 | unsigned int oldval; | 1956 | unsigned int oldval; |
1956 | unsigned int oldlenp; | 1957 | unsigned int oldlenp; |
1957 | unsigned int newval; | 1958 | unsigned int newval; |
1958 | unsigned int newlen; | 1959 | unsigned int newlen; |
1959 | unsigned int __unused[4]; | 1960 | unsigned int __unused[4]; |
1960 | }; | 1961 | }; |
1961 | 1962 | ||
1962 | #ifdef CONFIG_SYSCTL_SYSCALL | 1963 | #ifdef CONFIG_SYSCTL_SYSCALL |
1963 | asmlinkage long | 1964 | asmlinkage long |
1964 | sys32_sysctl (struct sysctl32 __user *args) | 1965 | sys32_sysctl (struct sysctl32 __user *args) |
1965 | { | 1966 | { |
1966 | struct sysctl32 a32; | 1967 | struct sysctl32 a32; |
1967 | mm_segment_t old_fs = get_fs (); | 1968 | mm_segment_t old_fs = get_fs (); |
1968 | void __user *oldvalp, *newvalp; | 1969 | void __user *oldvalp, *newvalp; |
1969 | size_t oldlen; | 1970 | size_t oldlen; |
1970 | int __user *namep; | 1971 | int __user *namep; |
1971 | long ret; | 1972 | long ret; |
1972 | 1973 | ||
1973 | if (copy_from_user(&a32, args, sizeof(a32))) | 1974 | if (copy_from_user(&a32, args, sizeof(a32))) |
1974 | return -EFAULT; | 1975 | return -EFAULT; |
1975 | 1976 | ||
1976 | /* | 1977 | /* |
1977 | * We need to pre-validate these because we have to disable address checking | 1978 | * We need to pre-validate these because we have to disable address checking |
1978 | * before calling do_sysctl() because of OLDLEN but we can't run the risk of the | 1979 | * before calling do_sysctl() because of OLDLEN but we can't run the risk of the |
1979 | * user specifying bad addresses here. Well, since we're dealing with 32 bit | 1980 | * user specifying bad addresses here. Well, since we're dealing with 32 bit |
1980 | * addresses, we KNOW that access_ok() will always succeed, so this is an | 1981 | * addresses, we KNOW that access_ok() will always succeed, so this is an |
1981 | * expensive NOP, but so what... | 1982 | * expensive NOP, but so what... |
1982 | */ | 1983 | */ |
1983 | namep = (int __user *) compat_ptr(a32.name); | 1984 | namep = (int __user *) compat_ptr(a32.name); |
1984 | oldvalp = compat_ptr(a32.oldval); | 1985 | oldvalp = compat_ptr(a32.oldval); |
1985 | newvalp = compat_ptr(a32.newval); | 1986 | newvalp = compat_ptr(a32.newval); |
1986 | 1987 | ||
1987 | if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp))) | 1988 | if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp))) |
1988 | || !access_ok(VERIFY_WRITE, namep, 0) | 1989 | || !access_ok(VERIFY_WRITE, namep, 0) |
1989 | || !access_ok(VERIFY_WRITE, oldvalp, 0) | 1990 | || !access_ok(VERIFY_WRITE, oldvalp, 0) |
1990 | || !access_ok(VERIFY_WRITE, newvalp, 0)) | 1991 | || !access_ok(VERIFY_WRITE, newvalp, 0)) |
1991 | return -EFAULT; | 1992 | return -EFAULT; |
1992 | 1993 | ||
1993 | set_fs(KERNEL_DS); | 1994 | set_fs(KERNEL_DS); |
1994 | lock_kernel(); | 1995 | lock_kernel(); |
1995 | ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen, | 1996 | ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen, |
1996 | newvalp, (size_t) a32.newlen); | 1997 | newvalp, (size_t) a32.newlen); |
1997 | unlock_kernel(); | 1998 | unlock_kernel(); |
1998 | set_fs(old_fs); | 1999 | set_fs(old_fs); |
1999 | 2000 | ||
2000 | if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp))) | 2001 | if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp))) |
2001 | return -EFAULT; | 2002 | return -EFAULT; |
2002 | 2003 | ||
2003 | return ret; | 2004 | return ret; |
2004 | } | 2005 | } |
2005 | #endif | 2006 | #endif |
2006 | 2007 | ||
2007 | asmlinkage long | 2008 | asmlinkage long |
2008 | sys32_newuname (struct new_utsname __user *name) | 2009 | sys32_newuname (struct new_utsname __user *name) |
2009 | { | 2010 | { |
2010 | int ret = sys_newuname(name); | 2011 | int ret = sys_newuname(name); |
2011 | 2012 | ||
2012 | if (!ret) | 2013 | if (!ret) |
2013 | if (copy_to_user(name->machine, "i686\0\0\0", 8)) | 2014 | if (copy_to_user(name->machine, "i686\0\0\0", 8)) |
2014 | ret = -EFAULT; | 2015 | ret = -EFAULT; |
2015 | return ret; | 2016 | return ret; |
2016 | } | 2017 | } |
2017 | 2018 | ||
2018 | asmlinkage long | 2019 | asmlinkage long |
2019 | sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid) | 2020 | sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid) |
2020 | { | 2021 | { |
2021 | uid_t a, b, c; | 2022 | uid_t a, b, c; |
2022 | int ret; | 2023 | int ret; |
2023 | mm_segment_t old_fs = get_fs(); | 2024 | mm_segment_t old_fs = get_fs(); |
2024 | 2025 | ||
2025 | set_fs(KERNEL_DS); | 2026 | set_fs(KERNEL_DS); |
2026 | ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c); | 2027 | ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c); |
2027 | set_fs(old_fs); | 2028 | set_fs(old_fs); |
2028 | 2029 | ||
2029 | if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid)) | 2030 | if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid)) |
2030 | return -EFAULT; | 2031 | return -EFAULT; |
2031 | return ret; | 2032 | return ret; |
2032 | } | 2033 | } |
2033 | 2034 | ||
2034 | asmlinkage long | 2035 | asmlinkage long |
2035 | sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid) | 2036 | sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid) |
2036 | { | 2037 | { |
2037 | gid_t a, b, c; | 2038 | gid_t a, b, c; |
2038 | int ret; | 2039 | int ret; |
2039 | mm_segment_t old_fs = get_fs(); | 2040 | mm_segment_t old_fs = get_fs(); |
2040 | 2041 | ||
2041 | set_fs(KERNEL_DS); | 2042 | set_fs(KERNEL_DS); |
2042 | ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c); | 2043 | ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c); |
2043 | set_fs(old_fs); | 2044 | set_fs(old_fs); |
2044 | 2045 | ||
2045 | if (ret) | 2046 | if (ret) |
2046 | return ret; | 2047 | return ret; |
2047 | 2048 | ||
2048 | return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid); | 2049 | return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid); |
2049 | } | 2050 | } |
2050 | 2051 | ||
2051 | asmlinkage long | 2052 | asmlinkage long |
2052 | sys32_lseek (unsigned int fd, int offset, unsigned int whence) | 2053 | sys32_lseek (unsigned int fd, int offset, unsigned int whence) |
2053 | { | 2054 | { |
2054 | /* Sign-extension of "offset" is important here... */ | 2055 | /* Sign-extension of "offset" is important here... */ |
2055 | return sys_lseek(fd, offset, whence); | 2056 | return sys_lseek(fd, offset, whence); |
2056 | } | 2057 | } |
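
The comment is the whole wrapper: offset arrives as a 32-bit int and must be widened with its sign, or a short backwards seek would become a near-4 GiB forward one. A quick demonstration of the two widenings:

    #include <stdio.h>

    int main(void)
    {
        int offset32 = -4096;                          /* what the ia32 app passed */
        long signext = (long) offset32;                /* what sys32_lseek relies on */
        long zeroext = (long)(unsigned int) offset32;  /* the wrong widening */

        printf("sign-extended: %ld\n", signext);       /* -4096 */
        printf("zero-extended: %ld\n", zeroext);       /* 4294963200 */
        return 0;
    }
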
2057 | 2058 | ||
2058 | static int | 2059 | static int |
2059 | groups16_to_user(short __user *grouplist, struct group_info *group_info) | 2060 | groups16_to_user(short __user *grouplist, struct group_info *group_info) |
2060 | { | 2061 | { |
2061 | int i; | 2062 | int i; |
2062 | short group; | 2063 | short group; |
2063 | 2064 | ||
2064 | for (i = 0; i < group_info->ngroups; i++) { | 2065 | for (i = 0; i < group_info->ngroups; i++) { |
2065 | group = (short)GROUP_AT(group_info, i); | 2066 | group = (short)GROUP_AT(group_info, i); |
2066 | if (put_user(group, grouplist+i)) | 2067 | if (put_user(group, grouplist+i)) |
2067 | return -EFAULT; | 2068 | return -EFAULT; |
2068 | } | 2069 | } |
2069 | 2070 | ||
2070 | return 0; | 2071 | return 0; |
2071 | } | 2072 | } |
2072 | 2073 | ||
2073 | static int | 2074 | static int |
2074 | groups16_from_user(struct group_info *group_info, short __user *grouplist) | 2075 | groups16_from_user(struct group_info *group_info, short __user *grouplist) |
2075 | { | 2076 | { |
2076 | int i; | 2077 | int i; |
2077 | short group; | 2078 | short group; |
2078 | 2079 | ||
2079 | for (i = 0; i < group_info->ngroups; i++) { | 2080 | for (i = 0; i < group_info->ngroups; i++) { |
2080 | if (get_user(group, grouplist+i)) | 2081 | if (get_user(group, grouplist+i)) |
2081 | return -EFAULT; | 2082 | return -EFAULT; |
2082 | GROUP_AT(group_info, i) = (gid_t)group; | 2083 | GROUP_AT(group_info, i) = (gid_t)group; |
2083 | } | 2084 | } |
2084 | 2085 | ||
2085 | return 0; | 2086 | return 0; |
2086 | } | 2087 | } |
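
These two helpers shuttle gids between the kernel's group_info and the legacy 16-bit grouplist; note that groups16_to_user() silently truncates any gid that does not fit in 16 bits, as this stand-alone example shows:

    #include <stdio.h>

    int main(void)
    {
        unsigned int gid32 = 100000;   /* legitimate 32-bit gid */
        short gid16 = (short) gid32;   /* the cast groups16_to_user() performs */

        printf("gid %u becomes %d in a 16-bit grouplist\n", gid32, gid16);
        return 0;
    }
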
2087 | 2088 | ||
2088 | asmlinkage long | 2089 | asmlinkage long |
2089 | sys32_getgroups16 (int gidsetsize, short __user *grouplist) | 2090 | sys32_getgroups16 (int gidsetsize, short __user *grouplist) |
2090 | { | 2091 | { |
2091 | int i; | 2092 | int i; |
2092 | 2093 | ||
2093 | if (gidsetsize < 0) | 2094 | if (gidsetsize < 0) |
2094 | return -EINVAL; | 2095 | return -EINVAL; |
2095 | 2096 | ||
2096 | get_group_info(current->group_info); | 2097 | get_group_info(current->group_info); |
2097 | i = current->group_info->ngroups; | 2098 | i = current->group_info->ngroups; |
2098 | if (gidsetsize) { | 2099 | if (gidsetsize) { |
2099 | if (i > gidsetsize) { | 2100 | if (i > gidsetsize) { |
2100 | i = -EINVAL; | 2101 | i = -EINVAL; |
2101 | goto out; | 2102 | goto out; |
2102 | } | 2103 | } |
2103 | if (groups16_to_user(grouplist, current->group_info)) { | 2104 | if (groups16_to_user(grouplist, current->group_info)) { |
2104 | i = -EFAULT; | 2105 | i = -EFAULT; |
2105 | goto out; | 2106 | goto out; |
2106 | } | 2107 | } |
2107 | } | 2108 | } |
2108 | out: | 2109 | out: |
2109 | put_group_info(current->group_info); | 2110 | put_group_info(current->group_info); |
2110 | return i; | 2111 | return i; |
2111 | } | 2112 | } |
2112 | 2113 | ||
2113 | asmlinkage long | 2114 | asmlinkage long |
2114 | sys32_setgroups16 (int gidsetsize, short __user *grouplist) | 2115 | sys32_setgroups16 (int gidsetsize, short __user *grouplist) |
2115 | { | 2116 | { |
2116 | struct group_info *group_info; | 2117 | struct group_info *group_info; |
2117 | int retval; | 2118 | int retval; |
2118 | 2119 | ||
2119 | if (!capable(CAP_SETGID)) | 2120 | if (!capable(CAP_SETGID)) |
2120 | return -EPERM; | 2121 | return -EPERM; |
2121 | if ((unsigned)gidsetsize > NGROUPS_MAX) | 2122 | if ((unsigned)gidsetsize > NGROUPS_MAX) |
2122 | return -EINVAL; | 2123 | return -EINVAL; |
2123 | 2124 | ||
2124 | group_info = groups_alloc(gidsetsize); | 2125 | group_info = groups_alloc(gidsetsize); |
2125 | if (!group_info) | 2126 | if (!group_info) |
2126 | return -ENOMEM; | 2127 | return -ENOMEM; |
2127 | retval = groups16_from_user(group_info, grouplist); | 2128 | retval = groups16_from_user(group_info, grouplist); |
2128 | if (retval) { | 2129 | if (retval) { |
2129 | put_group_info(group_info); | 2130 | put_group_info(group_info); |
2130 | return retval; | 2131 | return retval; |
2131 | } | 2132 | } |
2132 | 2133 | ||
2133 | retval = set_current_groups(group_info); | 2134 | retval = set_current_groups(group_info); |
2134 | put_group_info(group_info); | 2135 | put_group_info(group_info); |
2135 | 2136 | ||
2136 | return retval; | 2137 | return retval; |
2137 | } | 2138 | } |
2138 | 2139 | ||
2139 | asmlinkage long | 2140 | asmlinkage long |
2140 | sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi) | 2141 | sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi) |
2141 | { | 2142 | { |
2142 | return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo); | 2143 | return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo); |
2143 | } | 2144 | } |
2144 | 2145 | ||
2145 | asmlinkage long | 2146 | asmlinkage long |
2146 | sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi) | 2147 | sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi) |
2147 | { | 2148 | { |
2148 | return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo); | 2149 | return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo); |
2149 | } | 2150 | } |
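
truncate64 and ftruncate64 here, like the pread/pwrite and fadvise64_64 wrappers further down, rebuild one 64-bit quantity from two 32-bit halves delivered in separate registers. The combine step, extracted into a runnable form:

    #include <stdio.h>

    /* mirror of ((unsigned long) len_hi << 32) | len_lo */
    static unsigned long combine64(unsigned int lo, unsigned int hi)
    {
        return ((unsigned long) hi << 32) | lo;
    }

    int main(void)
    {
        /* a 6 GiB length: hi word 1, lo word 0x80000000 */
        printf("%#lx\n", combine64(0x80000000u, 1));   /* 0x180000000 */
        return 0;
    }
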
2150 | 2151 | ||
2151 | static int | 2152 | static int |
2152 | putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf) | 2153 | putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf) |
2153 | { | 2154 | { |
2154 | int err; | 2155 | int err; |
2155 | u64 hdev; | 2156 | u64 hdev; |
2156 | 2157 | ||
2157 | if (clear_user(ubuf, sizeof(*ubuf))) | 2158 | if (clear_user(ubuf, sizeof(*ubuf))) |
2158 | return -EFAULT; | 2159 | return -EFAULT; |
2159 | 2160 | ||
2160 | hdev = huge_encode_dev(kbuf->dev); | 2161 | hdev = huge_encode_dev(kbuf->dev); |
2161 | err = __put_user(hdev, (u32 __user*)&ubuf->st_dev); | 2162 | err = __put_user(hdev, (u32 __user*)&ubuf->st_dev); |
2162 | err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1); | 2163 | err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1); |
2163 | err |= __put_user(kbuf->ino, &ubuf->__st_ino); | 2164 | err |= __put_user(kbuf->ino, &ubuf->__st_ino); |
2164 | err |= __put_user(kbuf->ino, &ubuf->st_ino_lo); | 2165 | err |= __put_user(kbuf->ino, &ubuf->st_ino_lo); |
2165 | err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi); | 2166 | err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi); |
2166 | err |= __put_user(kbuf->mode, &ubuf->st_mode); | 2167 | err |= __put_user(kbuf->mode, &ubuf->st_mode); |
2167 | err |= __put_user(kbuf->nlink, &ubuf->st_nlink); | 2168 | err |= __put_user(kbuf->nlink, &ubuf->st_nlink); |
2168 | err |= __put_user(kbuf->uid, &ubuf->st_uid); | 2169 | err |= __put_user(kbuf->uid, &ubuf->st_uid); |
2169 | err |= __put_user(kbuf->gid, &ubuf->st_gid); | 2170 | err |= __put_user(kbuf->gid, &ubuf->st_gid); |
2170 | hdev = huge_encode_dev(kbuf->rdev); | 2171 | hdev = huge_encode_dev(kbuf->rdev); |
2171 | err |= __put_user(hdev, (u32 __user*)&ubuf->st_rdev); | 2172 | err |= __put_user(hdev, (u32 __user*)&ubuf->st_rdev); |
2172 | err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1); | 2173 | err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1); |
2173 | err |= __put_user(kbuf->size, &ubuf->st_size_lo); | 2174 | err |= __put_user(kbuf->size, &ubuf->st_size_lo); |
2174 | err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi); | 2175 | err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi); |
2175 | err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime); | 2176 | err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime); |
2176 | err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec); | 2177 | err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec); |
2177 | err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime); | 2178 | err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime); |
2178 | err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec); | 2179 | err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec); |
2179 | err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime); | 2180 | err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime); |
2180 | err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec); | 2181 | err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec); |
2181 | err |= __put_user(kbuf->blksize, &ubuf->st_blksize); | 2182 | err |= __put_user(kbuf->blksize, &ubuf->st_blksize); |
2182 | err |= __put_user(kbuf->blocks, &ubuf->st_blocks); | 2183 | err |= __put_user(kbuf->blocks, &ubuf->st_blocks); |
2183 | return err; | 2184 | return err; |
2184 | } | 2185 | } |
2185 | 2186 | ||
2186 | asmlinkage long | 2187 | asmlinkage long |
2187 | sys32_stat64 (char __user *filename, struct stat64 __user *statbuf) | 2188 | sys32_stat64 (char __user *filename, struct stat64 __user *statbuf) |
2188 | { | 2189 | { |
2189 | struct kstat s; | 2190 | struct kstat s; |
2190 | long ret = vfs_stat(filename, &s); | 2191 | long ret = vfs_stat(filename, &s); |
2191 | if (!ret) | 2192 | if (!ret) |
2192 | ret = putstat64(statbuf, &s); | 2193 | ret = putstat64(statbuf, &s); |
2193 | return ret; | 2194 | return ret; |
2194 | } | 2195 | } |
2195 | 2196 | ||
2196 | asmlinkage long | 2197 | asmlinkage long |
2197 | sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf) | 2198 | sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf) |
2198 | { | 2199 | { |
2199 | struct kstat s; | 2200 | struct kstat s; |
2200 | long ret = vfs_lstat(filename, &s); | 2201 | long ret = vfs_lstat(filename, &s); |
2201 | if (!ret) | 2202 | if (!ret) |
2202 | ret = putstat64(statbuf, &s); | 2203 | ret = putstat64(statbuf, &s); |
2203 | return ret; | 2204 | return ret; |
2204 | } | 2205 | } |
2205 | 2206 | ||
2206 | asmlinkage long | 2207 | asmlinkage long |
2207 | sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf) | 2208 | sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf) |
2208 | { | 2209 | { |
2209 | struct kstat s; | 2210 | struct kstat s; |
2210 | long ret = vfs_fstat(fd, &s); | 2211 | long ret = vfs_fstat(fd, &s); |
2211 | if (!ret) | 2212 | if (!ret) |
2212 | ret = putstat64(statbuf, &s); | 2213 | ret = putstat64(statbuf, &s); |
2213 | return ret; | 2214 | return ret; |
2214 | } | 2215 | } |
2215 | 2216 | ||
2216 | asmlinkage long | 2217 | asmlinkage long |
2217 | sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval) | 2218 | sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval) |
2218 | { | 2219 | { |
2219 | mm_segment_t old_fs = get_fs(); | 2220 | mm_segment_t old_fs = get_fs(); |
2220 | struct timespec t; | 2221 | struct timespec t; |
2221 | long ret; | 2222 | long ret; |
2222 | 2223 | ||
2223 | set_fs(KERNEL_DS); | 2224 | set_fs(KERNEL_DS); |
2224 | ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); | 2225 | ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); |
2225 | set_fs(old_fs); | 2226 | set_fs(old_fs); |
2226 | if (put_compat_timespec(&t, interval)) | 2227 | if (put_compat_timespec(&t, interval)) |
2227 | return -EFAULT; | 2228 | return -EFAULT; |
2228 | return ret; | 2229 | return ret; |
2229 | } | 2230 | } |
2230 | 2231 | ||
2231 | asmlinkage long | 2232 | asmlinkage long |
2232 | sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi) | 2233 | sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi) |
2233 | { | 2234 | { |
2234 | return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo); | 2235 | return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo); |
2235 | } | 2236 | } |
2236 | 2237 | ||
2237 | asmlinkage long | 2238 | asmlinkage long |
2238 | sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi) | 2239 | sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi) |
2239 | { | 2240 | { |
2240 | return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo); | 2241 | return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo); |
2241 | } | 2242 | } |
2242 | 2243 | ||
2243 | asmlinkage long | 2244 | asmlinkage long |
2244 | sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count) | 2245 | sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count) |
2245 | { | 2246 | { |
2246 | mm_segment_t old_fs = get_fs(); | 2247 | mm_segment_t old_fs = get_fs(); |
2247 | long ret; | 2248 | long ret; |
2248 | off_t of; | 2249 | off_t of; |
2249 | 2250 | ||
2250 | if (offset && get_user(of, offset)) | 2251 | if (offset && get_user(of, offset)) |
2251 | return -EFAULT; | 2252 | return -EFAULT; |
2252 | 2253 | ||
2253 | set_fs(KERNEL_DS); | 2254 | set_fs(KERNEL_DS); |
2254 | ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count); | 2255 | ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count); |
2255 | set_fs(old_fs); | 2256 | set_fs(old_fs); |
2256 | 2257 | ||
2257 | if (offset && put_user(of, offset)) | 2258 | if (offset && put_user(of, offset)) |
2258 | return -EFAULT; | 2259 | return -EFAULT; |
2259 | 2260 | ||
2260 | return ret; | 2261 | return ret; |
2261 | } | 2262 | } |
2262 | 2263 | ||
2263 | asmlinkage long | 2264 | asmlinkage long |
2264 | sys32_personality (unsigned int personality) | 2265 | sys32_personality (unsigned int personality) |
2265 | { | 2266 | { |
2266 | long ret; | 2267 | long ret; |
2267 | 2268 | ||
2268 | if (current->personality == PER_LINUX32 && personality == PER_LINUX) | 2269 | if (current->personality == PER_LINUX32 && personality == PER_LINUX) |
2269 | personality = PER_LINUX32; | 2270 | personality = PER_LINUX32; |
2270 | ret = sys_personality(personality); | 2271 | ret = sys_personality(personality); |
2271 | if (ret == PER_LINUX32) | 2272 | if (ret == PER_LINUX32) |
2272 | ret = PER_LINUX; | 2273 | ret = PER_LINUX; |
2273 | return ret; | 2274 | return ret; |
2274 | } | 2275 | } |
2275 | 2276 | ||
2276 | asmlinkage unsigned long | 2277 | asmlinkage unsigned long |
2277 | sys32_brk (unsigned int brk) | 2278 | sys32_brk (unsigned int brk) |
2278 | { | 2279 | { |
2279 | unsigned long ret, obrk; | 2280 | unsigned long ret, obrk; |
2280 | struct mm_struct *mm = current->mm; | 2281 | struct mm_struct *mm = current->mm; |
2281 | 2282 | ||
2282 | obrk = mm->brk; | 2283 | obrk = mm->brk; |
2283 | ret = sys_brk(brk); | 2284 | ret = sys_brk(brk); |
2284 | if (ret < obrk) | 2285 | if (ret < obrk) |
2285 | clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret); | 2286 | clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret); |
2286 | return ret; | 2287 | return ret; |
2287 | } | 2288 | } |
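
The clear_user() covers brk shrinkage: an ia64 page is larger than the 4K pages an ia32 process assumes, so the page holding the new break can still carry stale heap bytes above it; zeroing PAGE_ALIGN(ret) - ret bytes keeps them from resurfacing when brk grows again. The span computation, with a 16K page assumed for illustration:

    #include <stdio.h>

    #define IA64_PAGE_SIZE 16384u   /* illustrative */
    #define PAGE_ALIGN(a)  (((a) + IA64_PAGE_SIZE - 1) & ~(IA64_PAGE_SIZE - 1))

    int main(void)
    {
        unsigned int new_brk = 0x10003000;   /* where brk shrank to */

        printf("clear %u bytes at %#x\n",
               PAGE_ALIGN(new_brk) - new_brk, new_brk);   /* 4096 bytes */
        return 0;
    }
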
2288 | 2289 | ||
2289 | /* Structure for ia32 emulation on ia64 */ | 2290 | /* Structure for ia32 emulation on ia64 */ |
2290 | struct epoll_event32 | 2291 | struct epoll_event32 |
2291 | { | 2292 | { |
2292 | u32 events; | 2293 | u32 events; |
2293 | u32 data[2]; | 2294 | u32 data[2]; |
2294 | }; | 2295 | }; |
2295 | 2296 | ||
2296 | asmlinkage long | 2297 | asmlinkage long |
2297 | sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event) | 2298 | sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event) |
2298 | { | 2299 | { |
2299 | mm_segment_t old_fs = get_fs(); | 2300 | mm_segment_t old_fs = get_fs(); |
2300 | struct epoll_event event64; | 2301 | struct epoll_event event64; |
2301 | int error; | 2302 | int error; |
2302 | u32 data_halfword; | 2303 | u32 data_halfword; |
2303 | 2304 | ||
2304 | if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32))) | 2305 | if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32))) |
2305 | return -EFAULT; | 2306 | return -EFAULT; |
2306 | 2307 | ||
2307 | __get_user(event64.events, &event->events); | 2308 | __get_user(event64.events, &event->events); |
2308 | __get_user(data_halfword, &event->data[0]); | 2309 | __get_user(data_halfword, &event->data[0]); |
2309 | event64.data = data_halfword; | 2310 | event64.data = data_halfword; |
2310 | __get_user(data_halfword, &event->data[1]); | 2311 | __get_user(data_halfword, &event->data[1]); |
2311 | event64.data |= (u64)data_halfword << 32; | 2312 | event64.data |= (u64)data_halfword << 32; |
2312 | 2313 | ||
2313 | set_fs(KERNEL_DS); | 2314 | set_fs(KERNEL_DS); |
2314 | error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64); | 2315 | error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64); |
2315 | set_fs(old_fs); | 2316 | set_fs(old_fs); |
2316 | 2317 | ||
2317 | return error; | 2318 | return error; |
2318 | } | 2319 | } |
2319 | 2320 | ||
2320 | asmlinkage long | 2321 | asmlinkage long |
2321 | sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents, | 2322 | sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents, |
2322 | int timeout) | 2323 | int timeout) |
2323 | { | 2324 | { |
2324 | struct epoll_event *events64 = NULL; | 2325 | struct epoll_event *events64 = NULL; |
2325 | mm_segment_t old_fs = get_fs(); | 2326 | mm_segment_t old_fs = get_fs(); |
2326 | int numevents, size; | 2327 | int numevents, size; |
2327 | int evt_idx; | 2328 | int evt_idx; |
2328 | int do_free_pages = 0; | 2329 | int do_free_pages = 0; |
2329 | 2330 | ||
2330 | if (maxevents <= 0) { | 2331 | if (maxevents <= 0) { |
2331 | return -EINVAL; | 2332 | return -EINVAL; |
2332 | } | 2333 | } |
2333 | 2334 | ||
2334 | /* Verify that the area passed by the user is writeable */ | 2335 | /* Verify that the area passed by the user is writeable */ |
2335 | if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32))) | 2336 | if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32))) |
2336 | return -EFAULT; | 2337 | return -EFAULT; |
2337 | 2338 | ||
2338 | /* | 2339 | /* |
2339 | * Allocate space for the intermediate copy. If the space needed | 2340 | * Allocate space for the intermediate copy. If the space needed |
2340 | * is large enough to cause kmalloc to fail, then try again with | 2341 | * is large enough to cause kmalloc to fail, then try again with |
2341 | * __get_free_pages. | 2342 | * __get_free_pages. |
2342 | */ | 2343 | */ |
2343 | size = maxevents * sizeof(struct epoll_event); | 2344 | size = maxevents * sizeof(struct epoll_event); |
2344 | events64 = kmalloc(size, GFP_KERNEL); | 2345 | events64 = kmalloc(size, GFP_KERNEL); |
2345 | if (events64 == NULL) { | 2346 | if (events64 == NULL) { |
2346 | events64 = (struct epoll_event *) | 2347 | events64 = (struct epoll_event *) |
2347 | __get_free_pages(GFP_KERNEL, get_order(size)); | 2348 | __get_free_pages(GFP_KERNEL, get_order(size)); |
2348 | if (events64 == NULL) | 2349 | if (events64 == NULL) |
2349 | return -ENOMEM; | 2350 | return -ENOMEM; |
2350 | do_free_pages = 1; | 2351 | do_free_pages = 1; |
2351 | } | 2352 | } |
2352 | 2353 | ||
2353 | /* Do the system call */ | 2354 | /* Do the system call */ |
2354 | set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem*/ | 2355 | set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem*/ |
2355 | numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64, | 2356 | numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64, |
2356 | maxevents, timeout); | 2357 | maxevents, timeout); |
2357 | set_fs(old_fs); | 2358 | set_fs(old_fs); |
2358 | 2359 | ||
2359 | /* Don't modify userspace memory if we're returning an error */ | 2360 | /* Don't modify userspace memory if we're returning an error */ |
2360 | if (numevents > 0) { | 2361 | if (numevents > 0) { |
2361 | /* Translate the 64-bit structures back into the 32-bit | 2362 | /* Translate the 64-bit structures back into the 32-bit |
2362 | structures */ | 2363 | structures */ |
2363 | for (evt_idx = 0; evt_idx < numevents; evt_idx++) { | 2364 | for (evt_idx = 0; evt_idx < numevents; evt_idx++) { |
2364 | __put_user(events64[evt_idx].events, | 2365 | __put_user(events64[evt_idx].events, |
2365 | &events[evt_idx].events); | 2366 | &events[evt_idx].events); |
2366 | __put_user((u32)events64[evt_idx].data, | 2367 | __put_user((u32)events64[evt_idx].data, |
2367 | &events[evt_idx].data[0]); | 2368 | &events[evt_idx].data[0]); |
2368 | __put_user((u32)(events64[evt_idx].data >> 32), | 2369 | __put_user((u32)(events64[evt_idx].data >> 32), |
2369 | &events[evt_idx].data[1]); | 2370 | &events[evt_idx].data[1]); |
2370 | } | 2371 | } |
2371 | } | 2372 | } |
2372 | 2373 | ||
2373 | if (do_free_pages) | 2374 | if (do_free_pages) |
2374 | free_pages((unsigned long) events64, get_order(size)); | 2375 | free_pages((unsigned long) events64, get_order(size)); |
2375 | else | 2376 | else |
2376 | kfree(events64); | 2377 | kfree(events64); |
2377 | return numevents; | 2378 | return numevents; |
2378 | } | 2379 | } |
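
The copy-back loop is needed because struct epoll_event carries a 64-bit data field on ia64 while the ia32 layout above splits it into two u32 words, low word first. A user-space model of that translation (struct names here are illustrative stand-ins):

    #include <stdio.h>

    struct event64 { unsigned int events; unsigned long data; };
    struct event32 { unsigned int events; unsigned int data[2]; };

    /* mirror of the loop at the end of sys32_epoll_wait() */
    static void event64_to_32(const struct event64 *in, struct event32 *out)
    {
        out->events  = in->events;
        out->data[0] = (unsigned int) in->data;           /* low word  */
        out->data[1] = (unsigned int) (in->data >> 32);   /* high word */
    }

    int main(void)
    {
        struct event64 e64 = { 0x1, 0x1122334455667788UL };
        struct event32 e32;

        event64_to_32(&e64, &e32);
        printf("lo=%#x hi=%#x\n", e32.data[0], e32.data[1]);
        return 0;
    }
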
2379 | 2380 | ||
2380 | /* | 2381 | /* |
2381 | * Get a yet unused TLS descriptor index. | 2382 | * Get a yet unused TLS descriptor index. |
2382 | */ | 2383 | */ |
2383 | static int | 2384 | static int |
2384 | get_free_idx (void) | 2385 | get_free_idx (void) |
2385 | { | 2386 | { |
2386 | struct thread_struct *t = ¤t->thread; | 2387 | struct thread_struct *t = ¤t->thread; |
2387 | int idx; | 2388 | int idx; |
2388 | 2389 | ||
2389 | for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) | 2390 | for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) |
2390 | if (desc_empty(t->tls_array + idx)) | 2391 | if (desc_empty(t->tls_array + idx)) |
2391 | return idx + GDT_ENTRY_TLS_MIN; | 2392 | return idx + GDT_ENTRY_TLS_MIN; |
2392 | return -ESRCH; | 2393 | return -ESRCH; |
2393 | } | 2394 | } |
2394 | 2395 | ||
2395 | /* | 2396 | /* |
2396 | * Set a given TLS descriptor: | 2397 | * Set a given TLS descriptor: |
2397 | */ | 2398 | */ |
2398 | asmlinkage int | 2399 | asmlinkage int |
2399 | sys32_set_thread_area (struct ia32_user_desc __user *u_info) | 2400 | sys32_set_thread_area (struct ia32_user_desc __user *u_info) |
2400 | { | 2401 | { |
2401 | struct thread_struct *t = ¤t->thread; | 2402 | struct thread_struct *t = ¤t->thread; |
2402 | struct ia32_user_desc info; | 2403 | struct ia32_user_desc info; |
2403 | struct desc_struct *desc; | 2404 | struct desc_struct *desc; |
2404 | int cpu, idx; | 2405 | int cpu, idx; |
2405 | 2406 | ||
2406 | if (copy_from_user(&info, u_info, sizeof(info))) | 2407 | if (copy_from_user(&info, u_info, sizeof(info))) |
2407 | return -EFAULT; | 2408 | return -EFAULT; |
2408 | idx = info.entry_number; | 2409 | idx = info.entry_number; |
2409 | 2410 | ||
2410 | /* | 2411 | /* |
2411 | * index -1 means the kernel should try to find and allocate an empty descriptor: | 2412 | * index -1 means the kernel should try to find and allocate an empty descriptor: |
2412 | */ | 2413 | */ |
2413 | if (idx == -1) { | 2414 | if (idx == -1) { |
2414 | idx = get_free_idx(); | 2415 | idx = get_free_idx(); |
2415 | if (idx < 0) | 2416 | if (idx < 0) |
2416 | return idx; | 2417 | return idx; |
2417 | if (put_user(idx, &u_info->entry_number)) | 2418 | if (put_user(idx, &u_info->entry_number)) |
2418 | return -EFAULT; | 2419 | return -EFAULT; |
2419 | } | 2420 | } |
2420 | 2421 | ||
2421 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | 2422 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) |
2422 | return -EINVAL; | 2423 | return -EINVAL; |
2423 | 2424 | ||
2424 | desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN; | 2425 | desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN; |
2425 | 2426 | ||
2426 | cpu = smp_processor_id(); | 2427 | cpu = smp_processor_id(); |
2427 | 2428 | ||
2428 | if (LDT_empty(&info)) { | 2429 | if (LDT_empty(&info)) { |
2429 | desc->a = 0; | 2430 | desc->a = 0; |
2430 | desc->b = 0; | 2431 | desc->b = 0; |
2431 | } else { | 2432 | } else { |
2432 | desc->a = LDT_entry_a(&info); | 2433 | desc->a = LDT_entry_a(&info); |
2433 | desc->b = LDT_entry_b(&info); | 2434 | desc->b = LDT_entry_b(&info); |
2434 | } | 2435 | } |
2435 | load_TLS(t, cpu); | 2436 | load_TLS(t, cpu); |
2436 | return 0; | 2437 | return 0; |
2437 | } | 2438 | } |
2438 | 2439 | ||
2439 | /* | 2440 | /* |
2440 | * Get the current Thread-Local Storage area: | 2441 | * Get the current Thread-Local Storage area: |
2441 | */ | 2442 | */ |
2442 | 2443 | ||
2443 | #define GET_BASE(desc) ( \ | 2444 | #define GET_BASE(desc) ( \ |
2444 | (((desc)->a >> 16) & 0x0000ffff) | \ | 2445 | (((desc)->a >> 16) & 0x0000ffff) | \ |
2445 | (((desc)->b << 16) & 0x00ff0000) | \ | 2446 | (((desc)->b << 16) & 0x00ff0000) | \ |
2446 | ( (desc)->b & 0xff000000) ) | 2447 | ( (desc)->b & 0xff000000) ) |
2447 | 2448 | ||
2448 | #define GET_LIMIT(desc) ( \ | 2449 | #define GET_LIMIT(desc) ( \ |
2449 | ((desc)->a & 0x0ffff) | \ | 2450 | ((desc)->a & 0x0ffff) | \ |
2450 | ((desc)->b & 0xf0000) ) | 2451 | ((desc)->b & 0xf0000) ) |
2451 | 2452 | ||
2452 | #define GET_32BIT(desc) (((desc)->b >> 22) & 1) | 2453 | #define GET_32BIT(desc) (((desc)->b >> 22) & 1) |
2453 | #define GET_CONTENTS(desc) (((desc)->b >> 10) & 3) | 2454 | #define GET_CONTENTS(desc) (((desc)->b >> 10) & 3) |
2454 | #define GET_WRITABLE(desc) (((desc)->b >> 9) & 1) | 2455 | #define GET_WRITABLE(desc) (((desc)->b >> 9) & 1) |
2455 | #define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1) | 2456 | #define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1) |
2456 | #define GET_PRESENT(desc) (((desc)->b >> 15) & 1) | 2457 | #define GET_PRESENT(desc) (((desc)->b >> 15) & 1) |
2457 | #define GET_USEABLE(desc) (((desc)->b >> 20) & 1) | 2458 | #define GET_USEABLE(desc) (((desc)->b >> 20) & 1) |
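
These macros reassemble fields the x86 descriptor format scatters across its two 32-bit words; the base, for example, lives in a[31:16], b[7:0], and b[31:24]. A worked check of GET_BASE against a descriptor encoding base 0x12345678:

    #include <stdio.h>

    struct desc { unsigned int a, b; };

    #define GET_BASE(d) ((((d)->a >> 16) & 0x0000ffff) | \
                         (((d)->b << 16) & 0x00ff0000) | \
                         ( (d)->b        & 0xff000000))

    int main(void)
    {
        /* base[15:0] -> a[31:16], base[23:16] -> b[7:0], base[31:24] -> b[31:24] */
        struct desc d = { .a = 0x5678 << 16, .b = 0x12000034 };

        printf("base = %#x\n", GET_BASE(&d));   /* prints 0x12345678 */
        return 0;
    }
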
2458 | 2459 | ||
2459 | asmlinkage int | 2460 | asmlinkage int |
2460 | sys32_get_thread_area (struct ia32_user_desc __user *u_info) | 2461 | sys32_get_thread_area (struct ia32_user_desc __user *u_info) |
2461 | { | 2462 | { |
2462 | struct ia32_user_desc info; | 2463 | struct ia32_user_desc info; |
2463 | struct desc_struct *desc; | 2464 | struct desc_struct *desc; |
2464 | int idx; | 2465 | int idx; |
2465 | 2466 | ||
2466 | if (get_user(idx, &u_info->entry_number)) | 2467 | if (get_user(idx, &u_info->entry_number)) |
2467 | return -EFAULT; | 2468 | return -EFAULT; |
2468 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | 2469 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) |
2469 | return -EINVAL; | 2470 | return -EINVAL; |
2470 | 2471 | ||
2471 | desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; | 2472 | desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; |
2472 | 2473 | ||
2473 | info.entry_number = idx; | 2474 | info.entry_number = idx; |
2474 | info.base_addr = GET_BASE(desc); | 2475 | info.base_addr = GET_BASE(desc); |
2475 | info.limit = GET_LIMIT(desc); | 2476 | info.limit = GET_LIMIT(desc); |
2476 | info.seg_32bit = GET_32BIT(desc); | 2477 | info.seg_32bit = GET_32BIT(desc); |
2477 | info.contents = GET_CONTENTS(desc); | 2478 | info.contents = GET_CONTENTS(desc); |
2478 | info.read_exec_only = !GET_WRITABLE(desc); | 2479 | info.read_exec_only = !GET_WRITABLE(desc); |
2479 | info.limit_in_pages = GET_LIMIT_PAGES(desc); | 2480 | info.limit_in_pages = GET_LIMIT_PAGES(desc); |
2480 | info.seg_not_present = !GET_PRESENT(desc); | 2481 | info.seg_not_present = !GET_PRESENT(desc); |
2481 | info.useable = GET_USEABLE(desc); | 2482 | info.useable = GET_USEABLE(desc); |
2482 | 2483 | ||
2483 | if (copy_to_user(u_info, &info, sizeof(info))) | 2484 | if (copy_to_user(u_info, &info, sizeof(info))) |
2484 | return -EFAULT; | 2485 | return -EFAULT; |
2485 | return 0; | 2486 | return 0; |
2486 | } | 2487 | } |
2487 | 2488 | ||
2488 | long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, | 2489 | long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high, |
2489 | __u32 len_low, __u32 len_high, int advice) | 2490 | __u32 len_low, __u32 len_high, int advice) |
2490 | { | 2491 | { |
2491 | return sys_fadvise64_64(fd, | 2492 | return sys_fadvise64_64(fd, |
2492 | (((u64)offset_high)<<32) | offset_low, | 2493 | (((u64)offset_high)<<32) | offset_low, |
2493 | (((u64)len_high)<<32) | len_low, | 2494 | (((u64)len_high)<<32) | len_low, |
2494 | advice); | 2495 | advice); |
2495 | } | 2496 | } |
2496 | 2497 | ||
2497 | #ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */ | 2498 | #ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */ |
2498 | 2499 | ||
2499 | asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid) | 2500 | asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid) |
2500 | { | 2501 | { |
2501 | uid_t sruid, seuid; | 2502 | uid_t sruid, seuid; |
2502 | 2503 | ||
2503 | sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid); | 2504 | sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid); |
2504 | seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid); | 2505 | seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid); |
2505 | return sys_setreuid(sruid, seuid); | 2506 | return sys_setreuid(sruid, seuid); |
2506 | } | 2507 | } |
2507 | 2508 | ||
2508 | asmlinkage long | 2509 | asmlinkage long |
2509 | sys32_setresuid(compat_uid_t ruid, compat_uid_t euid, | 2510 | sys32_setresuid(compat_uid_t ruid, compat_uid_t euid, |
2510 | compat_uid_t suid) | 2511 | compat_uid_t suid) |
2511 | { | 2512 | { |
2512 | uid_t sruid, seuid, ssuid; | 2513 | uid_t sruid, seuid, ssuid; |
2513 | 2514 | ||
2514 | sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid); | 2515 | sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid); |
2515 | seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid); | 2516 | seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid); |
2516 | ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid); | 2517 | ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid); |
2517 | return sys_setresuid(sruid, seuid, ssuid); | 2518 | return sys_setresuid(sruid, seuid, ssuid); |
2518 | } | 2519 | } |
2519 | 2520 | ||
2520 | asmlinkage long | 2521 | asmlinkage long |
2521 | sys32_setregid(compat_gid_t rgid, compat_gid_t egid) | 2522 | sys32_setregid(compat_gid_t rgid, compat_gid_t egid) |
2522 | { | 2523 | { |
2523 | gid_t srgid, segid; | 2524 | gid_t srgid, segid; |
2524 | 2525 | ||
2525 | srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid); | 2526 | srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid); |
2526 | segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid); | 2527 | segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid); |
2527 | return sys_setregid(srgid, segid); | 2528 | return sys_setregid(srgid, segid); |
2528 | } | 2529 | } |
2529 | 2530 | ||
2530 | asmlinkage long | 2531 | asmlinkage long |
2531 | sys32_setresgid(compat_gid_t rgid, compat_gid_t egid, | 2532 | sys32_setresgid(compat_gid_t rgid, compat_gid_t egid, |
2532 | compat_gid_t sgid) | 2533 | compat_gid_t sgid) |
2533 | { | 2534 | { |
2534 | gid_t srgid, segid, ssgid; | 2535 | gid_t srgid, segid, ssgid; |
2535 | 2536 | ||
2536 | srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid); | 2537 | srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid); |
2537 | segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid); | 2538 | segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid); |
2538 | ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid); | 2539 | ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid); |
2539 | return sys_setresgid(srgid, segid, ssgid); | 2540 | return sys_setresgid(srgid, segid, ssgid); |
2540 | } | 2541 | } |
2541 | #endif /* NOTYET */ | 2542 | #endif /* NOTYET */ |
2542 | 2543 |
arch/ia64/kernel/process.c
1 | /* | 1 | /* |
2 | * Architecture-specific setup. | 2 | * Architecture-specific setup. |
3 | * | 3 | * |
4 | * Copyright (C) 1998-2003 Hewlett-Packard Co | 4 | * Copyright (C) 1998-2003 Hewlett-Packard Co |
5 | * David Mosberger-Tang <davidm@hpl.hp.com> | 5 | * David Mosberger-Tang <davidm@hpl.hp.com> |
6 | * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support | 6 | * 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support |
7 | * | 7 | * |
8 | * 2005-10-07 Keith Owens <kaos@sgi.com> | 8 | * 2005-10-07 Keith Owens <kaos@sgi.com> |
9 | * Add notify_die() hooks. | 9 | * Add notify_die() hooks. |
10 | */ | 10 | */ |
11 | #include <linux/cpu.h> | 11 | #include <linux/cpu.h> |
12 | #include <linux/pm.h> | 12 | #include <linux/pm.h> |
13 | #include <linux/elf.h> | 13 | #include <linux/elf.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/kallsyms.h> | 15 | #include <linux/kallsyms.h> |
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/notifier.h> | 19 | #include <linux/notifier.h> |
20 | #include <linux/personality.h> | 20 | #include <linux/personality.h> |
21 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/stddef.h> | 23 | #include <linux/stddef.h> |
24 | #include <linux/thread_info.h> | 24 | #include <linux/thread_info.h> |
25 | #include <linux/unistd.h> | 25 | #include <linux/unistd.h> |
26 | #include <linux/efi.h> | 26 | #include <linux/efi.h> |
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/kdebug.h> | 29 | #include <linux/kdebug.h> |
30 | 30 | ||
31 | #include <asm/cpu.h> | 31 | #include <asm/cpu.h> |
32 | #include <asm/delay.h> | 32 | #include <asm/delay.h> |
33 | #include <asm/elf.h> | 33 | #include <asm/elf.h> |
34 | #include <asm/ia32.h> | 34 | #include <asm/ia32.h> |
35 | #include <asm/irq.h> | 35 | #include <asm/irq.h> |
36 | #include <asm/kexec.h> | 36 | #include <asm/kexec.h> |
37 | #include <asm/pgalloc.h> | 37 | #include <asm/pgalloc.h> |
38 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
39 | #include <asm/sal.h> | 39 | #include <asm/sal.h> |
40 | #include <asm/tlbflush.h> | 40 | #include <asm/tlbflush.h> |
41 | #include <asm/uaccess.h> | 41 | #include <asm/uaccess.h> |
42 | #include <asm/unwind.h> | 42 | #include <asm/unwind.h> |
43 | #include <asm/user.h> | 43 | #include <asm/user.h> |
44 | 44 | ||
45 | #include "entry.h" | 45 | #include "entry.h" |
46 | 46 | ||
47 | #ifdef CONFIG_PERFMON | 47 | #ifdef CONFIG_PERFMON |
48 | # include <asm/perfmon.h> | 48 | # include <asm/perfmon.h> |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #include "sigframe.h" | 51 | #include "sigframe.h" |
52 | 52 | ||
53 | void (*ia64_mark_idle)(int); | 53 | void (*ia64_mark_idle)(int); |
54 | static DEFINE_PER_CPU(unsigned int, cpu_idle_state); | 54 | static DEFINE_PER_CPU(unsigned int, cpu_idle_state); |
55 | 55 | ||
56 | unsigned long boot_option_idle_override = 0; | 56 | unsigned long boot_option_idle_override = 0; |
57 | EXPORT_SYMBOL(boot_option_idle_override); | 57 | EXPORT_SYMBOL(boot_option_idle_override); |
58 | 58 | ||
59 | void | 59 | void |
60 | ia64_do_show_stack (struct unw_frame_info *info, void *arg) | 60 | ia64_do_show_stack (struct unw_frame_info *info, void *arg) |
61 | { | 61 | { |
62 | unsigned long ip, sp, bsp; | 62 | unsigned long ip, sp, bsp; |
63 | char buf[128]; /* don't make it so big that it overflows the stack! */ | 63 | char buf[128]; /* don't make it so big that it overflows the stack! */ |
64 | 64 | ||
65 | printk("\nCall Trace:\n"); | 65 | printk("\nCall Trace:\n"); |
66 | do { | 66 | do { |
67 | unw_get_ip(info, &ip); | 67 | unw_get_ip(info, &ip); |
68 | if (ip == 0) | 68 | if (ip == 0) |
69 | break; | 69 | break; |
70 | 70 | ||
71 | unw_get_sp(info, &sp); | 71 | unw_get_sp(info, &sp); |
72 | unw_get_bsp(info, &bsp); | 72 | unw_get_bsp(info, &bsp); |
73 | snprintf(buf, sizeof(buf), | 73 | snprintf(buf, sizeof(buf), |
74 | " [<%016lx>] %%s\n" | 74 | " [<%016lx>] %%s\n" |
75 | " sp=%016lx bsp=%016lx\n", | 75 | " sp=%016lx bsp=%016lx\n", |
76 | ip, sp, bsp); | 76 | ip, sp, bsp); |
77 | print_symbol(buf, ip); | 77 | print_symbol(buf, ip); |
78 | } while (unw_unwind(info) >= 0); | 78 | } while (unw_unwind(info) >= 0); |
79 | } | 79 | } |
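
Note the %%s in the format string: snprintf() reduces it to a literal %s, so buf comes out as a half-finished template that print_symbol() completes with the resolved symbol name. The same two-stage formatting, demonstrated in plain C (the symbol string is a made-up placeholder):

    #include <stdio.h>

    int main(void)
    {
        char tmpl[128];

        /* stage 1: "%%s" survives as a literal "%s" placeholder */
        snprintf(tmpl, sizeof(tmpl), " [<%016lx>] %%s\n", 0xa000000100001234UL);

        /* stage 2: print_symbol() does this substitution in the kernel */
        printf(tmpl, "resolved_symbol+0x30/0x120");
        return 0;
    }
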
80 | 80 | ||
81 | void | 81 | void |
82 | show_stack (struct task_struct *task, unsigned long *sp) | 82 | show_stack (struct task_struct *task, unsigned long *sp) |
83 | { | 83 | { |
84 | if (!task) | 84 | if (!task) |
85 | unw_init_running(ia64_do_show_stack, NULL); | 85 | unw_init_running(ia64_do_show_stack, NULL); |
86 | else { | 86 | else { |
87 | struct unw_frame_info info; | 87 | struct unw_frame_info info; |
88 | 88 | ||
89 | unw_init_from_blocked_task(&info, task); | 89 | unw_init_from_blocked_task(&info, task); |
90 | ia64_do_show_stack(&info, NULL); | 90 | ia64_do_show_stack(&info, NULL); |
91 | } | 91 | } |
92 | } | 92 | } |
93 | 93 | ||
94 | void | 94 | void |
95 | dump_stack (void) | 95 | dump_stack (void) |
96 | { | 96 | { |
97 | show_stack(NULL, NULL); | 97 | show_stack(NULL, NULL); |
98 | } | 98 | } |
99 | 99 | ||
100 | EXPORT_SYMBOL(dump_stack); | 100 | EXPORT_SYMBOL(dump_stack); |
101 | 101 | ||
102 | void | 102 | void |
103 | show_regs (struct pt_regs *regs) | 103 | show_regs (struct pt_regs *regs) |
104 | { | 104 | { |
105 | unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; | 105 | unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; |
106 | 106 | ||
107 | print_modules(); | 107 | print_modules(); |
108 | printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm); | 108 | printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm); |
109 | printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n", | 109 | printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n", |
110 | regs->cr_ipsr, regs->cr_ifs, ip, print_tainted()); | 110 | regs->cr_ipsr, regs->cr_ifs, ip, print_tainted()); |
111 | print_symbol("ip is at %s\n", ip); | 111 | print_symbol("ip is at %s\n", ip); |
112 | printk("unat: %016lx pfs : %016lx rsc : %016lx\n", | 112 | printk("unat: %016lx pfs : %016lx rsc : %016lx\n", |
113 | regs->ar_unat, regs->ar_pfs, regs->ar_rsc); | 113 | regs->ar_unat, regs->ar_pfs, regs->ar_rsc); |
114 | printk("rnat: %016lx bsps: %016lx pr : %016lx\n", | 114 | printk("rnat: %016lx bsps: %016lx pr : %016lx\n", |
115 | regs->ar_rnat, regs->ar_bspstore, regs->pr); | 115 | regs->ar_rnat, regs->ar_bspstore, regs->pr); |
116 | printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n", | 116 | printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n", |
117 | regs->loadrs, regs->ar_ccv, regs->ar_fpsr); | 117 | regs->loadrs, regs->ar_ccv, regs->ar_fpsr); |
118 | printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd); | 118 | printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd); |
119 | printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7); | 119 | printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7); |
120 | printk("f6 : %05lx%016lx f7 : %05lx%016lx\n", | 120 | printk("f6 : %05lx%016lx f7 : %05lx%016lx\n", |
121 | regs->f6.u.bits[1], regs->f6.u.bits[0], | 121 | regs->f6.u.bits[1], regs->f6.u.bits[0], |
122 | regs->f7.u.bits[1], regs->f7.u.bits[0]); | 122 | regs->f7.u.bits[1], regs->f7.u.bits[0]); |
123 | printk("f8 : %05lx%016lx f9 : %05lx%016lx\n", | 123 | printk("f8 : %05lx%016lx f9 : %05lx%016lx\n", |
124 | regs->f8.u.bits[1], regs->f8.u.bits[0], | 124 | regs->f8.u.bits[1], regs->f8.u.bits[0], |
125 | regs->f9.u.bits[1], regs->f9.u.bits[0]); | 125 | regs->f9.u.bits[1], regs->f9.u.bits[0]); |
126 | printk("f10 : %05lx%016lx f11 : %05lx%016lx\n", | 126 | printk("f10 : %05lx%016lx f11 : %05lx%016lx\n", |
127 | regs->f10.u.bits[1], regs->f10.u.bits[0], | 127 | regs->f10.u.bits[1], regs->f10.u.bits[0], |
128 | regs->f11.u.bits[1], regs->f11.u.bits[0]); | 128 | regs->f11.u.bits[1], regs->f11.u.bits[0]); |
129 | 129 | ||
130 | printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3); | 130 | printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3); |
131 | printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10); | 131 | printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10); |
132 | printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13); | 132 | printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13); |
133 | printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16); | 133 | printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16); |
134 | printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19); | 134 | printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19); |
135 | printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22); | 135 | printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22); |
136 | printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25); | 136 | printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25); |
137 | printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28); | 137 | printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28); |
138 | printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31); | 138 | printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31); |
139 | 139 | ||
140 | if (user_mode(regs)) { | 140 | if (user_mode(regs)) { |
141 | /* print the stacked registers */ | 141 | /* print the stacked registers */ |
142 | unsigned long val, *bsp, ndirty; | 142 | unsigned long val, *bsp, ndirty; |
143 | int i, sof, is_nat = 0; | 143 | int i, sof, is_nat = 0; |
144 | 144 | ||
145 | sof = regs->cr_ifs & 0x7f; /* size of frame */ | 145 | sof = regs->cr_ifs & 0x7f; /* size of frame */ |
146 | ndirty = (regs->loadrs >> 19); | 146 | ndirty = (regs->loadrs >> 19); |
147 | bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty); | 147 | bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty); |
148 | for (i = 0; i < sof; ++i) { | 148 | for (i = 0; i < sof; ++i) { |
149 | get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i)); | 149 | get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i)); |
150 | printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val, | 150 | printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val, |
151 | ((i == sof - 1) || (i % 3) == 2) ? "\n" : " "); | 151 | ((i == sof - 1) || (i % 3) == 2) ? "\n" : " "); |
152 | } | 152 | } |
153 | } else | 153 | } else |
154 | show_stack(NULL, NULL); | 154 | show_stack(NULL, NULL); |
155 | } | 155 | } |
156 | 156 | ||
157 | void | 157 | void |
158 | do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall) | 158 | do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall) |
159 | { | 159 | { |
160 | if (fsys_mode(current, &scr->pt)) { | 160 | if (fsys_mode(current, &scr->pt)) { |
161 | /* defer signal-handling etc. until we return to privilege-level 0. */ | 161 | /* defer signal-handling etc. until we return to privilege-level 0. */ |
162 | if (!ia64_psr(&scr->pt)->lp) | 162 | if (!ia64_psr(&scr->pt)->lp) |
163 | ia64_psr(&scr->pt)->lp = 1; | 163 | ia64_psr(&scr->pt)->lp = 1; |
164 | return; | 164 | return; |
165 | } | 165 | } |
166 | 166 | ||
167 | #ifdef CONFIG_PERFMON | 167 | #ifdef CONFIG_PERFMON |
168 | if (current->thread.pfm_needs_checking) | 168 | if (current->thread.pfm_needs_checking) |
169 | pfm_handle_work(); | 169 | pfm_handle_work(); |
170 | #endif | 170 | #endif |
171 | 171 | ||
172 | /* deal with pending signal delivery */ | 172 | /* deal with pending signal delivery */ |
173 | if (test_thread_flag(TIF_SIGPENDING)||test_thread_flag(TIF_RESTORE_SIGMASK)) | 173 | if (test_thread_flag(TIF_SIGPENDING)||test_thread_flag(TIF_RESTORE_SIGMASK)) |
174 | ia64_do_signal(scr, in_syscall); | 174 | ia64_do_signal(scr, in_syscall); |
175 | } | 175 | } |
176 | 176 | ||
177 | static int pal_halt = 1; | 177 | static int pal_halt = 1; |
178 | static int can_do_pal_halt = 1; | 178 | static int can_do_pal_halt = 1; |
179 | 179 | ||
180 | static int __init nohalt_setup(char * str) | 180 | static int __init nohalt_setup(char * str) |
181 | { | 181 | { |
182 | pal_halt = can_do_pal_halt = 0; | 182 | pal_halt = can_do_pal_halt = 0; |
183 | return 1; | 183 | return 1; |
184 | } | 184 | } |
185 | __setup("nohalt", nohalt_setup); | 185 | __setup("nohalt", nohalt_setup); |
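The "nohalt" handler above follows the standard early-boot parameter pattern: __setup() registers a callback that runs while the kernel command line is parsed, before the flag is ever read. A minimal sketch of the same pattern, assuming a made-up "noexample" option (not part of this commit):

	static int example_enabled = 1;

	static int __init noexample_setup(char *str)
	{
		example_enabled = 0;
		return 1;	/* non-zero: option consumed, not passed on to init */
	}
	__setup("noexample", noexample_setup);

Booting with "noexample" on the kernel command line would then clear the flag before any CPU enters the idle loop, exactly as "nohalt" clears pal_halt and can_do_pal_halt above.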
186 | 186 | ||
187 | void | 187 | void |
188 | update_pal_halt_status(int status) | 188 | update_pal_halt_status(int status) |
189 | { | 189 | { |
190 | can_do_pal_halt = pal_halt && status; | 190 | can_do_pal_halt = pal_halt && status; |
191 | } | 191 | } |
192 | 192 | ||
193 | /* | 193 | /* |
194 | * We use this if we don't have any better idle routine. | 194 | * We use this if we don't have any better idle routine. |
195 | */ | 195 | */ |
196 | void | 196 | void |
197 | default_idle (void) | 197 | default_idle (void) |
198 | { | 198 | { |
199 | local_irq_enable(); | 199 | local_irq_enable(); |
200 | while (!need_resched()) { | 200 | while (!need_resched()) { |
201 | if (can_do_pal_halt) | 201 | if (can_do_pal_halt) |
202 | safe_halt(); | 202 | safe_halt(); |
203 | else | 203 | else |
204 | cpu_relax(); | 204 | cpu_relax(); |
205 | } | 205 | } |
206 | } | 206 | } |
207 | 207 | ||
208 | #ifdef CONFIG_HOTPLUG_CPU | 208 | #ifdef CONFIG_HOTPLUG_CPU |
209 | /* We don't actually take the CPU down; we just spin without interrupts. */ | 209 | /* We don't actually take the CPU down; we just spin without interrupts. */ |
210 | static inline void play_dead(void) | 210 | static inline void play_dead(void) |
211 | { | 211 | { |
212 | extern void ia64_cpu_local_tick (void); | 212 | extern void ia64_cpu_local_tick (void); |
213 | unsigned int this_cpu = smp_processor_id(); | 213 | unsigned int this_cpu = smp_processor_id(); |
214 | 214 | ||
215 | /* Ack it */ | 215 | /* Ack it */ |
216 | __get_cpu_var(cpu_state) = CPU_DEAD; | 216 | __get_cpu_var(cpu_state) = CPU_DEAD; |
217 | 217 | ||
218 | max_xtp(); | 218 | max_xtp(); |
219 | local_irq_disable(); | 219 | local_irq_disable(); |
220 | idle_task_exit(); | 220 | idle_task_exit(); |
221 | ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]); | 221 | ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]); |
222 | /* | 222 | /* |
223 | * The above is a point of no return; the processor is | 223 | * The above is a point of no return; the processor is |
224 | * expected to be in the SAL loop now. | 224 | * expected to be in the SAL loop now. |
225 | */ | 225 | */ |
226 | BUG(); | 226 | BUG(); |
227 | } | 227 | } |
228 | #else | 228 | #else |
229 | static inline void play_dead(void) | 229 | static inline void play_dead(void) |
230 | { | 230 | { |
231 | BUG(); | 231 | BUG(); |
232 | } | 232 | } |
233 | #endif /* CONFIG_HOTPLUG_CPU */ | 233 | #endif /* CONFIG_HOTPLUG_CPU */ |
234 | 234 | ||
235 | void cpu_idle_wait(void) | 235 | void cpu_idle_wait(void) |
236 | { | 236 | { |
237 | unsigned int cpu, this_cpu = get_cpu(); | 237 | unsigned int cpu, this_cpu = get_cpu(); |
238 | cpumask_t map; | 238 | cpumask_t map; |
239 | cpumask_t tmp = current->cpus_allowed; | 239 | cpumask_t tmp = current->cpus_allowed; |
240 | 240 | ||
241 | set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); | 241 | set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); |
242 | put_cpu(); | 242 | put_cpu(); |
243 | 243 | ||
244 | cpus_clear(map); | 244 | cpus_clear(map); |
245 | for_each_online_cpu(cpu) { | 245 | for_each_online_cpu(cpu) { |
246 | per_cpu(cpu_idle_state, cpu) = 1; | 246 | per_cpu(cpu_idle_state, cpu) = 1; |
247 | cpu_set(cpu, map); | 247 | cpu_set(cpu, map); |
248 | } | 248 | } |
249 | 249 | ||
250 | __get_cpu_var(cpu_idle_state) = 0; | 250 | __get_cpu_var(cpu_idle_state) = 0; |
251 | 251 | ||
252 | wmb(); | 252 | wmb(); |
253 | do { | 253 | do { |
254 | ssleep(1); | 254 | ssleep(1); |
255 | for_each_online_cpu(cpu) { | 255 | for_each_online_cpu(cpu) { |
256 | if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu)) | 256 | if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu)) |
257 | cpu_clear(cpu, map); | 257 | cpu_clear(cpu, map); |
258 | } | 258 | } |
259 | cpus_and(map, map, cpu_online_map); | 259 | cpus_and(map, map, cpu_online_map); |
260 | } while (!cpus_empty(map)); | 260 | } while (!cpus_empty(map)); |
261 | set_cpus_allowed(current, tmp); | 261 | set_cpus_allowed(current, tmp); |
262 | } | 262 | } |
263 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | 263 | EXPORT_SYMBOL_GPL(cpu_idle_wait); |
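cpu_idle_wait() exists so that code which swaps the pm_idle handler can be certain no CPU is still executing the old routine: each pass through cpu_idle() clears the per-cpu cpu_idle_state flag that this function sets. A hedged usage sketch, assuming a hypothetical replacement handler my_idle (pm_idle itself is the real hook):

	extern void (*pm_idle)(void);

	static void my_idle(void)
	{
		cpu_relax();	/* poll; a real handler might halt instead */
	}

	static void install_my_idle(void)
	{
		pm_idle = my_idle;	/* picked up on each CPU's next idle pass */
		cpu_idle_wait();	/* don't return while any CPU may run the old one */
	}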
264 | 264 | ||
265 | void __attribute__((noreturn)) | 265 | void __attribute__((noreturn)) |
266 | cpu_idle (void) | 266 | cpu_idle (void) |
267 | { | 267 | { |
268 | void (*mark_idle)(int) = ia64_mark_idle; | 268 | void (*mark_idle)(int) = ia64_mark_idle; |
269 | int cpu = smp_processor_id(); | 269 | int cpu = smp_processor_id(); |
270 | 270 | ||
271 | /* endless idle loop with no priority at all */ | 271 | /* endless idle loop with no priority at all */ |
272 | while (1) { | 272 | while (1) { |
273 | if (can_do_pal_halt) { | 273 | if (can_do_pal_halt) { |
274 | current_thread_info()->status &= ~TS_POLLING; | 274 | current_thread_info()->status &= ~TS_POLLING; |
275 | /* | 275 | /* |
276 | * TS_POLLING-cleared state must be visible before we | 276 | * TS_POLLING-cleared state must be visible before we |
277 | * test NEED_RESCHED: | 277 | * test NEED_RESCHED: |
278 | */ | 278 | */ |
279 | smp_mb(); | 279 | smp_mb(); |
280 | } else { | 280 | } else { |
281 | current_thread_info()->status |= TS_POLLING; | 281 | current_thread_info()->status |= TS_POLLING; |
282 | } | 282 | } |
283 | 283 | ||
284 | if (!need_resched()) { | 284 | if (!need_resched()) { |
285 | void (*idle)(void); | 285 | void (*idle)(void); |
286 | #ifdef CONFIG_SMP | 286 | #ifdef CONFIG_SMP |
287 | min_xtp(); | 287 | min_xtp(); |
288 | #endif | 288 | #endif |
289 | if (__get_cpu_var(cpu_idle_state)) | 289 | if (__get_cpu_var(cpu_idle_state)) |
290 | __get_cpu_var(cpu_idle_state) = 0; | 290 | __get_cpu_var(cpu_idle_state) = 0; |
291 | 291 | ||
292 | rmb(); | 292 | rmb(); |
293 | if (mark_idle) | 293 | if (mark_idle) |
294 | (*mark_idle)(1); | 294 | (*mark_idle)(1); |
295 | 295 | ||
296 | idle = pm_idle; | 296 | idle = pm_idle; |
297 | if (!idle) | 297 | if (!idle) |
298 | idle = default_idle; | 298 | idle = default_idle; |
299 | (*idle)(); | 299 | (*idle)(); |
300 | if (mark_idle) | 300 | if (mark_idle) |
301 | (*mark_idle)(0); | 301 | (*mark_idle)(0); |
302 | #ifdef CONFIG_SMP | 302 | #ifdef CONFIG_SMP |
303 | normal_xtp(); | 303 | normal_xtp(); |
304 | #endif | 304 | #endif |
305 | } | 305 | } |
306 | preempt_enable_no_resched(); | 306 | preempt_enable_no_resched(); |
307 | schedule(); | 307 | schedule(); |
308 | preempt_disable(); | 308 | preempt_disable(); |
309 | check_pgt_cache(); | 309 | check_pgt_cache(); |
310 | if (cpu_is_offline(cpu)) | 310 | if (cpu_is_offline(cpu)) |
311 | play_dead(); | 311 | play_dead(); |
312 | } | 312 | } |
313 | } | 313 | } |
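The TS_POLLING handling at the top of the loop is a lost-wakeup protocol: a remote waker checks TS_POLLING to decide whether a reschedule IPI is needed, so the idle task must publish the cleared bit before it samples need_resched(). A userspace analogue of the handshake, with C11 atomics standing in for smp_mb() (illustrative only, not kernel code):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int polling = 1;		/* plays the role of TS_POLLING   */
	static atomic_int work_pending = 0;	/* plays the role of need_resched */

	static int sleeper_sees_work(void)
	{
		atomic_store(&polling, 0);			/* clear TS_POLLING     */
		atomic_thread_fence(memory_order_seq_cst);	/* the smp_mb() above   */
		return atomic_load(&work_pending);		/* test need_resched()  */
	}

	static int waker_needs_ipi(void)
	{
		atomic_store(&work_pending, 1);
		atomic_thread_fence(memory_order_seq_cst);
		return !atomic_load(&polling);	/* IPI only if the sleeper stopped polling */
	}

	int main(void)
	{
		printf("%d %d\n", sleeper_sees_work(), waker_needs_ipi());
		return 0;
	}

With both fences in place, at least one side is guaranteed to observe the other's store, so the wakeup cannot be lost.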
314 | 314 | ||
315 | void | 315 | void |
316 | ia64_save_extra (struct task_struct *task) | 316 | ia64_save_extra (struct task_struct *task) |
317 | { | 317 | { |
318 | #ifdef CONFIG_PERFMON | 318 | #ifdef CONFIG_PERFMON |
319 | unsigned long info; | 319 | unsigned long info; |
320 | #endif | 320 | #endif |
321 | 321 | ||
322 | if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) | 322 | if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) |
323 | ia64_save_debug_regs(&task->thread.dbr[0]); | 323 | ia64_save_debug_regs(&task->thread.dbr[0]); |
324 | 324 | ||
325 | #ifdef CONFIG_PERFMON | 325 | #ifdef CONFIG_PERFMON |
326 | if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) | 326 | if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) |
327 | pfm_save_regs(task); | 327 | pfm_save_regs(task); |
328 | 328 | ||
329 | info = __get_cpu_var(pfm_syst_info); | 329 | info = __get_cpu_var(pfm_syst_info); |
330 | if (info & PFM_CPUINFO_SYST_WIDE) | 330 | if (info & PFM_CPUINFO_SYST_WIDE) |
331 | pfm_syst_wide_update_task(task, info, 0); | 331 | pfm_syst_wide_update_task(task, info, 0); |
332 | #endif | 332 | #endif |
333 | 333 | ||
334 | #ifdef CONFIG_IA32_SUPPORT | 334 | #ifdef CONFIG_IA32_SUPPORT |
335 | if (IS_IA32_PROCESS(task_pt_regs(task))) | 335 | if (IS_IA32_PROCESS(task_pt_regs(task))) |
336 | ia32_save_state(task); | 336 | ia32_save_state(task); |
337 | #endif | 337 | #endif |
338 | } | 338 | } |
339 | 339 | ||
340 | void | 340 | void |
341 | ia64_load_extra (struct task_struct *task) | 341 | ia64_load_extra (struct task_struct *task) |
342 | { | 342 | { |
343 | #ifdef CONFIG_PERFMON | 343 | #ifdef CONFIG_PERFMON |
344 | unsigned long info; | 344 | unsigned long info; |
345 | #endif | 345 | #endif |
346 | 346 | ||
347 | if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) | 347 | if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0) |
348 | ia64_load_debug_regs(&task->thread.dbr[0]); | 348 | ia64_load_debug_regs(&task->thread.dbr[0]); |
349 | 349 | ||
350 | #ifdef CONFIG_PERFMON | 350 | #ifdef CONFIG_PERFMON |
351 | if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) | 351 | if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0) |
352 | pfm_load_regs(task); | 352 | pfm_load_regs(task); |
353 | 353 | ||
354 | info = __get_cpu_var(pfm_syst_info); | 354 | info = __get_cpu_var(pfm_syst_info); |
355 | if (info & PFM_CPUINFO_SYST_WIDE) | 355 | if (info & PFM_CPUINFO_SYST_WIDE) |
356 | pfm_syst_wide_update_task(task, info, 1); | 356 | pfm_syst_wide_update_task(task, info, 1); |
357 | #endif | 357 | #endif |
358 | 358 | ||
359 | #ifdef CONFIG_IA32_SUPPORT | 359 | #ifdef CONFIG_IA32_SUPPORT |
360 | if (IS_IA32_PROCESS(task_pt_regs(task))) | 360 | if (IS_IA32_PROCESS(task_pt_regs(task))) |
361 | ia32_load_state(task); | 361 | ia32_load_state(task); |
362 | #endif | 362 | #endif |
363 | } | 363 | } |
364 | 364 | ||
365 | /* | 365 | /* |
366 | * Copy the state of an ia-64 thread. | 366 | * Copy the state of an ia-64 thread. |
367 | * | 367 | * |
368 | * We get here through the following call chain: | 368 | * We get here through the following call chain: |
369 | * | 369 | * |
370 | * from user-level: from kernel: | 370 | * from user-level: from kernel: |
371 | * | 371 | * |
372 | * <clone syscall> <some kernel call frames> | 372 | * <clone syscall> <some kernel call frames> |
373 | * sys_clone : | 373 | * sys_clone : |
374 | * do_fork do_fork | 374 | * do_fork do_fork |
375 | * copy_thread copy_thread | 375 | * copy_thread copy_thread |
376 | * | 376 | * |
377 | * This means that the stack layout is as follows: | 377 | * This means that the stack layout is as follows: |
378 | * | 378 | * |
379 | * +---------------------+ (highest addr) | 379 | * +---------------------+ (highest addr) |
380 | * | struct pt_regs | | 380 | * | struct pt_regs | |
381 | * +---------------------+ | 381 | * +---------------------+ |
382 | * | struct switch_stack | | 382 | * | struct switch_stack | |
383 | * +---------------------+ | 383 | * +---------------------+ |
384 | * | | | 384 | * | | |
385 | * | memory stack | | 385 | * | memory stack | |
386 | * | | <-- sp (lowest addr) | 386 | * | | <-- sp (lowest addr) |
387 | * +---------------------+ | 387 | * +---------------------+ |
388 | * | 388 | * |
389 | * Observe that we copy the unat values that are in pt_regs and switch_stack. Spilling an | 389 | * Observe that we copy the unat values that are in pt_regs and switch_stack. Spilling an |
390 | * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register, | 390 | * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register, |
391 | * with N=(X & 0x1ff)/8. Thus, copying the unat value preserves the NaT bits ONLY if the | 391 | * with N=(X & 0x1ff)/8. Thus, copying the unat value preserves the NaT bits ONLY if the |
392 | * pt_regs structure in the parent is congruent to that of the child, modulo 512. Since | 392 | * pt_regs structure in the parent is congruent to that of the child, modulo 512. Since |
393 | * the stack is page aligned and the page size is at least 4KB, this is always the case, | 393 | * the stack is page aligned and the page size is at least 4KB, this is always the case, |
394 | * so there is nothing to worry about. | 394 | * so there is nothing to worry about. |
395 | */ | 395 | */ |
396 | int | 396 | int |
397 | copy_thread (int nr, unsigned long clone_flags, | 397 | copy_thread (int nr, unsigned long clone_flags, |
398 | unsigned long user_stack_base, unsigned long user_stack_size, | 398 | unsigned long user_stack_base, unsigned long user_stack_size, |
399 | struct task_struct *p, struct pt_regs *regs) | 399 | struct task_struct *p, struct pt_regs *regs) |
400 | { | 400 | { |
401 | extern char ia64_ret_from_clone, ia32_ret_from_clone; | 401 | extern char ia64_ret_from_clone, ia32_ret_from_clone; |
402 | struct switch_stack *child_stack, *stack; | 402 | struct switch_stack *child_stack, *stack; |
403 | unsigned long rbs, child_rbs, rbs_size; | 403 | unsigned long rbs, child_rbs, rbs_size; |
404 | struct pt_regs *child_ptregs; | 404 | struct pt_regs *child_ptregs; |
405 | int retval = 0; | 405 | int retval = 0; |
406 | 406 | ||
407 | #ifdef CONFIG_SMP | 407 | #ifdef CONFIG_SMP |
408 | /* | 408 | /* |
409 | * For SMP idle threads, fork_by_hand() calls do_fork with | 409 | * For SMP idle threads, fork_by_hand() calls do_fork with |
410 | * NULL regs. | 410 | * NULL regs. |
411 | */ | 411 | */ |
412 | if (!regs) | 412 | if (!regs) |
413 | return 0; | 413 | return 0; |
414 | #endif | 414 | #endif |
415 | 415 | ||
416 | stack = ((struct switch_stack *) regs) - 1; | 416 | stack = ((struct switch_stack *) regs) - 1; |
417 | 417 | ||
418 | child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1; | 418 | child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1; |
419 | child_stack = (struct switch_stack *) child_ptregs - 1; | 419 | child_stack = (struct switch_stack *) child_ptregs - 1; |
420 | 420 | ||
421 | /* copy parent's switch_stack & pt_regs to child: */ | 421 | /* copy parent's switch_stack & pt_regs to child: */ |
422 | memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack)); | 422 | memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack)); |
423 | 423 | ||
424 | rbs = (unsigned long) current + IA64_RBS_OFFSET; | 424 | rbs = (unsigned long) current + IA64_RBS_OFFSET; |
425 | child_rbs = (unsigned long) p + IA64_RBS_OFFSET; | 425 | child_rbs = (unsigned long) p + IA64_RBS_OFFSET; |
426 | rbs_size = stack->ar_bspstore - rbs; | 426 | rbs_size = stack->ar_bspstore - rbs; |
427 | 427 | ||
428 | /* copy the parent's register backing store to the child: */ | 428 | /* copy the parent's register backing store to the child: */ |
429 | memcpy((void *) child_rbs, (void *) rbs, rbs_size); | 429 | memcpy((void *) child_rbs, (void *) rbs, rbs_size); |
430 | 430 | ||
431 | if (likely(user_mode(child_ptregs))) { | 431 | if (likely(user_mode(child_ptregs))) { |
432 | if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs)) | 432 | if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs)) |
433 | child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */ | 433 | child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */ |
434 | if (user_stack_base) { | 434 | if (user_stack_base) { |
435 | child_ptregs->r12 = user_stack_base + user_stack_size - 16; | 435 | child_ptregs->r12 = user_stack_base + user_stack_size - 16; |
436 | child_ptregs->ar_bspstore = user_stack_base; | 436 | child_ptregs->ar_bspstore = user_stack_base; |
437 | child_ptregs->ar_rnat = 0; | 437 | child_ptregs->ar_rnat = 0; |
438 | child_ptregs->loadrs = 0; | 438 | child_ptregs->loadrs = 0; |
439 | } | 439 | } |
440 | } else { | 440 | } else { |
441 | /* | 441 | /* |
442 | * Note: we simply preserve the relative position of | 442 | * Note: we simply preserve the relative position of |
443 | * the stack pointer here. There is no need to | 443 | * the stack pointer here. There is no need to |
444 | * allocate a scratch area here, since that will have | 444 | * allocate a scratch area here, since that will have |
445 | * been taken care of by the caller of sys_clone() | 445 | * been taken care of by the caller of sys_clone() |
446 | * already. | 446 | * already. |
447 | */ | 447 | */ |
448 | child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */ | 448 | child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */ |
449 | child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */ | 449 | child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */ |
450 | } | 450 | } |
451 | child_stack->ar_bspstore = child_rbs + rbs_size; | 451 | child_stack->ar_bspstore = child_rbs + rbs_size; |
452 | if (IS_IA32_PROCESS(regs)) | 452 | if (IS_IA32_PROCESS(regs)) |
453 | child_stack->b0 = (unsigned long) &ia32_ret_from_clone; | 453 | child_stack->b0 = (unsigned long) &ia32_ret_from_clone; |
454 | else | 454 | else |
455 | child_stack->b0 = (unsigned long) &ia64_ret_from_clone; | 455 | child_stack->b0 = (unsigned long) &ia64_ret_from_clone; |
456 | 456 | ||
457 | /* copy parts of thread_struct: */ | 457 | /* copy parts of thread_struct: */ |
458 | p->thread.ksp = (unsigned long) child_stack - 16; | 458 | p->thread.ksp = (unsigned long) child_stack - 16; |
459 | 459 | ||
460 | /* Stop some PSR bits from being inherited. | 460 | /* Stop some PSR bits from being inherited. |
461 | * The psr.up/psr.pp bits must be cleared on fork but inherited on execve(), | 461 | * The psr.up/psr.pp bits must be cleared on fork but inherited on execve(), |
462 | * so we must specify them explicitly here and not include them in | 462 | * so we must specify them explicitly here and not include them in |
463 | * IA64_PSR_BITS_TO_CLEAR. | 463 | * IA64_PSR_BITS_TO_CLEAR. |
464 | */ | 464 | */ |
465 | child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET) | 465 | child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET) |
466 | & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP)); | 466 | & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP)); |
467 | 467 | ||
468 | /* | 468 | /* |
469 | * NOTE: The calling convention considers all floating point | 469 | * NOTE: The calling convention considers all floating point |
470 | * registers in the high partition (fph) to be scratch. Since | 470 | * registers in the high partition (fph) to be scratch. Since |
471 | * the only way to get to this point is through a system call, | 471 | * the only way to get to this point is through a system call, |
472 | * we know that the values in fph are all dead. Hence, there | 472 | * we know that the values in fph are all dead. Hence, there |
473 | * is no need to inherit the fph state from the parent to the | 473 | * is no need to inherit the fph state from the parent to the |
474 | * child and all we have to do is to make sure that | 474 | * child and all we have to do is to make sure that |
475 | * IA64_THREAD_FPH_VALID is cleared in the child. | 475 | * IA64_THREAD_FPH_VALID is cleared in the child. |
476 | * | 476 | * |
477 | * XXX We could push this optimization a bit further by | 477 | * XXX We could push this optimization a bit further by |
478 | * clearing IA64_THREAD_FPH_VALID on ANY system call. | 478 | * clearing IA64_THREAD_FPH_VALID on ANY system call. |
479 | * However, it's not clear this is worth doing. Also, it | 479 | * However, it's not clear this is worth doing. Also, it |
480 | * would be a slight deviation from the normal Linux system | 480 | * would be a slight deviation from the normal Linux system |
481 | * call behavior where scratch registers are preserved across | 481 | * call behavior where scratch registers are preserved across |
482 | * system calls (unless used by the system call itself). | 482 | * system calls (unless used by the system call itself). |
483 | */ | 483 | */ |
484 | # define THREAD_FLAGS_TO_CLEAR (IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \ | 484 | # define THREAD_FLAGS_TO_CLEAR (IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \ |
485 | | IA64_THREAD_PM_VALID) | 485 | | IA64_THREAD_PM_VALID) |
486 | # define THREAD_FLAGS_TO_SET 0 | 486 | # define THREAD_FLAGS_TO_SET 0 |
487 | p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) | 487 | p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) |
488 | | THREAD_FLAGS_TO_SET); | 488 | | THREAD_FLAGS_TO_SET); |
489 | ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ | 489 | ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ |
490 | #ifdef CONFIG_IA32_SUPPORT | 490 | #ifdef CONFIG_IA32_SUPPORT |
491 | /* | 491 | /* |
492 | * If we're cloning an IA32 task then save the IA32 extra | 492 | * If we're cloning an IA32 task then save the IA32 extra |
493 | * state from the current task to the new task | 493 | * state from the current task to the new task |
494 | */ | 494 | */ |
495 | if (IS_IA32_PROCESS(task_pt_regs(current))) { | 495 | if (IS_IA32_PROCESS(task_pt_regs(current))) { |
496 | ia32_save_state(p); | 496 | ia32_save_state(p); |
497 | if (clone_flags & CLONE_SETTLS) | 497 | if (clone_flags & CLONE_SETTLS) |
498 | retval = ia32_clone_tls(p, child_ptregs); | 498 | retval = ia32_clone_tls(p, child_ptregs); |
499 | 499 | ||
500 | /* Copy partially mapped page list */ | 500 | /* Copy partially mapped page list */ |
501 | if (!retval) | 501 | if (!retval) |
502 | retval = ia32_copy_partial_page_list(p, clone_flags); | 502 | retval = ia32_copy_ia64_partial_page_list(p, |
503 | clone_flags); | ||
503 | } | 504 | } |
504 | #endif | 505 | #endif |
505 | 506 | ||
506 | #ifdef CONFIG_PERFMON | 507 | #ifdef CONFIG_PERFMON |
507 | if (current->thread.pfm_context) | 508 | if (current->thread.pfm_context) |
508 | pfm_inherit(p, child_ptregs); | 509 | pfm_inherit(p, child_ptregs); |
509 | #endif | 510 | #endif |
510 | return retval; | 511 | return retval; |
511 | } | 512 | } |
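The congruence argument in the block comment above copy_thread() checks out numerically. A standalone sketch using the quoted formula N = (X & 0x1ff)/8 (the addresses here are made up for illustration):

	#include <stdio.h>

	int main(void)
	{
		/* a hypothetical spill address inside the parent's pt_regs ... */
		unsigned long parent_x = 0xe000000000018a40UL;
		/* ... and the same slot in the child, a whole number of pages
		 * away; with pages of at least 4KB that is a multiple of 512 */
		unsigned long child_x = parent_x + 0x4000;

		printf("parent N=%lu, child N=%lu\n",
		       (parent_x & 0x1ff) / 8, (child_x & 0x1ff) / 8);
		return 0;	/* both print 8, so the copied unat bits stay valid */
	}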
512 | 513 | ||
513 | static void | 514 | static void |
514 | do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg) | 515 | do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg) |
515 | { | 516 | { |
516 | unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm; | 517 | unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm; |
517 | unsigned long uninitialized_var(ip); /* GCC be quiet */ | 518 | unsigned long uninitialized_var(ip); /* GCC be quiet */ |
518 | elf_greg_t *dst = arg; | 519 | elf_greg_t *dst = arg; |
519 | struct pt_regs *pt; | 520 | struct pt_regs *pt; |
520 | char nat; | 521 | char nat; |
521 | int i; | 522 | int i; |
522 | 523 | ||
523 | memset(dst, 0, sizeof(elf_gregset_t)); /* don't leak any kernel bits to user-level */ | 524 | memset(dst, 0, sizeof(elf_gregset_t)); /* don't leak any kernel bits to user-level */ |
524 | 525 | ||
525 | if (unw_unwind_to_user(info) < 0) | 526 | if (unw_unwind_to_user(info) < 0) |
526 | return; | 527 | return; |
527 | 528 | ||
528 | unw_get_sp(info, &sp); | 529 | unw_get_sp(info, &sp); |
529 | pt = (struct pt_regs *) (sp + 16); | 530 | pt = (struct pt_regs *) (sp + 16); |
530 | 531 | ||
531 | urbs_end = ia64_get_user_rbs_end(task, pt, &cfm); | 532 | urbs_end = ia64_get_user_rbs_end(task, pt, &cfm); |
532 | 533 | ||
533 | if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0) | 534 | if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0) |
534 | return; | 535 | return; |
535 | 536 | ||
536 | ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end), | 537 | ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end), |
537 | &ar_rnat); | 538 | &ar_rnat); |
538 | 539 | ||
539 | /* | 540 | /* |
540 | * coredump format: | 541 | * coredump format: |
541 | * r0-r31 | 542 | * r0-r31 |
542 | * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT) | 543 | * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT) |
543 | * predicate registers (p0-p63) | 544 | * predicate registers (p0-p63) |
544 | * b0-b7 | 545 | * b0-b7 |
545 | * ip cfm user-mask | 546 | * ip cfm user-mask |
546 | * ar.rsc ar.bsp ar.bspstore ar.rnat | 547 | * ar.rsc ar.bsp ar.bspstore ar.rnat |
547 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec | 548 | * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec |
548 | */ | 549 | */ |
549 | 550 | ||
550 | /* r0 is zero */ | 551 | /* r0 is zero */ |
551 | for (i = 1, mask = (1UL << i); i < 32; ++i) { | 552 | for (i = 1, mask = (1UL << i); i < 32; ++i) { |
552 | unw_get_gr(info, i, &dst[i], &nat); | 553 | unw_get_gr(info, i, &dst[i], &nat); |
553 | if (nat) | 554 | if (nat) |
554 | nat_bits |= mask; | 555 | nat_bits |= mask; |
555 | mask <<= 1; | 556 | mask <<= 1; |
556 | } | 557 | } |
557 | dst[32] = nat_bits; | 558 | dst[32] = nat_bits; |
558 | unw_get_pr(info, &dst[33]); | 559 | unw_get_pr(info, &dst[33]); |
559 | 560 | ||
560 | for (i = 0; i < 8; ++i) | 561 | for (i = 0; i < 8; ++i) |
561 | unw_get_br(info, i, &dst[34 + i]); | 562 | unw_get_br(info, i, &dst[34 + i]); |
562 | 563 | ||
563 | unw_get_rp(info, &ip); | 564 | unw_get_rp(info, &ip); |
564 | dst[42] = ip + ia64_psr(pt)->ri; | 565 | dst[42] = ip + ia64_psr(pt)->ri; |
565 | dst[43] = cfm; | 566 | dst[43] = cfm; |
566 | dst[44] = pt->cr_ipsr & IA64_PSR_UM; | 567 | dst[44] = pt->cr_ipsr & IA64_PSR_UM; |
567 | 568 | ||
568 | unw_get_ar(info, UNW_AR_RSC, &dst[45]); | 569 | unw_get_ar(info, UNW_AR_RSC, &dst[45]); |
569 | /* | 570 | /* |
570 | * For bsp and bspstore, unw_get_ar() would return the kernel | 571 | * For bsp and bspstore, unw_get_ar() would return the kernel |
571 | * addresses, but we need the user-level addresses instead: | 572 | * addresses, but we need the user-level addresses instead: |
572 | */ | 573 | */ |
573 | dst[46] = urbs_end; /* note: by convention PT_AR_BSP points to the end of the urbs! */ | 574 | dst[46] = urbs_end; /* note: by convention PT_AR_BSP points to the end of the urbs! */ |
574 | dst[47] = pt->ar_bspstore; | 575 | dst[47] = pt->ar_bspstore; |
575 | dst[48] = ar_rnat; | 576 | dst[48] = ar_rnat; |
576 | unw_get_ar(info, UNW_AR_CCV, &dst[49]); | 577 | unw_get_ar(info, UNW_AR_CCV, &dst[49]); |
577 | unw_get_ar(info, UNW_AR_UNAT, &dst[50]); | 578 | unw_get_ar(info, UNW_AR_UNAT, &dst[50]); |
578 | unw_get_ar(info, UNW_AR_FPSR, &dst[51]); | 579 | unw_get_ar(info, UNW_AR_FPSR, &dst[51]); |
579 | dst[52] = pt->ar_pfs; /* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */ | 580 | dst[52] = pt->ar_pfs; /* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */ |
580 | unw_get_ar(info, UNW_AR_LC, &dst[53]); | 581 | unw_get_ar(info, UNW_AR_LC, &dst[53]); |
581 | unw_get_ar(info, UNW_AR_EC, &dst[54]); | 582 | unw_get_ar(info, UNW_AR_EC, &dst[54]); |
582 | unw_get_ar(info, UNW_AR_CSD, &dst[55]); | 583 | unw_get_ar(info, UNW_AR_CSD, &dst[55]); |
583 | unw_get_ar(info, UNW_AR_SSD, &dst[56]); | 584 | unw_get_ar(info, UNW_AR_SSD, &dst[56]); |
584 | } | 585 | } |
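For reference, the coredump layout described in the comment inside do_copy_task_regs() maps onto dst[] as follows; the enum names are invented for this note (the kernel itself uses the bare offsets):

	enum ia64_coredump_index {
		CD_GR		= 0,	/* r0-r31  -> dst[0..31] (r0 stays zero) */
		CD_NAT		= 32,	/* NaT bits for r0-r31                   */
		CD_PR		= 33,	/* predicate registers p0-p63            */
		CD_BR		= 34,	/* b0-b7   -> dst[34..41]                */
		CD_IP		= 42,
		CD_CFM		= 43,
		CD_UM		= 44,	/* user mask (cr_ipsr & IA64_PSR_UM)     */
		CD_RSC		= 45,
		CD_BSP		= 46,	/* urbs_end, by convention               */
		CD_BSPSTORE	= 47,
		CD_RNAT		= 48,
		CD_CCV		= 49,
		CD_UNAT		= 50,
		CD_FPSR		= 51,
		CD_PFS		= 52,
		CD_LC		= 53,
		CD_EC		= 54,
		CD_CSD		= 55,
		CD_SSD		= 56,
	};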
585 | 586 | ||
586 | void | 587 | void |
587 | do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg) | 588 | do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg) |
588 | { | 589 | { |
589 | elf_fpreg_t *dst = arg; | 590 | elf_fpreg_t *dst = arg; |
590 | int i; | 591 | int i; |
591 | 592 | ||
592 | memset(dst, 0, sizeof(elf_fpregset_t)); /* don't leak any "random" bits */ | 593 | memset(dst, 0, sizeof(elf_fpregset_t)); /* don't leak any "random" bits */ |
593 | 594 | ||
594 | if (unw_unwind_to_user(info) < 0) | 595 | if (unw_unwind_to_user(info) < 0) |
595 | return; | 596 | return; |
596 | 597 | ||
597 | /* f0 is 0.0, f1 is 1.0 */ | 598 | /* f0 is 0.0, f1 is 1.0 */ |
598 | 599 | ||
599 | for (i = 2; i < 32; ++i) | 600 | for (i = 2; i < 32; ++i) |
600 | unw_get_fr(info, i, dst + i); | 601 | unw_get_fr(info, i, dst + i); |
601 | 602 | ||
602 | ia64_flush_fph(task); | 603 | ia64_flush_fph(task); |
603 | if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0) | 604 | if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0) |
604 | memcpy(dst + 32, task->thread.fph, 96*16); | 605 | memcpy(dst + 32, task->thread.fph, 96*16); |
605 | } | 606 | } |
606 | 607 | ||
607 | void | 608 | void |
608 | do_copy_regs (struct unw_frame_info *info, void *arg) | 609 | do_copy_regs (struct unw_frame_info *info, void *arg) |
609 | { | 610 | { |
610 | do_copy_task_regs(current, info, arg); | 611 | do_copy_task_regs(current, info, arg); |
611 | } | 612 | } |
612 | 613 | ||
613 | void | 614 | void |
614 | do_dump_fpu (struct unw_frame_info *info, void *arg) | 615 | do_dump_fpu (struct unw_frame_info *info, void *arg) |
615 | { | 616 | { |
616 | do_dump_task_fpu(current, info, arg); | 617 | do_dump_task_fpu(current, info, arg); |
617 | } | 618 | } |
618 | 619 | ||
619 | int | 620 | int |
620 | dump_task_regs(struct task_struct *task, elf_gregset_t *regs) | 621 | dump_task_regs(struct task_struct *task, elf_gregset_t *regs) |
621 | { | 622 | { |
622 | struct unw_frame_info tcore_info; | 623 | struct unw_frame_info tcore_info; |
623 | 624 | ||
624 | if (current == task) { | 625 | if (current == task) { |
625 | unw_init_running(do_copy_regs, regs); | 626 | unw_init_running(do_copy_regs, regs); |
626 | } else { | 627 | } else { |
627 | memset(&tcore_info, 0, sizeof(tcore_info)); | 628 | memset(&tcore_info, 0, sizeof(tcore_info)); |
628 | unw_init_from_blocked_task(&tcore_info, task); | 629 | unw_init_from_blocked_task(&tcore_info, task); |
629 | do_copy_task_regs(task, &tcore_info, regs); | 630 | do_copy_task_regs(task, &tcore_info, regs); |
630 | } | 631 | } |
631 | return 1; | 632 | return 1; |
632 | } | 633 | } |
633 | 634 | ||
634 | void | 635 | void |
635 | ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) | 636 | ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst) |
636 | { | 637 | { |
637 | unw_init_running(do_copy_regs, dst); | 638 | unw_init_running(do_copy_regs, dst); |
638 | } | 639 | } |
639 | 640 | ||
640 | int | 641 | int |
641 | dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst) | 642 | dump_task_fpu (struct task_struct *task, elf_fpregset_t *dst) |
642 | { | 643 | { |
643 | struct unw_frame_info tcore_info; | 644 | struct unw_frame_info tcore_info; |
644 | 645 | ||
645 | if (current == task) { | 646 | if (current == task) { |
646 | unw_init_running(do_dump_fpu, dst); | 647 | unw_init_running(do_dump_fpu, dst); |
647 | } else { | 648 | } else { |
648 | memset(&tcore_info, 0, sizeof(tcore_info)); | 649 | memset(&tcore_info, 0, sizeof(tcore_info)); |
649 | unw_init_from_blocked_task(&tcore_info, task); | 650 | unw_init_from_blocked_task(&tcore_info, task); |
650 | do_dump_task_fpu(task, &tcore_info, dst); | 651 | do_dump_task_fpu(task, &tcore_info, dst); |
651 | } | 652 | } |
652 | return 1; | 653 | return 1; |
653 | } | 654 | } |
654 | 655 | ||
655 | int | 656 | int |
656 | dump_fpu (struct pt_regs *pt, elf_fpregset_t dst) | 657 | dump_fpu (struct pt_regs *pt, elf_fpregset_t dst) |
657 | { | 658 | { |
658 | unw_init_running(do_dump_fpu, dst); | 659 | unw_init_running(do_dump_fpu, dst); |
659 | return 1; /* f0-f31 are always valid so we always return 1 */ | 660 | return 1; /* f0-f31 are always valid so we always return 1 */ |
660 | } | 661 | } |
661 | 662 | ||
662 | long | 663 | long |
663 | sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp, | 664 | sys_execve (char __user *filename, char __user * __user *argv, char __user * __user *envp, |
664 | struct pt_regs *regs) | 665 | struct pt_regs *regs) |
665 | { | 666 | { |
666 | char *fname; | 667 | char *fname; |
667 | int error; | 668 | int error; |
668 | 669 | ||
669 | fname = getname(filename); | 670 | fname = getname(filename); |
670 | error = PTR_ERR(fname); | 671 | error = PTR_ERR(fname); |
671 | if (IS_ERR(fname)) | 672 | if (IS_ERR(fname)) |
672 | goto out; | 673 | goto out; |
673 | error = do_execve(fname, argv, envp, regs); | 674 | error = do_execve(fname, argv, envp, regs); |
674 | putname(fname); | 675 | putname(fname); |
675 | out: | 676 | out: |
676 | return error; | 677 | return error; |
677 | } | 678 | } |
678 | 679 | ||
679 | pid_t | 680 | pid_t |
680 | kernel_thread (int (*fn)(void *), void *arg, unsigned long flags) | 681 | kernel_thread (int (*fn)(void *), void *arg, unsigned long flags) |
681 | { | 682 | { |
682 | extern void start_kernel_thread (void); | 683 | extern void start_kernel_thread (void); |
683 | unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread; | 684 | unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread; |
684 | struct { | 685 | struct { |
685 | struct switch_stack sw; | 686 | struct switch_stack sw; |
686 | struct pt_regs pt; | 687 | struct pt_regs pt; |
687 | } regs; | 688 | } regs; |
688 | 689 | ||
689 | memset(®s, 0, sizeof(regs)); | 690 | memset(®s, 0, sizeof(regs)); |
690 | regs.pt.cr_iip = helper_fptr[0]; /* set entry point (IP) */ | 691 | regs.pt.cr_iip = helper_fptr[0]; /* set entry point (IP) */ |
691 | regs.pt.r1 = helper_fptr[1]; /* set GP */ | 692 | regs.pt.r1 = helper_fptr[1]; /* set GP */ |
692 | regs.pt.r9 = (unsigned long) fn; /* 1st argument */ | 693 | regs.pt.r9 = (unsigned long) fn; /* 1st argument */ |
693 | regs.pt.r11 = (unsigned long) arg; /* 2nd argument */ | 694 | regs.pt.r11 = (unsigned long) arg; /* 2nd argument */ |
694 | /* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read. */ | 695 | /* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read. */ |
695 | regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN; | 696 | regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN; |
696 | regs.pt.cr_ifs = 1UL << 63; /* mark as valid, empty frame */ | 697 | regs.pt.cr_ifs = 1UL << 63; /* mark as valid, empty frame */ |
697 | regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR); | 698 | regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR); |
698 | regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET; | 699 | regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET; |
699 | regs.sw.pr = (1 << PRED_KERNEL_STACK); | 700 | regs.sw.pr = (1 << PRED_KERNEL_STACK); |
700 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s.pt, 0, NULL, NULL); | 701 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s.pt, 0, NULL, NULL); |
701 | } | 702 | } |
702 | EXPORT_SYMBOL(kernel_thread); | 703 | EXPORT_SYMBOL(kernel_thread); |
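A hedged usage sketch for the helper exported above; my_worker and spawn_worker are assumptions for illustration, while CLONE_FS | CLONE_FILES is the common flag choice for kernel threads:

	static int my_worker(void *arg)
	{
		/* runs in the new kernel thread; the return value is its exit code */
		return 0;
	}

	static void spawn_worker(void)
	{
		pid_t pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);

		if (pid < 0)
			printk(KERN_ERR "failed to spawn worker: %d\n", (int) pid);
	}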
703 | 704 | ||
704 | /* This gets called from kernel_thread() via ia64_invoke_thread_helper(). */ | 705 | /* This gets called from kernel_thread() via ia64_invoke_thread_helper(). */ |
705 | int | 706 | int |
706 | kernel_thread_helper (int (*fn)(void *), void *arg) | 707 | kernel_thread_helper (int (*fn)(void *), void *arg) |
707 | { | 708 | { |
708 | #ifdef CONFIG_IA32_SUPPORT | 709 | #ifdef CONFIG_IA32_SUPPORT |
709 | if (IS_IA32_PROCESS(task_pt_regs(current))) { | 710 | if (IS_IA32_PROCESS(task_pt_regs(current))) { |
710 | /* A kernel thread is always a 64-bit process. */ | 711 | /* A kernel thread is always a 64-bit process. */ |
711 | current->thread.map_base = DEFAULT_MAP_BASE; | 712 | current->thread.map_base = DEFAULT_MAP_BASE; |
712 | current->thread.task_size = DEFAULT_TASK_SIZE; | 713 | current->thread.task_size = DEFAULT_TASK_SIZE; |
713 | ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob); | 714 | ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob); |
714 | ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1); | 715 | ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1); |
715 | } | 716 | } |
716 | #endif | 717 | #endif |
717 | return (*fn)(arg); | 718 | return (*fn)(arg); |
718 | } | 719 | } |
719 | 720 | ||
720 | /* | 721 | /* |
721 | * Flush thread state. This is called when a thread does an execve(). | 722 | * Flush thread state. This is called when a thread does an execve(). |
722 | */ | 723 | */ |
723 | void | 724 | void |
724 | flush_thread (void) | 725 | flush_thread (void) |
725 | { | 726 | { |
726 | /* drop floating-point and debug-register state if it exists: */ | 727 | /* drop floating-point and debug-register state if it exists: */ |
727 | current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); | 728 | current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); |
728 | ia64_drop_fpu(current); | 729 | ia64_drop_fpu(current); |
729 | #ifdef CONFIG_IA32_SUPPORT | 730 | #ifdef CONFIG_IA32_SUPPORT |
730 | if (IS_IA32_PROCESS(task_pt_regs(current))) { | 731 | if (IS_IA32_PROCESS(task_pt_regs(current))) { |
731 | ia32_drop_partial_page_list(current); | 732 | ia32_drop_ia64_partial_page_list(current); |
732 | current->thread.task_size = IA32_PAGE_OFFSET; | 733 | current->thread.task_size = IA32_PAGE_OFFSET; |
733 | set_fs(USER_DS); | 734 | set_fs(USER_DS); |
734 | } | 735 | } |
735 | #endif | 736 | #endif |
736 | } | 737 | } |
737 | 738 | ||
738 | /* | 739 | /* |
739 | * Clean up state associated with the current thread. This is called when | 740 | * Clean up state associated with the current thread. This is called when |
740 | * the thread calls exit(). | 741 | * the thread calls exit(). |
741 | */ | 742 | */ |
742 | void | 743 | void |
743 | exit_thread (void) | 744 | exit_thread (void) |
744 | { | 745 | { |
745 | 746 | ||
746 | ia64_drop_fpu(current); | 747 | ia64_drop_fpu(current); |
747 | #ifdef CONFIG_PERFMON | 748 | #ifdef CONFIG_PERFMON |
748 | /* if needed, stop monitoring and flush state to perfmon context */ | 749 | /* if needed, stop monitoring and flush state to perfmon context */ |
749 | if (current->thread.pfm_context) | 750 | if (current->thread.pfm_context) |
750 | pfm_exit_thread(current); | 751 | pfm_exit_thread(current); |
751 | 752 | ||
752 | /* free debug register resources */ | 753 | /* free debug register resources */ |
753 | if (current->thread.flags & IA64_THREAD_DBG_VALID) | 754 | if (current->thread.flags & IA64_THREAD_DBG_VALID) |
754 | pfm_release_debug_registers(current); | 755 | pfm_release_debug_registers(current); |
755 | #endif | 756 | #endif |
756 | if (IS_IA32_PROCESS(task_pt_regs(current))) | 757 | if (IS_IA32_PROCESS(task_pt_regs(current))) |
757 | ia32_drop_partial_page_list(current); | 758 | ia32_drop_ia64_partial_page_list(current); |
758 | } | 759 | } |
759 | 760 | ||
760 | unsigned long | 761 | unsigned long |
761 | get_wchan (struct task_struct *p) | 762 | get_wchan (struct task_struct *p) |
762 | { | 763 | { |
763 | struct unw_frame_info info; | 764 | struct unw_frame_info info; |
764 | unsigned long ip; | 765 | unsigned long ip; |
765 | int count = 0; | 766 | int count = 0; |
766 | 767 | ||
767 | if (!p || p == current || p->state == TASK_RUNNING) | 768 | if (!p || p == current || p->state == TASK_RUNNING) |
768 | return 0; | 769 | return 0; |
769 | 770 | ||
770 | /* | 771 | /* |
771 | * Note: p may not be a blocked task (it could be current or | 772 | * Note: p may not be a blocked task (it could be current or |
772 | * another process running on some other CPU). Rather than | 773 | * another process running on some other CPU). Rather than |
773 | * trying to determine if p is really blocked, we just assume | 774 | * trying to determine if p is really blocked, we just assume |
774 | * it's blocked and rely on the unwind routines to fail | 775 | * it's blocked and rely on the unwind routines to fail |
775 | * gracefully if the process wasn't really blocked after all. | 776 | * gracefully if the process wasn't really blocked after all. |
776 | * --davidm 99/12/15 | 777 | * --davidm 99/12/15 |
777 | */ | 778 | */ |
778 | unw_init_from_blocked_task(&info, p); | 779 | unw_init_from_blocked_task(&info, p); |
779 | do { | 780 | do { |
780 | if (p->state == TASK_RUNNING) | 781 | if (p->state == TASK_RUNNING) |
781 | return 0; | 782 | return 0; |
782 | if (unw_unwind(&info) < 0) | 783 | if (unw_unwind(&info) < 0) |
783 | return 0; | 784 | return 0; |
784 | unw_get_ip(&info, &ip); | 785 | unw_get_ip(&info, &ip); |
785 | if (!in_sched_functions(ip)) | 786 | if (!in_sched_functions(ip)) |
786 | return ip; | 787 | return ip; |
787 | } while (count++ < 16); | 788 | } while (count++ < 16); |
788 | return 0; | 789 | return 0; |
789 | } | 790 | } |
790 | 791 | ||
791 | void | 792 | void |
792 | cpu_halt (void) | 793 | cpu_halt (void) |
793 | { | 794 | { |
794 | pal_power_mgmt_info_u_t power_info[8]; | 795 | pal_power_mgmt_info_u_t power_info[8]; |
795 | unsigned long min_power; | 796 | unsigned long min_power; |
796 | int i, min_power_state; | 797 | int i, min_power_state; |
797 | 798 | ||
798 | if (ia64_pal_halt_info(power_info) != 0) | 799 | if (ia64_pal_halt_info(power_info) != 0) |
799 | return; | 800 | return; |
800 | 801 | ||
801 | min_power_state = 0; | 802 | min_power_state = 0; |
802 | min_power = power_info[0].pal_power_mgmt_info_s.power_consumption; | 803 | min_power = power_info[0].pal_power_mgmt_info_s.power_consumption; |
803 | for (i = 1; i < 8; ++i) | 804 | for (i = 1; i < 8; ++i) |
804 | if (power_info[i].pal_power_mgmt_info_s.im | 805 | if (power_info[i].pal_power_mgmt_info_s.im |
805 | && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) { | 806 | && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) { |
806 | min_power = power_info[i].pal_power_mgmt_info_s.power_consumption; | 807 | min_power = power_info[i].pal_power_mgmt_info_s.power_consumption; |
807 | min_power_state = i; | 808 | min_power_state = i; |
808 | } | 809 | } |
809 | 810 | ||
810 | while (1) | 811 | while (1) |
811 | ia64_pal_halt(min_power_state); | 812 | ia64_pal_halt(min_power_state); |
812 | } | 813 | } |
813 | 814 | ||
814 | void machine_shutdown(void) | 815 | void machine_shutdown(void) |
815 | { | 816 | { |
816 | #ifdef CONFIG_HOTPLUG_CPU | 817 | #ifdef CONFIG_HOTPLUG_CPU |
817 | int cpu; | 818 | int cpu; |
818 | 819 | ||
819 | for_each_online_cpu(cpu) { | 820 | for_each_online_cpu(cpu) { |
820 | if (cpu != smp_processor_id()) | 821 | if (cpu != smp_processor_id()) |
821 | cpu_down(cpu); | 822 | cpu_down(cpu); |
822 | } | 823 | } |
823 | #endif | 824 | #endif |
824 | #ifdef CONFIG_KEXEC | 825 | #ifdef CONFIG_KEXEC |
825 | kexec_disable_iosapic(); | 826 | kexec_disable_iosapic(); |
826 | #endif | 827 | #endif |
827 | } | 828 | } |
828 | 829 | ||
829 | void | 830 | void |
830 | machine_restart (char *restart_cmd) | 831 | machine_restart (char *restart_cmd) |
831 | { | 832 | { |
832 | (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0); | 833 | (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0); |
833 | (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL); | 834 | (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL); |
834 | } | 835 | } |
835 | 836 | ||
836 | void | 837 | void |
837 | machine_halt (void) | 838 | machine_halt (void) |
838 | { | 839 | { |
839 | (void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0); | 840 | (void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0); |
840 | cpu_halt(); | 841 | cpu_halt(); |
841 | } | 842 | } |
842 | 843 | ||
843 | void | 844 | void |
844 | machine_power_off (void) | 845 | machine_power_off (void) |
845 | { | 846 | { |
846 | if (pm_power_off) | 847 | if (pm_power_off) |
847 | pm_power_off(); | 848 | pm_power_off(); |
848 | machine_halt(); | 849 | machine_halt(); |
849 | } | 850 | } |
850 | 851 | ||
851 | 852 |
include/asm-ia64/ia32.h
1 | #ifndef _ASM_IA64_IA32_H | 1 | #ifndef _ASM_IA64_IA32_H |
2 | #define _ASM_IA64_IA32_H | 2 | #define _ASM_IA64_IA32_H |
3 | 3 | ||
4 | 4 | ||
5 | #include <asm/ptrace.h> | 5 | #include <asm/ptrace.h> |
6 | #include <asm/signal.h> | 6 | #include <asm/signal.h> |
7 | 7 | ||
8 | #define IA32_NR_syscalls 285 /* length of syscall table */ | 8 | #define IA32_NR_syscalls 285 /* length of syscall table */ |
9 | #define IA32_PAGE_SHIFT 12 /* 4KB pages */ | 9 | #define IA32_PAGE_SHIFT 12 /* 4KB pages */ |
10 | 10 | ||
11 | #ifndef __ASSEMBLY__ | 11 | #ifndef __ASSEMBLY__ |
12 | 12 | ||
13 | # ifdef CONFIG_IA32_SUPPORT | 13 | # ifdef CONFIG_IA32_SUPPORT |
14 | 14 | ||
15 | #define IA32_PAGE_OFFSET 0xc0000000 | 15 | #define IA32_PAGE_OFFSET 0xc0000000 |
16 | 16 | ||
17 | extern void ia32_cpu_init (void); | 17 | extern void ia32_cpu_init (void); |
18 | extern void ia32_mem_init (void); | 18 | extern void ia32_mem_init (void); |
19 | extern void ia32_gdt_init (void); | 19 | extern void ia32_gdt_init (void); |
20 | extern int ia32_exception (struct pt_regs *regs, unsigned long isr); | 20 | extern int ia32_exception (struct pt_regs *regs, unsigned long isr); |
21 | extern int ia32_intercept (struct pt_regs *regs, unsigned long isr); | 21 | extern int ia32_intercept (struct pt_regs *regs, unsigned long isr); |
22 | extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs); | 22 | extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs); |
23 | 23 | ||
24 | # endif /* CONFIG_IA32_SUPPORT */ | 24 | # endif /* CONFIG_IA32_SUPPORT */ |
25 | 25 | ||
26 | /* Declare this unconditionally, so we don't get warnings for unreachable code. */ | 26 | /* Declare this unconditionally, so we don't get warnings for unreachable code. */ |
27 | extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info, | 27 | extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info, |
28 | sigset_t *set, struct pt_regs *regs); | 28 | sigset_t *set, struct pt_regs *regs); |
29 | #if PAGE_SHIFT > IA32_PAGE_SHIFT | 29 | #if PAGE_SHIFT > IA32_PAGE_SHIFT |
30 | extern int ia32_copy_partial_page_list (struct task_struct *, unsigned long); | 30 | extern int ia32_copy_ia64_partial_page_list(struct task_struct *, |
31 | extern void ia32_drop_partial_page_list (struct task_struct *); | 31 | unsigned long); |
32 | extern void ia32_drop_ia64_partial_page_list(struct task_struct *); | ||
32 | #else | 33 | #else |
33 | # define ia32_copy_partial_page_list(a1, a2) 0 | 34 | # define ia32_copy_ia64_partial_page_list(a1, a2) 0 |
34 | # define ia32_drop_partial_page_list(a1) do { ; } while (0) | 35 | # define ia32_drop_ia64_partial_page_list(a1) do { ; } while (0) |
35 | #endif | 36 | #endif |
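The #else branch keeps callers free of #ifdefs: when PAGE_SHIFT <= IA32_PAGE_SHIFT no partially mapped pages can exist, so the copy call collapses to the constant 0 and the drop call to an empty statement. The same compile-away idiom in generic form (names invented for illustration):

	#ifdef CONFIG_HAS_WIDGETS
	extern int  widget_copy(struct task_struct *, unsigned long);
	extern void widget_drop(struct task_struct *);
	#else
	/* stubs: the calls vanish at compile time, callers stay unchanged */
	# define widget_copy(tsk, flags)	0
	# define widget_drop(tsk)		do { } while (0)
	#endif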
36 | 37 | ||
37 | #endif /* !__ASSEMBLY__ */ | 38 | #endif /* !__ASSEMBLY__ */ |
38 | 39 | ||
39 | #endif /* _ASM_IA64_IA32_H */ | 40 | #endif /* _ASM_IA64_IA32_H */ |
40 | 41 |
include/asm-ia64/processor.h
1 | #ifndef _ASM_IA64_PROCESSOR_H | 1 | #ifndef _ASM_IA64_PROCESSOR_H |
2 | #define _ASM_IA64_PROCESSOR_H | 2 | #define _ASM_IA64_PROCESSOR_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Copyright (C) 1998-2004 Hewlett-Packard Co | 5 | * Copyright (C) 1998-2004 Hewlett-Packard Co |
6 | * David Mosberger-Tang <davidm@hpl.hp.com> | 6 | * David Mosberger-Tang <davidm@hpl.hp.com> |
7 | * Stephane Eranian <eranian@hpl.hp.com> | 7 | * Stephane Eranian <eranian@hpl.hp.com> |
8 | * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> | 8 | * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> |
9 | * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> | 9 | * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> |
10 | * | 10 | * |
11 | * 11/24/98 S.Eranian added ia64_set_iva() | 11 | * 11/24/98 S.Eranian added ia64_set_iva() |
12 | * 12/03/99 D. Mosberger implement thread_saved_pc() via kernel unwind API | 12 | * 12/03/99 D. Mosberger implement thread_saved_pc() via kernel unwind API |
13 | * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 support | 13 | * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 support |
14 | */ | 14 | */ |
15 | 15 | ||
16 | 16 | ||
17 | #include <asm/intrinsics.h> | 17 | #include <asm/intrinsics.h> |
18 | #include <asm/kregs.h> | 18 | #include <asm/kregs.h> |
19 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
20 | #include <asm/ustack.h> | 20 | #include <asm/ustack.h> |
21 | 21 | ||
22 | #define IA64_NUM_PHYS_STACK_REG 96 | 22 | #define IA64_NUM_PHYS_STACK_REG 96 |
23 | #define IA64_NUM_DBG_REGS 8 | 23 | #define IA64_NUM_DBG_REGS 8 |
24 | 24 | ||
25 | #define DEFAULT_MAP_BASE __IA64_UL_CONST(0x2000000000000000) | 25 | #define DEFAULT_MAP_BASE __IA64_UL_CONST(0x2000000000000000) |
26 | #define DEFAULT_TASK_SIZE __IA64_UL_CONST(0xa000000000000000) | 26 | #define DEFAULT_TASK_SIZE __IA64_UL_CONST(0xa000000000000000) |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * TASK_SIZE is a misnomer: it is actually the maximum user | 29 | * TASK_SIZE is a misnomer: it is actually the maximum user |
30 | * space address (plus one). On IA-64, there are five regions of 2TB | 30 | * space address (plus one). On IA-64, there are five regions of 2TB |
31 | * each (assuming 8KB page size), for a total of 8TB of user virtual | 31 | * each (assuming 8KB page size), for a total of 8TB of user virtual |
32 | * address space. | 32 | * address space. |
33 | */ | 33 | */ |
34 | #define TASK_SIZE (current->thread.task_size) | 34 | #define TASK_SIZE (current->thread.task_size) |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * This decides where the kernel will search for a free chunk of vm | 37 | * This decides where the kernel will search for a free chunk of vm |
38 | * space during mmaps. | 38 | * space during mmaps. |
39 | */ | 39 | */ |
40 | #define TASK_UNMAPPED_BASE (current->thread.map_base) | 40 | #define TASK_UNMAPPED_BASE (current->thread.map_base) |
41 | 41 | ||
42 | #define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */ | 42 | #define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */ |
43 | #define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */ | 43 | #define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */ |
44 | #define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */ | 44 | #define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */ |
45 | #define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */ | 45 | #define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */ |
46 | #define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. */ | 46 | #define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. */ |
47 | #define IA64_THREAD_MIGRATION (__IA64_UL(1) << 5) /* require migration | 47 | #define IA64_THREAD_MIGRATION (__IA64_UL(1) << 5) /* require migration |
48 | sync at ctx sw */ | 48 | sync at ctx sw */ |
49 | #define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */ | 49 | #define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */ |
50 | #define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */ | 50 | #define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */ |
51 | 51 | ||
52 | #define IA64_THREAD_UAC_SHIFT 3 | 52 | #define IA64_THREAD_UAC_SHIFT 3 |
53 | #define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS) | 53 | #define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS) |
54 | #define IA64_THREAD_FPEMU_SHIFT 6 | 54 | #define IA64_THREAD_FPEMU_SHIFT 6 |
55 | #define IA64_THREAD_FPEMU_MASK (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE) | 55 | #define IA64_THREAD_FPEMU_MASK (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE) |
56 | 56 | ||
57 | 57 | ||
58 | /* | 58 | /* |
59 | * This shift should be large enough to be able to represent 1000000000/itc_freq with good | 59 | * This shift should be large enough to be able to represent 1000000000/itc_freq with good |
60 | * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits | 60 | * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits |
61 | * (this will give enough slack to represent 10 seconds worth of time as a scaled number). | 61 | * (this will give enough slack to represent 10 seconds worth of time as a scaled number). |
62 | */ | 62 | */ |
63 | #define IA64_NSEC_PER_CYC_SHIFT 30 | 63 | #define IA64_NSEC_PER_CYC_SHIFT 30 |
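The shift comment above is easiest to verify with concrete numbers. A userspace sketch, assuming an itc_freq of 1.5 GHz for illustration (the formula matches the nsec_per_cyc field documented further down in cpuinfo_ia64):

	#include <stdio.h>

	#define IA64_NSEC_PER_CYC_SHIFT	30

	int main(void)
	{
		unsigned long itc_freq = 1500000000UL;	/* assumed 1.5 GHz ITC */
		unsigned long nsec_per_cyc =
			(1000000000UL << IA64_NSEC_PER_CYC_SHIFT) / itc_freq;

		/* cycles -> ns: multiply by the scaled factor, shift back down */
		unsigned long cycles = 3000000UL;	/* 2 ms worth of cycles */
		unsigned long ns =
			(cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;

		printf("nsec_per_cyc = %lu, %lu cycles ~ %lu ns\n",
		       nsec_per_cyc, cycles, ns);
		return 0;
	}

Ten seconds of time scaled this way is 10^10 << 30, roughly 2^63.2, which still fits in an unsigned 64-bit value; that is the slack the comment refers to.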
64 | 64 | ||
65 | #ifndef __ASSEMBLY__ | 65 | #ifndef __ASSEMBLY__ |
66 | 66 | ||
67 | #include <linux/cache.h> | 67 | #include <linux/cache.h> |
68 | #include <linux/compiler.h> | 68 | #include <linux/compiler.h> |
69 | #include <linux/threads.h> | 69 | #include <linux/threads.h> |
70 | #include <linux/types.h> | 70 | #include <linux/types.h> |
71 | 71 | ||
72 | #include <asm/fpu.h> | 72 | #include <asm/fpu.h> |
73 | #include <asm/page.h> | 73 | #include <asm/page.h> |
74 | #include <asm/percpu.h> | 74 | #include <asm/percpu.h> |
75 | #include <asm/rse.h> | 75 | #include <asm/rse.h> |
76 | #include <asm/unwind.h> | 76 | #include <asm/unwind.h> |
77 | #include <asm/atomic.h> | 77 | #include <asm/atomic.h> |
78 | #ifdef CONFIG_NUMA | 78 | #ifdef CONFIG_NUMA |
79 | #include <asm/nodedata.h> | 79 | #include <asm/nodedata.h> |
80 | #endif | 80 | #endif |
81 | 81 | ||
82 | /* like above but expressed as bitfields for more efficient access: */ | 82 | /* like above but expressed as bitfields for more efficient access: */ |
83 | struct ia64_psr { | 83 | struct ia64_psr { |
84 | __u64 reserved0 : 1; | 84 | __u64 reserved0 : 1; |
85 | __u64 be : 1; | 85 | __u64 be : 1; |
86 | __u64 up : 1; | 86 | __u64 up : 1; |
87 | __u64 ac : 1; | 87 | __u64 ac : 1; |
88 | __u64 mfl : 1; | 88 | __u64 mfl : 1; |
89 | __u64 mfh : 1; | 89 | __u64 mfh : 1; |
90 | __u64 reserved1 : 7; | 90 | __u64 reserved1 : 7; |
91 | __u64 ic : 1; | 91 | __u64 ic : 1; |
92 | __u64 i : 1; | 92 | __u64 i : 1; |
93 | __u64 pk : 1; | 93 | __u64 pk : 1; |
94 | __u64 reserved2 : 1; | 94 | __u64 reserved2 : 1; |
95 | __u64 dt : 1; | 95 | __u64 dt : 1; |
96 | __u64 dfl : 1; | 96 | __u64 dfl : 1; |
97 | __u64 dfh : 1; | 97 | __u64 dfh : 1; |
98 | __u64 sp : 1; | 98 | __u64 sp : 1; |
99 | __u64 pp : 1; | 99 | __u64 pp : 1; |
100 | __u64 di : 1; | 100 | __u64 di : 1; |
101 | __u64 si : 1; | 101 | __u64 si : 1; |
102 | __u64 db : 1; | 102 | __u64 db : 1; |
103 | __u64 lp : 1; | 103 | __u64 lp : 1; |
104 | __u64 tb : 1; | 104 | __u64 tb : 1; |
105 | __u64 rt : 1; | 105 | __u64 rt : 1; |
106 | __u64 reserved3 : 4; | 106 | __u64 reserved3 : 4; |
107 | __u64 cpl : 2; | 107 | __u64 cpl : 2; |
108 | __u64 is : 1; | 108 | __u64 is : 1; |
109 | __u64 mc : 1; | 109 | __u64 mc : 1; |
110 | __u64 it : 1; | 110 | __u64 it : 1; |
111 | __u64 id : 1; | 111 | __u64 id : 1; |
112 | __u64 da : 1; | 112 | __u64 da : 1; |
113 | __u64 dd : 1; | 113 | __u64 dd : 1; |
114 | __u64 ss : 1; | 114 | __u64 ss : 1; |
115 | __u64 ri : 2; | 115 | __u64 ri : 2; |
116 | __u64 ed : 1; | 116 | __u64 ed : 1; |
117 | __u64 bn : 1; | 117 | __u64 bn : 1; |
118 | __u64 reserved4 : 19; | 118 | __u64 reserved4 : 19; |
119 | }; | 119 | }; |
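The bitfield view is normally used by overlaying it on an existing 64-bit PSR image rather than by assembling one field by field; the ia64_psr() accessor that KSTK_EIP() uses further down casts &regs->cr_ipsr in exactly this way. A minimal sketch of the pattern (the helper name is illustrative):

    /* sketch: inspect fields of a saved PSR value through the bitfield view */
    static inline int psr_privilege_level(unsigned long psr_val)
    {
            struct ia64_psr *psr = (struct ia64_psr *) &psr_val;

            return psr->cpl;        /* 0 = kernel, 3 = user */
    }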
120 | 120 | ||
121 | /* | 121 | /* |
122 | * CPU type, hardware bug flags, and per-CPU state. Frequently used | 122 | * CPU type, hardware bug flags, and per-CPU state. Frequently used |
123 | * state comes earlier: | 123 | * state comes earlier: |
124 | */ | 124 | */ |
125 | struct cpuinfo_ia64 { | 125 | struct cpuinfo_ia64 { |
126 | __u32 softirq_pending; | 126 | __u32 softirq_pending; |
127 | __u64 itm_delta; /* # of clock cycles between clock ticks */ | 127 | __u64 itm_delta; /* # of clock cycles between clock ticks */ |
128 | __u64 itm_next; /* interval timer match value to use for next clock tick */ | 128 | __u64 itm_next; /* interval timer match value to use for next clock tick */ |
129 | __u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */ | 129 | __u64 nsec_per_cyc; /* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */ |
130 | __u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */ | 130 | __u64 unimpl_va_mask; /* mask of unimplemented virtual address bits (from PAL) */ |
131 | __u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */ | 131 | __u64 unimpl_pa_mask; /* mask of unimplemented physical address bits (from PAL) */ |
132 | __u64 itc_freq; /* frequency of ITC counter */ | 132 | __u64 itc_freq; /* frequency of ITC counter */ |
133 | __u64 proc_freq; /* frequency of processor */ | 133 | __u64 proc_freq; /* frequency of processor */ |
134 | __u64 cyc_per_usec; /* itc_freq/1000000 */ | 134 | __u64 cyc_per_usec; /* itc_freq/1000000 */ |
135 | __u64 ptce_base; | 135 | __u64 ptce_base; |
136 | __u32 ptce_count[2]; | 136 | __u32 ptce_count[2]; |
137 | __u32 ptce_stride[2]; | 137 | __u32 ptce_stride[2]; |
138 | struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */ | 138 | struct task_struct *ksoftirqd; /* kernel softirq daemon for this CPU */ |
139 | 139 | ||
140 | #ifdef CONFIG_SMP | 140 | #ifdef CONFIG_SMP |
141 | __u64 loops_per_jiffy; | 141 | __u64 loops_per_jiffy; |
142 | int cpu; | 142 | int cpu; |
143 | __u32 socket_id; /* physical processor socket id */ | 143 | __u32 socket_id; /* physical processor socket id */ |
144 | __u16 core_id; /* core id */ | 144 | __u16 core_id; /* core id */ |
145 | __u16 thread_id; /* thread id */ | 145 | __u16 thread_id; /* thread id */ |
146 | __u16 num_log; /* Total number of logical processors on | 146 | __u16 num_log; /* Total number of logical processors on |
147 | * this socket that were successfully booted */ | 147 | * this socket that were successfully booted */ |
148 | __u8 cores_per_socket; /* Cores per processor socket */ | 148 | __u8 cores_per_socket; /* Cores per processor socket */ |
149 | __u8 threads_per_core; /* Threads per core */ | 149 | __u8 threads_per_core; /* Threads per core */ |
150 | #endif | 150 | #endif |
151 | 151 | ||
152 | /* CPUID-derived information: */ | 152 | /* CPUID-derived information: */ |
153 | __u64 ppn; | 153 | __u64 ppn; |
154 | __u64 features; | 154 | __u64 features; |
155 | __u8 number; | 155 | __u8 number; |
156 | __u8 revision; | 156 | __u8 revision; |
157 | __u8 model; | 157 | __u8 model; |
158 | __u8 family; | 158 | __u8 family; |
159 | __u8 archrev; | 159 | __u8 archrev; |
160 | char vendor[16]; | 160 | char vendor[16]; |
161 | char *model_name; | 161 | char *model_name; |
162 | 162 | ||
163 | #ifdef CONFIG_NUMA | 163 | #ifdef CONFIG_NUMA |
164 | struct ia64_node_data *node_data; | 164 | struct ia64_node_data *node_data; |
165 | #endif | 165 | #endif |
166 | }; | 166 | }; |
167 | 167 | ||
168 | DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info); | 168 | DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info); |
169 | 169 | ||
170 | /* | 170 | /* |
171 | * The "local" data variable. It refers to the per-CPU data of the currently executing | 171 | * The "local" data variable. It refers to the per-CPU data of the currently executing |
172 | * CPU, much like "current" points to the per-task data of the currently executing task. | 172 | * CPU, much like "current" points to the per-task data of the currently executing task. |
173 | * Do not use the address of local_cpu_data, since it will be different from | 173 | * Do not use the address of local_cpu_data, since it will be different from |
174 | * cpu_data(smp_processor_id())! | 174 | * cpu_data(smp_processor_id())! |
175 | */ | 175 | */ |
176 | #define local_cpu_data (&__ia64_per_cpu_var(cpu_info)) | 176 | #define local_cpu_data (&__ia64_per_cpu_var(cpu_info)) |
177 | #define cpu_data(cpu) (&per_cpu(cpu_info, cpu)) | 177 | #define cpu_data(cpu) (&per_cpu(cpu_info, cpu)) |
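As a usage sketch, frequency-derived fields are read through these accessors directly, so the per-CPU address is resolved at each use instead of being cached (which the comment above warns against). The field name comes from struct cpuinfo_ia64 above; the helper itself is illustrative:

    /* sketch: convert microseconds to ITC cycles on the running CPU */
    static inline unsigned long usecs_to_cycles(unsigned long usecs)
    {
            return usecs * local_cpu_data->cyc_per_usec;
    }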
178 | 178 | ||
179 | extern void print_cpu_info (struct cpuinfo_ia64 *); | 179 | extern void print_cpu_info (struct cpuinfo_ia64 *); |
180 | 180 | ||
181 | typedef struct { | 181 | typedef struct { |
182 | unsigned long seg; | 182 | unsigned long seg; |
183 | } mm_segment_t; | 183 | } mm_segment_t; |
184 | 184 | ||
185 | #define SET_UNALIGN_CTL(task,value) \ | 185 | #define SET_UNALIGN_CTL(task,value) \ |
186 | ({ \ | 186 | ({ \ |
187 | (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \ | 187 | (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \ |
188 | | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \ | 188 | | (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK)); \ |
189 | 0; \ | 189 | 0; \ |
190 | }) | 190 | }) |
191 | #define GET_UNALIGN_CTL(task,addr) \ | 191 | #define GET_UNALIGN_CTL(task,addr) \ |
192 | ({ \ | 192 | ({ \ |
193 | put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \ | 193 | put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \ |
194 | (int __user *) (addr)); \ | 194 | (int __user *) (addr)); \ |
195 | }) | 195 | }) |
196 | 196 | ||
197 | #define SET_FPEMU_CTL(task,value) \ | 197 | #define SET_FPEMU_CTL(task,value) \ |
198 | ({ \ | 198 | ({ \ |
199 | (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK) \ | 199 | (task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK) \ |
200 | | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \ | 200 | | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \ |
201 | 0; \ | 201 | 0; \ |
202 | }) | 202 | }) |
203 | #define GET_FPEMU_CTL(task,addr) \ | 203 | #define GET_FPEMU_CTL(task,addr) \ |
204 | ({ \ | 204 | ({ \ |
205 | put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \ | 205 | put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \ |
206 | (int __user *) (addr)); \ | 206 | (int __user *) (addr)); \ |
207 | }) | 207 | }) |
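These four macros are the per-arch hooks behind prctl(): kernel/sys.c invokes them for PR_SET_UNALIGN, PR_GET_UNALIGN, PR_SET_FPEMU and PR_GET_FPEMU. From userspace the round trip looks roughly like this (error handling elided):

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            int ctl;

            /* request SIGBUS instead of in-kernel fixup of unaligned accesses */
            prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);
            prctl(PR_GET_UNALIGN, &ctl);    /* serviced by GET_UNALIGN_CTL above */
            printf("unaligned-access control: %d\n", ctl);
            return 0;
    }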
208 | 208 | ||
209 | #ifdef CONFIG_IA32_SUPPORT | 209 | #ifdef CONFIG_IA32_SUPPORT |
210 | struct desc_struct { | 210 | struct desc_struct { |
211 | unsigned int a, b; | 211 | unsigned int a, b; |
212 | }; | 212 | }; |
213 | 213 | ||
214 | #define desc_empty(desc) (!((desc)->a | (desc)->b)) | 214 | #define desc_empty(desc) (!((desc)->a | (desc)->b)) |
215 | #define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) | 215 | #define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b)) |
216 | 216 | ||
217 | #define GDT_ENTRY_TLS_ENTRIES 3 | 217 | #define GDT_ENTRY_TLS_ENTRIES 3 |
218 | #define GDT_ENTRY_TLS_MIN 6 | 218 | #define GDT_ENTRY_TLS_MIN 6 |
219 | #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) | 219 | #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1) |
220 | 220 | ||
221 | #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) | 221 | #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8) |
222 | 222 | ||
223 | struct partial_page_list; | 223 | struct ia64_partial_page_list; |
224 | #endif | 224 | #endif |
225 | 225 | ||
226 | struct thread_struct { | 226 | struct thread_struct { |
227 | __u32 flags; /* various thread flags (see IA64_THREAD_*) */ | 227 | __u32 flags; /* various thread flags (see IA64_THREAD_*) */ |
228 | /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */ | 228 | /* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */ |
229 | __u8 on_ustack; /* executing on user-stacks? */ | 229 | __u8 on_ustack; /* executing on user-stacks? */ |
230 | __u8 pad[3]; | 230 | __u8 pad[3]; |
231 | __u64 ksp; /* kernel stack pointer */ | 231 | __u64 ksp; /* kernel stack pointer */ |
232 | __u64 map_base; /* base address for get_unmapped_area() */ | 232 | __u64 map_base; /* base address for get_unmapped_area() */ |
233 | __u64 task_size; /* limit for task size */ | 233 | __u64 task_size; /* limit for task size */ |
234 | __u64 rbs_bot; /* the base address for the RBS */ | 234 | __u64 rbs_bot; /* the base address for the RBS */ |
235 | int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */ | 235 | int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */ |
236 | 236 | ||
237 | #ifdef CONFIG_IA32_SUPPORT | 237 | #ifdef CONFIG_IA32_SUPPORT |
238 | __u64 eflag; /* IA32 EFLAGS reg */ | 238 | __u64 eflag; /* IA32 EFLAGS reg */ |
239 | __u64 fsr; /* IA32 floating pt status reg */ | 239 | __u64 fsr; /* IA32 floating pt status reg */ |
240 | __u64 fcr; /* IA32 floating pt control reg */ | 240 | __u64 fcr; /* IA32 floating pt control reg */ |
241 | __u64 fir; /* IA32 fp except. instr. reg */ | 241 | __u64 fir; /* IA32 fp except. instr. reg */ |
242 | __u64 fdr; /* IA32 fp except. data reg */ | 242 | __u64 fdr; /* IA32 fp except. data reg */ |
243 | __u64 old_k1; /* old value of ar.k1 */ | 243 | __u64 old_k1; /* old value of ar.k1 */ |
244 | __u64 old_iob; /* old IOBase value */ | 244 | __u64 old_iob; /* old IOBase value */ |
245 | struct partial_page_list *ppl; /* partial page list for 4K page size issue */ | 245 | struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */ |
246 | /* cached TLS descriptors. */ | 246 | /* cached TLS descriptors. */ |
247 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; | 247 | struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES]; |
248 | 248 | ||
249 | # define INIT_THREAD_IA32 .eflag = 0, \ | 249 | # define INIT_THREAD_IA32 .eflag = 0, \ |
250 | .fsr = 0, \ | 250 | .fsr = 0, \ |
251 | .fcr = 0x17800000037fULL, \ | 251 | .fcr = 0x17800000037fULL, \ |
252 | .fir = 0, \ | 252 | .fir = 0, \ |
253 | .fdr = 0, \ | 253 | .fdr = 0, \ |
254 | .old_k1 = 0, \ | 254 | .old_k1 = 0, \ |
255 | .old_iob = 0, \ | 255 | .old_iob = 0, \ |
256 | .ppl = NULL, | 256 | .ppl = NULL, |
257 | #else | 257 | #else |
258 | # define INIT_THREAD_IA32 | 258 | # define INIT_THREAD_IA32 |
259 | #endif /* CONFIG_IA32_SUPPORT */ | 259 | #endif /* CONFIG_IA32_SUPPORT */ |
260 | #ifdef CONFIG_PERFMON | 260 | #ifdef CONFIG_PERFMON |
261 | void *pfm_context; /* pointer to detailed PMU context */ | 261 | void *pfm_context; /* pointer to detailed PMU context */ |
262 | unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */ | 262 | unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */ |
263 | # define INIT_THREAD_PM .pfm_context = NULL, \ | 263 | # define INIT_THREAD_PM .pfm_context = NULL, \ |
264 | .pfm_needs_checking = 0UL, | 264 | .pfm_needs_checking = 0UL, |
265 | #else | 265 | #else |
266 | # define INIT_THREAD_PM | 266 | # define INIT_THREAD_PM |
267 | #endif | 267 | #endif |
268 | __u64 dbr[IA64_NUM_DBG_REGS]; | 268 | __u64 dbr[IA64_NUM_DBG_REGS]; |
269 | __u64 ibr[IA64_NUM_DBG_REGS]; | 269 | __u64 ibr[IA64_NUM_DBG_REGS]; |
270 | struct ia64_fpreg fph[96]; /* saved/loaded on demand */ | 270 | struct ia64_fpreg fph[96]; /* saved/loaded on demand */ |
271 | }; | 271 | }; |
272 | 272 | ||
273 | #define INIT_THREAD { \ | 273 | #define INIT_THREAD { \ |
274 | .flags = 0, \ | 274 | .flags = 0, \ |
275 | .on_ustack = 0, \ | 275 | .on_ustack = 0, \ |
276 | .ksp = 0, \ | 276 | .ksp = 0, \ |
277 | .map_base = DEFAULT_MAP_BASE, \ | 277 | .map_base = DEFAULT_MAP_BASE, \ |
278 | .rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \ | 278 | .rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \ |
279 | .task_size = DEFAULT_TASK_SIZE, \ | 279 | .task_size = DEFAULT_TASK_SIZE, \ |
280 | .last_fph_cpu = -1, \ | 280 | .last_fph_cpu = -1, \ |
281 | INIT_THREAD_IA32 \ | 281 | INIT_THREAD_IA32 \ |
282 | INIT_THREAD_PM \ | 282 | INIT_THREAD_PM \ |
283 | .dbr = {0, }, \ | 283 | .dbr = {0, }, \ |
284 | .ibr = {0, }, \ | 284 | .ibr = {0, }, \ |
285 | .fph = {{{{0}}}, } \ | 285 | .fph = {{{{0}}}, } \ |
286 | } | 286 | } |
287 | 287 | ||
288 | #define start_thread(regs,new_ip,new_sp) do { \ | 288 | #define start_thread(regs,new_ip,new_sp) do { \ |
289 | set_fs(USER_DS); \ | 289 | set_fs(USER_DS); \ |
290 | regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL)) \ | 290 | regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL)) \ |
291 | & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \ | 291 | & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS)); \ |
292 | regs->cr_iip = new_ip; \ | 292 | regs->cr_iip = new_ip; \ |
293 | regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \ | 293 | regs->ar_rsc = 0xf; /* eager mode, privilege level 3 */ \ |
294 | regs->ar_rnat = 0; \ | 294 | regs->ar_rnat = 0; \ |
295 | regs->ar_bspstore = current->thread.rbs_bot; \ | 295 | regs->ar_bspstore = current->thread.rbs_bot; \ |
296 | regs->ar_fpsr = FPSR_DEFAULT; \ | 296 | regs->ar_fpsr = FPSR_DEFAULT; \ |
297 | regs->loadrs = 0; \ | 297 | regs->loadrs = 0; \ |
298 | regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \ | 298 | regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \ |
299 | regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \ | 299 | regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \ |
300 | if (unlikely(!get_dumpable(current->mm))) { \ | 300 | if (unlikely(!get_dumpable(current->mm))) { \ |
301 | /* \ | 301 | /* \ |
302 | * Zap scratch regs to avoid leaking bits between processes with different \ | 302 | * Zap scratch regs to avoid leaking bits between processes with different \ |
303 | * uid/privileges. \ | 303 | * uid/privileges. \ |
304 | */ \ | 304 | */ \ |
305 | regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0; \ | 305 | regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0; \ |
306 | regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0; \ | 306 | regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0; \ |
307 | } \ | 307 | } \ |
308 | } while (0) | 308 | } while (0) |
309 | 309 | ||
310 | /* Forward declarations, a strange C thing... */ | 310 | /* Forward declarations, a strange C thing... */ |
311 | struct mm_struct; | 311 | struct mm_struct; |
312 | struct task_struct; | 312 | struct task_struct; |
313 | 313 | ||
314 | /* | 314 | /* |
315 | * Free all resources held by a thread. This is called after the | 315 | * Free all resources held by a thread. This is called after the |
316 | * parent of DEAD_TASK has collected the exit status of the task via | 316 | * parent of DEAD_TASK has collected the exit status of the task via |
317 | * wait(). | 317 | * wait(). |
318 | */ | 318 | */ |
319 | #define release_thread(dead_task) | 319 | #define release_thread(dead_task) |
320 | 320 | ||
321 | /* Prepare to copy thread state - unlazy all lazy status */ | 321 | /* Prepare to copy thread state - unlazy all lazy status */ |
322 | #define prepare_to_copy(tsk) do { } while (0) | 322 | #define prepare_to_copy(tsk) do { } while (0) |
323 | 323 | ||
324 | /* | 324 | /* |
325 | * This is the mechanism for creating a new kernel thread. | 325 | * This is the mechanism for creating a new kernel thread. |
326 | * | 326 | * |
327 | * NOTE 1: Only a kernel-only process (i.e. the swapper or direct | 327 | * NOTE 1: Only a kernel-only process (i.e. the swapper or direct |
328 | * descendants who haven't done an "execve()") should use this: it | 328 | * descendants who haven't done an "execve()") should use this: it |
329 | * will work within a system call from a "real" process, but the | 329 | * will work within a system call from a "real" process, but the |
330 | * process memory space will not be freed until both the parent and | 330 | * process memory space will not be freed until both the parent and |
331 | * the child have exited. | 331 | * the child have exited. |
332 | * | 332 | * |
333 | * NOTE 2: This MUST NOT be an inlined function. Otherwise, we get | 333 | * NOTE 2: This MUST NOT be an inlined function. Otherwise, we get |
334 | * into trouble in init/main.c when the child thread returns to | 334 | * into trouble in init/main.c when the child thread returns to |
335 | * do_basic_setup() and the timing is such that free_initmem() has | 335 | * do_basic_setup() and the timing is such that free_initmem() has |
336 | * been called already. | 336 | * been called already. |
337 | */ | 337 | */ |
338 | extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags); | 338 | extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags); |
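A hedged sketch of the classic calling pattern the notes above describe; the worker function and clone-flag combination are illustrative, not from this commit:

    /* sketch: spawn a kernel-only thread sharing fs/files with the caller */
    static int worker(void *arg)
    {
            /* kernel-only work; per NOTE 1, never execve() from here */
            return 0;
    }

    static void spawn_worker(void)
    {
            kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
    }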
339 | 339 | ||
340 | /* Get wait channel for task P. */ | 340 | /* Get wait channel for task P. */ |
341 | extern unsigned long get_wchan (struct task_struct *p); | 341 | extern unsigned long get_wchan (struct task_struct *p); |
342 | 342 | ||
343 | /* Return instruction pointer of blocked task TSK. */ | 343 | /* Return instruction pointer of blocked task TSK. */ |
344 | #define KSTK_EIP(tsk) \ | 344 | #define KSTK_EIP(tsk) \ |
345 | ({ \ | 345 | ({ \ |
346 | struct pt_regs *_regs = task_pt_regs(tsk); \ | 346 | struct pt_regs *_regs = task_pt_regs(tsk); \ |
347 | _regs->cr_iip + ia64_psr(_regs)->ri; \ | 347 | _regs->cr_iip + ia64_psr(_regs)->ri; \ |
348 | }) | 348 | }) |
349 | 349 | ||
350 | /* Return stack pointer of blocked task TSK. */ | 350 | /* Return stack pointer of blocked task TSK. */ |
351 | #define KSTK_ESP(tsk) ((tsk)->thread.ksp) | 351 | #define KSTK_ESP(tsk) ((tsk)->thread.ksp) |
352 | 352 | ||
353 | extern void ia64_getreg_unknown_kr (void); | 353 | extern void ia64_getreg_unknown_kr (void); |
354 | extern void ia64_setreg_unknown_kr (void); | 354 | extern void ia64_setreg_unknown_kr (void); |
355 | 355 | ||
356 | #define ia64_get_kr(regnum) \ | 356 | #define ia64_get_kr(regnum) \ |
357 | ({ \ | 357 | ({ \ |
358 | unsigned long r = 0; \ | 358 | unsigned long r = 0; \ |
359 | \ | 359 | \ |
360 | switch (regnum) { \ | 360 | switch (regnum) { \ |
361 | case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \ | 361 | case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break; \ |
362 | case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \ | 362 | case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break; \ |
363 | case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \ | 363 | case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break; \ |
364 | case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \ | 364 | case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break; \ |
365 | case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \ | 365 | case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break; \ |
366 | case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \ | 366 | case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break; \ |
367 | case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \ | 367 | case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break; \ |
368 | case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \ | 368 | case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break; \ |
369 | default: ia64_getreg_unknown_kr(); break; \ | 369 | default: ia64_getreg_unknown_kr(); break; \ |
370 | } \ | 370 | } \ |
371 | r; \ | 371 | r; \ |
372 | }) | 372 | }) |
373 | 373 | ||
374 | #define ia64_set_kr(regnum, r) \ | 374 | #define ia64_set_kr(regnum, r) \ |
375 | ({ \ | 375 | ({ \ |
376 | switch (regnum) { \ | 376 | switch (regnum) { \ |
377 | case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \ | 377 | case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \ |
378 | case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \ | 378 | case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \ |
379 | case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \ | 379 | case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \ |
380 | case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \ | 380 | case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \ |
381 | case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \ | 381 | case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \ |
382 | case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \ | 382 | case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \ |
383 | case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \ | 383 | case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \ |
384 | case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \ | 384 | case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \ |
385 | default: ia64_setreg_unknown_kr(); break; \ | 385 | default: ia64_setreg_unknown_kr(); break; \ |
386 | } \ | 386 | } \ |
387 | }) | 387 | }) |
388 | 388 | ||
389 | /* | 389 | /* |
390 | * The following three macros can't be inline functions because we don't have struct | 390 | * The following three macros can't be inline functions because we don't have struct |
391 | * task_struct at this point. | 391 | * task_struct at this point. |
392 | */ | 392 | */ |
393 | 393 | ||
394 | /* | 394 | /* |
395 | * Return TRUE if task T owns the fph partition of the CPU we're running on. | 395 | * Return TRUE if task T owns the fph partition of the CPU we're running on. |
396 | * Must be called from code that has preemption disabled. | 396 | * Must be called from code that has preemption disabled. |
397 | */ | 397 | */ |
398 | #define ia64_is_local_fpu_owner(t) \ | 398 | #define ia64_is_local_fpu_owner(t) \ |
399 | ({ \ | 399 | ({ \ |
400 | struct task_struct *__ia64_islfo_task = (t); \ | 400 | struct task_struct *__ia64_islfo_task = (t); \ |
401 | (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \ | 401 | (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \ |
402 | && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \ | 402 | && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \ |
403 | }) | 403 | }) |
404 | 404 | ||
405 | /* | 405 | /* |
406 | * Mark task T as owning the fph partition of the CPU we're running on. | 406 | * Mark task T as owning the fph partition of the CPU we're running on. |
407 | * Must be called from code that has preemption disabled. | 407 | * Must be called from code that has preemption disabled. |
408 | */ | 408 | */ |
409 | #define ia64_set_local_fpu_owner(t) do { \ | 409 | #define ia64_set_local_fpu_owner(t) do { \ |
410 | struct task_struct *__ia64_slfo_task = (t); \ | 410 | struct task_struct *__ia64_slfo_task = (t); \ |
411 | __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \ | 411 | __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \ |
412 | ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task); \ | 412 | ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task); \ |
413 | } while (0) | 413 | } while (0) |
414 | 414 | ||
415 | /* Mark the fph partition of task T as being invalid on all CPUs. */ | 415 | /* Mark the fph partition of task T as being invalid on all CPUs. */ |
416 | #define ia64_drop_fpu(t) ((t)->thread.last_fph_cpu = -1) | 416 | #define ia64_drop_fpu(t) ((t)->thread.last_fph_cpu = -1) |
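Taken together, these three macros implement lazy switching of the high FP partition: nothing is saved or restored at context switch unless a task actually touches f32-f127. A simplified sketch of the fault-side logic (mirroring, not quoting, the real disabled-fph fault handler; preemption is assumed disabled, and IA64_THREAD_FPH_VALID is the flag defined just above this section):

    /* sketch: on a disabled-fph fault, claim the partition lazily */
    static void lazy_fph_fault(struct task_struct *task)
    {
            if (ia64_is_local_fpu_owner(task))
                    return;                 /* f32-f127 are still ours */

            ia64_fph_enable();
            if (task->thread.flags & IA64_THREAD_FPH_VALID)
                    __ia64_load_fpu(task->thread.fph);  /* restore saved copy */
            else
                    __ia64_init_fpu();                  /* first use: zero fph */
            ia64_set_local_fpu_owner(task);
    }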
417 | 417 | ||
418 | extern void __ia64_init_fpu (void); | 418 | extern void __ia64_init_fpu (void); |
419 | extern void __ia64_save_fpu (struct ia64_fpreg *fph); | 419 | extern void __ia64_save_fpu (struct ia64_fpreg *fph); |
420 | extern void __ia64_load_fpu (struct ia64_fpreg *fph); | 420 | extern void __ia64_load_fpu (struct ia64_fpreg *fph); |
421 | extern void ia64_save_debug_regs (unsigned long *save_area); | 421 | extern void ia64_save_debug_regs (unsigned long *save_area); |
422 | extern void ia64_load_debug_regs (unsigned long *save_area); | 422 | extern void ia64_load_debug_regs (unsigned long *save_area); |
423 | 423 | ||
424 | #ifdef CONFIG_IA32_SUPPORT | 424 | #ifdef CONFIG_IA32_SUPPORT |
425 | extern void ia32_save_state (struct task_struct *task); | 425 | extern void ia32_save_state (struct task_struct *task); |
426 | extern void ia32_load_state (struct task_struct *task); | 426 | extern void ia32_load_state (struct task_struct *task); |
427 | #endif | 427 | #endif |
428 | 428 | ||
429 | #define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) | 429 | #define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) |
430 | #define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) | 430 | #define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0) |
431 | 431 | ||
432 | /* load fp 0.0 into fph */ | 432 | /* load fp 0.0 into fph */ |
433 | static inline void | 433 | static inline void |
434 | ia64_init_fpu (void) { | 434 | ia64_init_fpu (void) { |
435 | ia64_fph_enable(); | 435 | ia64_fph_enable(); |
436 | __ia64_init_fpu(); | 436 | __ia64_init_fpu(); |
437 | ia64_fph_disable(); | 437 | ia64_fph_disable(); |
438 | } | 438 | } |
439 | 439 | ||
440 | /* save f32-f127 at FPH */ | 440 | /* save f32-f127 at FPH */ |
441 | static inline void | 441 | static inline void |
442 | ia64_save_fpu (struct ia64_fpreg *fph) { | 442 | ia64_save_fpu (struct ia64_fpreg *fph) { |
443 | ia64_fph_enable(); | 443 | ia64_fph_enable(); |
444 | __ia64_save_fpu(fph); | 444 | __ia64_save_fpu(fph); |
445 | ia64_fph_disable(); | 445 | ia64_fph_disable(); |
446 | } | 446 | } |
447 | 447 | ||
448 | /* load f32-f127 from FPH */ | 448 | /* load f32-f127 from FPH */ |
449 | static inline void | 449 | static inline void |
450 | ia64_load_fpu (struct ia64_fpreg *fph) { | 450 | ia64_load_fpu (struct ia64_fpreg *fph) { |
451 | ia64_fph_enable(); | 451 | ia64_fph_enable(); |
452 | __ia64_load_fpu(fph); | 452 | __ia64_load_fpu(fph); |
453 | ia64_fph_disable(); | 453 | ia64_fph_disable(); |
454 | } | 454 | } |
455 | 455 | ||
456 | static inline __u64 | 456 | static inline __u64 |
457 | ia64_clear_ic (void) | 457 | ia64_clear_ic (void) |
458 | { | 458 | { |
459 | __u64 psr; | 459 | __u64 psr; |
460 | psr = ia64_getreg(_IA64_REG_PSR); | 460 | psr = ia64_getreg(_IA64_REG_PSR); |
461 | ia64_stop(); | 461 | ia64_stop(); |
462 | ia64_rsm(IA64_PSR_I | IA64_PSR_IC); | 462 | ia64_rsm(IA64_PSR_I | IA64_PSR_IC); |
463 | ia64_srlz_i(); | 463 | ia64_srlz_i(); |
464 | return psr; | 464 | return psr; |
465 | } | 465 | } |
466 | 466 | ||
467 | /* | 467 | /* |
468 | * Restore the psr. | 468 | * Restore the psr. |
469 | */ | 469 | */ |
470 | static inline void | 470 | static inline void |
471 | ia64_set_psr (__u64 psr) | 471 | ia64_set_psr (__u64 psr) |
472 | { | 472 | { |
473 | ia64_stop(); | 473 | ia64_stop(); |
474 | ia64_setreg(_IA64_REG_PSR_L, psr); | 474 | ia64_setreg(_IA64_REG_PSR_L, psr); |
475 | ia64_srlz_d(); | 475 | ia64_srlz_d(); |
476 | } | 476 | } |
477 | 477 | ||
478 | /* | 478 | /* |
479 | * Insert a translation into an instruction and/or data translation | 479 | * Insert a translation into an instruction and/or data translation |
480 | * register. | 480 | * register. |
481 | */ | 481 | */ |
482 | static inline void | 482 | static inline void |
483 | ia64_itr (__u64 target_mask, __u64 tr_num, | 483 | ia64_itr (__u64 target_mask, __u64 tr_num, |
484 | __u64 vmaddr, __u64 pte, | 484 | __u64 vmaddr, __u64 pte, |
485 | __u64 log_page_size) | 485 | __u64 log_page_size) |
486 | { | 486 | { |
487 | ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2)); | 487 | ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2)); |
488 | ia64_setreg(_IA64_REG_CR_IFA, vmaddr); | 488 | ia64_setreg(_IA64_REG_CR_IFA, vmaddr); |
489 | ia64_stop(); | 489 | ia64_stop(); |
490 | if (target_mask & 0x1) | 490 | if (target_mask & 0x1) |
491 | ia64_itri(tr_num, pte); | 491 | ia64_itri(tr_num, pte); |
492 | if (target_mask & 0x2) | 492 | if (target_mask & 0x2) |
493 | ia64_itrd(tr_num, pte); | 493 | ia64_itrd(tr_num, pte); |
494 | } | 494 | } |
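ia64_clear_ic() and ia64_set_psr() above exist to bracket exactly this operation: a translation-register insert must run with PSR.ic and interrupts off. A hedged sketch of the usual sequence (the TR slot number is illustrative):

    /* sketch: pin one kernel mapping in both the I-side and D-side TRs */
    static void pin_kernel_mapping(unsigned long vaddr, unsigned long pte,
                                   unsigned long log_size)
    {
            unsigned long psr;

            psr = ia64_clear_ic();                   /* psr.i and psr.ic off */
            ia64_itr(0x3, 2, vaddr, pte, log_size);  /* 0x3 = itr.i and itr.d */
            ia64_set_psr(psr);                       /* restore saved psr */
            ia64_srlz_i();
    }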
495 | 495 | ||
496 | /* | 496 | /* |
497 | * Insert a translation into the instruction and/or data translation | 497 | * Insert a translation into the instruction and/or data translation |
498 | * cache. | 498 | * cache. |
499 | */ | 499 | */ |
500 | static inline void | 500 | static inline void |
501 | ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, | 501 | ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte, |
502 | __u64 log_page_size) | 502 | __u64 log_page_size) |
503 | { | 503 | { |
504 | ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2)); | 504 | ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2)); |
505 | ia64_setreg(_IA64_REG_CR_IFA, vmaddr); | 505 | ia64_setreg(_IA64_REG_CR_IFA, vmaddr); |
506 | ia64_stop(); | 506 | ia64_stop(); |
507 | /* as per EAS2.6, itc must be the last instruction in an instruction group */ | 507 | /* as per EAS2.6, itc must be the last instruction in an instruction group */ |
508 | if (target_mask & 0x1) | 508 | if (target_mask & 0x1) |
509 | ia64_itci(pte); | 509 | ia64_itci(pte); |
510 | if (target_mask & 0x2) | 510 | if (target_mask & 0x2) |
511 | ia64_itcd(pte); | 511 | ia64_itcd(pte); |
512 | } | 512 | } |
513 | 513 | ||
514 | /* | 514 | /* |
515 | * Purge a range of addresses from instruction and/or data translation | 515 | * Purge a range of addresses from instruction and/or data translation |
516 | * register(s). | 516 | * register(s). |
517 | */ | 517 | */ |
518 | static inline void | 518 | static inline void |
519 | ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size) | 519 | ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size) |
520 | { | 520 | { |
521 | if (target_mask & 0x1) | 521 | if (target_mask & 0x1) |
522 | ia64_ptri(vmaddr, (log_size << 2)); | 522 | ia64_ptri(vmaddr, (log_size << 2)); |
523 | if (target_mask & 0x2) | 523 | if (target_mask & 0x2) |
524 | ia64_ptrd(vmaddr, (log_size << 2)); | 524 | ia64_ptrd(vmaddr, (log_size << 2)); |
525 | } | 525 | } |
526 | 526 | ||
527 | /* Set the interrupt vector address. The address must be suitably aligned (32KB). */ | 527 | /* Set the interrupt vector address. The address must be suitably aligned (32KB). */ |
528 | static inline void | 528 | static inline void |
529 | ia64_set_iva (void *ivt_addr) | 529 | ia64_set_iva (void *ivt_addr) |
530 | { | 530 | { |
531 | ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr); | 531 | ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr); |
532 | ia64_srlz_i(); | 532 | ia64_srlz_i(); |
533 | } | 533 | } |
534 | 534 | ||
535 | /* Set the page table address and control bits. */ | 535 | /* Set the page table address and control bits. */ |
536 | static inline void | 536 | static inline void |
537 | ia64_set_pta (__u64 pta) | 537 | ia64_set_pta (__u64 pta) |
538 | { | 538 | { |
539 | /* Note: srlz.i implies srlz.d */ | 539 | /* Note: srlz.i implies srlz.d */ |
540 | ia64_setreg(_IA64_REG_CR_PTA, pta); | 540 | ia64_setreg(_IA64_REG_CR_PTA, pta); |
541 | ia64_srlz_i(); | 541 | ia64_srlz_i(); |
542 | } | 542 | } |
543 | 543 | ||
544 | static inline void | 544 | static inline void |
545 | ia64_eoi (void) | 545 | ia64_eoi (void) |
546 | { | 546 | { |
547 | ia64_setreg(_IA64_REG_CR_EOI, 0); | 547 | ia64_setreg(_IA64_REG_CR_EOI, 0); |
548 | ia64_srlz_d(); | 548 | ia64_srlz_d(); |
549 | } | 549 | } |
550 | 550 | ||
551 | #define cpu_relax() ia64_hint(ia64_hint_pause) | 551 | #define cpu_relax() ia64_hint(ia64_hint_pause) |
552 | 552 | ||
553 | static inline int | 553 | static inline int |
554 | ia64_get_irr(unsigned int vector) | 554 | ia64_get_irr(unsigned int vector) |
555 | { | 555 | { |
556 | unsigned int reg = vector / 64; | 556 | unsigned int reg = vector / 64; |
557 | unsigned int bit = vector % 64; | 557 | unsigned int bit = vector % 64; |
558 | u64 irr; | 558 | u64 irr; |
559 | 559 | ||
560 | switch (reg) { | 560 | switch (reg) { |
561 | case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break; | 561 | case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break; |
562 | case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break; | 562 | case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break; |
563 | case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break; | 563 | case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break; |
564 | case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break; | 564 | case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break; |
565 | } | 565 | } |
566 | 566 | ||
567 | return test_bit(bit, &irr); | 567 | return test_bit(bit, &irr); |
568 | } | 568 | } |
569 | 569 | ||
570 | static inline void | 570 | static inline void |
571 | ia64_set_lrr0 (unsigned long val) | 571 | ia64_set_lrr0 (unsigned long val) |
572 | { | 572 | { |
573 | ia64_setreg(_IA64_REG_CR_LRR0, val); | 573 | ia64_setreg(_IA64_REG_CR_LRR0, val); |
574 | ia64_srlz_d(); | 574 | ia64_srlz_d(); |
575 | } | 575 | } |
576 | 576 | ||
577 | static inline void | 577 | static inline void |
578 | ia64_set_lrr1 (unsigned long val) | 578 | ia64_set_lrr1 (unsigned long val) |
579 | { | 579 | { |
580 | ia64_setreg(_IA64_REG_CR_LRR1, val); | 580 | ia64_setreg(_IA64_REG_CR_LRR1, val); |
581 | ia64_srlz_d(); | 581 | ia64_srlz_d(); |
582 | } | 582 | } |
583 | 583 | ||
584 | 584 | ||
585 | /* | 585 | /* |
586 | * Given the address to which a spill occurred, return the unat bit | 586 | * Given the address to which a spill occurred, return the unat bit |
587 | * number that corresponds to this address. | 587 | * number that corresponds to this address. |
588 | */ | 588 | */ |
589 | static inline __u64 | 589 | static inline __u64 |
590 | ia64_unat_pos (void *spill_addr) | 590 | ia64_unat_pos (void *spill_addr) |
591 | { | 591 | { |
592 | return ((__u64) spill_addr >> 3) & 0x3f; | 592 | return ((__u64) spill_addr >> 3) & 0x3f; |
593 | } | 593 | } |
594 | 594 | ||
595 | /* | 595 | /* |
596 | * Set the NaT bit of an integer register which was spilled at address | 596 | * Set the NaT bit of an integer register which was spilled at address |
597 | * SPILL_ADDR. UNAT is the mask to be updated. | 597 | * SPILL_ADDR. UNAT is the mask to be updated. |
598 | */ | 598 | */ |
599 | static inline void | 599 | static inline void |
600 | ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat) | 600 | ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat) |
601 | { | 601 | { |
602 | __u64 bit = ia64_unat_pos(spill_addr); | 602 | __u64 bit = ia64_unat_pos(spill_addr); |
603 | __u64 mask = 1UL << bit; | 603 | __u64 mask = 1UL << bit; |
604 | 604 | ||
605 | *unat = (*unat & ~mask) | (nat << bit); | 605 | *unat = (*unat & ~mask) | (nat << bit); |
606 | } | 606 | } |
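The bit number is simply bits 8:3 of the spill address, i.e. which 8-byte slot within the enclosing 512-byte window was written. A stand-alone check of the arithmetic:

    #include <stdio.h>

    /* same computation as ia64_unat_pos(), in plain user-space C */
    static unsigned long unat_pos(unsigned long spill_addr)
    {
            return (spill_addr >> 3) & 0x3f;
    }

    int main(void)
    {
            /* spill slots 8 bytes apart map to adjacent unat bits */
            printf("%lu %lu\n", unat_pos(0x1000), unat_pos(0x1008));  /* 0 1 */
            return 0;
    }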
607 | 607 | ||
608 | /* | 608 | /* |
609 | * Return saved PC of a blocked thread. | 609 | * Return saved PC of a blocked thread. |
610 | * Note that the only way T can block is through a call to schedule() -> switch_to(). | 610 | * Note that the only way T can block is through a call to schedule() -> switch_to(). |
611 | */ | 611 | */ |
612 | static inline unsigned long | 612 | static inline unsigned long |
613 | thread_saved_pc (struct task_struct *t) | 613 | thread_saved_pc (struct task_struct *t) |
614 | { | 614 | { |
615 | struct unw_frame_info info; | 615 | struct unw_frame_info info; |
616 | unsigned long ip; | 616 | unsigned long ip; |
617 | 617 | ||
618 | unw_init_from_blocked_task(&info, t); | 618 | unw_init_from_blocked_task(&info, t); |
619 | if (unw_unwind(&info) < 0) | 619 | if (unw_unwind(&info) < 0) |
620 | return 0; | 620 | return 0; |
621 | unw_get_ip(&info, &ip); | 621 | unw_get_ip(&info, &ip); |
622 | return ip; | 622 | return ip; |
623 | } | 623 | } |
624 | 624 | ||
625 | /* | 625 | /* |
626 | * Get the current instruction/program counter value. | 626 | * Get the current instruction/program counter value. |
627 | */ | 627 | */ |
628 | #define current_text_addr() \ | 628 | #define current_text_addr() \ |
629 | ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; }) | 629 | ({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; }) |
630 | 630 | ||
631 | static inline __u64 | 631 | static inline __u64 |
632 | ia64_get_ivr (void) | 632 | ia64_get_ivr (void) |
633 | { | 633 | { |
634 | __u64 r; | 634 | __u64 r; |
635 | ia64_srlz_d(); | 635 | ia64_srlz_d(); |
636 | r = ia64_getreg(_IA64_REG_CR_IVR); | 636 | r = ia64_getreg(_IA64_REG_CR_IVR); |
637 | ia64_srlz_d(); | 637 | ia64_srlz_d(); |
638 | return r; | 638 | return r; |
639 | } | 639 | } |
640 | 640 | ||
641 | static inline void | 641 | static inline void |
642 | ia64_set_dbr (__u64 regnum, __u64 value) | 642 | ia64_set_dbr (__u64 regnum, __u64 value) |
643 | { | 643 | { |
644 | __ia64_set_dbr(regnum, value); | 644 | __ia64_set_dbr(regnum, value); |
645 | #ifdef CONFIG_ITANIUM | 645 | #ifdef CONFIG_ITANIUM |
646 | ia64_srlz_d(); | 646 | ia64_srlz_d(); |
647 | #endif | 647 | #endif |
648 | } | 648 | } |
649 | 649 | ||
650 | static inline __u64 | 650 | static inline __u64 |
651 | ia64_get_dbr (__u64 regnum) | 651 | ia64_get_dbr (__u64 regnum) |
652 | { | 652 | { |
653 | __u64 retval; | 653 | __u64 retval; |
654 | 654 | ||
655 | retval = __ia64_get_dbr(regnum); | 655 | retval = __ia64_get_dbr(regnum); |
656 | #ifdef CONFIG_ITANIUM | 656 | #ifdef CONFIG_ITANIUM |
657 | ia64_srlz_d(); | 657 | ia64_srlz_d(); |
658 | #endif | 658 | #endif |
659 | return retval; | 659 | return retval; |
660 | } | 660 | } |
661 | 661 | ||
662 | static inline __u64 | 662 | static inline __u64 |
663 | ia64_rotr (__u64 w, __u64 n) | 663 | ia64_rotr (__u64 w, __u64 n) |
664 | { | 664 | { |
665 | return (w >> n) | (w << (64 - n)); | 665 | return (w >> n) | (w << (64 - n)); |
666 | } | 666 | } |
667 | 667 | ||
668 | #define ia64_rotl(w,n) ia64_rotr((w), (64) - (n)) | 668 | #define ia64_rotl(w,n) ia64_rotr((w), (64) - (n)) |
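One numeric example of the pair; note the C expression is only well defined for 0 < n < 64, since n == 0 would shift by 64:

    #include <stdio.h>

    /* same computation as ia64_rotr(), in plain user-space C */
    static unsigned long rotr(unsigned long w, unsigned long n)
    {
            return (w >> n) | (w << (64 - n));
    }

    int main(void)
    {
            /* rotating right by 8 moves the low byte up to the top byte */
            printf("%016lx\n", rotr(0xffUL, 8));    /* ff00000000000000 */
            return 0;
    }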
669 | 669 | ||
670 | /* | 670 | /* |
671 | * Take a mapped kernel address and return the equivalent address | 671 | * Take a mapped kernel address and return the equivalent address |
672 | * in the region 7 identity mapped virtual area. | 672 | * in the region 7 identity mapped virtual area. |
673 | */ | 673 | */ |
674 | static inline void * | 674 | static inline void * |
675 | ia64_imva (void *addr) | 675 | ia64_imva (void *addr) |
676 | { | 676 | { |
677 | void *result; | 677 | void *result; |
678 | result = (void *) ia64_tpa(addr); | 678 | result = (void *) ia64_tpa(addr); |
679 | return __va(result); | 679 | return __va(result); |
680 | } | 680 | } |
681 | 681 | ||
682 | #define ARCH_HAS_PREFETCH | 682 | #define ARCH_HAS_PREFETCH |
683 | #define ARCH_HAS_PREFETCHW | 683 | #define ARCH_HAS_PREFETCHW |
684 | #define ARCH_HAS_SPINLOCK_PREFETCH | 684 | #define ARCH_HAS_SPINLOCK_PREFETCH |
685 | #define PREFETCH_STRIDE L1_CACHE_BYTES | 685 | #define PREFETCH_STRIDE L1_CACHE_BYTES |
686 | 686 | ||
687 | static inline void | 687 | static inline void |
688 | prefetch (const void *x) | 688 | prefetch (const void *x) |
689 | { | 689 | { |
690 | ia64_lfetch(ia64_lfhint_none, x); | 690 | ia64_lfetch(ia64_lfhint_none, x); |
691 | } | 691 | } |
692 | 692 | ||
693 | static inline void | 693 | static inline void |
694 | prefetchw (const void *x) | 694 | prefetchw (const void *x) |
695 | { | 695 | { |
696 | ia64_lfetch_excl(ia64_lfhint_none, x); | 696 | ia64_lfetch_excl(ia64_lfhint_none, x); |
697 | } | 697 | } |
698 | 698 | ||
699 | #define spin_lock_prefetch(x) prefetchw(x) | 699 | #define spin_lock_prefetch(x) prefetchw(x) |
700 | 700 | ||
701 | extern unsigned long boot_option_idle_override; | 701 | extern unsigned long boot_option_idle_override; |
702 | 702 | ||
703 | #endif /* !__ASSEMBLY__ */ | 703 | #endif /* !__ASSEMBLY__ */ |
704 | 704 | ||
705 | #endif /* _ASM_IA64_PROCESSOR_H */ | 705 | #endif /* _ASM_IA64_PROCESSOR_H */ |
706 | 706 ||