Commit 2d4712b7a615e5db3eb9a427f1722eec79681b4b

Authored by Linus Torvalds

Merge branch 'parisc-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc fixes from Helge Deller:
 "This patchset includes a bugfix to prevent a kernel crash when memory
  in page zero is accessed by the kernel itself, e.g. via
  probe_kernel_read().

  Furthermore, we now export flush_cache_page(), which is needed
  (indirectly) by the lustre filesystem.  The other patches remove
  unused functions and optimize the page fault handler so that it only
  evaluates variables when needed, which again protects against
  possible kernel crashes"

* 'parisc-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: let probe_kernel_read() capture access to page zero
  parisc: optimize variable initialization in do_page_fault
  parisc: fix interruption handler to respect pagefault_disable()
  parisc: mark parisc_terminate() noreturn and cold.
  parisc: remove unused syscall_ipi() function.
  parisc: kill SMP single function call interrupt
  parisc: Export flush_cache_page() (needed by lustre)
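
For context, probe_kernel_read() from <linux/uaccess.h> is the interface the
first patch makes safe on parisc: it copies from a kernel address that may be
invalid and returns -EFAULT instead of taking a fatal fault. A minimal sketch
of a caller relying on that contract (illustrative only, not code from this
series):

	#include <linux/uaccess.h>

	/* Read one word from a possibly-invalid kernel address, e.g. a
	 * NULL/page-zero pointer. With the parisc fix, the access faults
	 * gracefully and -EFAULT is returned instead of crashing.
	 */
	static long peek_kernel_word(const void *addr, unsigned long *val)
	{
		return probe_kernel_read(val, addr, sizeof(*val));
	}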

Showing 6 changed files (inline diff; removed lines are prefixed with '-', added lines with '+'):

arch/parisc/include/asm/traps.h
#ifndef __ASM_TRAPS_H
#define __ASM_TRAPS_H

#ifdef __KERNEL__
struct pt_regs;

/* traps.c */
void parisc_terminate(char *msg, struct pt_regs *regs,
-		int code, unsigned long offset);
+		int code, unsigned long offset) __noreturn __cold;

/* mm/fault.c */
void do_page_fault(struct pt_regs *regs, unsigned long code,
	unsigned long address);
#endif

#endif

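The added annotations are the kernel's wrappers around GCC function
attributes; roughly (a sketch of the definitions in the kernel's compiler
headers of this era — exact spelling and location may differ):

	#define __noreturn	__attribute__((noreturn))  /* never returns; better codegen/warnings */
	#define __cold		__attribute__((cold))      /* rarely executed; keep off hot paths */
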
arch/parisc/kernel/cache.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special. As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far. setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2:	/* pcxl2 doesn't support space register hashing */
		return;

	default:	/* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush only alias mapping.
		 * This guarantees that the page is no-longer in the
		 * cache for any process and nor may it be
		 * speculatively read in (until the user or kernel
		 * specifically accesses it, of course) */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
{
	clear_page_asm(vto);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping. No coherency is needed
	   (all in kmap/kunmap) on machines that don't support
	   non-equivalent aliasing. However, the `from' page
	   needs to be flushed before it can be accessed through
	   the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	preempt_enable();
	copy_page_asm(vto, vfrom);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
	if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;

	/* Note: purge_tlb_entries can be called at startup with
	   no context. */

	purge_tlb_start(flags);
	mtsp(mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(purge_tlb_entries);

void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)	/* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		purge_tlb_start(flags);
		mtsp(sid, 1);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				ptep = pte_offset_map(pmd, addr);
		}
	}
	return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc. So, avoid it if the mm isn't too big. */
	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if ((vma->vm_flags & VM_EXEC) == 0)
				continue;
			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
		}
	}
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start,end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start,end);
	else
		flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	unsigned long addr;
	pgd_t *pgd;

	BUG_ON(!vma->vm_mm->context);

	if ((end - start) >= parisc_cache_flush_threshold) {
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn))
			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	if (pfn_valid(pfn)) {
		flush_tlb_page(vma, vmaddr);
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
	}
}
+EXPORT_SYMBOL_GPL(flush_cache_page);

#ifdef CONFIG_PARISC_TMPALIAS

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *vto;
	unsigned long flags;

	/* Clear using TMPALIAS region. The page doesn't need to
	   be flushed but the kernel mapping needs to be purged. */

	vto = kmap_atomic(page);

	/* The PA-RISC 2.0 Architecture book states on page F-6:
	   "Before a write-capable translation is enabled, *all*
	   non-equivalently-aliased translations must be removed
	   from the page table and purged from the TLB. (Note
	   that the caches are not required to be flushed at this
	   time.) Before any non-equivalent aliased translation
	   is re-enabled, the virtual address range for the writeable
	   page (the entire page) must be flushed from the cache,
	   and the write-capable translation removed from the page
	   table and purged from the TLB." */

	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	purge_tlb_end(flags);
	preempt_disable();
	clear_user_page_asm(vto, vaddr);
	preempt_enable();

	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;
	unsigned long flags;

	/* Copy using TMPALIAS region. This has the advantage
	   that the `from' page doesn't need to be flushed. However,
	   the `to' page must be flushed in copy_user_page_asm since
	   it can be used to bring in executable code. */

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);

	purge_kernel_dcache_page_asm((unsigned long)vto);
	purge_tlb_start(flags);
	pdtlb_kernel(vto);
	pdtlb_kernel(vfrom);
	purge_tlb_end(flags);
	preempt_disable();
	copy_user_page_asm(vto, vfrom, vaddr);
	flush_dcache_page_asm(__pa(vto), vaddr);
	preempt_enable();

	pagefault_enable();		/* kunmap_atomic(addr, KM_USER1); */
	pagefault_enable();		/* kunmap_atomic(addr, KM_USER0); */
}

#endif /* CONFIG_PARISC_TMPALIAS */

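The only functional change in this file is the new EXPORT_SYMBOL_GPL at the
bottom: with it, GPL-licensed modules (here, code pulled in indirectly by the
lustre filesystem) can link against flush_cache_page(). A hypothetical GPL
module would use it like this (illustrative sketch, not lustre's actual code):

	#include <linux/module.h>
	#include <linux/mm.h>
	#include <asm/cacheflush.h>

	/* Hypothetical helper: make one user page coherent before another
	 * mapping or a device looks at it. flush_cache_page() is now
	 * resolvable at module load time on parisc.
	 */
	static void sync_user_page(struct vm_area_struct *vma,
				   unsigned long vmaddr, unsigned long pfn)
	{
		flush_cache_page(vma, vmaddr, pfn);
	}

	MODULE_LICENSE("GPL");	/* required: the symbol is exported GPL-only */
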
arch/parisc/kernel/smp.c
1 /* 1 /*
2 ** SMP Support 2 ** SMP Support
3 ** 3 **
4 ** Copyright (C) 1999 Walt Drummond <drummond@valinux.com> 4 ** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
5 ** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com> 5 ** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
6 ** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org> 6 ** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
7 ** 7 **
8 ** Lots of stuff stolen from arch/alpha/kernel/smp.c 8 ** Lots of stuff stolen from arch/alpha/kernel/smp.c
9 ** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^) 9 ** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
10 ** 10 **
11 ** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work. 11 ** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
12 ** -grant (1/12/2001) 12 ** -grant (1/12/2001)
13 ** 13 **
14 ** This program is free software; you can redistribute it and/or modify 14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License as published by 15 ** it under the terms of the GNU General Public License as published by
16 ** the Free Software Foundation; either version 2 of the License, or 16 ** the Free Software Foundation; either version 2 of the License, or
17 ** (at your option) any later version. 17 ** (at your option) any later version.
18 */ 18 */
19 #include <linux/types.h> 19 #include <linux/types.h>
20 #include <linux/spinlock.h> 20 #include <linux/spinlock.h>
21 21
22 #include <linux/kernel.h> 22 #include <linux/kernel.h>
23 #include <linux/module.h> 23 #include <linux/module.h>
24 #include <linux/sched.h> 24 #include <linux/sched.h>
25 #include <linux/init.h> 25 #include <linux/init.h>
26 #include <linux/interrupt.h> 26 #include <linux/interrupt.h>
27 #include <linux/smp.h> 27 #include <linux/smp.h>
28 #include <linux/kernel_stat.h> 28 #include <linux/kernel_stat.h>
29 #include <linux/mm.h> 29 #include <linux/mm.h>
30 #include <linux/err.h> 30 #include <linux/err.h>
31 #include <linux/delay.h> 31 #include <linux/delay.h>
32 #include <linux/bitops.h> 32 #include <linux/bitops.h>
33 #include <linux/ftrace.h> 33 #include <linux/ftrace.h>
34 #include <linux/cpu.h> 34 #include <linux/cpu.h>
35 35
36 #include <linux/atomic.h> 36 #include <linux/atomic.h>
37 #include <asm/current.h> 37 #include <asm/current.h>
38 #include <asm/delay.h> 38 #include <asm/delay.h>
39 #include <asm/tlbflush.h> 39 #include <asm/tlbflush.h>
40 40
41 #include <asm/io.h> 41 #include <asm/io.h>
42 #include <asm/irq.h> /* for CPU_IRQ_REGION and friends */ 42 #include <asm/irq.h> /* for CPU_IRQ_REGION and friends */
43 #include <asm/mmu_context.h> 43 #include <asm/mmu_context.h>
44 #include <asm/page.h> 44 #include <asm/page.h>
45 #include <asm/pgtable.h> 45 #include <asm/pgtable.h>
46 #include <asm/pgalloc.h> 46 #include <asm/pgalloc.h>
47 #include <asm/processor.h> 47 #include <asm/processor.h>
48 #include <asm/ptrace.h> 48 #include <asm/ptrace.h>
49 #include <asm/unistd.h> 49 #include <asm/unistd.h>
50 #include <asm/cacheflush.h> 50 #include <asm/cacheflush.h>
51 51
52 #undef DEBUG_SMP 52 #undef DEBUG_SMP
53 #ifdef DEBUG_SMP 53 #ifdef DEBUG_SMP
54 static int smp_debug_lvl = 0; 54 static int smp_debug_lvl = 0;
55 #define smp_debug(lvl, printargs...) \ 55 #define smp_debug(lvl, printargs...) \
56 if (lvl >= smp_debug_lvl) \ 56 if (lvl >= smp_debug_lvl) \
57 printk(printargs); 57 printk(printargs);
58 #else 58 #else
59 #define smp_debug(lvl, ...) do { } while(0) 59 #define smp_debug(lvl, ...) do { } while(0)
60 #endif /* DEBUG_SMP */ 60 #endif /* DEBUG_SMP */
61 61
62 volatile struct task_struct *smp_init_current_idle_task; 62 volatile struct task_struct *smp_init_current_idle_task;
63 63
64 /* track which CPU is booting */ 64 /* track which CPU is booting */
65 static volatile int cpu_now_booting; 65 static volatile int cpu_now_booting;
66 66
67 static int parisc_max_cpus = 1; 67 static int parisc_max_cpus = 1;
68 68
69 static DEFINE_PER_CPU(spinlock_t, ipi_lock); 69 static DEFINE_PER_CPU(spinlock_t, ipi_lock);
70 70
71 enum ipi_message_type { 71 enum ipi_message_type {
72 IPI_NOP=0, 72 IPI_NOP=0,
73 IPI_RESCHEDULE=1, 73 IPI_RESCHEDULE=1,
74 IPI_CALL_FUNC, 74 IPI_CALL_FUNC,
75 IPI_CALL_FUNC_SINGLE,
76 IPI_CPU_START, 75 IPI_CPU_START,
77 IPI_CPU_STOP, 76 IPI_CPU_STOP,
78 IPI_CPU_TEST 77 IPI_CPU_TEST
79 }; 78 };
80 79
81 80
82 /********** SMP inter processor interrupt and communication routines */ 81 /********** SMP inter processor interrupt and communication routines */
83 82
84 #undef PER_CPU_IRQ_REGION 83 #undef PER_CPU_IRQ_REGION
85 #ifdef PER_CPU_IRQ_REGION 84 #ifdef PER_CPU_IRQ_REGION
86 /* XXX REVISIT Ignore for now. 85 /* XXX REVISIT Ignore for now.
87 ** *May* need this "hook" to register IPI handler 86 ** *May* need this "hook" to register IPI handler
88 ** once we have perCPU ExtIntr switch tables. 87 ** once we have perCPU ExtIntr switch tables.
89 */ 88 */
90 static void 89 static void
91 ipi_init(int cpuid) 90 ipi_init(int cpuid)
92 { 91 {
93 #error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region 92 #error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region
94 93
95 if(cpu_online(cpuid) ) 94 if(cpu_online(cpuid) )
96 { 95 {
97 switch_to_idle_task(current); 96 switch_to_idle_task(current);
98 } 97 }
99 98
100 return; 99 return;
101 } 100 }
102 #endif 101 #endif
103 102
104 103
105 /* 104 /*
106 ** Yoink this CPU from the runnable list... 105 ** Yoink this CPU from the runnable list...
107 ** 106 **
108 */ 107 */
109 static void 108 static void
110 halt_processor(void) 109 halt_processor(void)
111 { 110 {
112 /* REVISIT : redirect I/O Interrupts to another CPU? */ 111 /* REVISIT : redirect I/O Interrupts to another CPU? */
113 /* REVISIT : does PM *know* this CPU isn't available? */ 112 /* REVISIT : does PM *know* this CPU isn't available? */
114 set_cpu_online(smp_processor_id(), false); 113 set_cpu_online(smp_processor_id(), false);
115 local_irq_disable(); 114 local_irq_disable();
116 for (;;) 115 for (;;)
117 ; 116 ;
118 } 117 }
119 118
120 119
121 irqreturn_t __irq_entry 120 irqreturn_t __irq_entry
122 ipi_interrupt(int irq, void *dev_id) 121 ipi_interrupt(int irq, void *dev_id)
123 { 122 {
124 int this_cpu = smp_processor_id(); 123 int this_cpu = smp_processor_id();
125 struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu); 124 struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
126 unsigned long ops; 125 unsigned long ops;
127 unsigned long flags; 126 unsigned long flags;
128 127
129 /* Count this now; we may make a call that never returns. */ 128 /* Count this now; we may make a call that never returns. */
130 inc_irq_stat(irq_call_count); 129 inc_irq_stat(irq_call_count);
131 130
132 mb(); /* Order interrupt and bit testing. */ 131 mb(); /* Order interrupt and bit testing. */
133 132
134 for (;;) { 133 for (;;) {
135 spinlock_t *lock = &per_cpu(ipi_lock, this_cpu); 134 spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
136 spin_lock_irqsave(lock, flags); 135 spin_lock_irqsave(lock, flags);
137 ops = p->pending_ipi; 136 ops = p->pending_ipi;
138 p->pending_ipi = 0; 137 p->pending_ipi = 0;
139 spin_unlock_irqrestore(lock, flags); 138 spin_unlock_irqrestore(lock, flags);
140 139
141 mb(); /* Order bit clearing and data access. */ 140 mb(); /* Order bit clearing and data access. */
142 141
143 if (!ops) 142 if (!ops)
144 break; 143 break;
145 144
146 while (ops) { 145 while (ops) {
147 unsigned long which = ffz(~ops); 146 unsigned long which = ffz(~ops);
148 147
149 ops &= ~(1 << which); 148 ops &= ~(1 << which);
150 149
151 switch (which) { 150 switch (which) {
152 case IPI_NOP: 151 case IPI_NOP:
153 smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu); 152 smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
154 break; 153 break;
155 154
156 case IPI_RESCHEDULE: 155 case IPI_RESCHEDULE:
157 smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu); 156 smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
158 inc_irq_stat(irq_resched_count); 157 inc_irq_stat(irq_resched_count);
159 scheduler_ipi(); 158 scheduler_ipi();
160 break; 159 break;
161 160
162 case IPI_CALL_FUNC: 161 case IPI_CALL_FUNC:
163 smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu); 162 smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
164 generic_smp_call_function_interrupt(); 163 generic_smp_call_function_interrupt();
165 break; 164 break;
166 165
167 case IPI_CALL_FUNC_SINGLE:
168 smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
169 generic_smp_call_function_single_interrupt();
170 break;
171
172 case IPI_CPU_START: 166 case IPI_CPU_START:
173 smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu); 167 smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
174 break; 168 break;
175 169
176 case IPI_CPU_STOP: 170 case IPI_CPU_STOP:
177 smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu); 171 smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
178 halt_processor(); 172 halt_processor();
179 break; 173 break;
180 174
181 case IPI_CPU_TEST: 175 case IPI_CPU_TEST:
182 smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu); 176 smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
183 break; 177 break;
184 178
185 default: 179 default:
186 printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n", 180 printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
187 this_cpu, which); 181 this_cpu, which);
188 return IRQ_NONE; 182 return IRQ_NONE;
189 } /* Switch */ 183 } /* Switch */
190 /* let in any pending interrupts */ 184 /* let in any pending interrupts */
191 local_irq_enable(); 185 local_irq_enable();
192 local_irq_disable(); 186 local_irq_disable();
193 } /* while (ops) */ 187 } /* while (ops) */
194 } 188 }
195 return IRQ_HANDLED; 189 return IRQ_HANDLED;
196 } 190 }
197 191
198 192
199 static inline void 193 static inline void
200 ipi_send(int cpu, enum ipi_message_type op) 194 ipi_send(int cpu, enum ipi_message_type op)
201 { 195 {
202 struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu); 196 struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
203 spinlock_t *lock = &per_cpu(ipi_lock, cpu); 197 spinlock_t *lock = &per_cpu(ipi_lock, cpu);
204 unsigned long flags; 198 unsigned long flags;
205 199
206 spin_lock_irqsave(lock, flags); 200 spin_lock_irqsave(lock, flags);
207 p->pending_ipi |= 1 << op; 201 p->pending_ipi |= 1 << op;
208 gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa); 202 gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
209 spin_unlock_irqrestore(lock, flags); 203 spin_unlock_irqrestore(lock, flags);
210 } 204 }
211 205
212 static void 206 static void
213 send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op) 207 send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
214 { 208 {
215 int cpu; 209 int cpu;
216 210
217 for_each_cpu(cpu, mask) 211 for_each_cpu(cpu, mask)
218 ipi_send(cpu, op); 212 ipi_send(cpu, op);
219 } 213 }
220 214
221 static inline void 215 static inline void
222 send_IPI_single(int dest_cpu, enum ipi_message_type op) 216 send_IPI_single(int dest_cpu, enum ipi_message_type op)
223 { 217 {
224 BUG_ON(dest_cpu == NO_PROC_ID); 218 BUG_ON(dest_cpu == NO_PROC_ID);
225 219
226 ipi_send(dest_cpu, op); 220 ipi_send(dest_cpu, op);
227 } 221 }
228 222
229 static inline void 223 static inline void
230 send_IPI_allbutself(enum ipi_message_type op) 224 send_IPI_allbutself(enum ipi_message_type op)
231 { 225 {
232 int i; 226 int i;
233 227
234 for_each_online_cpu(i) { 228 for_each_online_cpu(i) {
235 if (i != smp_processor_id()) 229 if (i != smp_processor_id())
236 send_IPI_single(i, op); 230 send_IPI_single(i, op);
237 } 231 }
238 } 232 }
239 233
240 234
241 inline void 235 inline void
242 smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); } 236 smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); }
243 237
244 static inline void 238 static inline void
245 smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); } 239 smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); }
246 240
247 void 241 void
248 smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); } 242 smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
249 243
250 void 244 void
251 smp_send_all_nop(void) 245 smp_send_all_nop(void)
252 { 246 {
253 send_IPI_allbutself(IPI_NOP); 247 send_IPI_allbutself(IPI_NOP);
254 } 248 }
255 249
256 void arch_send_call_function_ipi_mask(const struct cpumask *mask) 250 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
257 { 251 {
258 send_IPI_mask(mask, IPI_CALL_FUNC); 252 send_IPI_mask(mask, IPI_CALL_FUNC);
259 } 253 }
260 254
261 void arch_send_call_function_single_ipi(int cpu) 255 void arch_send_call_function_single_ipi(int cpu)
262 { 256 {
263 send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); 257 send_IPI_single(cpu, IPI_CALL_FUNC);
264 } 258 }
265 259
266 /* 260 /*
267 * Called by secondaries to update state and initialize CPU registers. 261 * Called by secondaries to update state and initialize CPU registers.
268 */ 262 */
269 static void __init 263 static void __init
270 smp_cpu_init(int cpunum) 264 smp_cpu_init(int cpunum)
271 { 265 {
272 extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */ 266 extern int init_per_cpu(int); /* arch/parisc/kernel/processor.c */
273 extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */ 267 extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
274 extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */ 268 extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */
275 269
276 /* Set modes and Enable floating point coprocessor */ 270 /* Set modes and Enable floating point coprocessor */
277 (void) init_per_cpu(cpunum); 271 (void) init_per_cpu(cpunum);
278 272
279 disable_sr_hashing(); 273 disable_sr_hashing();
280 274
281 mb(); 275 mb();
282 276
283 /* Well, support 2.4 linux scheme as well. */ 277 /* Well, support 2.4 linux scheme as well. */
284 if (cpu_online(cpunum)) { 278 if (cpu_online(cpunum)) {
285 extern void machine_halt(void); /* arch/parisc.../process.c */ 279 extern void machine_halt(void); /* arch/parisc.../process.c */
286 280
287 printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum); 281 printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
288 machine_halt(); 282 machine_halt();
289 } 283 }
290 284
291 notify_cpu_starting(cpunum); 285 notify_cpu_starting(cpunum);
292 286
293 set_cpu_online(cpunum, true); 287 set_cpu_online(cpunum, true);
294 288
295 /* Initialise the idle task for this CPU */ 289 /* Initialise the idle task for this CPU */
296 atomic_inc(&init_mm.mm_count); 290 atomic_inc(&init_mm.mm_count);
297 current->active_mm = &init_mm; 291 current->active_mm = &init_mm;
298 BUG_ON(current->mm); 292 BUG_ON(current->mm);
299 enter_lazy_tlb(&init_mm, current); 293 enter_lazy_tlb(&init_mm, current);
300 294
301 init_IRQ(); /* make sure no IRQs are enabled or pending */ 295 init_IRQ(); /* make sure no IRQs are enabled or pending */
302 start_cpu_itimer(); 296 start_cpu_itimer();
303 } 297 }
304 298
305 299
306 /* 300 /*
307 * Slaves start using C here. Indirectly called from smp_slave_stext. 301 * Slaves start using C here. Indirectly called from smp_slave_stext.
308 * Do what start_kernel() and main() do for boot strap processor (aka monarch) 302 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
309 */ 303 */
310 void __init smp_callin(void) 304 void __init smp_callin(void)
311 { 305 {
312 int slave_id = cpu_now_booting; 306 int slave_id = cpu_now_booting;
313 307
314 smp_cpu_init(slave_id); 308 smp_cpu_init(slave_id);
315 preempt_disable(); 309 preempt_disable();
316 310
317 flush_cache_all_local(); /* start with known state */ 311 flush_cache_all_local(); /* start with known state */
318 flush_tlb_all_local(NULL); 312 flush_tlb_all_local(NULL);
319 313
320 local_irq_enable(); /* Interrupts have been off until now */ 314 local_irq_enable(); /* Interrupts have been off until now */
321 315
322 cpu_startup_entry(CPUHP_ONLINE); 316 cpu_startup_entry(CPUHP_ONLINE);
323 317
324 /* NOTREACHED */ 318 /* NOTREACHED */
325 panic("smp_callin() AAAAaaaaahhhh....\n"); 319 panic("smp_callin() AAAAaaaaahhhh....\n");
326 } 320 }
327 321
328 /* 322 /*
329 * Bring one cpu online. 323 * Bring one cpu online.
330 */ 324 */
331 int smp_boot_one_cpu(int cpuid, struct task_struct *idle) 325 int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
332 { 326 {
333 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); 327 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
334 long timeout; 328 long timeout;
335 329
336 task_thread_info(idle)->cpu = cpuid; 330 task_thread_info(idle)->cpu = cpuid;
337 331
338 /* Let _start know what logical CPU we're booting 332 /* Let _start know what logical CPU we're booting
339 ** (offset into init_tasks[],cpu_data[]) 333 ** (offset into init_tasks[],cpu_data[])
340 */ 334 */
341 cpu_now_booting = cpuid; 335 cpu_now_booting = cpuid;
342 336
343 /* 337 /*
344 ** bootstrap code needs to know the task address since 338 ** bootstrap code needs to know the task address since
345 ** it also contains the process stack. 339 ** it also contains the process stack.
346 */ 340 */
347 smp_init_current_idle_task = idle ; 341 smp_init_current_idle_task = idle ;
348 mb(); 342 mb();
349 343
350 printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa); 344 printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);
351 345
352 /* 346 /*
353 ** This gets PDC to release the CPU from a very tight loop. 347 ** This gets PDC to release the CPU from a very tight loop.
354 ** 348 **
355 ** From the PA-RISC 2.0 Firmware Architecture Reference Specification: 349 ** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
356 ** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which 350 ** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
357 ** is executed after receiving the rendezvous signal (an interrupt to 351 ** is executed after receiving the rendezvous signal (an interrupt to
358 ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the 352 ** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
359 ** contents of memory are valid." 353 ** contents of memory are valid."
360 */ 354 */
361 gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa); 355 gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
362 mb(); 356 mb();
363 357
364 /* 358 /*
365 * OK, wait a bit for that CPU to finish staggering about. 359 * OK, wait a bit for that CPU to finish staggering about.
366 * Slave will set a bit when it reaches smp_cpu_init(). 360 * Slave will set a bit when it reaches smp_cpu_init().
367 * Once the "monarch CPU" sees the bit change, it can move on. 361 * Once the "monarch CPU" sees the bit change, it can move on.
368 */ 362 */
369 for (timeout = 0; timeout < 10000; timeout++) { 363 for (timeout = 0; timeout < 10000; timeout++) {
370 if(cpu_online(cpuid)) { 364 if(cpu_online(cpuid)) {
371 /* Which implies Slave has started up */ 365 /* Which implies Slave has started up */
372 cpu_now_booting = 0; 366 cpu_now_booting = 0;
373 smp_init_current_idle_task = NULL; 367 smp_init_current_idle_task = NULL;
374 goto alive ; 368 goto alive ;
375 } 369 }
376 udelay(100); 370 udelay(100);
377 barrier(); 371 barrier();
378 } 372 }
379 printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid); 373 printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
380 return -1; 374 return -1;
381 375
382 alive: 376 alive:
383 /* Remember the Slave data */ 377 /* Remember the Slave data */
384 smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n", 378 smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
385 cpuid, timeout * 100); 379 cpuid, timeout * 100);
386 return 0; 380 return 0;
387 } 381 }
388 382
389 void __init smp_prepare_boot_cpu(void) 383 void __init smp_prepare_boot_cpu(void)
390 { 384 {
391 int bootstrap_processor = per_cpu(cpu_data, 0).cpuid; 385 int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;
392 386
393 /* Setup BSP mappings */ 387 /* Setup BSP mappings */
394 printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor); 388 printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
395 389
396 set_cpu_online(bootstrap_processor, true); 390 set_cpu_online(bootstrap_processor, true);
397 set_cpu_present(bootstrap_processor, true); 391 set_cpu_present(bootstrap_processor, true);
398 } 392 }
399 393
400 394
401 395
402 /* 396 /*
403 ** inventory.c:do_inventory() hasn't yet been run and thus we 397 ** inventory.c:do_inventory() hasn't yet been run and thus we
404 ** don't 'discover' the additional CPUs until later. 398 ** don't 'discover' the additional CPUs until later.
405 */ 399 */
406 void __init smp_prepare_cpus(unsigned int max_cpus) 400 void __init smp_prepare_cpus(unsigned int max_cpus)
407 { 401 {
408 int cpu; 402 int cpu;
409 403
410 for_each_possible_cpu(cpu) 404 for_each_possible_cpu(cpu)
411 spin_lock_init(&per_cpu(ipi_lock, cpu)); 405 spin_lock_init(&per_cpu(ipi_lock, cpu));
412 406
413 init_cpu_present(cpumask_of(0)); 407 init_cpu_present(cpumask_of(0));
414 408
415 parisc_max_cpus = max_cpus; 409 parisc_max_cpus = max_cpus;
416 if (!max_cpus) 410 if (!max_cpus)
417 printk(KERN_INFO "SMP mode deactivated.\n"); 411 printk(KERN_INFO "SMP mode deactivated.\n");
418 } 412 }
419 413
420 414
421 void smp_cpus_done(unsigned int cpu_max) 415 void smp_cpus_done(unsigned int cpu_max)
422 { 416 {
423 return; 417 return;
424 } 418 }
425 419
426 420
427 int __cpu_up(unsigned int cpu, struct task_struct *tidle) 421 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
428 { 422 {
429 if (cpu != 0 && cpu < parisc_max_cpus) 423 if (cpu != 0 && cpu < parisc_max_cpus)
430 smp_boot_one_cpu(cpu, tidle); 424 smp_boot_one_cpu(cpu, tidle);
431 425
432 return cpu_online(cpu) ? 0 : -ENOSYS; 426 return cpu_online(cpu) ? 0 : -ENOSYS;
433 } 427 }
434 428
435 #ifdef CONFIG_PROC_FS 429 #ifdef CONFIG_PROC_FS
436 int __init 430 int __init
437 setup_profiling_timer(unsigned int multiplier) 431 setup_profiling_timer(unsigned int multiplier)
438 { 432 {
439 return -EINVAL; 433 return -EINVAL;
440 } 434 }
441 #endif 435 #endif
442 436
arch/parisc/kernel/traps.c
1 /* 1 /*
2 * linux/arch/parisc/traps.c 2 * linux/arch/parisc/traps.c
3 * 3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds 4 * Copyright (C) 1991, 1992 Linus Torvalds
5 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org> 5 * Copyright (C) 1999, 2000 Philipp Rumpf <prumpf@tux.org>
6 */ 6 */
7 7
8 /* 8 /*
9 * 'Traps.c' handles hardware traps and faults after we have saved some 9 * 'Traps.c' handles hardware traps and faults after we have saved some
10 * state in 'asm.s'. 10 * state in 'asm.s'.
11 */ 11 */
12 12
13 #include <linux/sched.h> 13 #include <linux/sched.h>
14 #include <linux/kernel.h> 14 #include <linux/kernel.h>
15 #include <linux/string.h> 15 #include <linux/string.h>
16 #include <linux/errno.h> 16 #include <linux/errno.h>
17 #include <linux/ptrace.h> 17 #include <linux/ptrace.h>
18 #include <linux/timer.h> 18 #include <linux/timer.h>
19 #include <linux/delay.h> 19 #include <linux/delay.h>
20 #include <linux/mm.h> 20 #include <linux/mm.h>
21 #include <linux/module.h> 21 #include <linux/module.h>
22 #include <linux/smp.h> 22 #include <linux/smp.h>
23 #include <linux/spinlock.h> 23 #include <linux/spinlock.h>
24 #include <linux/init.h> 24 #include <linux/init.h>
25 #include <linux/interrupt.h> 25 #include <linux/interrupt.h>
26 #include <linux/console.h> 26 #include <linux/console.h>
27 #include <linux/bug.h> 27 #include <linux/bug.h>
28 28
29 #include <asm/assembly.h> 29 #include <asm/assembly.h>
30 #include <asm/uaccess.h> 30 #include <asm/uaccess.h>
31 #include <asm/io.h> 31 #include <asm/io.h>
32 #include <asm/irq.h> 32 #include <asm/irq.h>
33 #include <asm/traps.h> 33 #include <asm/traps.h>
34 #include <asm/unaligned.h> 34 #include <asm/unaligned.h>
35 #include <linux/atomic.h> 35 #include <linux/atomic.h>
36 #include <asm/smp.h> 36 #include <asm/smp.h>
37 #include <asm/pdc.h> 37 #include <asm/pdc.h>
38 #include <asm/pdc_chassis.h> 38 #include <asm/pdc_chassis.h>
39 #include <asm/unwind.h> 39 #include <asm/unwind.h>
40 #include <asm/tlbflush.h> 40 #include <asm/tlbflush.h>
41 #include <asm/cacheflush.h> 41 #include <asm/cacheflush.h>
42 42
43 #include "../math-emu/math-emu.h" /* for handle_fpe() */ 43 #include "../math-emu/math-emu.h" /* for handle_fpe() */
44 44
45 #define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */ 45 #define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
46 /* dumped to the console via printk) */ 46 /* dumped to the console via printk) */
47 47
48 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 48 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
49 DEFINE_SPINLOCK(pa_dbit_lock); 49 DEFINE_SPINLOCK(pa_dbit_lock);
50 #endif 50 #endif
51 51
52 static void parisc_show_stack(struct task_struct *task, unsigned long *sp, 52 static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
53 struct pt_regs *regs); 53 struct pt_regs *regs);
54 54
55 static int printbinary(char *buf, unsigned long x, int nbits) 55 static int printbinary(char *buf, unsigned long x, int nbits)
56 { 56 {
57 unsigned long mask = 1UL << (nbits - 1); 57 unsigned long mask = 1UL << (nbits - 1);
58 while (mask != 0) { 58 while (mask != 0) {
59 *buf++ = (mask & x ? '1' : '0'); 59 *buf++ = (mask & x ? '1' : '0');
60 mask >>= 1; 60 mask >>= 1;
61 } 61 }
62 *buf = '\0'; 62 *buf = '\0';
63 63
64 return nbits; 64 return nbits;
65 } 65 }
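For reference, a quick sketch of what printbinary() produces (value chosen arbitrarily):

char buf[9];                 /* 8 digits plus the NUL */
printbinary(buf, 0xa5, 8);
/* buf now holds "10100101" */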
66 66
67 #ifdef CONFIG_64BIT 67 #ifdef CONFIG_64BIT
68 #define RFMT "%016lx" 68 #define RFMT "%016lx"
69 #else 69 #else
70 #define RFMT "%08lx" 70 #define RFMT "%08lx"
71 #endif 71 #endif
72 #define FFMT "%016llx" /* fpregs are 64-bit always */ 72 #define FFMT "%016llx" /* fpregs are 64-bit always */
73 73
74 #define PRINTREGS(lvl,r,f,fmt,x) \ 74 #define PRINTREGS(lvl,r,f,fmt,x) \
75 printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \ 75 printk("%s%s%02d-%02d " fmt " " fmt " " fmt " " fmt "\n", \
76 lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \ 76 lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1], \
77 (r)[(x)+2], (r)[(x)+3]) 77 (r)[(x)+2], (r)[(x)+3])
78 78
79 static void print_gr(char *level, struct pt_regs *regs) 79 static void print_gr(char *level, struct pt_regs *regs)
80 { 80 {
81 int i; 81 int i;
82 char buf[64]; 82 char buf[64];
83 83
84 printk("%s\n", level); 84 printk("%s\n", level);
85 printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level); 85 printk("%s YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
86 printbinary(buf, regs->gr[0], 32); 86 printbinary(buf, regs->gr[0], 32);
87 printk("%sPSW: %s %s\n", level, buf, print_tainted()); 87 printk("%sPSW: %s %s\n", level, buf, print_tainted());
88 88
89 for (i = 0; i < 32; i += 4) 89 for (i = 0; i < 32; i += 4)
90 PRINTREGS(level, regs->gr, "r", RFMT, i); 90 PRINTREGS(level, regs->gr, "r", RFMT, i);
91 } 91 }
92 92
93 static void print_fr(char *level, struct pt_regs *regs) 93 static void print_fr(char *level, struct pt_regs *regs)
94 { 94 {
95 int i; 95 int i;
96 char buf[64]; 96 char buf[64];
97 struct { u32 sw[2]; } s; 97 struct { u32 sw[2]; } s;
98 98
99 /* FR are 64bit everywhere. Need to use asm to get the content 99 /* FR are 64bit everywhere. Need to use asm to get the content
100 * of fpsr/fper1, and we assume that we won't have a FP Identify 100 * of fpsr/fper1, and we assume that we won't have a FP Identify
101 * in our way, otherwise we're screwed. 101 * in our way, otherwise we're screwed.
102 * The fldd is used to restore the T-bit if there was one, as the 102 * The fldd is used to restore the T-bit if there was one, as the
103 * store clears it anyway. 103 * store clears it anyway.
104 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */ 104 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
105 asm volatile ("fstd %%fr0,0(%1) \n\t" 105 asm volatile ("fstd %%fr0,0(%1) \n\t"
106 "fldd 0(%1),%%fr0 \n\t" 106 "fldd 0(%1),%%fr0 \n\t"
107 : "=m" (s) : "r" (&s) : "r0"); 107 : "=m" (s) : "r" (&s) : "r0");
108 108
109 printk("%s\n", level); 109 printk("%s\n", level);
110 printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level); 110 printk("%s VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
111 printbinary(buf, s.sw[0], 32); 111 printbinary(buf, s.sw[0], 32);
112 printk("%sFPSR: %s\n", level, buf); 112 printk("%sFPSR: %s\n", level, buf);
113 printk("%sFPER1: %08x\n", level, s.sw[1]); 113 printk("%sFPER1: %08x\n", level, s.sw[1]);
114 114
115 /* here we'll print fr0 again, though it'll be meaningless */ 115 /* here we'll print fr0 again, though it'll be meaningless */
116 for (i = 0; i < 32; i += 4) 116 for (i = 0; i < 32; i += 4)
117 PRINTREGS(level, regs->fr, "fr", FFMT, i); 117 PRINTREGS(level, regs->fr, "fr", FFMT, i);
118 } 118 }
119 119
120 void show_regs(struct pt_regs *regs) 120 void show_regs(struct pt_regs *regs)
121 { 121 {
122 int i, user; 122 int i, user;
123 char *level; 123 char *level;
124 unsigned long cr30, cr31; 124 unsigned long cr30, cr31;
125 125
126 user = user_mode(regs); 126 user = user_mode(regs);
127 level = user ? KERN_DEBUG : KERN_CRIT; 127 level = user ? KERN_DEBUG : KERN_CRIT;
128 128
129 show_regs_print_info(level); 129 show_regs_print_info(level);
130 130
131 print_gr(level, regs); 131 print_gr(level, regs);
132 132
133 for (i = 0; i < 8; i += 4) 133 for (i = 0; i < 8; i += 4)
134 PRINTREGS(level, regs->sr, "sr", RFMT, i); 134 PRINTREGS(level, regs->sr, "sr", RFMT, i);
135 135
136 if (user) 136 if (user)
137 print_fr(level, regs); 137 print_fr(level, regs);
138 138
139 cr30 = mfctl(30); 139 cr30 = mfctl(30);
140 cr31 = mfctl(31); 140 cr31 = mfctl(31);
141 printk("%s\n", level); 141 printk("%s\n", level);
142 printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n", 142 printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
143 level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]); 143 level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
144 printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n", 144 printk("%s IIR: %08lx ISR: " RFMT " IOR: " RFMT "\n",
145 level, regs->iir, regs->isr, regs->ior); 145 level, regs->iir, regs->isr, regs->ior);
146 printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n", 146 printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
147 level, current_thread_info()->cpu, cr30, cr31); 147 level, current_thread_info()->cpu, cr30, cr31);
148 printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28); 148 printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
149 149
150 if (user) { 150 if (user) {
151 printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]); 151 printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
152 printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]); 152 printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
153 printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]); 153 printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
154 } else { 154 } else {
155 printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]); 155 printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
156 printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]); 156 printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
157 printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]); 157 printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
158 158
159 parisc_show_stack(current, NULL, regs); 159 parisc_show_stack(current, NULL, regs);
160 } 160 }
161 } 161 }
162 162
163 static void do_show_stack(struct unwind_frame_info *info) 163 static void do_show_stack(struct unwind_frame_info *info)
164 { 164 {
165 int i = 1; 165 int i = 1;
166 166
167 printk(KERN_CRIT "Backtrace:\n"); 167 printk(KERN_CRIT "Backtrace:\n");
168 while (i <= 16) { 168 while (i <= 16) {
169 if (unwind_once(info) < 0 || info->ip == 0) 169 if (unwind_once(info) < 0 || info->ip == 0)
170 break; 170 break;
171 171
172 if (__kernel_text_address(info->ip)) { 172 if (__kernel_text_address(info->ip)) {
173 printk(KERN_CRIT " [<" RFMT ">] %pS\n", 173 printk(KERN_CRIT " [<" RFMT ">] %pS\n",
174 info->ip, (void *) info->ip); 174 info->ip, (void *) info->ip);
175 i++; 175 i++;
176 } 176 }
177 } 177 }
178 printk(KERN_CRIT "\n"); 178 printk(KERN_CRIT "\n");
179 } 179 }
180 180
181 static void parisc_show_stack(struct task_struct *task, unsigned long *sp, 181 static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
182 struct pt_regs *regs) 182 struct pt_regs *regs)
183 { 183 {
184 struct unwind_frame_info info; 184 struct unwind_frame_info info;
185 struct task_struct *t; 185 struct task_struct *t;
186 186
187 t = task ? task : current; 187 t = task ? task : current;
188 if (regs) { 188 if (regs) {
189 unwind_frame_init(&info, t, regs); 189 unwind_frame_init(&info, t, regs);
190 goto show_stack; 190 goto show_stack;
191 } 191 }
192 192
193 if (t == current) { 193 if (t == current) {
194 unsigned long sp; 194 unsigned long sp;
195 195
196 HERE: 196 HERE:
197 asm volatile ("copy %%r30, %0" : "=r"(sp)); 197 asm volatile ("copy %%r30, %0" : "=r"(sp));
198 { 198 {
199 struct pt_regs r; 199 struct pt_regs r;
200 200
201 memset(&r, 0, sizeof(struct pt_regs)); 201 memset(&r, 0, sizeof(struct pt_regs));
202 r.iaoq[0] = (unsigned long)&&HERE; 202 r.iaoq[0] = (unsigned long)&&HERE;
203 r.gr[2] = (unsigned long)__builtin_return_address(0); 203 r.gr[2] = (unsigned long)__builtin_return_address(0);
204 r.gr[30] = sp; 204 r.gr[30] = sp;
205 205
206 unwind_frame_init(&info, current, &r); 206 unwind_frame_init(&info, current, &r);
207 } 207 }
208 } else { 208 } else {
209 unwind_frame_init_from_blocked_task(&info, t); 209 unwind_frame_init_from_blocked_task(&info, t);
210 } 210 }
211 211
212 show_stack: 212 show_stack:
213 do_show_stack(&info); 213 do_show_stack(&info);
214 } 214 }
215 215
216 void show_stack(struct task_struct *t, unsigned long *sp) 216 void show_stack(struct task_struct *t, unsigned long *sp)
217 { 217 {
218 return parisc_show_stack(t, sp, NULL); 218 return parisc_show_stack(t, sp, NULL);
219 } 219 }
220 220
221 int is_valid_bugaddr(unsigned long iaoq) 221 int is_valid_bugaddr(unsigned long iaoq)
222 { 222 {
223 return 1; 223 return 1;
224 } 224 }
225 225
226 void die_if_kernel(char *str, struct pt_regs *regs, long err) 226 void die_if_kernel(char *str, struct pt_regs *regs, long err)
227 { 227 {
228 if (user_mode(regs)) { 228 if (user_mode(regs)) {
229 if (err == 0) 229 if (err == 0)
230 return; /* STFU */ 230 return; /* STFU */
231 231
232 printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", 232 printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
233 current->comm, task_pid_nr(current), str, err, regs->iaoq[0]); 233 current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
234 #ifdef PRINT_USER_FAULTS 234 #ifdef PRINT_USER_FAULTS
235 /* XXX for debugging only */ 235 /* XXX for debugging only */
236 show_regs(regs); 236 show_regs(regs);
237 #endif 237 #endif
238 return; 238 return;
239 } 239 }
240 240
241 oops_in_progress = 1; 241 oops_in_progress = 1;
242 242
243 oops_enter(); 243 oops_enter();
244 244
245 /* Amuse the user in a SPARC fashion */ 245 /* Amuse the user in a SPARC fashion */
246 if (err) printk(KERN_CRIT 246 if (err) printk(KERN_CRIT
247 " _______________________________ \n" 247 " _______________________________ \n"
248 " < Your System ate a SPARC! Gah! >\n" 248 " < Your System ate a SPARC! Gah! >\n"
249 " ------------------------------- \n" 249 " ------------------------------- \n"
250 " \\ ^__^\n" 250 " \\ ^__^\n"
251 " (__)\\ )\\/\\\n" 251 " (__)\\ )\\/\\\n"
252 " U ||----w |\n" 252 " U ||----w |\n"
253 " || ||\n"); 253 " || ||\n");
254 254
255 /* unlock the pdc lock if necessary */ 255 /* unlock the pdc lock if necessary */
256 pdc_emergency_unlock(); 256 pdc_emergency_unlock();
257 257
258 /* maybe the kernel hasn't booted very far yet and hasn't been able 258 /* maybe the kernel hasn't booted very far yet and hasn't been able
259 * to initialize the serial or STI console. In that case we should 259 * to initialize the serial or STI console. In that case we should
260 * re-enable the pdc console, so that the user will be able to 260 * re-enable the pdc console, so that the user will be able to
261 * identify the problem. */ 261 * identify the problem. */
262 if (!console_drivers) 262 if (!console_drivers)
263 pdc_console_restart(); 263 pdc_console_restart();
264 264
265 if (err) 265 if (err)
266 printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n", 266 printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
267 current->comm, task_pid_nr(current), str, err); 267 current->comm, task_pid_nr(current), str, err);
268 268
269 /* Wot's wrong wif bein' racy? */ 269 /* Wot's wrong wif bein' racy? */
270 if (current->thread.flags & PARISC_KERNEL_DEATH) { 270 if (current->thread.flags & PARISC_KERNEL_DEATH) {
271 printk(KERN_CRIT "%s() recursion detected.\n", __func__); 271 printk(KERN_CRIT "%s() recursion detected.\n", __func__);
272 local_irq_enable(); 272 local_irq_enable();
273 while (1); 273 while (1);
274 } 274 }
275 current->thread.flags |= PARISC_KERNEL_DEATH; 275 current->thread.flags |= PARISC_KERNEL_DEATH;
276 276
277 show_regs(regs); 277 show_regs(regs);
278 dump_stack(); 278 dump_stack();
279 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); 279 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
280 280
281 if (in_interrupt()) 281 if (in_interrupt())
282 panic("Fatal exception in interrupt"); 282 panic("Fatal exception in interrupt");
283 283
284 if (panic_on_oops) { 284 if (panic_on_oops) {
285 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); 285 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
286 ssleep(5); 286 ssleep(5);
287 panic("Fatal exception"); 287 panic("Fatal exception");
288 } 288 }
289 289
290 oops_exit(); 290 oops_exit();
291 do_exit(SIGSEGV); 291 do_exit(SIGSEGV);
292 } 292 }
293 293
294 int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
295 {
296 return syscall(regs);
297 }
298
299 /* gdb uses break 4,8 */ 294 /* gdb uses break 4,8 */
300 #define GDB_BREAK_INSN 0x10004 295 #define GDB_BREAK_INSN 0x10004
301 static void handle_gdb_break(struct pt_regs *regs, int wot) 296 static void handle_gdb_break(struct pt_regs *regs, int wot)
302 { 297 {
303 struct siginfo si; 298 struct siginfo si;
304 299
305 si.si_signo = SIGTRAP; 300 si.si_signo = SIGTRAP;
306 si.si_errno = 0; 301 si.si_errno = 0;
307 si.si_code = wot; 302 si.si_code = wot;
308 si.si_addr = (void __user *) (regs->iaoq[0] & ~3); 303 si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
309 force_sig_info(SIGTRAP, &si, current); 304 force_sig_info(SIGTRAP, &si, current);
310 } 305 }
311 306
312 static void handle_break(struct pt_regs *regs) 307 static void handle_break(struct pt_regs *regs)
313 { 308 {
314 unsigned iir = regs->iir; 309 unsigned iir = regs->iir;
315 310
316 if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) { 311 if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
317 /* check if a BUG() or WARN() trapped here. */ 312 /* check if a BUG() or WARN() trapped here. */
318 enum bug_trap_type tt; 313 enum bug_trap_type tt;
319 tt = report_bug(regs->iaoq[0] & ~3, regs); 314 tt = report_bug(regs->iaoq[0] & ~3, regs);
320 if (tt == BUG_TRAP_TYPE_WARN) { 315 if (tt == BUG_TRAP_TYPE_WARN) {
321 regs->iaoq[0] += 4; 316 regs->iaoq[0] += 4;
322 regs->iaoq[1] += 4; 317 regs->iaoq[1] += 4;
323 return; /* return to next instruction when WARN_ON(). */ 318 return; /* return to next instruction when WARN_ON(). */
324 } 319 }
325 die_if_kernel("Unknown kernel breakpoint", regs, 320 die_if_kernel("Unknown kernel breakpoint", regs,
326 (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0); 321 (tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
327 } 322 }
328 323
329 #ifdef PRINT_USER_FAULTS 324 #ifdef PRINT_USER_FAULTS
330 if (unlikely(iir != GDB_BREAK_INSN)) { 325 if (unlikely(iir != GDB_BREAK_INSN)) {
331 printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", 326 printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
332 iir & 31, (iir>>13) & ((1<<13)-1), 327 iir & 31, (iir>>13) & ((1<<13)-1),
333 task_pid_nr(current), current->comm); 328 task_pid_nr(current), current->comm);
334 show_regs(regs); 329 show_regs(regs);
335 } 330 }
336 #endif 331 #endif
337 332
338 /* send standard GDB signal */ 333 /* send standard GDB signal */
339 handle_gdb_break(regs, TRAP_BRKPT); 334 handle_gdb_break(regs, TRAP_BRKPT);
340 } 335 }
341 336
342 static void default_trap(int code, struct pt_regs *regs) 337 static void default_trap(int code, struct pt_regs *regs)
343 { 338 {
344 printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id()); 339 printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
345 show_regs(regs); 340 show_regs(regs);
346 } 341 }
347 342
348 void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap; 343 void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;
349 344
350 345
351 void transfer_pim_to_trap_frame(struct pt_regs *regs) 346 void transfer_pim_to_trap_frame(struct pt_regs *regs)
352 { 347 {
353 register int i; 348 register int i;
354 extern unsigned int hpmc_pim_data[]; 349 extern unsigned int hpmc_pim_data[];
355 struct pdc_hpmc_pim_11 *pim_narrow; 350 struct pdc_hpmc_pim_11 *pim_narrow;
356 struct pdc_hpmc_pim_20 *pim_wide; 351 struct pdc_hpmc_pim_20 *pim_wide;
357 352
358 if (boot_cpu_data.cpu_type >= pcxu) { 353 if (boot_cpu_data.cpu_type >= pcxu) {
359 354
360 pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data; 355 pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;
361 356
362 /* 357 /*
363 * Note: The following code will probably generate a 358 * Note: The following code will probably generate a
364 * bunch of truncation error warnings from the compiler. 359 * bunch of truncation error warnings from the compiler.
365 * Could be handled with an ifdef, but perhaps there 360 * Could be handled with an ifdef, but perhaps there
366 * is a better way. 361 * is a better way.
367 */ 362 */
368 363
369 regs->gr[0] = pim_wide->cr[22]; 364 regs->gr[0] = pim_wide->cr[22];
370 365
371 for (i = 1; i < 32; i++) 366 for (i = 1; i < 32; i++)
372 regs->gr[i] = pim_wide->gr[i]; 367 regs->gr[i] = pim_wide->gr[i];
373 368
374 for (i = 0; i < 32; i++) 369 for (i = 0; i < 32; i++)
375 regs->fr[i] = pim_wide->fr[i]; 370 regs->fr[i] = pim_wide->fr[i];
376 371
377 for (i = 0; i < 8; i++) 372 for (i = 0; i < 8; i++)
378 regs->sr[i] = pim_wide->sr[i]; 373 regs->sr[i] = pim_wide->sr[i];
379 374
380 regs->iasq[0] = pim_wide->cr[17]; 375 regs->iasq[0] = pim_wide->cr[17];
381 regs->iasq[1] = pim_wide->iasq_back; 376 regs->iasq[1] = pim_wide->iasq_back;
382 regs->iaoq[0] = pim_wide->cr[18]; 377 regs->iaoq[0] = pim_wide->cr[18];
383 regs->iaoq[1] = pim_wide->iaoq_back; 378 regs->iaoq[1] = pim_wide->iaoq_back;
384 379
385 regs->sar = pim_wide->cr[11]; 380 regs->sar = pim_wide->cr[11];
386 regs->iir = pim_wide->cr[19]; 381 regs->iir = pim_wide->cr[19];
387 regs->isr = pim_wide->cr[20]; 382 regs->isr = pim_wide->cr[20];
388 regs->ior = pim_wide->cr[21]; 383 regs->ior = pim_wide->cr[21];
389 } 384 }
390 else { 385 else {
391 pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data; 386 pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;
392 387
393 regs->gr[0] = pim_narrow->cr[22]; 388 regs->gr[0] = pim_narrow->cr[22];
394 389
395 for (i = 1; i < 32; i++) 390 for (i = 1; i < 32; i++)
396 regs->gr[i] = pim_narrow->gr[i]; 391 regs->gr[i] = pim_narrow->gr[i];
397 392
398 for (i = 0; i < 32; i++) 393 for (i = 0; i < 32; i++)
399 regs->fr[i] = pim_narrow->fr[i]; 394 regs->fr[i] = pim_narrow->fr[i];
400 395
401 for (i = 0; i < 8; i++) 396 for (i = 0; i < 8; i++)
402 regs->sr[i] = pim_narrow->sr[i]; 397 regs->sr[i] = pim_narrow->sr[i];
403 398
404 regs->iasq[0] = pim_narrow->cr[17]; 399 regs->iasq[0] = pim_narrow->cr[17];
405 regs->iasq[1] = pim_narrow->iasq_back; 400 regs->iasq[1] = pim_narrow->iasq_back;
406 regs->iaoq[0] = pim_narrow->cr[18]; 401 regs->iaoq[0] = pim_narrow->cr[18];
407 regs->iaoq[1] = pim_narrow->iaoq_back; 402 regs->iaoq[1] = pim_narrow->iaoq_back;
408 403
409 regs->sar = pim_narrow->cr[11]; 404 regs->sar = pim_narrow->cr[11];
410 regs->iir = pim_narrow->cr[19]; 405 regs->iir = pim_narrow->cr[19];
411 regs->isr = pim_narrow->cr[20]; 406 regs->isr = pim_narrow->cr[20];
412 regs->ior = pim_narrow->cr[21]; 407 regs->ior = pim_narrow->cr[21];
413 } 408 }
414 409
415 /* 410 /*
416 * The following fields only have meaning if we came through 411 * The following fields only have meaning if we came through
417 * another path. So just zero them here. 412 * another path. So just zero them here.
418 */ 413 */
419 414
420 regs->ksp = 0; 415 regs->ksp = 0;
421 regs->kpc = 0; 416 regs->kpc = 0;
422 regs->orig_r28 = 0; 417 regs->orig_r28 = 0;
423 } 418 }
424 419
425 420
426 /* 421 /*
427 * This routine is called as a last resort when everything else 422 * This routine is called as a last resort when everything else
428 * has gone clearly wrong. We get called for faults in kernel space, 423 * has gone clearly wrong. We get called for faults in kernel space,
429 * and HPMCs. 424 * and HPMCs.
430 */ 425 */
431 void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset) 426 void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
432 { 427 {
433 static DEFINE_SPINLOCK(terminate_lock); 428 static DEFINE_SPINLOCK(terminate_lock);
434 429
435 oops_in_progress = 1; 430 oops_in_progress = 1;
436 431
437 set_eiem(0); 432 set_eiem(0);
438 local_irq_disable(); 433 local_irq_disable();
439 spin_lock(&terminate_lock); 434 spin_lock(&terminate_lock);
440 435
441 /* unlock the pdc lock if necessary */ 436 /* unlock the pdc lock if necessary */
442 pdc_emergency_unlock(); 437 pdc_emergency_unlock();
443 438
444 /* restart pdc console if necessary */ 439 /* restart pdc console if necessary */
445 if (!console_drivers) 440 if (!console_drivers)
446 pdc_console_restart(); 441 pdc_console_restart();
447 442
448 /* Not all paths will gutter the processor... */ 443 /* Not all paths will gutter the processor... */
449 switch(code){ 444 switch(code){
450 445
451 case 1: 446 case 1:
452 transfer_pim_to_trap_frame(regs); 447 transfer_pim_to_trap_frame(regs);
453 break; 448 break;
454 449
455 default: 450 default:
456 /* Fall through */ 451 /* Fall through */
457 break; 452 break;
458 453
459 } 454 }
460 455
461 { 456 {
462 /* show_stack(NULL, (unsigned long *)regs->gr[30]); */ 457 /* show_stack(NULL, (unsigned long *)regs->gr[30]); */
463 struct unwind_frame_info info; 458 struct unwind_frame_info info;
464 unwind_frame_init(&info, current, regs); 459 unwind_frame_init(&info, current, regs);
465 do_show_stack(&info); 460 do_show_stack(&info);
466 } 461 }
467 462
468 printk("\n"); 463 printk("\n");
469 printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n", 464 printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
470 msg, code, regs, offset); 465 msg, code, regs, offset);
471 show_regs(regs); 466 show_regs(regs);
472 467
473 spin_unlock(&terminate_lock); 468 spin_unlock(&terminate_lock);
474 469
475 /* put soft power button back under hardware control; 470 /* put soft power button back under hardware control;
476 * if the user had pressed it once at any time, the 471 * if the user had pressed it once at any time, the
477 * system will shut down immediately right here. */ 472 * system will shut down immediately right here. */
478 pdc_soft_power_button(0); 473 pdc_soft_power_button(0);
479 474
480 /* Call kernel panic() so reboot timeouts work properly 475 /* Call kernel panic() so reboot timeouts work properly
481 * FIXME: This function should be on the list of 476 * FIXME: This function should be on the list of
482 * panic notifiers, and we should call panic 477 * panic notifiers, and we should call panic
483 * directly from the location that we wish. 478 * directly from the location that we wish.
484 * e.g. We should not call panic from 479 * e.g. We should not call panic from
485 * parisc_terminate, but rather the other way around. 480 * parisc_terminate, but rather the other way around.
486 * This hack works, prints the panic message twice, 481 * This hack works, prints the panic message twice,
487 * and it enables reboot timers! 482 * and it enables reboot timers!
488 */ 483 */
489 panic(msg); 484 panic(msg);
490 } 485 }
491 486
492 void notrace handle_interruption(int code, struct pt_regs *regs) 487 void notrace handle_interruption(int code, struct pt_regs *regs)
493 { 488 {
494 unsigned long fault_address = 0; 489 unsigned long fault_address = 0;
495 unsigned long fault_space = 0; 490 unsigned long fault_space = 0;
496 struct siginfo si; 491 struct siginfo si;
497 492
498 if (code == 1) 493 if (code == 1)
499 pdc_console_restart(); /* switch back to pdc if HPMC */ 494 pdc_console_restart(); /* switch back to pdc if HPMC */
500 else 495 else
501 local_irq_enable(); 496 local_irq_enable();
502 497
503 /* Security check: 498 /* Security check:
504 * If the priority level is still user, and the 499 * If the priority level is still user, and the
505 * faulting space is not equal to the active space 500 * faulting space is not equal to the active space
506 * then the user is attempting something in a space 501 * then the user is attempting something in a space
507 * that does not belong to them. Kill the process. 502 * that does not belong to them. Kill the process.
508 * 503 *
509 * This is normally the situation when the user 504 * This is normally the situation when the user
510 * attempts to jump into the kernel space at the 505 * attempts to jump into the kernel space at the
511 * wrong offset, be it at the gateway page or a 506 * wrong offset, be it at the gateway page or a
512 * random location. 507 * random location.
513 * 508 *
514 * We cannot normally signal the process because it 509 * We cannot normally signal the process because it
515 * could *be* on the gateway page, and processes 510 * could *be* on the gateway page, and processes
516 * executing on the gateway page can't have signals 511 * executing on the gateway page can't have signals
517 * delivered. 512 * delivered.
518 * 513 *
519 * We merely readjust the address into the user's 514 * We merely readjust the address into the user's
520 * space, at a destination address of zero, and 515 * space, at a destination address of zero, and
521 * allow processing to continue. 516 * allow processing to continue.
522 */ 517 */
523 if (((unsigned long)regs->iaoq[0] & 3) && 518 if (((unsigned long)regs->iaoq[0] & 3) &&
524 ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) { 519 ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
525 /* Kill the user process later */ 520 /* Kill the user process later */
526 regs->iaoq[0] = 0 | 3; 521 regs->iaoq[0] = 0 | 3;
527 regs->iaoq[1] = regs->iaoq[0] + 4; 522 regs->iaoq[1] = regs->iaoq[0] + 4;
528 regs->iasq[0] = regs->iasq[1] = regs->sr[7]; 523 regs->iasq[0] = regs->iasq[1] = regs->sr[7];
529 regs->gr[0] &= ~PSW_B; 524 regs->gr[0] &= ~PSW_B;
530 return; 525 return;
531 } 526 }
532 527
533 #if 0 528 #if 0
534 printk(KERN_CRIT "Interruption # %d\n", code); 529 printk(KERN_CRIT "Interruption # %d\n", code);
535 #endif 530 #endif
536 531
537 switch(code) { 532 switch(code) {
538 533
539 case 1: 534 case 1:
540 /* High-priority machine check (HPMC) */ 535 /* High-priority machine check (HPMC) */
541 536
542 /* set up a new led state on systems shipped with a LED State panel */ 537 /* set up a new led state on systems shipped with a LED State panel */
543 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC); 538 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);
544 539
545 parisc_terminate("High Priority Machine Check (HPMC)", 540 parisc_terminate("High Priority Machine Check (HPMC)",
546 regs, code, 0); 541 regs, code, 0);
547 /* NOT REACHED */ 542 /* NOT REACHED */
548 543
549 case 2: 544 case 2:
550 /* Power failure interrupt */ 545 /* Power failure interrupt */
551 printk(KERN_CRIT "Power failure interrupt !\n"); 546 printk(KERN_CRIT "Power failure interrupt !\n");
552 return; 547 return;
553 548
554 case 3: 549 case 3:
555 /* Recovery counter trap */ 550 /* Recovery counter trap */
556 regs->gr[0] &= ~PSW_R; 551 regs->gr[0] &= ~PSW_R;
557 if (user_space(regs)) 552 if (user_space(regs))
558 handle_gdb_break(regs, TRAP_TRACE); 553 handle_gdb_break(regs, TRAP_TRACE);
559 /* else this must be the start of a syscall - just let it run */ 554 /* else this must be the start of a syscall - just let it run */
560 return; 555 return;
561 556
562 case 5: 557 case 5:
563 /* Low-priority machine check */ 558 /* Low-priority machine check */
564 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC); 559 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);
565 560
566 flush_cache_all(); 561 flush_cache_all();
567 flush_tlb_all(); 562 flush_tlb_all();
568 cpu_lpmc(5, regs); 563 cpu_lpmc(5, regs);
569 return; 564 return;
570 565
571 case 6: 566 case 6:
572 /* Instruction TLB miss fault/Instruction page fault */ 567 /* Instruction TLB miss fault/Instruction page fault */
573 fault_address = regs->iaoq[0]; 568 fault_address = regs->iaoq[0];
574 fault_space = regs->iasq[0]; 569 fault_space = regs->iasq[0];
575 break; 570 break;
576 571
577 case 8: 572 case 8:
578 /* Illegal instruction trap */ 573 /* Illegal instruction trap */
579 die_if_kernel("Illegal instruction", regs, code); 574 die_if_kernel("Illegal instruction", regs, code);
580 si.si_code = ILL_ILLOPC; 575 si.si_code = ILL_ILLOPC;
581 goto give_sigill; 576 goto give_sigill;
582 577
583 case 9: 578 case 9:
584 /* Break instruction trap */ 579 /* Break instruction trap */
585 handle_break(regs); 580 handle_break(regs);
586 return; 581 return;
587 582
588 case 10: 583 case 10:
589 /* Privileged operation trap */ 584 /* Privileged operation trap */
590 die_if_kernel("Privileged operation", regs, code); 585 die_if_kernel("Privileged operation", regs, code);
591 si.si_code = ILL_PRVOPC; 586 si.si_code = ILL_PRVOPC;
592 goto give_sigill; 587 goto give_sigill;
593 588
594 case 11: 589 case 11:
595 /* Privileged register trap */ 590 /* Privileged register trap */
596 if ((regs->iir & 0xffdfffe0) == 0x034008a0) { 591 if ((regs->iir & 0xffdfffe0) == 0x034008a0) {
597 592
598 /* This is a MFCTL cr26/cr27 to gr instruction. 593 /* This is a MFCTL cr26/cr27 to gr instruction.
599 * PCXS traps on this, so we need to emulate it. 594 * PCXS traps on this, so we need to emulate it.
600 */ 595 */
601 596
602 if (regs->iir & 0x00200000) 597 if (regs->iir & 0x00200000)
603 regs->gr[regs->iir & 0x1f] = mfctl(27); 598 regs->gr[regs->iir & 0x1f] = mfctl(27);
604 else 599 else
605 regs->gr[regs->iir & 0x1f] = mfctl(26); 600 regs->gr[regs->iir & 0x1f] = mfctl(26);
606 601
607 regs->iaoq[0] = regs->iaoq[1]; 602 regs->iaoq[0] = regs->iaoq[1];
608 regs->iaoq[1] += 4; 603 regs->iaoq[1] += 4;
609 regs->iasq[0] = regs->iasq[1]; 604 regs->iasq[0] = regs->iasq[1];
610 return; 605 return;
611 } 606 }
612 607
613 die_if_kernel("Privileged register usage", regs, code); 608 die_if_kernel("Privileged register usage", regs, code);
614 si.si_code = ILL_PRVREG; 609 si.si_code = ILL_PRVREG;
615 give_sigill: 610 give_sigill:
616 si.si_signo = SIGILL; 611 si.si_signo = SIGILL;
617 si.si_errno = 0; 612 si.si_errno = 0;
618 si.si_addr = (void __user *) regs->iaoq[0]; 613 si.si_addr = (void __user *) regs->iaoq[0];
619 force_sig_info(SIGILL, &si, current); 614 force_sig_info(SIGILL, &si, current);
620 return; 615 return;
621 616
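The bit test above can be checked in isolation; a standalone sketch (the sample iir value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t iir = 0x036008a3;      /* hypothetical: mfctl cr27,r3 */

	if ((iir & 0xffdfffe0) == 0x034008a0)     /* mfctl cr26/cr27,gr */
		printf("mfctl cr%d,r%u\n",
		       (iir & 0x00200000) ? 27 : 26, /* bit 21 picks cr27 */
		       iir & 0x1f);                  /* low 5 bits: target gr */
	return 0;
}

Compiled and run, this prints "mfctl cr27,r3", matching the emulation path above that loads gr[iir & 0x1f] from cr27.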
622 case 12: 617 case 12:
623 /* Overflow Trap, let the userland signal handler do the cleanup */ 618 /* Overflow Trap, let the userland signal handler do the cleanup */
624 si.si_signo = SIGFPE; 619 si.si_signo = SIGFPE;
625 si.si_code = FPE_INTOVF; 620 si.si_code = FPE_INTOVF;
626 si.si_addr = (void __user *) regs->iaoq[0]; 621 si.si_addr = (void __user *) regs->iaoq[0];
627 force_sig_info(SIGFPE, &si, current); 622 force_sig_info(SIGFPE, &si, current);
628 return; 623 return;
629 624
630 case 13: 625 case 13:
631 /* Conditional Trap 626 /* Conditional Trap
632 The condition succeeds in an instruction which traps 627 The condition succeeds in an instruction which traps
633 on condition */ 628 on condition */
634 if(user_mode(regs)){ 629 if(user_mode(regs)){
635 si.si_signo = SIGFPE; 630 si.si_signo = SIGFPE;
636 /* Set to zero, and let the userspace app figure it out from 631 /* Set to zero, and let the userspace app figure it out from
637 the insn pointed to by si_addr */ 632 the insn pointed to by si_addr */
638 si.si_code = 0; 633 si.si_code = 0;
639 si.si_addr = (void __user *) regs->iaoq[0]; 634 si.si_addr = (void __user *) regs->iaoq[0];
640 force_sig_info(SIGFPE, &si, current); 635 force_sig_info(SIGFPE, &si, current);
641 return; 636 return;
642 } 637 }
643 /* The kernel doesn't want to handle condition codes */ 638 /* The kernel doesn't want to handle condition codes */
644 break; 639 break;
645 640
646 case 14: 641 case 14:
647 /* Assist Exception Trap, i.e. floating point exception. */ 642 /* Assist Exception Trap, i.e. floating point exception. */
648 die_if_kernel("Floating point exception", regs, 0); /* quiet */ 643 die_if_kernel("Floating point exception", regs, 0); /* quiet */
649 __inc_irq_stat(irq_fpassist_count); 644 __inc_irq_stat(irq_fpassist_count);
650 handle_fpe(regs); 645 handle_fpe(regs);
651 return; 646 return;
652 647
653 case 15: 648 case 15:
654 /* Data TLB miss fault/Data page fault */ 649 /* Data TLB miss fault/Data page fault */
655 /* Fall through */ 650 /* Fall through */
656 case 16: 651 case 16:
657 /* Non-access instruction TLB miss fault */ 652 /* Non-access instruction TLB miss fault */
658 /* The instruction TLB entry needed for the target address of the FIC 653 /* The instruction TLB entry needed for the target address of the FIC
659 is absent, and hardware can't find it, so we get to cleanup */ 654 is absent, and hardware can't find it, so we get to cleanup */
660 /* Fall through */ 655 /* Fall through */
661 case 17: 656 case 17:
662 /* Non-access data TLB miss fault/Non-access data page fault */ 657 /* Non-access data TLB miss fault/Non-access data page fault */
663 /* FIXME: 658 /* FIXME:
664 Still need to add slow path emulation code here! 659 Still need to add slow path emulation code here!
665 If the insn used a non-shadow register, then the tlb 660 If the insn used a non-shadow register, then the tlb
666 handlers could not have their side-effect (e.g. probe 661 handlers could not have their side-effect (e.g. probe
667 writing to a target register) emulated since rfir would 662 writing to a target register) emulated since rfir would
668 erase the changes to said register. Instead we have to 663 erase the changes to said register. Instead we have to
669 set up everything, call this function we are in, and emulate 664 set up everything, call this function we are in, and emulate
670 by hand. Technically we need to emulate: 665 by hand. Technically we need to emulate:
671 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw 666 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
672 */ 667 */
673 fault_address = regs->ior; 668 fault_address = regs->ior;
674 fault_space = regs->isr; 669 fault_space = regs->isr;
675 break; 670 break;
676 671
677 case 18: 672 case 18:
678 /* PCXS only -- later CPUs split this into types 26,27 & 28 */ 673 /* PCXS only -- later CPUs split this into types 26,27 & 28 */
679 /* Check for unaligned access */ 674 /* Check for unaligned access */
680 if (check_unaligned(regs)) { 675 if (check_unaligned(regs)) {
681 handle_unaligned(regs); 676 handle_unaligned(regs);
682 return; 677 return;
683 } 678 }
684 /* Fall Through */ 679 /* Fall Through */
685 case 26: 680 case 26:
686 /* PCXL: Data memory access rights trap */ 681 /* PCXL: Data memory access rights trap */
687 fault_address = regs->ior; 682 fault_address = regs->ior;
688 fault_space = regs->isr; 683 fault_space = regs->isr;
689 break; 684 break;
690 685
691 case 19: 686 case 19:
692 /* Data memory break trap */ 687 /* Data memory break trap */
693 regs->gr[0] |= PSW_X; /* So we can single-step over the trap */ 688 regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
694 /* fall thru */ 689 /* fall thru */
695 case 21: 690 case 21:
696 /* Page reference trap */ 691 /* Page reference trap */
697 handle_gdb_break(regs, TRAP_HWBKPT); 692 handle_gdb_break(regs, TRAP_HWBKPT);
698 return; 693 return;
699 694
700 case 25: 695 case 25:
701 /* Taken branch trap */ 696 /* Taken branch trap */
702 regs->gr[0] &= ~PSW_T; 697 regs->gr[0] &= ~PSW_T;
703 if (user_space(regs)) 698 if (user_space(regs))
704 handle_gdb_break(regs, TRAP_BRANCH); 699 handle_gdb_break(regs, TRAP_BRANCH);
705 /* else this must be the start of a syscall - just let it 700 /* else this must be the start of a syscall - just let it
706 * run. 701 * run.
707 */ 702 */
708 return; 703 return;
709 704
710 case 7: 705 case 7:
711 /* Instruction access rights */ 706 /* Instruction access rights */
712 /* PCXL: Instruction memory protection trap */ 707 /* PCXL: Instruction memory protection trap */
713 708
714 /* 709 /*
715 * This could be caused by either: 1) a process attempting 710 * This could be caused by either: 1) a process attempting
716 * to execute within a vma that does not have execute 711 * to execute within a vma that does not have execute
717 * permission, or 2) an access rights violation caused by a 712 * permission, or 2) an access rights violation caused by a
718 * flush only translation set up by ptep_get_and_clear(). 713 * flush only translation set up by ptep_get_and_clear().
719 * So we check the vma permissions to differentiate the two. 714 * So we check the vma permissions to differentiate the two.
720 * If the vma indicates we have execute permission, then 715 * If the vma indicates we have execute permission, then
721 * the cause is the latter one. In this case, we need to 716 * the cause is the latter one. In this case, we need to
722 * call do_page_fault() to fix the problem. 717 * call do_page_fault() to fix the problem.
723 */ 718 */
724 719
725 if (user_mode(regs)) { 720 if (user_mode(regs)) {
726 struct vm_area_struct *vma; 721 struct vm_area_struct *vma;
727 722
728 down_read(&current->mm->mmap_sem); 723 down_read(&current->mm->mmap_sem);
729 vma = find_vma(current->mm,regs->iaoq[0]); 724 vma = find_vma(current->mm,regs->iaoq[0]);
730 if (vma && (regs->iaoq[0] >= vma->vm_start) 725 if (vma && (regs->iaoq[0] >= vma->vm_start)
731 && (vma->vm_flags & VM_EXEC)) { 726 && (vma->vm_flags & VM_EXEC)) {
732 727
733 fault_address = regs->iaoq[0]; 728 fault_address = regs->iaoq[0];
734 fault_space = regs->iasq[0]; 729 fault_space = regs->iasq[0];
735 730
736 up_read(&current->mm->mmap_sem); 731 up_read(&current->mm->mmap_sem);
737 break; /* call do_page_fault() */ 732 break; /* call do_page_fault() */
738 } 733 }
739 up_read(&current->mm->mmap_sem); 734 up_read(&current->mm->mmap_sem);
740 } 735 }
741 /* Fall Through */ 736 /* Fall Through */
742 case 27: 737 case 27:
743 /* Data memory protection ID trap */ 738 /* Data memory protection ID trap */
744 if (code == 27 && !user_mode(regs) && 739 if (code == 27 && !user_mode(regs) &&
745 fixup_exception(regs)) 740 fixup_exception(regs))
746 return; 741 return;
747 742
748 die_if_kernel("Protection id trap", regs, code); 743 die_if_kernel("Protection id trap", regs, code);
749 si.si_code = SEGV_MAPERR; 744 si.si_code = SEGV_MAPERR;
750 si.si_signo = SIGSEGV; 745 si.si_signo = SIGSEGV;
751 si.si_errno = 0; 746 si.si_errno = 0;
752 if (code == 7) 747 if (code == 7)
753 si.si_addr = (void __user *) regs->iaoq[0]; 748 si.si_addr = (void __user *) regs->iaoq[0];
754 else 749 else
755 si.si_addr = (void __user *) regs->ior; 750 si.si_addr = (void __user *) regs->ior;
756 force_sig_info(SIGSEGV, &si, current); 751 force_sig_info(SIGSEGV, &si, current);
757 return; 752 return;
758 753
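For orientation, a user-space analogue of cause 1) in the case 7 comment above; this sketch deliberately dies with SIGSEGV:

#include <sys/mman.h>

int main(void)
{
	/* No PROT_EXEC, so the vma lacks VM_EXEC. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* Jumping here raises the instruction access rights trap,
	 * which the fall-through above converts to SIGSEGV/SEGV_MAPERR. */
	((void (*)(void))p)();
	return 0;
}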
759 case 28: 754 case 28:
760 /* Unaligned data reference trap */ 755 /* Unaligned data reference trap */
761 handle_unaligned(regs); 756 handle_unaligned(regs);
762 return; 757 return;
763 758
764 default: 759 default:
765 if (user_mode(regs)) { 760 if (user_mode(regs)) {
766 #ifdef PRINT_USER_FAULTS 761 #ifdef PRINT_USER_FAULTS
767 printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n", 762 printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
768 task_pid_nr(current), current->comm); 763 task_pid_nr(current), current->comm);
769 show_regs(regs); 764 show_regs(regs);
770 #endif 765 #endif
771 /* SIGBUS, for lack of a better one. */ 766 /* SIGBUS, for lack of a better one. */
772 si.si_signo = SIGBUS; 767 si.si_signo = SIGBUS;
773 si.si_code = BUS_OBJERR; 768 si.si_code = BUS_OBJERR;
774 si.si_errno = 0; 769 si.si_errno = 0;
775 si.si_addr = (void __user *) regs->ior; 770 si.si_addr = (void __user *) regs->ior;
776 force_sig_info(SIGBUS, &si, current); 771 force_sig_info(SIGBUS, &si, current);
777 return; 772 return;
778 } 773 }
779 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); 774 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
780 775
781 parisc_terminate("Unexpected interruption", regs, code, 0); 776 parisc_terminate("Unexpected interruption", regs, code, 0);
782 /* NOT REACHED */ 777 /* NOT REACHED */
783 } 778 }
784 779
785 if (user_mode(regs)) { 780 if (user_mode(regs)) {
786 if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) { 781 if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
787 #ifdef PRINT_USER_FAULTS 782 #ifdef PRINT_USER_FAULTS
788 if (fault_space == 0) 783 if (fault_space == 0)
789 printk(KERN_DEBUG "User Fault on Kernel Space "); 784 printk(KERN_DEBUG "User Fault on Kernel Space ");
790 else 785 else
791 printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ", 786 printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
792 code); 787 code);
793 printk(KERN_CONT "pid=%d command='%s'\n", 788 printk(KERN_CONT "pid=%d command='%s'\n",
794 task_pid_nr(current), current->comm); 789 task_pid_nr(current), current->comm);
795 show_regs(regs); 790 show_regs(regs);
796 #endif 791 #endif
797 si.si_signo = SIGSEGV; 792 si.si_signo = SIGSEGV;
798 si.si_errno = 0; 793 si.si_errno = 0;
799 si.si_code = SEGV_MAPERR; 794 si.si_code = SEGV_MAPERR;
800 si.si_addr = (void __user *) regs->ior; 795 si.si_addr = (void __user *) regs->ior;
801 force_sig_info(SIGSEGV, &si, current); 796 force_sig_info(SIGSEGV, &si, current);
802 return; 797 return;
803 } 798 }
804 } 799 }
805 else { 800 else {
806 801
807 /* 802 /*
808 * The kernel should never fault on its own address space. 803 * The kernel should never fault on its own address space,
804 * unless pagefault_disable() was called before.
809 */ 805 */
810 806
811 if (fault_space == 0) 807 if (fault_space == 0 && !in_atomic())
812 { 808 {
813 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); 809 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
814 parisc_terminate("Kernel Fault", regs, code, fault_address); 810 parisc_terminate("Kernel Fault", regs, code, fault_address);
815
816 } 811 }
817 } 812 }
818 813
819 do_page_fault(regs, code, fault_address); 814 do_page_fault(regs, code, fault_address);
820 } 815 }
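The new !in_atomic() test above is what lets a pagefault-disabled probe fall through to do_page_fault() and its exception fixups. A minimal kernel-side sketch (the probed address is arbitrary):

#include <linux/uaccess.h>

static int probe_page_zero(void)
{
	unsigned int val;

	/* probe_kernel_read() wraps the access in pagefault_disable(),
	 * so a fault on page zero now returns an error here instead of
	 * reaching parisc_terminate(). */
	if (probe_kernel_read(&val, (void *)0x10, sizeof(val)))
		return -EFAULT;         /* expected outcome on page zero */
	return 0;
}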
821 816
822 817
823 int __init check_ivt(void *iva) 818 int __init check_ivt(void *iva)
824 { 819 {
825 extern u32 os_hpmc_size; 820 extern u32 os_hpmc_size;
826 extern const u32 os_hpmc[]; 821 extern const u32 os_hpmc[];
827 822
828 int i; 823 int i;
829 u32 check = 0; 824 u32 check = 0;
830 u32 *ivap; 825 u32 *ivap;
831 u32 *hpmcp; 826 u32 *hpmcp;
832 u32 length; 827 u32 length;
833 828
834 if (strcmp((char *)iva, "cows can fly")) 829 if (strcmp((char *)iva, "cows can fly"))
835 return -1; 830 return -1;
836 831
837 ivap = (u32 *)iva; 832 ivap = (u32 *)iva;
838 833
839 for (i = 0; i < 8; i++) 834 for (i = 0; i < 8; i++)
840 *ivap++ = 0; 835 *ivap++ = 0;
841 836
842 /* Compute Checksum for HPMC handler */ 837 /* Compute Checksum for HPMC handler */
843 length = os_hpmc_size; 838 length = os_hpmc_size;
844 ivap[7] = length; 839 ivap[7] = length;
845 840
846 hpmcp = (u32 *)os_hpmc; 841 hpmcp = (u32 *)os_hpmc;
847 842
848 for (i=0; i<length/4; i++) 843 for (i=0; i<length/4; i++)
849 check += *hpmcp++; 844 check += *hpmcp++;
850 845
851 for (i=0; i<8; i++) 846 for (i=0; i<8; i++)
852 check += ivap[i]; 847 check += ivap[i];
853 848
854 ivap[5] = -check; 849 ivap[5] = -check;
855 850
856 return 0; 851 return 0;
857 } 852 }
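The ivap[5] = -check assignment above is a classic negative-checksum trick: afterwards the handler words plus the eight vector words sum to zero modulo 2^32. A toy standalone check:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t v[8] = { 1, 2, 3, 4, 0, 0, 7, 32 };
	uint32_t sum = 0;
	int i;

	for (i = 0; i < 8; i++)
		sum += v[i];
	v[5] = -sum;                    /* slot 5 absorbs the total */

	for (sum = 0, i = 0; i < 8; i++)
		sum += v[i];
	assert(sum == 0);               /* vector now checksums to zero */
	return 0;
}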
858 853
859 #ifndef CONFIG_64BIT 854 #ifndef CONFIG_64BIT
860 extern const void fault_vector_11; 855 extern const void fault_vector_11;
861 #endif 856 #endif
862 extern const void fault_vector_20; 857 extern const void fault_vector_20;
863 858
864 void __init trap_init(void) 859 void __init trap_init(void)
865 { 860 {
866 void *iva; 861 void *iva;
867 862
868 if (boot_cpu_data.cpu_type >= pcxu) 863 if (boot_cpu_data.cpu_type >= pcxu)
869 iva = (void *) &fault_vector_20; 864 iva = (void *) &fault_vector_20;
870 else 865 else
871 #ifdef CONFIG_64BIT 866 #ifdef CONFIG_64BIT
872 panic("Can't boot 64-bit OS on PA1.1 processor!"); 867 panic("Can't boot 64-bit OS on PA1.1 processor!");
873 #else 868 #else
874 iva = (void *) &fault_vector_11; 869 iva = (void *) &fault_vector_11;
875 #endif 870 #endif
876 871
877 if (check_ivt(iva)) 872 if (check_ivt(iva))
878 panic("IVT invalid"); 873 panic("IVT invalid");
879 } 874 }
arch/parisc/lib/memcpy.c
1 /* 1 /*
2 * Optimized memory copy routines. 2 * Optimized memory copy routines.
3 * 3 *
4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org> 4 * Copyright (C) 2004 Randolph Chung <tausq@debian.org>
5 * Copyright (C) 2013 Helge Deller <deller@gmx.de> 5 * Copyright (C) 2013 Helge Deller <deller@gmx.de>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option) 9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version. 10 * any later version.
11 * 11 *
12 * This program is distributed in the hope that it will be useful, 12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 * 20 *
21 * Portions derived from the GNU C Library 21 * Portions derived from the GNU C Library
22 * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc. 22 * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
23 * 23 *
24 * Several strategies are tried to try to get the best performance for various 24 * Several strategies are tried to try to get the best performance for various
25 * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using 25 * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using
26 * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using 26 * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
27 * general registers. Unaligned copies are handled either by aligning the 27 * general registers. Unaligned copies are handled either by aligning the
28 * destination and then using shift-and-write method, or in a few cases by 28 * destination and then using shift-and-write method, or in a few cases by
29 * falling back to a byte-at-a-time copy. 29 * falling back to a byte-at-a-time copy.
30 * 30 *
31 * I chose to implement this in C because it is easier to maintain and debug, 31 * I chose to implement this in C because it is easier to maintain and debug,
32 * and in my experiments it appears that the C code generated by gcc (3.3/3.4 32 * and in my experiments it appears that the C code generated by gcc (3.3/3.4
33 * at the time of writing) is fairly optimal. Unfortunately some of the 33 * at the time of writing) is fairly optimal. Unfortunately some of the
34 * semantics of the copy routine (exception handling) are difficult to express 34 * semantics of the copy routine (exception handling) are difficult to express
35 * in C, so we have to play some tricks to get it to work. 35 * in C, so we have to play some tricks to get it to work.
36 * 36 *
37 * All the loads and stores are done via explicit asm() code in order to use 37 * All the loads and stores are done via explicit asm() code in order to use
38 * the right space registers. 38 * the right space registers.
39 * 39 *
40 * Testing with various alignments and buffer sizes shows that this code is 40 * Testing with various alignments and buffer sizes shows that this code is
41 * often >10x faster than a simple byte-at-a-time copy, even for strangely 41 * often >10x faster than a simple byte-at-a-time copy, even for strangely
42 * aligned operands. It is interesting to note that the glibc version 42 * aligned operands. It is interesting to note that the glibc version
43 * of memcpy (written in C) is actually quite fast already. This routine is 43 * of memcpy (written in C) is actually quite fast already. This routine is
44 * able to beat it by 30-40% for aligned copies because of the loop unrolling, 44 * able to beat it by 30-40% for aligned copies because of the loop unrolling,
45 * but in some cases the glibc version is still slightly faster. This lends 45 * but in some cases the glibc version is still slightly faster. This lends
46 * more credibility that gcc can generate very good code as long as we are 46 * more credibility that gcc can generate very good code as long as we are
47 * careful. 47 * careful.
48 * 48 *
49 * TODO: 49 * TODO:
50 * - cache prefetching needs more experimentation to get optimal settings 50 * - cache prefetching needs more experimentation to get optimal settings
51 * - try not to use the post-increment address modifiers; they create additional 51 * - try not to use the post-increment address modifiers; they create additional
52 * interlocks 52 * interlocks
53 * - replace byte-copy loops with stbys sequences 53 * - replace byte-copy loops with stbys sequences
54 */ 54 */
55 55
56 #ifdef __KERNEL__ 56 #ifdef __KERNEL__
57 #include <linux/module.h> 57 #include <linux/module.h>
58 #include <linux/compiler.h> 58 #include <linux/compiler.h>
59 #include <asm/uaccess.h> 59 #include <asm/uaccess.h>
60 #define s_space "%%sr1" 60 #define s_space "%%sr1"
61 #define d_space "%%sr2" 61 #define d_space "%%sr2"
62 #else 62 #else
63 #include "memcpy.h" 63 #include "memcpy.h"
64 #define s_space "%%sr0" 64 #define s_space "%%sr0"
65 #define d_space "%%sr0" 65 #define d_space "%%sr0"
66 #define pa_memcpy new2_copy 66 #define pa_memcpy new2_copy
67 #endif 67 #endif
68 68
69 DECLARE_PER_CPU(struct exception_data, exception_data); 69 DECLARE_PER_CPU(struct exception_data, exception_data);
70 70
71 #define preserve_branch(label) do { \ 71 #define preserve_branch(label) do { \
72 volatile int dummy = 0; \ 72 volatile int dummy = 0; \
73 /* The following branch is never taken, it's just here to */ \ 73 /* The following branch is never taken, it's just here to */ \
74 /* prevent gcc from optimizing away our exception code. */ \ 74 /* prevent gcc from optimizing away our exception code. */ \
75 if (unlikely(dummy != dummy)) \ 75 if (unlikely(dummy != dummy)) \
76 goto label; \ 76 goto label; \
77 } while (0) 77 } while (0)
78 78
79 #define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) 79 #define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
80 #define get_kernel_space() (0) 80 #define get_kernel_space() (0)
81 81
82 #define MERGE(w0, sh_1, w1, sh_2) ({ \ 82 #define MERGE(w0, sh_1, w1, sh_2) ({ \
83 unsigned int _r; \ 83 unsigned int _r; \
84 asm volatile ( \ 84 asm volatile ( \
85 "mtsar %3\n" \ 85 "mtsar %3\n" \
86 "shrpw %1, %2, %%sar, %0\n" \ 86 "shrpw %1, %2, %%sar, %0\n" \
87 : "=r"(_r) \ 87 : "=r"(_r) \
88 : "r"(w0), "r"(w1), "r"(sh_2) \ 88 : "r"(w0), "r"(w1), "r"(sh_2) \
89 ); \ 89 ); \
90 _r; \ 90 _r; \
91 }) 91 })
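
For readers unfamiliar with shrpw: MERGE() funnel-shifts two adjacent source words into one destination-aligned word, using the sar register as the shift amount. A minimal user-space C model of the same computation (merge_model is a hypothetical name; PA-RISC is big-endian, and copy_dstaligned() below only ever uses sh_1 values of 8, 16 or 24, so both C shifts stay well defined):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical portable model of MERGE(): shrpw concatenates w0:w1
     * into 64 bits, shifts right by sh_2, and keeps the low 32 bits,
     * i.e. (w0 << sh_1) | (w1 >> sh_2) with sh_1 + sh_2 == 32. */
    static uint32_t merge_model(uint32_t w0, int sh_1, uint32_t w1, int sh_2)
    {
            assert(sh_1 > 0 && sh_1 < 32 && sh_1 + sh_2 == 32);
            return (w0 << sh_1) | (w1 >> sh_2);
    }

    int main(void)
    {
            /* Two big-endian words read at the rounded-down source
             * address; a source offset of one byte gives sh_1 = 8. */
            printf("%08x\n", (unsigned)merge_model(0x00112233, 8,
                                                   0x44556677, 24));
            /* Prints 11223344: the word that starts one byte into the
             * byte stream 00 11 22 33 44 55 66 77. */
            return 0;
    }
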
92 #define THRESHOLD 16 92 #define THRESHOLD 16
93 93
94 #ifdef DEBUG_MEMCPY 94 #ifdef DEBUG_MEMCPY
95 #define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0) 95 #define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
96 #else 96 #else
97 #define DPRINTF(fmt, args...) 97 #define DPRINTF(fmt, args...)
98 #endif 98 #endif
99 99
100 #define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \ 100 #define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
101 __asm__ __volatile__ ( \ 101 __asm__ __volatile__ ( \
102 "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \ 102 "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \
103 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ 103 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
104 : _tt(_t), "+r"(_a) \ 104 : _tt(_t), "+r"(_a) \
105 : \ 105 : \
106 : "r8") 106 : "r8")
107 107
108 #define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \ 108 #define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \
109 __asm__ __volatile__ ( \ 109 __asm__ __volatile__ ( \
110 "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \ 110 "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \
111 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ 111 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
112 : "+r"(_a) \ 112 : "+r"(_a) \
113 : _tt(_t) \ 113 : _tt(_t) \
114 : "r8") 114 : "r8")
115 115
116 #define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e) 116 #define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
117 #define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e) 117 #define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
118 #define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e) 118 #define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
119 #define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e) 119 #define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
120 #define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e) 120 #define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
121 #define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e) 121 #define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
122 122
123 #define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \ 123 #define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \
124 __asm__ __volatile__ ( \ 124 __asm__ __volatile__ ( \
125 "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \ 125 "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \
126 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ 126 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
127 : _tt(_t) \ 127 : _tt(_t) \
128 : "r"(_a) \ 128 : "r"(_a) \
129 : "r8") 129 : "r8")
130 130
131 #define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \ 131 #define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \
132 __asm__ __volatile__ ( \ 132 __asm__ __volatile__ ( \
133 "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \ 133 "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \
134 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ 134 ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \
135 : \ 135 : \
136 : _tt(_t), "r"(_a) \ 136 : _tt(_t), "r"(_a) \
137 : "r8") 137 : "r8")
138 138
139 #define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e) 139 #define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
140 #define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e) 140 #define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e)
141 141
142 #ifdef CONFIG_PREFETCH 142 #ifdef CONFIG_PREFETCH
143 static inline void prefetch_src(const void *addr) 143 static inline void prefetch_src(const void *addr)
144 { 144 {
145 __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr)); 145 __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
146 } 146 }
147 147
148 static inline void prefetch_dst(const void *addr) 148 static inline void prefetch_dst(const void *addr)
149 { 149 {
150 __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr)); 150 __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
151 } 151 }
152 #else 152 #else
153 #define prefetch_src(addr) do { } while(0) 153 #define prefetch_src(addr) do { } while(0)
154 #define prefetch_dst(addr) do { } while(0) 154 #define prefetch_dst(addr) do { } while(0)
155 #endif 155 #endif
156 156
157 #define PA_MEMCPY_OK 0 157 #define PA_MEMCPY_OK 0
158 #define PA_MEMCPY_LOAD_ERROR 1 158 #define PA_MEMCPY_LOAD_ERROR 1
159 #define PA_MEMCPY_STORE_ERROR 2 159 #define PA_MEMCPY_STORE_ERROR 2
160 160
161 /* Copy from an unaligned src to an aligned dst, using shifts. Handles 4 words 161 /* Copy from an unaligned src to an aligned dst, using shifts. Handles 4 words
162 * per loop. This code is derived from glibc. 162 * per loop. This code is derived from glibc.
163 */ 163 */
164 static inline unsigned long copy_dstaligned(unsigned long dst, 164 static inline unsigned long copy_dstaligned(unsigned long dst,
165 unsigned long src, unsigned long len) 165 unsigned long src, unsigned long len)
166 { 166 {
167 /* gcc complains that a2 and a3 may be uninitialized, but actually 167 /* gcc complains that a2 and a3 may be uninitialized, but actually
168 * they cannot be. Initialize a2/a3 to shut gcc up. 168 * they cannot be. Initialize a2/a3 to shut gcc up.
169 */ 169 */
170 register unsigned int a0, a1, a2 = 0, a3 = 0; 170 register unsigned int a0, a1, a2 = 0, a3 = 0;
171 int sh_1, sh_2; 171 int sh_1, sh_2;
172 172
173 /* prefetch_src((const void *)src); */ 173 /* prefetch_src((const void *)src); */
174 174
175 /* Calculate how to shift a word read at the memory-operation-aligned 175 /* Calculate how to shift a word read at the memory-operation-aligned
176 srcp to make it aligned for the copy. */ 176 srcp to make it aligned for the copy. */
177 sh_1 = 8 * (src % sizeof(unsigned int)); 177 sh_1 = 8 * (src % sizeof(unsigned int));
178 sh_2 = 8 * sizeof(unsigned int) - sh_1; 178 sh_2 = 8 * sizeof(unsigned int) - sh_1;
179 179
180 /* Make src aligned by rounding it down. */ 180 /* Make src aligned by rounding it down. */
181 src &= -sizeof(unsigned int); 181 src &= -sizeof(unsigned int);
182 182
183 switch (len % 4) 183 switch (len % 4)
184 { 184 {
185 case 2: 185 case 2:
186 /* a1 = ((unsigned int *) src)[0]; 186 /* a1 = ((unsigned int *) src)[0];
187 a2 = ((unsigned int *) src)[1]; */ 187 a2 = ((unsigned int *) src)[1]; */
188 ldw(s_space, 0, src, a1, cda_ldw_exc); 188 ldw(s_space, 0, src, a1, cda_ldw_exc);
189 ldw(s_space, 4, src, a2, cda_ldw_exc); 189 ldw(s_space, 4, src, a2, cda_ldw_exc);
190 src -= 1 * sizeof(unsigned int); 190 src -= 1 * sizeof(unsigned int);
191 dst -= 3 * sizeof(unsigned int); 191 dst -= 3 * sizeof(unsigned int);
192 len += 2; 192 len += 2;
193 goto do1; 193 goto do1;
194 case 3: 194 case 3:
195 /* a0 = ((unsigned int *) src)[0]; 195 /* a0 = ((unsigned int *) src)[0];
196 a1 = ((unsigned int *) src)[1]; */ 196 a1 = ((unsigned int *) src)[1]; */
197 ldw(s_space, 0, src, a0, cda_ldw_exc); 197 ldw(s_space, 0, src, a0, cda_ldw_exc);
198 ldw(s_space, 4, src, a1, cda_ldw_exc); 198 ldw(s_space, 4, src, a1, cda_ldw_exc);
199 src -= 0 * sizeof(unsigned int); 199 src -= 0 * sizeof(unsigned int);
200 dst -= 2 * sizeof(unsigned int); 200 dst -= 2 * sizeof(unsigned int);
201 len += 1; 201 len += 1;
202 goto do2; 202 goto do2;
203 case 0: 203 case 0:
204 if (len == 0) 204 if (len == 0)
205 return PA_MEMCPY_OK; 205 return PA_MEMCPY_OK;
206 /* a3 = ((unsigned int *) src)[0]; 206 /* a3 = ((unsigned int *) src)[0];
207 a0 = ((unsigned int *) src)[1]; */ 207 a0 = ((unsigned int *) src)[1]; */
208 ldw(s_space, 0, src, a3, cda_ldw_exc); 208 ldw(s_space, 0, src, a3, cda_ldw_exc);
209 ldw(s_space, 4, src, a0, cda_ldw_exc); 209 ldw(s_space, 4, src, a0, cda_ldw_exc);
210 src -= -1 * sizeof(unsigned int); 210 src -= -1 * sizeof(unsigned int);
211 dst -= 1 * sizeof(unsigned int); 211 dst -= 1 * sizeof(unsigned int);
212 len += 0; 212 len += 0;
213 goto do3; 213 goto do3;
214 case 1: 214 case 1:
215 /* a2 = ((unsigned int *) src)[0]; 215 /* a2 = ((unsigned int *) src)[0];
216 a3 = ((unsigned int *) src)[1]; */ 216 a3 = ((unsigned int *) src)[1]; */
217 ldw(s_space, 0, src, a2, cda_ldw_exc); 217 ldw(s_space, 0, src, a2, cda_ldw_exc);
218 ldw(s_space, 4, src, a3, cda_ldw_exc); 218 ldw(s_space, 4, src, a3, cda_ldw_exc);
219 src -= -2 * sizeof(unsigned int); 219 src -= -2 * sizeof(unsigned int);
220 dst -= 0 * sizeof(unsigned int); 220 dst -= 0 * sizeof(unsigned int);
221 len -= 1; 221 len -= 1;
222 if (len == 0) 222 if (len == 0)
223 goto do0; 223 goto do0;
224 goto do4; /* No-op. */ 224 goto do4; /* No-op. */
225 } 225 }
226 226
227 do 227 do
228 { 228 {
229 /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */ 229 /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
230 do4: 230 do4:
231 /* a0 = ((unsigned int *) src)[0]; */ 231 /* a0 = ((unsigned int *) src)[0]; */
232 ldw(s_space, 0, src, a0, cda_ldw_exc); 232 ldw(s_space, 0, src, a0, cda_ldw_exc);
233 /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */ 233 /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
234 stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc); 234 stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
235 do3: 235 do3:
236 /* a1 = ((unsigned int *) src)[1]; */ 236 /* a1 = ((unsigned int *) src)[1]; */
237 ldw(s_space, 4, src, a1, cda_ldw_exc); 237 ldw(s_space, 4, src, a1, cda_ldw_exc);
238 /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */ 238 /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
239 stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc); 239 stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
240 do2: 240 do2:
241 /* a2 = ((unsigned int *) src)[2]; */ 241 /* a2 = ((unsigned int *) src)[2]; */
242 ldw(s_space, 8, src, a2, cda_ldw_exc); 242 ldw(s_space, 8, src, a2, cda_ldw_exc);
243 /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */ 243 /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
244 stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc); 244 stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
245 do1: 245 do1:
246 /* a3 = ((unsigned int *) src)[3]; */ 246 /* a3 = ((unsigned int *) src)[3]; */
247 ldw(s_space, 12, src, a3, cda_ldw_exc); 247 ldw(s_space, 12, src, a3, cda_ldw_exc);
248 /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */ 248 /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
249 stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc); 249 stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
250 250
251 src += 4 * sizeof(unsigned int); 251 src += 4 * sizeof(unsigned int);
252 dst += 4 * sizeof(unsigned int); 252 dst += 4 * sizeof(unsigned int);
253 len -= 4; 253 len -= 4;
254 } 254 }
255 while (len != 0); 255 while (len != 0);
256 256
257 do0: 257 do0:
258 /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */ 258 /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
259 stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc); 259 stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
260 260
261 preserve_branch(handle_load_error); 261 preserve_branch(handle_load_error);
262 preserve_branch(handle_store_error); 262 preserve_branch(handle_store_error);
263 263
264 return PA_MEMCPY_OK; 264 return PA_MEMCPY_OK;
265 265
266 handle_load_error: 266 handle_load_error:
267 __asm__ __volatile__ ("cda_ldw_exc:\n"); 267 __asm__ __volatile__ ("cda_ldw_exc:\n");
268 return PA_MEMCPY_LOAD_ERROR; 268 return PA_MEMCPY_LOAD_ERROR;
269 269
270 handle_store_error: 270 handle_store_error:
271 __asm__ __volatile__ ("cda_stw_exc:\n"); 271 __asm__ __volatile__ ("cda_stw_exc:\n");
272 return PA_MEMCPY_STORE_ERROR; 272 return PA_MEMCPY_STORE_ERROR;
273 } 273 }
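
The switch above is a computed entry into the unrolled loop: it preloads the sliding register pair and biases src/dst so that each iteration can load one word and store four merged ones. Stripped of the unrolling and fault handling, the algorithm reduces to the sketch below (copy_dstaligned_model is a hypothetical name; it assumes a big-endian machine and a word-unaligned src, and, like the real routine, it may touch a few bytes past the last source byte but never beyond its final aligned word):

    #include <stdint.h>

    /* dst is word aligned, src is not (src % 4 != 0, as in the caller);
     * nwords is the number of 32-bit words to store. */
    static void copy_dstaligned_model(uint32_t *dst, const uint8_t *src,
                                      unsigned long nwords)
    {
            int sh_1 = 8 * ((uintptr_t)src % 4);    /* bits already consumed */
            int sh_2 = 32 - sh_1;
            /* Round src down so every load is word aligned. */
            const uint32_t *s =
                    (const uint32_t *)((uintptr_t)src & ~(uintptr_t)3);
            uint32_t w0 = *s++;

            while (nwords--) {
                    uint32_t w1 = *s++;
                    *dst++ = (w0 << sh_1) | (w1 >> sh_2);   /* MERGE() */
                    w0 = w1;
            }
    }

On a big-endian machine this produces the same bytes as memcpy(dst, src, 4 * nwords), one aligned load and one aligned store per word.
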
274 274
275 275
276 /* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR. 276 /* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
277 * In case of an access fault, the faulting address can be read from the per_cpu 277 * In case of an access fault, the faulting address can be read from the per_cpu
278 * exception data struct. */ 278 * exception data struct. */
279 static unsigned long pa_memcpy_internal(void *dstp, const void *srcp, 279 static unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
280 unsigned long len) 280 unsigned long len)
281 { 281 {
282 register unsigned long src, dst, t1, t2, t3; 282 register unsigned long src, dst, t1, t2, t3;
283 register unsigned char *pcs, *pcd; 283 register unsigned char *pcs, *pcd;
284 register unsigned int *pws, *pwd; 284 register unsigned int *pws, *pwd;
285 register double *pds, *pdd; 285 register double *pds, *pdd;
286 unsigned long ret; 286 unsigned long ret;
287 287
288 src = (unsigned long)srcp; 288 src = (unsigned long)srcp;
289 dst = (unsigned long)dstp; 289 dst = (unsigned long)dstp;
290 pcs = (unsigned char *)srcp; 290 pcs = (unsigned char *)srcp;
291 pcd = (unsigned char *)dstp; 291 pcd = (unsigned char *)dstp;
292 292
293 /* prefetch_src((const void *)srcp); */ 293 /* prefetch_src((const void *)srcp); */
294 294
295 if (len < THRESHOLD) 295 if (len < THRESHOLD)
296 goto byte_copy; 296 goto byte_copy;
297 297
298 /* Check alignment */ 298 /* Check alignment */
299 t1 = (src ^ dst); 299 t1 = (src ^ dst);
300 if (unlikely(t1 & (sizeof(double)-1))) 300 if (unlikely(t1 & (sizeof(double)-1)))
301 goto unaligned_copy; 301 goto unaligned_copy;
302 302
303 /* src and dst have same alignment. */ 303 /* src and dst have same alignment. */
304 304
305 /* Copy bytes till we are double-aligned. */ 305 /* Copy bytes till we are double-aligned. */
306 t2 = src & (sizeof(double) - 1); 306 t2 = src & (sizeof(double) - 1);
307 if (unlikely(t2 != 0)) { 307 if (unlikely(t2 != 0)) {
308 t2 = sizeof(double) - t2; 308 t2 = sizeof(double) - t2;
309 while (t2 && len) { 309 while (t2 && len) {
310 /* *pcd++ = *pcs++; */ 310 /* *pcd++ = *pcs++; */
311 ldbma(s_space, pcs, t3, pmc_load_exc); 311 ldbma(s_space, pcs, t3, pmc_load_exc);
312 len--; 312 len--;
313 stbma(d_space, t3, pcd, pmc_store_exc); 313 stbma(d_space, t3, pcd, pmc_store_exc);
314 t2--; 314 t2--;
315 } 315 }
316 } 316 }
317 317
318 pds = (double *)pcs; 318 pds = (double *)pcs;
319 pdd = (double *)pcd; 319 pdd = (double *)pcd;
320 320
321 #if 0 321 #if 0
322 /* Copy 8 doubles at a time */ 322 /* Copy 8 doubles at a time */
323 while (len >= 8*sizeof(double)) { 323 while (len >= 8*sizeof(double)) {
324 register double r1, r2, r3, r4, r5, r6, r7, r8; 324 register double r1, r2, r3, r4, r5, r6, r7, r8;
325 /* prefetch_src((char *)pds + L1_CACHE_BYTES); */ 325 /* prefetch_src((char *)pds + L1_CACHE_BYTES); */
326 flddma(s_space, pds, r1, pmc_load_exc); 326 flddma(s_space, pds, r1, pmc_load_exc);
327 flddma(s_space, pds, r2, pmc_load_exc); 327 flddma(s_space, pds, r2, pmc_load_exc);
328 flddma(s_space, pds, r3, pmc_load_exc); 328 flddma(s_space, pds, r3, pmc_load_exc);
329 flddma(s_space, pds, r4, pmc_load_exc); 329 flddma(s_space, pds, r4, pmc_load_exc);
330 fstdma(d_space, r1, pdd, pmc_store_exc); 330 fstdma(d_space, r1, pdd, pmc_store_exc);
331 fstdma(d_space, r2, pdd, pmc_store_exc); 331 fstdma(d_space, r2, pdd, pmc_store_exc);
332 fstdma(d_space, r3, pdd, pmc_store_exc); 332 fstdma(d_space, r3, pdd, pmc_store_exc);
333 fstdma(d_space, r4, pdd, pmc_store_exc); 333 fstdma(d_space, r4, pdd, pmc_store_exc);
334 334
335 #if 0 335 #if 0
336 if (L1_CACHE_BYTES <= 32) 336 if (L1_CACHE_BYTES <= 32)
337 prefetch_src((char *)pds + L1_CACHE_BYTES); 337 prefetch_src((char *)pds + L1_CACHE_BYTES);
338 #endif 338 #endif
339 flddma(s_space, pds, r5, pmc_load_exc); 339 flddma(s_space, pds, r5, pmc_load_exc);
340 flddma(s_space, pds, r6, pmc_load_exc); 340 flddma(s_space, pds, r6, pmc_load_exc);
341 flddma(s_space, pds, r7, pmc_load_exc); 341 flddma(s_space, pds, r7, pmc_load_exc);
342 flddma(s_space, pds, r8, pmc_load_exc); 342 flddma(s_space, pds, r8, pmc_load_exc);
343 fstdma(d_space, r5, pdd, pmc_store_exc); 343 fstdma(d_space, r5, pdd, pmc_store_exc);
344 fstdma(d_space, r6, pdd, pmc_store_exc); 344 fstdma(d_space, r6, pdd, pmc_store_exc);
345 fstdma(d_space, r7, pdd, pmc_store_exc); 345 fstdma(d_space, r7, pdd, pmc_store_exc);
346 fstdma(d_space, r8, pdd, pmc_store_exc); 346 fstdma(d_space, r8, pdd, pmc_store_exc);
347 len -= 8*sizeof(double); 347 len -= 8*sizeof(double);
348 } 348 }
349 #endif 349 #endif
350 350
351 pws = (unsigned int *)pds; 351 pws = (unsigned int *)pds;
352 pwd = (unsigned int *)pdd; 352 pwd = (unsigned int *)pdd;
353 353
354 word_copy: 354 word_copy:
355 while (len >= 8*sizeof(unsigned int)) { 355 while (len >= 8*sizeof(unsigned int)) {
356 register unsigned int r1,r2,r3,r4,r5,r6,r7,r8; 356 register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
357 /* prefetch_src((char *)pws + L1_CACHE_BYTES); */ 357 /* prefetch_src((char *)pws + L1_CACHE_BYTES); */
358 ldwma(s_space, pws, r1, pmc_load_exc); 358 ldwma(s_space, pws, r1, pmc_load_exc);
359 ldwma(s_space, pws, r2, pmc_load_exc); 359 ldwma(s_space, pws, r2, pmc_load_exc);
360 ldwma(s_space, pws, r3, pmc_load_exc); 360 ldwma(s_space, pws, r3, pmc_load_exc);
361 ldwma(s_space, pws, r4, pmc_load_exc); 361 ldwma(s_space, pws, r4, pmc_load_exc);
362 stwma(d_space, r1, pwd, pmc_store_exc); 362 stwma(d_space, r1, pwd, pmc_store_exc);
363 stwma(d_space, r2, pwd, pmc_store_exc); 363 stwma(d_space, r2, pwd, pmc_store_exc);
364 stwma(d_space, r3, pwd, pmc_store_exc); 364 stwma(d_space, r3, pwd, pmc_store_exc);
365 stwma(d_space, r4, pwd, pmc_store_exc); 365 stwma(d_space, r4, pwd, pmc_store_exc);
366 366
367 ldwma(s_space, pws, r5, pmc_load_exc); 367 ldwma(s_space, pws, r5, pmc_load_exc);
368 ldwma(s_space, pws, r6, pmc_load_exc); 368 ldwma(s_space, pws, r6, pmc_load_exc);
369 ldwma(s_space, pws, r7, pmc_load_exc); 369 ldwma(s_space, pws, r7, pmc_load_exc);
370 ldwma(s_space, pws, r8, pmc_load_exc); 370 ldwma(s_space, pws, r8, pmc_load_exc);
371 stwma(d_space, r5, pwd, pmc_store_exc); 371 stwma(d_space, r5, pwd, pmc_store_exc);
372 stwma(d_space, r6, pwd, pmc_store_exc); 372 stwma(d_space, r6, pwd, pmc_store_exc);
373 stwma(d_space, r7, pwd, pmc_store_exc); 373 stwma(d_space, r7, pwd, pmc_store_exc);
374 stwma(d_space, r8, pwd, pmc_store_exc); 374 stwma(d_space, r8, pwd, pmc_store_exc);
375 len -= 8*sizeof(unsigned int); 375 len -= 8*sizeof(unsigned int);
376 } 376 }
377 377
378 while (len >= 4*sizeof(unsigned int)) { 378 while (len >= 4*sizeof(unsigned int)) {
379 register unsigned int r1,r2,r3,r4; 379 register unsigned int r1,r2,r3,r4;
380 ldwma(s_space, pws, r1, pmc_load_exc); 380 ldwma(s_space, pws, r1, pmc_load_exc);
381 ldwma(s_space, pws, r2, pmc_load_exc); 381 ldwma(s_space, pws, r2, pmc_load_exc);
382 ldwma(s_space, pws, r3, pmc_load_exc); 382 ldwma(s_space, pws, r3, pmc_load_exc);
383 ldwma(s_space, pws, r4, pmc_load_exc); 383 ldwma(s_space, pws, r4, pmc_load_exc);
384 stwma(d_space, r1, pwd, pmc_store_exc); 384 stwma(d_space, r1, pwd, pmc_store_exc);
385 stwma(d_space, r2, pwd, pmc_store_exc); 385 stwma(d_space, r2, pwd, pmc_store_exc);
386 stwma(d_space, r3, pwd, pmc_store_exc); 386 stwma(d_space, r3, pwd, pmc_store_exc);
387 stwma(d_space, r4, pwd, pmc_store_exc); 387 stwma(d_space, r4, pwd, pmc_store_exc);
388 len -= 4*sizeof(unsigned int); 388 len -= 4*sizeof(unsigned int);
389 } 389 }
390 390
391 pcs = (unsigned char *)pws; 391 pcs = (unsigned char *)pws;
392 pcd = (unsigned char *)pwd; 392 pcd = (unsigned char *)pwd;
393 393
394 byte_copy: 394 byte_copy:
395 while (len) { 395 while (len) {
396 /* *pcd++ = *pcs++; */ 396 /* *pcd++ = *pcs++; */
397 ldbma(s_space, pcs, t3, pmc_load_exc); 397 ldbma(s_space, pcs, t3, pmc_load_exc);
398 stbma(d_space, t3, pcd, pmc_store_exc); 398 stbma(d_space, t3, pcd, pmc_store_exc);
399 len--; 399 len--;
400 } 400 }
401 401
402 return PA_MEMCPY_OK; 402 return PA_MEMCPY_OK;
403 403
404 unaligned_copy: 404 unaligned_copy:
405 /* possibly we are aligned on a word, but not on a double... */ 405 /* possibly we are aligned on a word, but not on a double... */
406 if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) { 406 if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
407 t2 = src & (sizeof(unsigned int) - 1); 407 t2 = src & (sizeof(unsigned int) - 1);
408 408
409 if (unlikely(t2 != 0)) { 409 if (unlikely(t2 != 0)) {
410 t2 = sizeof(unsigned int) - t2; 410 t2 = sizeof(unsigned int) - t2;
411 while (t2) { 411 while (t2) {
412 /* *pcd++ = *pcs++; */ 412 /* *pcd++ = *pcs++; */
413 ldbma(s_space, pcs, t3, pmc_load_exc); 413 ldbma(s_space, pcs, t3, pmc_load_exc);
414 stbma(d_space, t3, pcd, pmc_store_exc); 414 stbma(d_space, t3, pcd, pmc_store_exc);
415 len--; 415 len--;
416 t2--; 416 t2--;
417 } 417 }
418 } 418 }
419 419
420 pws = (unsigned int *)pcs; 420 pws = (unsigned int *)pcs;
421 pwd = (unsigned int *)pcd; 421 pwd = (unsigned int *)pcd;
422 goto word_copy; 422 goto word_copy;
423 } 423 }
424 424
425 /* Align the destination. */ 425 /* Align the destination. */
426 if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) { 426 if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
427 t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1)); 427 t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
428 while (t2) { 428 while (t2) {
429 /* *pcd++ = *pcs++; */ 429 /* *pcd++ = *pcs++; */
430 ldbma(s_space, pcs, t3, pmc_load_exc); 430 ldbma(s_space, pcs, t3, pmc_load_exc);
431 stbma(d_space, t3, pcd, pmc_store_exc); 431 stbma(d_space, t3, pcd, pmc_store_exc);
432 len--; 432 len--;
433 t2--; 433 t2--;
434 } 434 }
435 dst = (unsigned long)pcd; 435 dst = (unsigned long)pcd;
436 src = (unsigned long)pcs; 436 src = (unsigned long)pcs;
437 } 437 }
438 438
439 ret = copy_dstaligned(dst, src, len / sizeof(unsigned int)); 439 ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
440 if (ret) 440 if (ret)
441 return ret; 441 return ret;
442 442
443 pcs += (len & -sizeof(unsigned int)); 443 pcs += (len & -sizeof(unsigned int));
444 pcd += (len & -sizeof(unsigned int)); 444 pcd += (len & -sizeof(unsigned int));
445 len %= sizeof(unsigned int); 445 len %= sizeof(unsigned int);
446 446
447 preserve_branch(handle_load_error); 447 preserve_branch(handle_load_error);
448 preserve_branch(handle_store_error); 448 preserve_branch(handle_store_error);
449 449
450 goto byte_copy; 450 goto byte_copy;
451 451
452 handle_load_error: 452 handle_load_error:
453 __asm__ __volatile__ ("pmc_load_exc:\n"); 453 __asm__ __volatile__ ("pmc_load_exc:\n");
454 return PA_MEMCPY_LOAD_ERROR; 454 return PA_MEMCPY_LOAD_ERROR;
455 455
456 handle_store_error: 456 handle_store_error:
457 __asm__ __volatile__ ("pmc_store_exc:\n"); 457 __asm__ __volatile__ ("pmc_store_exc:\n");
458 return PA_MEMCPY_STORE_ERROR; 458 return PA_MEMCPY_STORE_ERROR;
459 } 459 }
460 460
461 461
462 /* Returns 0 on success; otherwise returns the number of bytes not transferred. */ 462 /* Returns 0 on success; otherwise returns the number of bytes not transferred. */
463 static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) 463 static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
464 { 464 {
465 unsigned long ret, fault_addr, reference; 465 unsigned long ret, fault_addr, reference;
466 struct exception_data *d; 466 struct exception_data *d;
467 467
468 ret = pa_memcpy_internal(dstp, srcp, len); 468 ret = pa_memcpy_internal(dstp, srcp, len);
469 if (likely(ret == PA_MEMCPY_OK)) 469 if (likely(ret == PA_MEMCPY_OK))
470 return 0; 470 return 0;
471 471
472 /* if a load or store fault occurred we can get the faulting addr */ 472 /* if a load or store fault occurred we can get the faulting addr */
473 d = &__get_cpu_var(exception_data); 473 d = &__get_cpu_var(exception_data);
474 fault_addr = d->fault_addr; 474 fault_addr = d->fault_addr;
475 475
476 /* error in load or store? */ 476 /* error in load or store? */
477 if (ret == PA_MEMCPY_LOAD_ERROR) 477 if (ret == PA_MEMCPY_LOAD_ERROR)
478 reference = (unsigned long) srcp; 478 reference = (unsigned long) srcp;
479 else 479 else
480 reference = (unsigned long) dstp; 480 reference = (unsigned long) dstp;
481 481
482 DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n", 482 DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
483 ret, len, fault_addr, reference); 483 ret, len, fault_addr, reference);
484 484
485 if (fault_addr >= reference) 485 if (fault_addr >= reference)
486 return len - (fault_addr - reference); 486 return len - (fault_addr - reference);
487 else 487 else
488 return len; 488 return len;
489 } 489 }
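
The remainder arithmetic above reports how many bytes were left uncopied: everything at or beyond the faulting address, measured from the buffer on the faulting side, never made it. A small worked model in plain C (bytes_not_copied is a hypothetical name):

    #include <stdio.h>

    static unsigned long bytes_not_copied(unsigned long len,
                                          unsigned long fault_addr,
                                          unsigned long reference)
    {
            if (fault_addr >= reference)
                    return len - (fault_addr - reference);
            return len;     /* fault outside the buffer: report nothing copied */
    }

    int main(void)
    {
            /* A store faults 40 bytes into a 100-byte copy whose
             * destination starts at 0x1000: 60 bytes remain uncopied. */
            printf("%lu\n", bytes_not_copied(100, 0x1028, 0x1000));
            return 0;
    }
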
490 490
491 #ifdef __KERNEL__ 491 #ifdef __KERNEL__
492 unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len) 492 unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len)
493 { 493 {
494 mtsp(get_kernel_space(), 1); 494 mtsp(get_kernel_space(), 1);
495 mtsp(get_user_space(), 2); 495 mtsp(get_user_space(), 2);
496 return pa_memcpy((void __force *)dst, src, len); 496 return pa_memcpy((void __force *)dst, src, len);
497 } 497 }
498 498
499 EXPORT_SYMBOL(__copy_from_user); 499 EXPORT_SYMBOL(__copy_from_user);
500 unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len) 500 unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len)
501 { 501 {
502 mtsp(get_user_space(), 1); 502 mtsp(get_user_space(), 1);
503 mtsp(get_kernel_space(), 2); 503 mtsp(get_kernel_space(), 2);
504 return pa_memcpy(dst, (void __force *)src, len); 504 return pa_memcpy(dst, (void __force *)src, len);
505 } 505 }
506 506
507 unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len) 507 unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len)
508 { 508 {
509 mtsp(get_user_space(), 1); 509 mtsp(get_user_space(), 1);
510 mtsp(get_user_space(), 2); 510 mtsp(get_user_space(), 2);
511 return pa_memcpy((void __force *)dst, (void __force *)src, len); 511 return pa_memcpy((void __force *)dst, (void __force *)src, len);
512 } 512 }
513 513
514 514
515 void * memcpy(void * dst,const void *src, size_t count) 515 void * memcpy(void * dst,const void *src, size_t count)
516 { 516 {
517 mtsp(get_kernel_space(), 1); 517 mtsp(get_kernel_space(), 1);
518 mtsp(get_kernel_space(), 2); 518 mtsp(get_kernel_space(), 2);
519 pa_memcpy(dst, src, count); 519 pa_memcpy(dst, src, count);
520 return dst; 520 return dst;
521 } 521 }
522 522
523 EXPORT_SYMBOL(copy_to_user); 523 EXPORT_SYMBOL(copy_to_user);
524 EXPORT_SYMBOL(copy_from_user); 524 EXPORT_SYMBOL(copy_from_user);
525 EXPORT_SYMBOL(copy_in_user); 525 EXPORT_SYMBOL(copy_in_user);
526 EXPORT_SYMBOL(memcpy); 526 EXPORT_SYMBOL(memcpy);
527
528 long probe_kernel_read(void *dst, const void *src, size_t size)
529 {
530 unsigned long addr = (unsigned long)src;
531
532 if (size < 0 || addr < PAGE_SIZE)
533 return -EFAULT;
534
535 /* check for I/O space F_EXTEND(0xfff00000) access as well? */
536
537 return __probe_kernel_read(dst, src, size);
538 }
539
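
This is the page-zero guard referred to in the pull request: parisc now overrides the generic probe_kernel_read() so that a source address inside the first page fails with -EFAULT up front instead of faulting inside the kernel. A minimal sketch of a caller that relies on the new behaviour (read_word_safely is a hypothetical helper, kernel context assumed):

    /* Hypothetical caller, e.g. backtrace code probing a suspect
     * pointer: with the override above, an address in page zero now
     * yields a clean -EFAULT instead of a kernel crash. */
    static int read_word_safely(const void *addr, unsigned int *out)
    {
            return probe_kernel_read(out, addr, sizeof(*out)) ? -EFAULT : 0;
    }
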
527 #endif 540 #endif
528 541
arch/parisc/mm/fault.c
1 /* 1 /*
2 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * 6 *
7 * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle 7 * Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
8 * Copyright 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org) 8 * Copyright 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
9 * Copyright 1999 Hewlett Packard Co. 9 * Copyright 1999 Hewlett Packard Co.
10 * 10 *
11 */ 11 */
12 12
13 #include <linux/mm.h> 13 #include <linux/mm.h>
14 #include <linux/ptrace.h> 14 #include <linux/ptrace.h>
15 #include <linux/sched.h> 15 #include <linux/sched.h>
16 #include <linux/interrupt.h> 16 #include <linux/interrupt.h>
17 #include <linux/module.h> 17 #include <linux/module.h>
18 18
19 #include <asm/uaccess.h> 19 #include <asm/uaccess.h>
20 #include <asm/traps.h> 20 #include <asm/traps.h>
21 21
22 #define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */ 22 #define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
23 /* dumped to the console via printk) */ 23 /* dumped to the console via printk) */
24 24
25 25
26 /* Various important other fields */ 26 /* Various important other fields */
27 #define bit22set(x) (x & 0x00000200) 27 #define bit22set(x) (x & 0x00000200)
28 #define bits23_25set(x) (x & 0x000001c0) 28 #define bits23_25set(x) (x & 0x000001c0)
29 #define isGraphicsFlushRead(x) ((x & 0xfc003fdf) == 0x04001a80) 29 #define isGraphicsFlushRead(x) ((x & 0xfc003fdf) == 0x04001a80)
30 /* extended opcode is 0x6a */ 30 /* extended opcode is 0x6a */
31 31
32 #define BITSSET 0x1c0 /* for identifying LDCW */ 32 #define BITSSET 0x1c0 /* for identifying LDCW */
33 33
34 34
35 DEFINE_PER_CPU(struct exception_data, exception_data); 35 DEFINE_PER_CPU(struct exception_data, exception_data);
36 36
37 /* 37 /*
38 * parisc_acctyp(unsigned int inst) -- 38 * parisc_acctyp(unsigned int inst) --
39 * Given a PA-RISC memory access instruction, determine if the 39 * Given a PA-RISC memory access instruction, determine if the
40 * instruction would perform a memory read or memory write 40 * instruction would perform a memory read or memory write
41 * operation. 41 * operation.
42 * 42 *
43 * This function assumes that the given instruction is a memory access 43 * This function assumes that the given instruction is a memory access
44 * instruction (i.e. you should really only call it if you know that 44 * instruction (i.e. you should really only call it if you know that
45 * the instruction has generated some sort of a memory access fault). 45 * the instruction has generated some sort of a memory access fault).
46 * 46 *
47 * Returns: 47 * Returns:
48 * VM_READ if read operation 48 * VM_READ if read operation
49 * VM_WRITE if write operation 49 * VM_WRITE if write operation
50 * VM_EXEC if execute operation 50 * VM_EXEC if execute operation
51 */ 51 */
52 static unsigned long 52 static unsigned long
53 parisc_acctyp(unsigned long code, unsigned int inst) 53 parisc_acctyp(unsigned long code, unsigned int inst)
54 { 54 {
55 if (code == 6 || code == 16) 55 if (code == 6 || code == 16)
56 return VM_EXEC; 56 return VM_EXEC;
57 57
58 switch (inst & 0xf0000000) { 58 switch (inst & 0xf0000000) {
59 case 0x40000000: /* load */ 59 case 0x40000000: /* load */
60 case 0x50000000: /* new load */ 60 case 0x50000000: /* new load */
61 return VM_READ; 61 return VM_READ;
62 62
63 case 0x60000000: /* store */ 63 case 0x60000000: /* store */
64 case 0x70000000: /* new store */ 64 case 0x70000000: /* new store */
65 return VM_WRITE; 65 return VM_WRITE;
66 66
67 case 0x20000000: /* coproc */ 67 case 0x20000000: /* coproc */
68 case 0x30000000: /* coproc2 */ 68 case 0x30000000: /* coproc2 */
69 if (bit22set(inst)) 69 if (bit22set(inst))
70 return VM_WRITE; 70 return VM_WRITE;
71 71
72 case 0x0: /* indexed/memory management */ 72 case 0x0: /* indexed/memory management */
73 if (bit22set(inst)) { 73 if (bit22set(inst)) {
74 /* 74 /*
75 * Check for the 'Graphics Flush Read' instruction. 75 * Check for the 'Graphics Flush Read' instruction.
76 * It resembles an FDC instruction, except for bits 76 * It resembles an FDC instruction, except for bits
77 * 20 and 21. Any combination other than zero will 77 * 20 and 21. Any combination other than zero will
78 * utilize the block mover functionality on some 78 * utilize the block mover functionality on some
79 * older PA-RISC platforms. The case where a block 79 * older PA-RISC platforms. The case where a block
80 * move is performed from VM to graphics IO space 80 * move is performed from VM to graphics IO space
81 * should be treated as a READ. 81 * should be treated as a READ.
82 * 82 *
83 * The significance of bits 20,21 in the FDC 83 * The significance of bits 20,21 in the FDC
84 * instruction is: 84 * instruction is:
85 * 85 *
86 * 00 Flush data cache (normal instruction behavior) 86 * 00 Flush data cache (normal instruction behavior)
87 * 01 Graphics flush write (IO space -> VM) 87 * 01 Graphics flush write (IO space -> VM)
88 * 10 Graphics flush read (VM -> IO space) 88 * 10 Graphics flush read (VM -> IO space)
89 * 11 Graphics flush read/write (VM <-> IO space) 89 * 11 Graphics flush read/write (VM <-> IO space)
90 */ 90 */
91 if (isGraphicsFlushRead(inst)) 91 if (isGraphicsFlushRead(inst))
92 return VM_READ; 92 return VM_READ;
93 return VM_WRITE; 93 return VM_WRITE;
94 } else { 94 } else {
95 /* 95 /*
96 * Check for LDCWX and LDCWS (semaphore instructions). 96 * Check for LDCWX and LDCWS (semaphore instructions).
97 * If bits 23 through 25 are all 1's it is one of 97 * If bits 23 through 25 are all 1's it is one of
98 * the above two instructions and is a write. 98 * the above two instructions and is a write.
99 * 99 *
100 * Note: With the limited bits we are looking at, 100 * Note: With the limited bits we are looking at,
101 * this will also catch PROBEW and PROBEWI. However, 101 * this will also catch PROBEW and PROBEWI. However,
102 * these should never get in here because they don't 102 * these should never get in here because they don't
103 * generate exceptions of the type: 103 * generate exceptions of the type:
104 * Data TLB miss fault/data page fault 104 * Data TLB miss fault/data page fault
105 * Data memory protection trap 105 * Data memory protection trap
106 */ 106 */
107 if (bits23_25set(inst) == BITSSET) 107 if (bits23_25set(inst) == BITSSET)
108 return VM_WRITE; 108 return VM_WRITE;
109 } 109 }
110 return VM_READ; /* Default */ 110 return VM_READ; /* Default */
111 } 111 }
112 return VM_READ; /* Default */ 112 return VM_READ; /* Default */
113 } 113 }
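
parisc_acctyp() dispatches on the top four bits of the big-endian instruction word, which carry most of the PA-RISC major opcode. A quick user-space model covering only the plain load/store rows of the switch (acc_model and the sample encodings are illustrative, not taken from this file):

    #include <stdio.h>

    static const char *acc_model(unsigned int inst)
    {
            switch (inst & 0xf0000000) {
            case 0x40000000:        /* load */
            case 0x50000000:        /* new load */
                    return "VM_READ";
            case 0x60000000:        /* store */
            case 0x70000000:        /* new store */
                    return "VM_WRITE";
            default:
                    return "needs the full decode above";
            }
    }

    int main(void)
    {
            printf("%s\n", acc_model(0x48213fd1)); /* top nibble 4: load class */
            printf("%s\n", acc_model(0x6bc23fd9)); /* top nibble 6: store class */
            return 0;
    }
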
114 114
115 #undef bit22set 115 #undef bit22set
116 #undef bits23_25set 116 #undef bits23_25set
117 #undef isGraphicsFlushRead 117 #undef isGraphicsFlushRead
118 #undef BITSSET 118 #undef BITSSET
119 119
120 120
121 #if 0 121 #if 0
122 /* This is the treewalk to find a vma which is the highest that has 122 /* This is the treewalk to find a vma which is the highest that has
123 * a start < addr. We're using find_vma_prev instead right now, but 123 * a start < addr. We're using find_vma_prev instead right now, but
124 * we might want to use this at some point in the future. Probably 124 * we might want to use this at some point in the future. Probably
125 * not, but I want it committed to CVS so I don't lose it :-) 125 * not, but I want it committed to CVS so I don't lose it :-)
126 */ 126 */
127 while (tree != vm_avl_empty) { 127 while (tree != vm_avl_empty) {
128 if (tree->vm_start > addr) { 128 if (tree->vm_start > addr) {
129 tree = tree->vm_avl_left; 129 tree = tree->vm_avl_left;
130 } else { 130 } else {
131 prev = tree; 131 prev = tree;
132 if (prev->vm_next == NULL) 132 if (prev->vm_next == NULL)
133 break; 133 break;
134 if (prev->vm_next->vm_start > addr) 134 if (prev->vm_next->vm_start > addr)
135 break; 135 break;
136 tree = tree->vm_avl_right; 136 tree = tree->vm_avl_right;
137 } 137 }
138 } 138 }
139 #endif 139 #endif
140 140
141 int fixup_exception(struct pt_regs *regs) 141 int fixup_exception(struct pt_regs *regs)
142 { 142 {
143 const struct exception_table_entry *fix; 143 const struct exception_table_entry *fix;
144 144
145 fix = search_exception_tables(regs->iaoq[0]); 145 fix = search_exception_tables(regs->iaoq[0]);
146 if (fix) { 146 if (fix) {
147 struct exception_data *d; 147 struct exception_data *d;
148 d = &__get_cpu_var(exception_data); 148 d = &__get_cpu_var(exception_data);
149 d->fault_ip = regs->iaoq[0]; 149 d->fault_ip = regs->iaoq[0];
150 d->fault_space = regs->isr; 150 d->fault_space = regs->isr;
151 d->fault_addr = regs->ior; 151 d->fault_addr = regs->ior;
152 152
153 regs->iaoq[0] = ((fix->fixup) & ~3); 153 regs->iaoq[0] = ((fix->fixup) & ~3);
154 /* 154 /*
155 * NOTE: In some cases the faulting instruction 155 * NOTE: In some cases the faulting instruction
156 * may be in the delay slot of a branch. We 156 * may be in the delay slot of a branch. We
157 * don't want to take the branch, so we don't 157 * don't want to take the branch, so we don't
158 * increment iaoq[1]; instead we set it to be 158 * increment iaoq[1]; instead we set it to be
159 * iaoq[0]+4, and clear the B bit in the PSW 159 * iaoq[0]+4, and clear the B bit in the PSW
160 */ 160 */
161 regs->iaoq[1] = regs->iaoq[0] + 4; 161 regs->iaoq[1] = regs->iaoq[0] + 4;
162 regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */ 162 regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
163 163
164 return 1; 164 return 1;
165 } 165 }
166 166
167 return 0; 167 return 0;
168 } 168 }
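
fixup_exception() is the consumer of the ASM_EXCEPTIONTABLE_ENTRY() annotations used in memcpy.c above: each annotated instruction address is paired with a recovery address, and on a fault both halves of the instruction-address queue are rewritten so that a faulting delay-slot instruction cannot still take its branch. Conceptually the table lookup behaves like this simplified model (the real kernel keeps the table sorted and binary-searches it):

    /* Simplified model of an exception-table lookup. */
    struct ex_entry {
            unsigned long insn;     /* address of the faulting instruction */
            unsigned long fixup;    /* address to resume execution at */
    };

    static unsigned long find_fixup(const struct ex_entry *tbl,
                                    unsigned int n, unsigned long fault_ip)
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    if (tbl[i].insn == fault_ip)
                            return tbl[i].fixup;    /* becomes iaoq[0] */
            return 0;       /* no fixup registered: a genuine kernel bug */
    }
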
169 169
170 void do_page_fault(struct pt_regs *regs, unsigned long code, 170 void do_page_fault(struct pt_regs *regs, unsigned long code,
171 unsigned long address) 171 unsigned long address)
172 { 172 {
173 struct vm_area_struct *vma, *prev_vma; 173 struct vm_area_struct *vma, *prev_vma;
174 struct task_struct *tsk = current; 174 struct task_struct *tsk;
175 struct mm_struct *mm = tsk->mm; 175 struct mm_struct *mm;
176 unsigned long acc_type; 176 unsigned long acc_type;
177 int fault; 177 int fault;
178 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 178 unsigned int flags;
179 179
180 if (in_atomic() || !mm) 180 if (in_atomic())
181 goto no_context; 181 goto no_context;
182 182
183 tsk = current;
184 mm = tsk->mm;
185 if (!mm)
186 goto no_context;
187
188 flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
183 if (user_mode(regs)) 189 if (user_mode(regs))
184 flags |= FAULT_FLAG_USER; 190 flags |= FAULT_FLAG_USER;
185 191
186 acc_type = parisc_acctyp(code, regs->iir); 192 acc_type = parisc_acctyp(code, regs->iir);
187
188 if (acc_type & VM_WRITE) 193 if (acc_type & VM_WRITE)
189 flags |= FAULT_FLAG_WRITE; 194 flags |= FAULT_FLAG_WRITE;
190 retry: 195 retry:
191 down_read(&mm->mmap_sem); 196 down_read(&mm->mmap_sem);
192 vma = find_vma_prev(mm, address, &prev_vma); 197 vma = find_vma_prev(mm, address, &prev_vma);
193 if (!vma || address < vma->vm_start) 198 if (!vma || address < vma->vm_start)
194 goto check_expansion; 199 goto check_expansion;
195 /* 200 /*
196 * Ok, we have a good vm_area for this memory access. We still need to 201 * Ok, we have a good vm_area for this memory access. We still need to
197 * check the access permissions. 202 * check the access permissions.
198 */ 203 */
199 204
200 good_area: 205 good_area:
201 206
202 if ((vma->vm_flags & acc_type) != acc_type) 207 if ((vma->vm_flags & acc_type) != acc_type)
203 goto bad_area; 208 goto bad_area;
204 209
205 /* 210 /*
206 * If for any reason at all we couldn't handle the fault, make 211 * If for any reason at all we couldn't handle the fault, make
207 * sure we exit gracefully rather than endlessly redo the 212 * sure we exit gracefully rather than endlessly redo the
208 * fault. 213 * fault.
209 */ 214 */
210 215
211 fault = handle_mm_fault(mm, vma, address, flags); 216 fault = handle_mm_fault(mm, vma, address, flags);
212 217
213 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) 218 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
214 return; 219 return;
215 220
216 if (unlikely(fault & VM_FAULT_ERROR)) { 221 if (unlikely(fault & VM_FAULT_ERROR)) {
217 /* 222 /*
218 * We hit a shared mapping outside of the file, or some 223 * We hit a shared mapping outside of the file, or some
219 * other thing happened to us that made us unable to 224 * other thing happened to us that made us unable to
220 * handle the page fault gracefully. 225 * handle the page fault gracefully.
221 */ 226 */
222 if (fault & VM_FAULT_OOM) 227 if (fault & VM_FAULT_OOM)
223 goto out_of_memory; 228 goto out_of_memory;
224 else if (fault & VM_FAULT_SIGBUS) 229 else if (fault & VM_FAULT_SIGBUS)
225 goto bad_area; 230 goto bad_area;
226 BUG(); 231 BUG();
227 } 232 }
228 if (flags & FAULT_FLAG_ALLOW_RETRY) { 233 if (flags & FAULT_FLAG_ALLOW_RETRY) {
229 if (fault & VM_FAULT_MAJOR) 234 if (fault & VM_FAULT_MAJOR)
230 current->maj_flt++; 235 current->maj_flt++;
231 else 236 else
232 current->min_flt++; 237 current->min_flt++;
233 if (fault & VM_FAULT_RETRY) { 238 if (fault & VM_FAULT_RETRY) {
234 flags &= ~FAULT_FLAG_ALLOW_RETRY; 239 flags &= ~FAULT_FLAG_ALLOW_RETRY;
235 240
236 /* 241 /*
237 * No need to up_read(&mm->mmap_sem) as we would 242 * No need to up_read(&mm->mmap_sem) as we would
238 * have already released it in __lock_page_or_retry 243 * have already released it in __lock_page_or_retry
239 * in mm/filemap.c. 244 * in mm/filemap.c.
240 */ 245 */
241 246
242 goto retry; 247 goto retry;
243 } 248 }
244 } 249 }
245 up_read(&mm->mmap_sem); 250 up_read(&mm->mmap_sem);
246 return; 251 return;
247 252
248 check_expansion: 253 check_expansion:
249 vma = prev_vma; 254 vma = prev_vma;
250 if (vma && (expand_stack(vma, address) == 0)) 255 if (vma && (expand_stack(vma, address) == 0))
251 goto good_area; 256 goto good_area;
252 257
253 /* 258 /*
254 * Something tried to access memory that isn't in our memory map. 259 * Something tried to access memory that isn't in our memory map.
255 */ 260 */
256 bad_area: 261 bad_area:
257 up_read(&mm->mmap_sem); 262 up_read(&mm->mmap_sem);
258 263
259 if (user_mode(regs)) { 264 if (user_mode(regs)) {
260 struct siginfo si; 265 struct siginfo si;
261 266
262 #ifdef PRINT_USER_FAULTS 267 #ifdef PRINT_USER_FAULTS
263 printk(KERN_DEBUG "\n"); 268 printk(KERN_DEBUG "\n");
264 printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n", 269 printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
265 task_pid_nr(tsk), tsk->comm, code, address); 270 task_pid_nr(tsk), tsk->comm, code, address);
266 if (vma) { 271 if (vma) {
267 printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n", 272 printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
268 vma->vm_start, vma->vm_end); 273 vma->vm_start, vma->vm_end);
269 } 274 }
270 show_regs(regs); 275 show_regs(regs);
271 #endif 276 #endif
272 /* FIXME: actually we need to get the signo and code correct */ 277 /* FIXME: actually we need to get the signo and code correct */
273 si.si_signo = SIGSEGV; 278 si.si_signo = SIGSEGV;
274 si.si_errno = 0; 279 si.si_errno = 0;
275 si.si_code = SEGV_MAPERR; 280 si.si_code = SEGV_MAPERR;
276 si.si_addr = (void __user *) address; 281 si.si_addr = (void __user *) address;
277 force_sig_info(SIGSEGV, &si, current); 282 force_sig_info(SIGSEGV, &si, current);
278 return; 283 return;
279 } 284 }
280 285
281 no_context: 286 no_context:
282 287
283 if (!user_mode(regs) && fixup_exception(regs)) { 288 if (!user_mode(regs) && fixup_exception(regs)) {
284 return; 289 return;
285 } 290 }
286 291
287 parisc_terminate("Bad Address (null pointer deref?)", regs, code, address); 292 parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
288 293
289 out_of_memory: 294 out_of_memory:
290 up_read(&mm->mmap_sem); 295 up_read(&mm->mmap_sem);
291 if (!user_mode(regs)) 296 if (!user_mode(regs))
292 goto no_context; 297 goto no_context;
293 pagefault_out_of_memory(); 298 pagefault_out_of_memory();
294 } 299 }
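
Note how the rewritten preamble reads nothing task-related until the in_atomic() check has passed; that is the "optimize variable initialization" change from this series, and it is what allows a fault taken under pagefault_disable() to reach fixup_exception() without touching possibly-invalid state. Reduced to its control-flow shape (a sketch with hypothetical names, not the kernel function):

    /* Sketch of the preamble ordering do_page_fault() now follows. */
    static void page_fault_shape(int atomic_ctx, void *mm)
    {
            if (atomic_ctx)
                    return;         /* no_context: try fixup_exception() */
            if (!mm)
                    return;         /* likewise: no user context to consult */
            /* Only now is it safe to build the fault flags, take
             * mmap_sem and walk the vma tree. */
    }
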