Commit dd0fd51dc1585941c2edccdb40e5f11ea3a64496

Authored by James Bottomley
Committed by Kyle McMartin
1 parent 63af965d2b

[PARISC] Remove the spurious do_softirq calls from entry.S

remove the spurious do_softirq calls from entry.S

With these in we were calling do_softirq twice; plus the calls in
entry.S took no account of nesting.

Signed-off-by: James Bottomley <jejb@parisc-linux.org>

Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>

Showing 1 changed file with 0 additions and 27 deletions Inline Diff

arch/parisc/kernel/entry.S
1 /* 1 /*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/) 2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 * 3 *
4 * kernel entry points (interruptions, system call wrappers) 4 * kernel entry points (interruptions, system call wrappers)
5 * Copyright (C) 1999,2000 Philipp Rumpf 5 * Copyright (C) 1999,2000 Philipp Rumpf
6 * Copyright (C) 1999 SuSE GmbH Nuernberg 6 * Copyright (C) 1999 SuSE GmbH Nuernberg
7 * Copyright (C) 2000 Hewlett-Packard (John Marvin) 7 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand) 8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option) 12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version. 13 * any later version.
14 * 14 *
15 * This program is distributed in the hope that it will be useful, 15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of 16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details. 18 * GNU General Public License for more details.
19 * 19 *
20 * You should have received a copy of the GNU General Public License 20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software 21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */ 23 */
24 24
25 #include <linux/config.h> 25 #include <linux/config.h>
26 #include <asm/asm-offsets.h> 26 #include <asm/asm-offsets.h>
27 27
28 /* we have the following possibilities to act on an interruption: 28 /* we have the following possibilities to act on an interruption:
29 * - handle in assembly and use shadowed registers only 29 * - handle in assembly and use shadowed registers only
30 * - save registers to kernel stack and handle in assembly or C */ 30 * - save registers to kernel stack and handle in assembly or C */
31 31
32 32
33 #include <asm/psw.h> 33 #include <asm/psw.h>
34 #include <asm/assembly.h> /* for LDREG/STREG defines */ 34 #include <asm/assembly.h> /* for LDREG/STREG defines */
35 #include <asm/pgtable.h> 35 #include <asm/pgtable.h>
36 #include <asm/signal.h> 36 #include <asm/signal.h>
37 #include <asm/unistd.h> 37 #include <asm/unistd.h>
38 #include <asm/thread_info.h> 38 #include <asm/thread_info.h>
39 39
40 #ifdef CONFIG_64BIT 40 #ifdef CONFIG_64BIT
41 #define CMPIB cmpib,* 41 #define CMPIB cmpib,*
42 #define CMPB cmpb,* 42 #define CMPB cmpb,*
43 #define COND(x) *x 43 #define COND(x) *x
44 44
45 .level 2.0w 45 .level 2.0w
46 #else 46 #else
47 #define CMPIB cmpib, 47 #define CMPIB cmpib,
48 #define CMPB cmpb, 48 #define CMPB cmpb,
49 #define COND(x) x 49 #define COND(x) x
50 50
51 .level 2.0 51 .level 2.0
52 #endif 52 #endif
53 53
54 .import pa_dbit_lock,data 54 .import pa_dbit_lock,data
55 55
56 /* space_to_prot macro creates a prot id from a space id */ 56 /* space_to_prot macro creates a prot id from a space id */
57 57
58 #if (SPACEID_SHIFT) == 0 58 #if (SPACEID_SHIFT) == 0
59 .macro space_to_prot spc prot 59 .macro space_to_prot spc prot
60 depd,z \spc,62,31,\prot 60 depd,z \spc,62,31,\prot
61 .endm 61 .endm
62 #else 62 #else
63 .macro space_to_prot spc prot 63 .macro space_to_prot spc prot
64 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot 64 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
65 .endm 65 .endm
66 #endif 66 #endif
67 67
68 /* Switch to virtual mapping, trashing only %r1 */ 68 /* Switch to virtual mapping, trashing only %r1 */
69 .macro virt_map 69 .macro virt_map
70 /* pcxt_ssm_bug */ 70 /* pcxt_ssm_bug */
71 rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */ 71 rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */
72 mtsp %r0, %sr4 72 mtsp %r0, %sr4
73 mtsp %r0, %sr5 73 mtsp %r0, %sr5
74 mfsp %sr7, %r1 74 mfsp %sr7, %r1
75 or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */ 75 or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
76 mtsp %r1, %sr3 76 mtsp %r1, %sr3
77 tovirt_r1 %r29 77 tovirt_r1 %r29
78 load32 KERNEL_PSW, %r1 78 load32 KERNEL_PSW, %r1
79 79
80 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */ 80 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
81 mtsp %r0, %sr6 81 mtsp %r0, %sr6
82 mtsp %r0, %sr7 82 mtsp %r0, %sr7
83 mtctl %r0, %cr17 /* Clear IIASQ tail */ 83 mtctl %r0, %cr17 /* Clear IIASQ tail */
84 mtctl %r0, %cr17 /* Clear IIASQ head */ 84 mtctl %r0, %cr17 /* Clear IIASQ head */
85 mtctl %r1, %ipsw 85 mtctl %r1, %ipsw
86 load32 4f, %r1 86 load32 4f, %r1
87 mtctl %r1, %cr18 /* Set IIAOQ tail */ 87 mtctl %r1, %cr18 /* Set IIAOQ tail */
88 ldo 4(%r1), %r1 88 ldo 4(%r1), %r1
89 mtctl %r1, %cr18 /* Set IIAOQ head */ 89 mtctl %r1, %cr18 /* Set IIAOQ head */
90 rfir 90 rfir
91 nop 91 nop
92 4: 92 4:
93 .endm 93 .endm
94 94
95 /* 95 /*
96 * The "get_stack" macros are responsible for determining the 96 * The "get_stack" macros are responsible for determining the
97 * kernel stack value. 97 * kernel stack value.
98 * 98 *
99 * For Faults: 99 * For Faults:
100 * If sr7 == 0 100 * If sr7 == 0
101 * Already using a kernel stack, so call the 101 * Already using a kernel stack, so call the
102 * get_stack_use_r30 macro to push a pt_regs structure 102 * get_stack_use_r30 macro to push a pt_regs structure
103 * on the stack, and store registers there. 103 * on the stack, and store registers there.
104 * else 104 * else
105 * Need to set up a kernel stack, so call the 105 * Need to set up a kernel stack, so call the
106 * get_stack_use_cr30 macro to set up a pointer 106 * get_stack_use_cr30 macro to set up a pointer
107 * to the pt_regs structure contained within the 107 * to the pt_regs structure contained within the
108 * task pointer pointed to by cr30. Set the stack 108 * task pointer pointed to by cr30. Set the stack
109 * pointer to point to the end of the task structure. 109 * pointer to point to the end of the task structure.
110 * 110 *
111 * For Interrupts: 111 * For Interrupts:
112 * If sr7 == 0 112 * If sr7 == 0
113 * Already using a kernel stack, check to see if r30 113 * Already using a kernel stack, check to see if r30
114 * is already pointing to the per processor interrupt 114 * is already pointing to the per processor interrupt
115 * stack. If it is, call the get_stack_use_r30 macro 115 * stack. If it is, call the get_stack_use_r30 macro
116 * to push a pt_regs structure on the stack, and store 116 * to push a pt_regs structure on the stack, and store
117 * registers there. Otherwise, call get_stack_use_cr31 117 * registers there. Otherwise, call get_stack_use_cr31
118 * to get a pointer to the base of the interrupt stack 118 * to get a pointer to the base of the interrupt stack
119 * and push a pt_regs structure on that stack. 119 * and push a pt_regs structure on that stack.
120 * else 120 * else
121 * Need to set up a kernel stack, so call the 121 * Need to set up a kernel stack, so call the
122 * get_stack_use_cr30 macro to set up a pointer 122 * get_stack_use_cr30 macro to set up a pointer
123 * to the pt_regs structure contained within the 123 * to the pt_regs structure contained within the
124 * task pointer pointed to by cr30. Set the stack 124 * task pointer pointed to by cr30. Set the stack
125 * pointer to point to the end of the task structure. 125 * pointer to point to the end of the task structure.
126 * N.B: We don't use the interrupt stack for the 126 * N.B: We don't use the interrupt stack for the
127 * first interrupt from userland, because signals/ 127 * first interrupt from userland, because signals/
128 * resched's are processed when returning to userland, 128 * resched's are processed when returning to userland,
129 * and we can sleep in those cases. 129 * and we can sleep in those cases.
130 * 130 *
131 * Note that we use shadowed registers for temps until 131 * Note that we use shadowed registers for temps until
132 * we can save %r26 and %r29. %r26 is used to preserve 132 * we can save %r26 and %r29. %r26 is used to preserve
133 * %r8 (a shadowed register) which temporarily contained 133 * %r8 (a shadowed register) which temporarily contained
134 * either the fault type ("code") or the eirr. We need 134 * either the fault type ("code") or the eirr. We need
135 * to use a non-shadowed register to carry the value over 135 * to use a non-shadowed register to carry the value over
136 * the rfir in virt_map. We use %r26 since this value winds 136 * the rfir in virt_map. We use %r26 since this value winds
137 * up being passed as the argument to either do_cpu_irq_mask 137 * up being passed as the argument to either do_cpu_irq_mask
138 * or handle_interruption. %r29 is used to hold a pointer 138 * or handle_interruption. %r29 is used to hold a pointer
139 * the register save area, and once again, it needs to 139 * the register save area, and once again, it needs to
140 * be a non-shadowed register so that it survives the rfir. 140 * be a non-shadowed register so that it survives the rfir.
141 * 141 *
142 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame. 142 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
143 */ 143 */
144 144
145 .macro get_stack_use_cr30 145 .macro get_stack_use_cr30
146 146
147 /* we save the registers in the task struct */ 147 /* we save the registers in the task struct */
148 148
149 mfctl %cr30, %r1 149 mfctl %cr30, %r1
150 tophys %r1,%r9 150 tophys %r1,%r9
151 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */ 151 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
152 tophys %r1,%r9 152 tophys %r1,%r9
153 ldo TASK_REGS(%r9),%r9 153 ldo TASK_REGS(%r9),%r9
154 STREG %r30, PT_GR30(%r9) 154 STREG %r30, PT_GR30(%r9)
155 STREG %r29,PT_GR29(%r9) 155 STREG %r29,PT_GR29(%r9)
156 STREG %r26,PT_GR26(%r9) 156 STREG %r26,PT_GR26(%r9)
157 copy %r9,%r29 157 copy %r9,%r29
158 mfctl %cr30, %r1 158 mfctl %cr30, %r1
159 ldo THREAD_SZ_ALGN(%r1), %r30 159 ldo THREAD_SZ_ALGN(%r1), %r30
160 .endm 160 .endm
161 161
162 .macro get_stack_use_r30 162 .macro get_stack_use_r30
163 163
164 /* we put a struct pt_regs on the stack and save the registers there */ 164 /* we put a struct pt_regs on the stack and save the registers there */
165 165
166 tophys %r30,%r9 166 tophys %r30,%r9
167 STREG %r30,PT_GR30(%r9) 167 STREG %r30,PT_GR30(%r9)
168 ldo PT_SZ_ALGN(%r30),%r30 168 ldo PT_SZ_ALGN(%r30),%r30
169 STREG %r29,PT_GR29(%r9) 169 STREG %r29,PT_GR29(%r9)
170 STREG %r26,PT_GR26(%r9) 170 STREG %r26,PT_GR26(%r9)
171 copy %r9,%r29 171 copy %r9,%r29
172 .endm 172 .endm
173 173
174 .macro rest_stack 174 .macro rest_stack
175 LDREG PT_GR1(%r29), %r1 175 LDREG PT_GR1(%r29), %r1
176 LDREG PT_GR30(%r29),%r30 176 LDREG PT_GR30(%r29),%r30
177 LDREG PT_GR29(%r29),%r29 177 LDREG PT_GR29(%r29),%r29
178 .endm 178 .endm
179 179
180 /* default interruption handler 180 /* default interruption handler
181 * (calls traps.c:handle_interruption) */ 181 * (calls traps.c:handle_interruption) */
182 .macro def code 182 .macro def code
183 b intr_save 183 b intr_save
184 ldi \code, %r8 184 ldi \code, %r8
185 .align 32 185 .align 32
186 .endm 186 .endm
187 187
188 /* Interrupt interruption handler 188 /* Interrupt interruption handler
189 * (calls irq.c:do_cpu_irq_mask) */ 189 * (calls irq.c:do_cpu_irq_mask) */
190 .macro extint code 190 .macro extint code
191 b intr_extint 191 b intr_extint
192 mfsp %sr7,%r16 192 mfsp %sr7,%r16
193 .align 32 193 .align 32
194 .endm 194 .endm
195 195
196 .import os_hpmc, code 196 .import os_hpmc, code
197 197
198 /* HPMC handler */ 198 /* HPMC handler */
199 .macro hpmc code 199 .macro hpmc code
200 nop /* must be a NOP, will be patched later */ 200 nop /* must be a NOP, will be patched later */
201 load32 PA(os_hpmc), %r3 201 load32 PA(os_hpmc), %r3
202 bv,n 0(%r3) 202 bv,n 0(%r3)
203 nop 203 nop
204 .word 0 /* checksum (will be patched) */ 204 .word 0 /* checksum (will be patched) */
205 .word PA(os_hpmc) /* address of handler */ 205 .word PA(os_hpmc) /* address of handler */
206 .word 0 /* length of handler */ 206 .word 0 /* length of handler */
207 .endm 207 .endm
208 208
209 /* 209 /*
210 * Performance Note: Instructions will be moved up into 210 * Performance Note: Instructions will be moved up into
211 * this part of the code later on, once we are sure 211 * this part of the code later on, once we are sure
212 * that the tlb miss handlers are close to final form. 212 * that the tlb miss handlers are close to final form.
213 */ 213 */
214 214
215 /* Register definitions for tlb miss handler macros */ 215 /* Register definitions for tlb miss handler macros */
216 216
217 va = r8 /* virtual address for which the trap occurred */ 217 va = r8 /* virtual address for which the trap occurred */
218 spc = r24 /* space for which the trap occurred */ 218 spc = r24 /* space for which the trap occurred */
219 219
220 #ifndef CONFIG_64BIT 220 #ifndef CONFIG_64BIT
221 221
222 /* 222 /*
223 * itlb miss interruption handler (parisc 1.1 - 32 bit) 223 * itlb miss interruption handler (parisc 1.1 - 32 bit)
224 */ 224 */
225 225
226 .macro itlb_11 code 226 .macro itlb_11 code
227 227
228 mfctl %pcsq, spc 228 mfctl %pcsq, spc
229 b itlb_miss_11 229 b itlb_miss_11
230 mfctl %pcoq, va 230 mfctl %pcoq, va
231 231
232 .align 32 232 .align 32
233 .endm 233 .endm
234 #endif 234 #endif
235 235
236 /* 236 /*
237 * itlb miss interruption handler (parisc 2.0) 237 * itlb miss interruption handler (parisc 2.0)
238 */ 238 */
239 239
240 .macro itlb_20 code 240 .macro itlb_20 code
241 mfctl %pcsq, spc 241 mfctl %pcsq, spc
242 #ifdef CONFIG_64BIT 242 #ifdef CONFIG_64BIT
243 b itlb_miss_20w 243 b itlb_miss_20w
244 #else 244 #else
245 b itlb_miss_20 245 b itlb_miss_20
246 #endif 246 #endif
247 mfctl %pcoq, va 247 mfctl %pcoq, va
248 248
249 .align 32 249 .align 32
250 .endm 250 .endm
251 251
252 #ifndef CONFIG_64BIT 252 #ifndef CONFIG_64BIT
253 /* 253 /*
254 * naitlb miss interruption handler (parisc 1.1 - 32 bit) 254 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
255 * 255 *
256 * Note: naitlb misses will be treated 256 * Note: naitlb misses will be treated
257 * as an ordinary itlb miss for now. 257 * as an ordinary itlb miss for now.
258 * However, note that naitlb misses 258 * However, note that naitlb misses
259 * have the faulting address in the 259 * have the faulting address in the
260 * IOR/ISR. 260 * IOR/ISR.
261 */ 261 */
262 262
263 .macro naitlb_11 code 263 .macro naitlb_11 code
264 264
265 mfctl %isr,spc 265 mfctl %isr,spc
266 b itlb_miss_11 266 b itlb_miss_11
267 mfctl %ior,va 267 mfctl %ior,va
268 /* FIXME: If user causes a naitlb miss, the priv level may not be in 268 /* FIXME: If user causes a naitlb miss, the priv level may not be in
269 * lower bits of va, where the itlb miss handler is expecting them 269 * lower bits of va, where the itlb miss handler is expecting them
270 */ 270 */
271 271
272 .align 32 272 .align 32
273 .endm 273 .endm
274 #endif 274 #endif
275 275
276 /* 276 /*
277 * naitlb miss interruption handler (parisc 2.0) 277 * naitlb miss interruption handler (parisc 2.0)
278 * 278 *
279 * Note: naitlb misses will be treated 279 * Note: naitlb misses will be treated
280 * as an ordinary itlb miss for now. 280 * as an ordinary itlb miss for now.
281 * However, note that naitlb misses 281 * However, note that naitlb misses
282 * have the faulting address in the 282 * have the faulting address in the
283 * IOR/ISR. 283 * IOR/ISR.
284 */ 284 */
285 285
286 .macro naitlb_20 code 286 .macro naitlb_20 code
287 287
288 mfctl %isr,spc 288 mfctl %isr,spc
289 #ifdef CONFIG_64BIT 289 #ifdef CONFIG_64BIT
290 b itlb_miss_20w 290 b itlb_miss_20w
291 #else 291 #else
292 b itlb_miss_20 292 b itlb_miss_20
293 #endif 293 #endif
294 mfctl %ior,va 294 mfctl %ior,va
295 /* FIXME: If user causes a naitlb miss, the priv level may not be in 295 /* FIXME: If user causes a naitlb miss, the priv level may not be in
296 * lower bits of va, where the itlb miss handler is expecting them 296 * lower bits of va, where the itlb miss handler is expecting them
297 */ 297 */
298 298
299 .align 32 299 .align 32
300 .endm 300 .endm
301 301
302 #ifndef CONFIG_64BIT 302 #ifndef CONFIG_64BIT
303 /* 303 /*
304 * dtlb miss interruption handler (parisc 1.1 - 32 bit) 304 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
305 */ 305 */
306 306
307 .macro dtlb_11 code 307 .macro dtlb_11 code
308 308
309 mfctl %isr, spc 309 mfctl %isr, spc
310 b dtlb_miss_11 310 b dtlb_miss_11
311 mfctl %ior, va 311 mfctl %ior, va
312 312
313 .align 32 313 .align 32
314 .endm 314 .endm
315 #endif 315 #endif
316 316
317 /* 317 /*
318 * dtlb miss interruption handler (parisc 2.0) 318 * dtlb miss interruption handler (parisc 2.0)
319 */ 319 */
320 320
321 .macro dtlb_20 code 321 .macro dtlb_20 code
322 322
323 mfctl %isr, spc 323 mfctl %isr, spc
324 #ifdef CONFIG_64BIT 324 #ifdef CONFIG_64BIT
325 b dtlb_miss_20w 325 b dtlb_miss_20w
326 #else 326 #else
327 b dtlb_miss_20 327 b dtlb_miss_20
328 #endif 328 #endif
329 mfctl %ior, va 329 mfctl %ior, va
330 330
331 .align 32 331 .align 32
332 .endm 332 .endm
333 333
334 #ifndef CONFIG_64BIT 334 #ifndef CONFIG_64BIT
335 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */ 335 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
336 336
337 .macro nadtlb_11 code 337 .macro nadtlb_11 code
338 338
339 mfctl %isr,spc 339 mfctl %isr,spc
340 b nadtlb_miss_11 340 b nadtlb_miss_11
341 mfctl %ior,va 341 mfctl %ior,va
342 342
343 .align 32 343 .align 32
344 .endm 344 .endm
345 #endif 345 #endif
346 346
347 /* nadtlb miss interruption handler (parisc 2.0) */ 347 /* nadtlb miss interruption handler (parisc 2.0) */
348 348
349 .macro nadtlb_20 code 349 .macro nadtlb_20 code
350 350
351 mfctl %isr,spc 351 mfctl %isr,spc
352 #ifdef CONFIG_64BIT 352 #ifdef CONFIG_64BIT
353 b nadtlb_miss_20w 353 b nadtlb_miss_20w
354 #else 354 #else
355 b nadtlb_miss_20 355 b nadtlb_miss_20
356 #endif 356 #endif
357 mfctl %ior,va 357 mfctl %ior,va
358 358
359 .align 32 359 .align 32
360 .endm 360 .endm
361 361
362 #ifndef CONFIG_64BIT 362 #ifndef CONFIG_64BIT
363 /* 363 /*
364 * dirty bit trap interruption handler (parisc 1.1 - 32 bit) 364 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
365 */ 365 */
366 366
367 .macro dbit_11 code 367 .macro dbit_11 code
368 368
369 mfctl %isr,spc 369 mfctl %isr,spc
370 b dbit_trap_11 370 b dbit_trap_11
371 mfctl %ior,va 371 mfctl %ior,va
372 372
373 .align 32 373 .align 32
374 .endm 374 .endm
375 #endif 375 #endif
376 376
377 /* 377 /*
378 * dirty bit trap interruption handler (parisc 2.0) 378 * dirty bit trap interruption handler (parisc 2.0)
379 */ 379 */
380 380
381 .macro dbit_20 code 381 .macro dbit_20 code
382 382
383 mfctl %isr,spc 383 mfctl %isr,spc
384 #ifdef CONFIG_64BIT 384 #ifdef CONFIG_64BIT
385 b dbit_trap_20w 385 b dbit_trap_20w
386 #else 386 #else
387 b dbit_trap_20 387 b dbit_trap_20
388 #endif 388 #endif
389 mfctl %ior,va 389 mfctl %ior,va
390 390
391 .align 32 391 .align 32
392 .endm 392 .endm
393 393
394 /* The following are simple 32 vs 64 bit instruction 394 /* The following are simple 32 vs 64 bit instruction
395 * abstractions for the macros */ 395 * abstractions for the macros */
396 .macro EXTR reg1,start,length,reg2 396 .macro EXTR reg1,start,length,reg2
397 #ifdef CONFIG_64BIT 397 #ifdef CONFIG_64BIT
398 extrd,u \reg1,32+\start,\length,\reg2 398 extrd,u \reg1,32+\start,\length,\reg2
399 #else 399 #else
400 extrw,u \reg1,\start,\length,\reg2 400 extrw,u \reg1,\start,\length,\reg2
401 #endif 401 #endif
402 .endm 402 .endm
403 403
404 .macro DEP reg1,start,length,reg2 404 .macro DEP reg1,start,length,reg2
405 #ifdef CONFIG_64BIT 405 #ifdef CONFIG_64BIT
406 depd \reg1,32+\start,\length,\reg2 406 depd \reg1,32+\start,\length,\reg2
407 #else 407 #else
408 depw \reg1,\start,\length,\reg2 408 depw \reg1,\start,\length,\reg2
409 #endif 409 #endif
410 .endm 410 .endm
411 411
412 .macro DEPI val,start,length,reg 412 .macro DEPI val,start,length,reg
413 #ifdef CONFIG_64BIT 413 #ifdef CONFIG_64BIT
414 depdi \val,32+\start,\length,\reg 414 depdi \val,32+\start,\length,\reg
415 #else 415 #else
416 depwi \val,\start,\length,\reg 416 depwi \val,\start,\length,\reg
417 #endif 417 #endif
418 .endm 418 .endm
419 419
420 /* In LP64, the space contains part of the upper 32 bits of the 420 /* In LP64, the space contains part of the upper 32 bits of the
421 * fault. We have to extract this and place it in the va, 421 * fault. We have to extract this and place it in the va,
422 * zeroing the corresponding bits in the space register */ 422 * zeroing the corresponding bits in the space register */
423 .macro space_adjust spc,va,tmp 423 .macro space_adjust spc,va,tmp
424 #ifdef CONFIG_64BIT 424 #ifdef CONFIG_64BIT
425 extrd,u \spc,63,SPACEID_SHIFT,\tmp 425 extrd,u \spc,63,SPACEID_SHIFT,\tmp
426 depd %r0,63,SPACEID_SHIFT,\spc 426 depd %r0,63,SPACEID_SHIFT,\spc
427 depd \tmp,31,SPACEID_SHIFT,\va 427 depd \tmp,31,SPACEID_SHIFT,\va
428 #endif 428 #endif
429 .endm 429 .endm
430 430
431 .import swapper_pg_dir,code 431 .import swapper_pg_dir,code
432 432
433 /* Get the pgd. For faults on space zero (kernel space), this 433 /* Get the pgd. For faults on space zero (kernel space), this
434 * is simply swapper_pg_dir. For user space faults, the 434 * is simply swapper_pg_dir. For user space faults, the
435 * pgd is stored in %cr25 */ 435 * pgd is stored in %cr25 */
436 .macro get_pgd spc,reg 436 .macro get_pgd spc,reg
437 ldil L%PA(swapper_pg_dir),\reg 437 ldil L%PA(swapper_pg_dir),\reg
438 ldo R%PA(swapper_pg_dir)(\reg),\reg 438 ldo R%PA(swapper_pg_dir)(\reg),\reg
439 or,COND(=) %r0,\spc,%r0 439 or,COND(=) %r0,\spc,%r0
440 mfctl %cr25,\reg 440 mfctl %cr25,\reg
441 .endm 441 .endm
442 442
443 /* 443 /*
444 space_check(spc,tmp,fault) 444 space_check(spc,tmp,fault)
445 445
446 spc - The space we saw the fault with. 446 spc - The space we saw the fault with.
447 tmp - The place to store the current space. 447 tmp - The place to store the current space.
448 fault - Function to call on failure. 448 fault - Function to call on failure.
449 449
450 Only allow faults on different spaces from the 450 Only allow faults on different spaces from the
451 currently active one if we're the kernel 451 currently active one if we're the kernel
452 452
453 */ 453 */
454 .macro space_check spc,tmp,fault 454 .macro space_check spc,tmp,fault
455 mfsp %sr7,\tmp 455 mfsp %sr7,\tmp
456 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page 456 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
457 * as kernel, so defeat the space 457 * as kernel, so defeat the space
458 * check if it is */ 458 * check if it is */
459 copy \spc,\tmp 459 copy \spc,\tmp
460 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */ 460 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
461 cmpb,COND(<>),n \tmp,\spc,\fault 461 cmpb,COND(<>),n \tmp,\spc,\fault
462 .endm 462 .endm
463 463
464 /* Look up a PTE in a 2-Level scheme (faulting at each 464 /* Look up a PTE in a 2-Level scheme (faulting at each
465 * level if the entry isn't present 465 * level if the entry isn't present
466 * 466 *
467 * NOTE: we use ldw even for LP64, since the short pointers 467 * NOTE: we use ldw even for LP64, since the short pointers
468 * can address up to 1TB 468 * can address up to 1TB
469 */ 469 */
470 .macro L2_ptep pmd,pte,index,va,fault 470 .macro L2_ptep pmd,pte,index,va,fault
471 #if PT_NLEVELS == 3 471 #if PT_NLEVELS == 3
472 EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index 472 EXTR \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
473 #else 473 #else
474 EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index 474 EXTR \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
475 #endif 475 #endif
476 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 476 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
477 copy %r0,\pte 477 copy %r0,\pte
478 ldw,s \index(\pmd),\pmd 478 ldw,s \index(\pmd),\pmd
479 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault 479 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
480 DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */ 480 DEP %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
481 copy \pmd,%r9 481 copy \pmd,%r9
482 #ifdef CONFIG_64BIT 482 #ifdef CONFIG_64BIT
483 shld %r9,PxD_VALUE_SHIFT,\pmd 483 shld %r9,PxD_VALUE_SHIFT,\pmd
484 #else 484 #else
485 shlw %r9,PxD_VALUE_SHIFT,\pmd 485 shlw %r9,PxD_VALUE_SHIFT,\pmd
486 #endif 486 #endif
487 EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index 487 EXTR \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
488 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */ 488 DEP %r0,31,PAGE_SHIFT,\pmd /* clear offset */
489 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd 489 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
490 LDREG %r0(\pmd),\pte /* pmd is now pte */ 490 LDREG %r0(\pmd),\pte /* pmd is now pte */
491 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault 491 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
492 .endm 492 .endm
493 493
494 /* Look up PTE in a 3-Level scheme. 494 /* Look up PTE in a 3-Level scheme.
495 * 495 *
496 * Here we implement a Hybrid L2/L3 scheme: we allocate the 496 * Here we implement a Hybrid L2/L3 scheme: we allocate the
497 * first pmd adjacent to the pgd. This means that we can 497 * first pmd adjacent to the pgd. This means that we can
498 * subtract a constant offset to get to it. The pmd and pgd 498 * subtract a constant offset to get to it. The pmd and pgd
499 * sizes are arranged so that a single pmd covers 4GB (giving 499 * sizes are arranged so that a single pmd covers 4GB (giving
500 * a full LP64 process access to 8TB) so our lookups are 500 * a full LP64 process access to 8TB) so our lookups are
501 * effectively L2 for the first 4GB of the kernel (i.e. for 501 * effectively L2 for the first 4GB of the kernel (i.e. for
502 * all ILP32 processes and all the kernel for machines with 502 * all ILP32 processes and all the kernel for machines with
503 * under 4GB of memory) */ 503 * under 4GB of memory) */
504 .macro L3_ptep pgd,pte,index,va,fault 504 .macro L3_ptep pgd,pte,index,va,fault
505 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index 505 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
506 copy %r0,\pte 506 copy %r0,\pte
507 extrd,u,*= \va,31,32,%r0 507 extrd,u,*= \va,31,32,%r0
508 ldw,s \index(\pgd),\pgd 508 ldw,s \index(\pgd),\pgd
509 extrd,u,*= \va,31,32,%r0 509 extrd,u,*= \va,31,32,%r0
510 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault 510 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
511 extrd,u,*= \va,31,32,%r0 511 extrd,u,*= \va,31,32,%r0
512 shld \pgd,PxD_VALUE_SHIFT,\index 512 shld \pgd,PxD_VALUE_SHIFT,\index
513 extrd,u,*= \va,31,32,%r0 513 extrd,u,*= \va,31,32,%r0
514 copy \index,\pgd 514 copy \index,\pgd
515 extrd,u,*<> \va,31,32,%r0 515 extrd,u,*<> \va,31,32,%r0
516 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd 516 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
517 L2_ptep \pgd,\pte,\index,\va,\fault 517 L2_ptep \pgd,\pte,\index,\va,\fault
518 .endm 518 .endm
519 519
520 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and 520 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
521 * don't needlessly dirty the cache line if it was already set */ 521 * don't needlessly dirty the cache line if it was already set */
522 .macro update_ptep ptep,pte,tmp,tmp1 522 .macro update_ptep ptep,pte,tmp,tmp1
523 ldi _PAGE_ACCESSED,\tmp1 523 ldi _PAGE_ACCESSED,\tmp1
524 or \tmp1,\pte,\tmp 524 or \tmp1,\pte,\tmp
525 and,COND(<>) \tmp1,\pte,%r0 525 and,COND(<>) \tmp1,\pte,%r0
526 STREG \tmp,0(\ptep) 526 STREG \tmp,0(\ptep)
527 .endm 527 .endm
528 528
529 /* Set the dirty bit (and accessed bit). No need to be 529 /* Set the dirty bit (and accessed bit). No need to be
530 * clever, this is only used from the dirty fault */ 530 * clever, this is only used from the dirty fault */
531 .macro update_dirty ptep,pte,tmp 531 .macro update_dirty ptep,pte,tmp
532 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp 532 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
533 or \tmp,\pte,\pte 533 or \tmp,\pte,\pte
534 STREG \pte,0(\ptep) 534 STREG \pte,0(\ptep)
535 .endm 535 .endm
536 536
537 /* Convert the pte and prot to tlb insertion values. How 537 /* Convert the pte and prot to tlb insertion values. How
538 * this happens is quite subtle, read below */ 538 * this happens is quite subtle, read below */
539 .macro make_insert_tlb spc,pte,prot 539 .macro make_insert_tlb spc,pte,prot
540 space_to_prot \spc \prot /* create prot id from space */ 540 space_to_prot \spc \prot /* create prot id from space */
541 /* The following is the real subtlety. This is depositing 541 /* The following is the real subtlety. This is depositing
542 * T <-> _PAGE_REFTRAP 542 * T <-> _PAGE_REFTRAP
543 * D <-> _PAGE_DIRTY 543 * D <-> _PAGE_DIRTY
544 * B <-> _PAGE_DMB (memory break) 544 * B <-> _PAGE_DMB (memory break)
545 * 545 *
546 * Then incredible subtlety: The access rights are 546 * Then incredible subtlety: The access rights are
547 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ 547 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
548 * See 3-14 of the parisc 2.0 manual 548 * See 3-14 of the parisc 2.0 manual
549 * 549 *
550 * Finally, _PAGE_READ goes in the top bit of PL1 (so we 550 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
551 * trigger an access rights trap in user space if the user 551 * trigger an access rights trap in user space if the user
552 * tries to read an unreadable page */ 552 * tries to read an unreadable page */
553 depd \pte,8,7,\prot 553 depd \pte,8,7,\prot
554 554
555 /* PAGE_USER indicates the page can be read with user privileges, 555 /* PAGE_USER indicates the page can be read with user privileges,
556 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1 556 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
557 * contains _PAGE_READ */ 557 * contains _PAGE_READ */
558 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0 558 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
559 depdi 7,11,3,\prot 559 depdi 7,11,3,\prot
560 /* If we're a gateway page, drop PL2 back to zero for promotion 560 /* If we're a gateway page, drop PL2 back to zero for promotion
561 * to kernel privilege (so we can execute the page as kernel). 561 * to kernel privilege (so we can execute the page as kernel).
562 * Any privilege promotion page always denies read and write */ 562 * Any privilege promotion page always denies read and write */
563 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0 563 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
564 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */ 564 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
565 565
566 /* Get rid of prot bits and convert to page addr for iitlbt */ 566 /* Get rid of prot bits and convert to page addr for iitlbt */
567 567
568 depd %r0,63,PAGE_SHIFT,\pte 568 depd %r0,63,PAGE_SHIFT,\pte
569 extrd,u \pte,56,32,\pte 569 extrd,u \pte,56,32,\pte
570 .endm 570 .endm
571 571
/* NOTE(review): SOURCE is a two-column diff rendering (old/new line numbers
 * repeated on each line); instruction text deliberately left untouched. */
572 /* Identical macro to make_insert_tlb above, except it 572 /* Identical macro to make_insert_tlb above, except it
573 * makes the tlb entry for the differently formatted pa11 573 * makes the tlb entry for the differently formatted pa11
574 * insertion instructions */ 574 * insertion instructions */
575 .macro make_insert_tlb_11 spc,pte,prot 575 .macro make_insert_tlb_11 spc,pte,prot
576 zdep \spc,30,15,\prot 576 zdep \spc,30,15,\prot
577 dep \pte,8,7,\prot 577 dep \pte,8,7,\prot
578 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0 578 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
579 depi 1,12,1,\prot 579 depi 1,12,1,\prot
580 extru,= \pte,_PAGE_USER_BIT,1,%r0 580 extru,= \pte,_PAGE_USER_BIT,1,%r0
581 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */ 581 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
582 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0 582 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
583 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */ 583 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
584 584
585 /* Get rid of prot bits and convert to page addr for iitlba */ 585 /* Get rid of prot bits and convert to page addr for iitlba */
586 586
587 depi 0,31,12,\pte 587 depi 0,31,12,\pte
588 extru \pte,24,25,\pte 588 extru \pte,24,25,\pte
589 589
590 .endm 590 .endm
591 591
/* f_extend: sign-extends the top nibble of \pte so 0xfXXXXXXX addresses
 * reach I/O space on ILP32 PA2.0; \tmp is a scratch register (clobbered). */
592 /* This is for ILP32 PA2.0 only. The TLB insertion needs 592 /* This is for ILP32 PA2.0 only. The TLB insertion needs
593 * to extend into I/O space if the address is 0xfXXXXXXX 593 * to extend into I/O space if the address is 0xfXXXXXXX
594 * so we extend the f's into the top word of the pte in 594 * so we extend the f's into the top word of the pte in
595 * this case */ 595 * this case */
596 .macro f_extend pte,tmp 596 .macro f_extend pte,tmp
597 extrd,s \pte,42,4,\tmp 597 extrd,s \pte,42,4,\tmp
598 addi,<> 1,\tmp,%r0 598 addi,<> 1,\tmp,%r0
599 extrd,s \pte,63,25,\pte 599 extrd,s \pte,63,25,\pte
600 .endm 600 .endm
601 601
/* do_alias: builds a TLB entry for the tmpalias region; branches to \fault
 * when \spc is non-zero or \va is outside TMPALIAS_MAP_START. */
602 /* The alias region is an 8MB aligned 16MB to do clear and 602 /* The alias region is an 8MB aligned 16MB to do clear and
603 * copy user pages at addresses congruent with the user 603 * copy user pages at addresses congruent with the user
604 * virtual address. 604 * virtual address.
605 * 605 *
606 * To use the alias page, you set %r26 up with the to TLB 606 * To use the alias page, you set %r26 up with the to TLB
607 * entry (identifying the physical page) and %r23 up with 607 * entry (identifying the physical page) and %r23 up with
608 * the from tlb entry (or nothing if only a to entry---for 608 * the from tlb entry (or nothing if only a to entry---for
609 * clear_user_page_asm) */ 609 * clear_user_page_asm) */
610 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault 610 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
611 cmpib,COND(<>),n 0,\spc,\fault 611 cmpib,COND(<>),n 0,\spc,\fault
612 ldil L%(TMPALIAS_MAP_START),\tmp 612 ldil L%(TMPALIAS_MAP_START),\tmp
613 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000) 613 #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
614 /* on LP64, ldi will sign extend into the upper 32 bits, 614 /* on LP64, ldi will sign extend into the upper 32 bits,
615 * which is behaviour we don't want */ 615 * which is behaviour we don't want */
616 depdi 0,31,32,\tmp 616 depdi 0,31,32,\tmp
617 #endif 617 #endif
618 copy \va,\tmp1 618 copy \va,\tmp1
619 DEPI 0,31,23,\tmp1 619 DEPI 0,31,23,\tmp1
620 cmpb,COND(<>),n \tmp,\tmp1,\fault 620 cmpb,COND(<>),n \tmp,\tmp1,\fault
621 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot 621 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),\prot
622 depd,z \prot,8,7,\prot 622 depd,z \prot,8,7,\prot
623 /* 623 /*
624 * OK, it is in the temp alias region, check whether "from" or "to". 624 * OK, it is in the temp alias region, check whether "from" or "to".
625 * Check "subtle" note in pacache.S re: r23/r26. 625 * Check "subtle" note in pacache.S re: r23/r26.
626 */ 626 */
627 #ifdef CONFIG_64BIT 627 #ifdef CONFIG_64BIT
628 extrd,u,*= \va,41,1,%r0 628 extrd,u,*= \va,41,1,%r0
629 #else 629 #else
630 extrw,u,= \va,9,1,%r0 630 extrw,u,= \va,9,1,%r0
631 #endif 631 #endif
632 or,COND(tr) %r23,%r0,\pte 632 or,COND(tr) %r23,%r0,\pte
633 or %r26,%r0,\pte 633 or %r26,%r0,\pte
634 .endm 634 .endm
635 635
636 636
/* PA2.0 interruption vector table: slot N handles interruption N; unused
 * slots get the generic "def" handler macro. */
637 /* 637 /*
638 * Align fault_vector_20 on 4K boundary so that both 638 * Align fault_vector_20 on 4K boundary so that both
639 * fault_vector_11 and fault_vector_20 are on the 639 * fault_vector_11 and fault_vector_20 are on the
640 * same page. This is only necessary as long as we 640 * same page. This is only necessary as long as we
641 * write protect the kernel text, which we may stop 641 * write protect the kernel text, which we may stop
642 * doing once we use large page translations to cover 642 * doing once we use large page translations to cover
643 * the static part of the kernel address space. 643 * the static part of the kernel address space.
644 */ 644 */
645 645
646 .export fault_vector_20 646 .export fault_vector_20
647 647
648 .text 648 .text
649 649
650 .align 4096 650 .align 4096
651 651
652 fault_vector_20: 652 fault_vector_20:
653 /* First vector is invalid (0) */ 653 /* First vector is invalid (0) */
654 .ascii "cows can fly" 654 .ascii "cows can fly"
655 .byte 0 655 .byte 0
656 .align 32 656 .align 32
657 657
658 hpmc 1 658 hpmc 1
659 def 2 659 def 2
660 def 3 660 def 3
661 extint 4 661 extint 4
662 def 5 662 def 5
663 itlb_20 6 663 itlb_20 6
664 def 7 664 def 7
665 def 8 665 def 8
666 def 9 666 def 9
667 def 10 667 def 10
668 def 11 668 def 11
669 def 12 669 def 12
670 def 13 670 def 13
671 def 14 671 def 14
672 dtlb_20 15 672 dtlb_20 15
673 #if 0 673 #if 0
674 naitlb_20 16 674 naitlb_20 16
675 #else 675 #else
676 def 16 676 def 16
677 #endif 677 #endif
678 nadtlb_20 17 678 nadtlb_20 17
679 def 18 679 def 18
680 def 19 680 def 19
681 dbit_20 20 681 dbit_20 20
682 def 21 682 def 21
683 def 22 683 def 22
684 def 23 684 def 23
685 def 24 685 def 24
686 def 25 686 def 25
687 def 26 687 def 26
688 def 27 688 def 27
689 def 28 689 def 28
690 def 29 690 def 29
691 def 30 691 def 30
692 def 31 692 def 31
693 693
/* PA1.1 interruption vector table (32-bit kernels only); mirrors
 * fault_vector_20 but uses the *_11 handler macros. */
694 #ifndef CONFIG_64BIT 694 #ifndef CONFIG_64BIT
695 695
696 .export fault_vector_11 696 .export fault_vector_11
697 697
698 .align 2048 698 .align 2048
699 699
700 fault_vector_11: 700 fault_vector_11:
701 /* First vector is invalid (0) */ 701 /* First vector is invalid (0) */
702 .ascii "cows can fly" 702 .ascii "cows can fly"
703 .byte 0 703 .byte 0
704 .align 32 704 .align 32
705 705
706 hpmc 1 706 hpmc 1
707 def 2 707 def 2
708 def 3 708 def 3
709 extint 4 709 extint 4
710 def 5 710 def 5
711 itlb_11 6 711 itlb_11 6
712 def 7 712 def 7
713 def 8 713 def 8
714 def 9 714 def 9
715 def 10 715 def 10
716 def 11 716 def 11
717 def 12 717 def 12
718 def 13 718 def 13
719 def 14 719 def 14
720 dtlb_11 15 720 dtlb_11 15
721 #if 0 721 #if 0
722 naitlb_11 16 722 naitlb_11 16
723 #else 723 #else
724 def 16 724 def 16
725 #endif 725 #endif
726 nadtlb_11 17 726 nadtlb_11 17
727 def 18 727 def 18
728 def 19 728 def 19
729 dbit_11 20 729 dbit_11 20
730 def 21 730 def 21
731 def 22 731 def 22
732 def 23 732 def 23
733 def 24 733 def 24
734 def 25 734 def 25
735 def 26 735 def 26
736 def 27 736 def 27
737 def 28 737 def 28
738 def 29 738 def 29
739 def 30 739 def 30
740 def 31 740 def 31
741 741
742 #endif 742 #endif
743 743
744 .import handle_interruption,code 744 .import handle_interruption,code
745 .import do_cpu_irq_mask,code 745 .import do_cpu_irq_mask,code
746 746
/* __kernel_thread: builds a pt_regs frame on the stack, forces
 * CLONE_VM|CLONE_UNTRACED, and calls do_fork; parent returns to caller. */
747 /* 747 /*
748 * r26 = function to be called 748 * r26 = function to be called
749 * r25 = argument to pass in 749 * r25 = argument to pass in
750 * r24 = flags for do_fork() 750 * r24 = flags for do_fork()
751 * 751 *
752 * Kernel threads don't ever return, so they don't need 752 * Kernel threads don't ever return, so they don't need
753 * a true register context. We just save away the arguments 753 * a true register context. We just save away the arguments
754 * for copy_thread/ret_ to properly set up the child. 754 * for copy_thread/ret_ to properly set up the child.
755 */ 755 */
756 756
757 #define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */ 757 #define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
758 #define CLONE_UNTRACED 0x00800000 758 #define CLONE_UNTRACED 0x00800000
759 759
760 .export __kernel_thread, code 760 .export __kernel_thread, code
761 .import do_fork 761 .import do_fork
762 __kernel_thread: 762 __kernel_thread:
763 STREG %r2, -RP_OFFSET(%r30) 763 STREG %r2, -RP_OFFSET(%r30)
764 764
765 copy %r30, %r1 765 copy %r30, %r1
766 ldo PT_SZ_ALGN(%r30),%r30 766 ldo PT_SZ_ALGN(%r30),%r30
767 #ifdef CONFIG_64BIT 767 #ifdef CONFIG_64BIT
768 /* Yo, function pointers in wide mode are little structs... -PB */ 768 /* Yo, function pointers in wide mode are little structs... -PB */
769 ldd 24(%r26), %r2 769 ldd 24(%r26), %r2
770 STREG %r2, PT_GR27(%r1) /* Store child's %dp */ 770 STREG %r2, PT_GR27(%r1) /* Store child's %dp */
771 ldd 16(%r26), %r26 771 ldd 16(%r26), %r26
772 772
773 STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */ 773 STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
774 copy %r0, %r22 /* user_tid */ 774 copy %r0, %r22 /* user_tid */
775 #endif 775 #endif
776 STREG %r26, PT_GR26(%r1) /* Store function & argument for child */ 776 STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
777 STREG %r25, PT_GR25(%r1) 777 STREG %r25, PT_GR25(%r1)
778 ldil L%CLONE_UNTRACED, %r26 778 ldil L%CLONE_UNTRACED, %r26
779 ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */ 779 ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
780 or %r26, %r24, %r26 /* will have kernel mappings. */ 780 or %r26, %r24, %r26 /* will have kernel mappings. */
781 ldi 1, %r25 /* stack_start, signals kernel thread */ 781 ldi 1, %r25 /* stack_start, signals kernel thread */
782 stw %r0, -52(%r30) /* user_tid */ 782 stw %r0, -52(%r30) /* user_tid */
783 #ifdef CONFIG_64BIT 783 #ifdef CONFIG_64BIT
784 ldo -16(%r30),%r29 /* Reference param save area */ 784 ldo -16(%r30),%r29 /* Reference param save area */
785 #endif 785 #endif
786 BL do_fork, %r2 786 BL do_fork, %r2
787 copy %r1, %r24 /* pt_regs */ 787 copy %r1, %r24 /* pt_regs */
788 788
789 /* Parent Returns here */ 789 /* Parent Returns here */
790 790
791 LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2 791 LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
792 ldo -PT_SZ_ALGN(%r30), %r30 792 ldo -PT_SZ_ALGN(%r30), %r30
793 bv %r0(%r2) 793 bv %r0(%r2)
794 nop 794 nop
795 795
/* ret_from_kernel_thread: child side of __kernel_thread; runs
 * schedule_tail, then the stored function, then sys_exit(0). */
796 /* 796 /*
797 * Child Returns here 797 * Child Returns here
798 * 798 *
799 * copy_thread moved args from temp save area set up above 799 * copy_thread moved args from temp save area set up above
800 * into task save area. 800 * into task save area.
801 */ 801 */
802 802
803 .export ret_from_kernel_thread 803 .export ret_from_kernel_thread
804 ret_from_kernel_thread: 804 ret_from_kernel_thread:
805 805
806 /* Call schedule_tail first though */ 806 /* Call schedule_tail first though */
807 BL schedule_tail, %r2 807 BL schedule_tail, %r2
808 nop 808 nop
809 809
810 LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1 810 LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
811 LDREG TASK_PT_GR25(%r1), %r26 811 LDREG TASK_PT_GR25(%r1), %r26
812 #ifdef CONFIG_64BIT 812 #ifdef CONFIG_64BIT
813 LDREG TASK_PT_GR27(%r1), %r27 813 LDREG TASK_PT_GR27(%r1), %r27
814 LDREG TASK_PT_GR22(%r1), %r22 814 LDREG TASK_PT_GR22(%r1), %r22
815 #endif 815 #endif
816 LDREG TASK_PT_GR26(%r1), %r1 816 LDREG TASK_PT_GR26(%r1), %r1
817 ble 0(%sr7, %r1) 817 ble 0(%sr7, %r1)
818 copy %r31, %r2 818 copy %r31, %r2
819 819
820 #ifdef CONFIG_64BIT 820 #ifdef CONFIG_64BIT
821 ldo -16(%r30),%r29 /* Reference param save area */ 821 ldo -16(%r30),%r29 /* Reference param save area */
822 loadgp /* Thread could have been in a module */ 822 loadgp /* Thread could have been in a module */
823 #endif 823 #endif
824 #ifndef CONFIG_64BIT 824 #ifndef CONFIG_64BIT
825 b sys_exit 825 b sys_exit
826 #else 826 #else
827 load32 sys_exit, %r1 827 load32 sys_exit, %r1
828 bv %r0(%r1) 828 bv %r0(%r1)
829 #endif 829 #endif
830 ldi 0, %r26 830 ldi 0, %r26
831 831
/* __execve: wraps sys_execve with a pt_regs frame; on success returns
 * to userspace via intr_return, on failure returns to the caller. */
832 .import sys_execve, code 832 .import sys_execve, code
833 .export __execve, code 833 .export __execve, code
834 __execve: 834 __execve:
835 copy %r2, %r15 835 copy %r2, %r15
836 copy %r30, %r16 836 copy %r30, %r16
837 ldo PT_SZ_ALGN(%r30), %r30 837 ldo PT_SZ_ALGN(%r30), %r30
838 STREG %r26, PT_GR26(%r16) 838 STREG %r26, PT_GR26(%r16)
839 STREG %r25, PT_GR25(%r16) 839 STREG %r25, PT_GR25(%r16)
840 STREG %r24, PT_GR24(%r16) 840 STREG %r24, PT_GR24(%r16)
841 #ifdef CONFIG_64BIT 841 #ifdef CONFIG_64BIT
842 ldo -16(%r30),%r29 /* Reference param save area */ 842 ldo -16(%r30),%r29 /* Reference param save area */
843 #endif 843 #endif
844 BL sys_execve, %r2 844 BL sys_execve, %r2
845 copy %r16, %r26 845 copy %r16, %r26
846 846
847 cmpib,=,n 0,%r28,intr_return /* forward */ 847 cmpib,=,n 0,%r28,intr_return /* forward */
848 848
849 /* yes, this will trap and die. */ 849 /* yes, this will trap and die. */
850 copy %r15, %r2 850 copy %r15, %r2
851 copy %r16, %r30 851 copy %r16, %r30
852 bv %r0(%r2) 852 bv %r0(%r2)
853 nop 853 nop
854 854
855 .align 4 855 .align 4
856 856
857 /* 857 /*
858 * struct task_struct *_switch_to(struct task_struct *prev, 858 * struct task_struct *_switch_to(struct task_struct *prev,
859 * struct task_struct *next) 859 * struct task_struct *next)
860 * 860 *
861 * switch kernel stacks and return prev */ 861 * switch kernel stacks and return prev */
/* Saves callee-saved state and KSP/KPC of prev (%r26), loads next's
 * (%r25), and resumes at next's saved KPC with cr30 = next thread_info. */
862 .export _switch_to, code 862 .export _switch_to, code
863 _switch_to: 863 _switch_to:
864 STREG %r2, -RP_OFFSET(%r30) 864 STREG %r2, -RP_OFFSET(%r30)
865 865
866 callee_save_float 866 callee_save_float
867 callee_save 867 callee_save
868 868
869 load32 _switch_to_ret, %r2 869 load32 _switch_to_ret, %r2
870 870
871 STREG %r2, TASK_PT_KPC(%r26) 871 STREG %r2, TASK_PT_KPC(%r26)
872 LDREG TASK_PT_KPC(%r25), %r2 872 LDREG TASK_PT_KPC(%r25), %r2
873 873
874 STREG %r30, TASK_PT_KSP(%r26) 874 STREG %r30, TASK_PT_KSP(%r26)
875 LDREG TASK_PT_KSP(%r25), %r30 875 LDREG TASK_PT_KSP(%r25), %r30
876 LDREG TASK_THREAD_INFO(%r25), %r25 876 LDREG TASK_THREAD_INFO(%r25), %r25
877 bv %r0(%r2) 877 bv %r0(%r2)
878 mtctl %r25,%cr30 878 mtctl %r25,%cr30
879 879
880 _switch_to_ret: 880 _switch_to_ret:
881 mtctl %r0, %cr0 /* Needed for single stepping */ 881 mtctl %r0, %cr0 /* Needed for single stepping */
882 callee_rest 882 callee_rest
883 callee_rest_float 883 callee_rest_float
884 884
885 LDREG -RP_OFFSET(%r30), %r2 885 LDREG -RP_OFFSET(%r30), %r2
886 bv %r0(%r2) 886 bv %r0(%r2)
887 copy %r26, %r28 887 copy %r26, %r28
888 888
889 /* 889 /*
890 * Common rfi return path for interruptions, kernel execve, and 890 * Common rfi return path for interruptions, kernel execve, and
891 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will 891 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
892 * return via this path if the signal was received when the process 892 * return via this path if the signal was received when the process
893 * was running; if the process was blocked on a syscall then the 893 * was running; if the process was blocked on a syscall then the
894 * normal syscall_exit path is used. All syscalls for traced 894 * normal syscall_exit path is used. All syscalls for traced
895 * processes exit via intr_restore. 895 * processes exit via intr_restore.
896 * 896 *
897 * XXX If any syscalls that change a process's space id ever exit 897 * XXX If any syscalls that change a process's space id ever exit
898 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and 898 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
899 * adjust IASQ[0..1]. 899 * adjust IASQ[0..1].
900 * 900 *
901 */ 901 */
902 902
903 .align 4096 903 .align 4096
904 904
905 .export syscall_exit_rfi 905 .export syscall_exit_rfi
906 syscall_exit_rfi: 906 syscall_exit_rfi:
907 mfctl %cr30,%r16 907 mfctl %cr30,%r16
908 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */ 908 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
909 ldo TASK_REGS(%r16),%r16 909 ldo TASK_REGS(%r16),%r16
910 /* Force iaoq to userspace, as the user has had access to our current 910 /* Force iaoq to userspace, as the user has had access to our current
911 * context via sigcontext. Also Filter the PSW for the same reason. 911 * context via sigcontext. Also Filter the PSW for the same reason.
912 */ 912 */
913 LDREG PT_IAOQ0(%r16),%r19 913 LDREG PT_IAOQ0(%r16),%r19
914 depi 3,31,2,%r19 914 depi 3,31,2,%r19
915 STREG %r19,PT_IAOQ0(%r16) 915 STREG %r19,PT_IAOQ0(%r16)
916 LDREG PT_IAOQ1(%r16),%r19 916 LDREG PT_IAOQ1(%r16),%r19
917 depi 3,31,2,%r19 917 depi 3,31,2,%r19
918 STREG %r19,PT_IAOQ1(%r16) 918 STREG %r19,PT_IAOQ1(%r16)
919 LDREG PT_PSW(%r16),%r19 919 LDREG PT_PSW(%r16),%r19
920 load32 USER_PSW_MASK,%r1 920 load32 USER_PSW_MASK,%r1
921 #ifdef CONFIG_64BIT 921 #ifdef CONFIG_64BIT
922 load32 USER_PSW_HI_MASK,%r20 922 load32 USER_PSW_HI_MASK,%r20
923 depd %r20,31,32,%r1 923 depd %r20,31,32,%r1
924 #endif 924 #endif
925 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */ 925 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
926 load32 USER_PSW,%r1 926 load32 USER_PSW,%r1
927 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */ 927 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
928 STREG %r19,PT_PSW(%r16) 928 STREG %r19,PT_PSW(%r16)
929 929
930 /* 930 /*
931 * If we aren't being traced, we never saved space registers 931 * If we aren't being traced, we never saved space registers
932 * (we don't store them in the sigcontext), so set them 932 * (we don't store them in the sigcontext), so set them
933 * to "proper" values now (otherwise we'll wind up restoring 933 * to "proper" values now (otherwise we'll wind up restoring
934 * whatever was last stored in the task structure, which might 934 * whatever was last stored in the task structure, which might
935 * be inconsistent if an interrupt occurred while on the gateway 935 * be inconsistent if an interrupt occurred while on the gateway
936 * page) Note that we may be "trashing" values the user put in 936 * page) Note that we may be "trashing" values the user put in
937 * them, but we don't support the user changing them. 937 * them, but we don't support the user changing them.
938 */ 938 */
939 939
940 STREG %r0,PT_SR2(%r16) 940 STREG %r0,PT_SR2(%r16)
941 mfsp %sr3,%r19 941 mfsp %sr3,%r19
942 STREG %r19,PT_SR0(%r16) 942 STREG %r19,PT_SR0(%r16)
943 STREG %r19,PT_SR1(%r16) 943 STREG %r19,PT_SR1(%r16)
944 STREG %r19,PT_SR3(%r16) 944 STREG %r19,PT_SR3(%r16)
945 STREG %r19,PT_SR4(%r16) 945 STREG %r19,PT_SR4(%r16)
946 STREG %r19,PT_SR5(%r16) 946 STREG %r19,PT_SR5(%r16)
947 STREG %r19,PT_SR6(%r16) 947 STREG %r19,PT_SR6(%r16)
948 STREG %r19,PT_SR7(%r16) 948 STREG %r19,PT_SR7(%r16)
949 949
950 intr_return: 950 intr_return:
951 /* NOTE: Need to enable interrupts in case we schedule. */ 951 /* NOTE: Need to enable interrupts in case we schedule. */
952 ssm PSW_SM_I, %r0 952 ssm PSW_SM_I, %r0
953 953
954 /* Check for software interrupts */ 954 /* Check for software interrupts */
955 955
956 .import irq_stat,data 956 .import irq_stat,data
957 957
958 load32 irq_stat,%r19 958 load32 irq_stat,%r19
959 #ifdef CONFIG_SMP 959 #ifdef CONFIG_SMP
960 mfctl %cr30,%r1 960 mfctl %cr30,%r1
961 ldw TI_CPU(%r1),%r1 /* get cpu # - int */ 961 ldw TI_CPU(%r1),%r1 /* get cpu # - int */
962 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount 962 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
963 ** irq_stat[] is defined using ____cacheline_aligned. 963 ** irq_stat[] is defined using ____cacheline_aligned.
964 */ 964 */
965 #ifdef CONFIG_64BIT 965 #ifdef CONFIG_64BIT
966 shld %r1, 6, %r20 966 shld %r1, 6, %r20
967 #else 967 #else
968 shlw %r1, 5, %r20 968 shlw %r1, 5, %r20
969 #endif 969 #endif
970 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */ 970 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
971 #endif /* CONFIG_SMP */ 971 #endif /* CONFIG_SMP */
972 972
/* NOTE(review): the two left-column-only lines below are the softirq-pending
 * check removed by this commit; per the commit message do_softirq was being
 * called twice and without accounting for nesting. */
973 LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
974 cmpib,<>,n 0,%r20,intr_do_softirq /* forward */
975
976 intr_check_resched: 973 intr_check_resched:
977 974
978 /* check for reschedule */ 975 /* check for reschedule */
979 mfctl %cr30,%r1 976 mfctl %cr30,%r1
980 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ 977 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
981 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */ 978 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
982 979
983 intr_check_sig: 980 intr_check_sig:
984 /* As above */ 981 /* As above */
985 mfctl %cr30,%r1 982 mfctl %cr30,%r1
986 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_SIGPENDING */ 983 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_SIGPENDING */
987 bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */ 984 bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */
988 985
989 intr_restore: 986 intr_restore:
990 copy %r16,%r29 987 copy %r16,%r29
991 ldo PT_FR31(%r29),%r1 988 ldo PT_FR31(%r29),%r1
992 rest_fp %r1 989 rest_fp %r1
993 rest_general %r29 990 rest_general %r29
994 991
995 /* inverse of virt_map */ 992 /* inverse of virt_map */
996 pcxt_ssm_bug 993 pcxt_ssm_bug
997 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */ 994 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
998 tophys_r1 %r29 995 tophys_r1 %r29
999 996
1000 /* Restore space id's and special cr's from PT_REGS 997 /* Restore space id's and special cr's from PT_REGS
1001 * structure pointed to by r29 998 * structure pointed to by r29
1002 */ 999 */
1003 rest_specials %r29 1000 rest_specials %r29
1004 1001
1005 /* IMPORTANT: rest_stack restores r29 last (we are using it)! 1002 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
1006 * It also restores r1 and r30. 1003 * It also restores r1 and r30.
1007 */ 1004 */
1008 rest_stack 1005 rest_stack
1009 1006
1010 rfi 1007 rfi
1011 nop 1008 nop
1012 nop 1009 nop
1013 nop 1010 nop
1014 nop 1011 nop
1015 nop 1012 nop
1016 nop 1013 nop
1017 nop 1014 nop
1018 nop 1015 nop
1019 1016
/* NOTE(review): whole intr_do_softirq stub below (left column only) is
 * deleted by this commit along with the check above. */
1020 .import do_softirq,code
1021 intr_do_softirq:
1022 BL do_softirq,%r2
1023 #ifdef CONFIG_64BIT
1024 ldo -16(%r30),%r29 /* Reference param save area */
1025 #else
1026 nop
1027 #endif
1028 b intr_check_resched
1029 nop
1030
/* intr_do_resched: calls schedule() only when returning to userspace
 * (both IASQ slots non-zero); returns through intr_check_sig. */
1031 .import schedule,code 1017 .import schedule,code
1032 intr_do_resched: 1018 intr_do_resched:
1033 /* Only do reschedule if we are returning to user space */ 1019 /* Only do reschedule if we are returning to user space */
1034 LDREG PT_IASQ0(%r16), %r20 1020 LDREG PT_IASQ0(%r16), %r20
1035 CMPIB= 0,%r20,intr_restore /* backward */ 1021 CMPIB= 0,%r20,intr_restore /* backward */
1036 nop 1022 nop
1037 LDREG PT_IASQ1(%r16), %r20 1023 LDREG PT_IASQ1(%r16), %r20
1038 CMPIB= 0,%r20,intr_restore /* backward */ 1024 CMPIB= 0,%r20,intr_restore /* backward */
1039 nop 1025 nop
1040 1026
1041 #ifdef CONFIG_64BIT 1027 #ifdef CONFIG_64BIT
1042 ldo -16(%r30),%r29 /* Reference param save area */ 1028 ldo -16(%r30),%r29 /* Reference param save area */
1043 #endif 1029 #endif
1044 1030
1045 ldil L%intr_check_sig, %r2 1031 ldil L%intr_check_sig, %r2
1046 #ifndef CONFIG_64BIT 1032 #ifndef CONFIG_64BIT
1047 b schedule 1033 b schedule
1048 #else 1034 #else
1049 load32 schedule, %r20 1035 load32 schedule, %r20
1050 bv %r0(%r20) 1036 bv %r0(%r20)
1051 #endif 1037 #endif
1052 ldo R%intr_check_sig(%r2), %r2 1038 ldo R%intr_check_sig(%r2), %r2
1053 1039
1054 1040
/* intr_do_signal: delivers pending signals, but only when returning to
 * userspace and never while on the gateway page (IASQ == 0). */
1055 .import do_signal,code 1041 .import do_signal,code
1056 intr_do_signal: 1042 intr_do_signal:
1057 /* 1043 /*
1058 This check is critical to having LWS 1044 This check is critical to having LWS
1059 working. The IASQ is zero on the gateway 1045 working. The IASQ is zero on the gateway
1060 page and we cannot deliver any signals until 1046 page and we cannot deliver any signals until
1061 we get off the gateway page. 1047 we get off the gateway page.
1062 1048
1063 Only do signals if we are returning to user space 1049 Only do signals if we are returning to user space
1064 */ 1050 */
1065 LDREG PT_IASQ0(%r16), %r20 1051 LDREG PT_IASQ0(%r16), %r20
1066 CMPIB= 0,%r20,intr_restore /* backward */ 1052 CMPIB= 0,%r20,intr_restore /* backward */
1067 nop 1053 nop
1068 LDREG PT_IASQ1(%r16), %r20 1054 LDREG PT_IASQ1(%r16), %r20
1069 CMPIB= 0,%r20,intr_restore /* backward */ 1055 CMPIB= 0,%r20,intr_restore /* backward */
1070 nop 1056 nop
1071 1057
1072 copy %r0, %r24 /* unsigned long in_syscall */ 1058 copy %r0, %r24 /* unsigned long in_syscall */
1073 copy %r16, %r25 /* struct pt_regs *regs */ 1059 copy %r16, %r25 /* struct pt_regs *regs */
1074 #ifdef CONFIG_64BIT 1060 #ifdef CONFIG_64BIT
1075 ldo -16(%r30),%r29 /* Reference param save area */ 1061 ldo -16(%r30),%r29 /* Reference param save area */
1076 #endif 1062 #endif
1077 1063
1078 BL do_signal,%r2 1064 BL do_signal,%r2
1079 copy %r0, %r26 /* sigset_t *oldset = NULL */ 1065 copy %r0, %r26 /* sigset_t *oldset = NULL */
1080 1066
1081 b intr_check_sig 1067 b intr_check_sig
1082 nop 1068 nop
1083 1069
/* intr_extint: external-interrupt entry; selects a kernel stack, saves
 * the full context, and tail-calls do_cpu_irq_mask with %r2 preset so
 * it returns through intr_return. */
1084 /* 1070 /*
1085 * External interrupts. 1071 * External interrupts.
1086 */ 1072 */
1087 1073
1088 intr_extint: 1074 intr_extint:
1089 CMPIB=,n 0,%r16,1f 1075 CMPIB=,n 0,%r16,1f
1090 get_stack_use_cr30 1076 get_stack_use_cr30
1091 b,n 3f 1077 b,n 3f
1092 1078
1093 1: 1079 1:
1094 #if 0 /* Interrupt Stack support not working yet! */ 1080 #if 0 /* Interrupt Stack support not working yet! */
1095 mfctl %cr31,%r1 1081 mfctl %cr31,%r1
1096 copy %r30,%r17 1082 copy %r30,%r17
1097 /* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/ 1083 /* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
1098 #ifdef CONFIG_64BIT 1084 #ifdef CONFIG_64BIT
1099 depdi 0,63,15,%r17 1085 depdi 0,63,15,%r17
1100 #else 1086 #else
1101 depi 0,31,15,%r17 1087 depi 0,31,15,%r17
1102 #endif 1088 #endif
1103 CMPB=,n %r1,%r17,2f 1089 CMPB=,n %r1,%r17,2f
1104 get_stack_use_cr31 1090 get_stack_use_cr31
1105 b,n 3f 1091 b,n 3f
1106 #endif 1092 #endif
1107 2: 1093 2:
1108 get_stack_use_r30 1094 get_stack_use_r30
1109 1095
1110 3: 1096 3:
1111 save_specials %r29 1097 save_specials %r29
1112 virt_map 1098 virt_map
1113 save_general %r29 1099 save_general %r29
1114 1100
1115 ldo PT_FR0(%r29), %r24 1101 ldo PT_FR0(%r29), %r24
1116 save_fp %r24 1102 save_fp %r24
1117 1103
1118 loadgp 1104 loadgp
1119 1105
1120 copy %r29, %r26 /* arg0 is pt_regs */ 1106 copy %r29, %r26 /* arg0 is pt_regs */
1121 copy %r29, %r16 /* save pt_regs */ 1107 copy %r29, %r16 /* save pt_regs */
1122 1108
1123 ldil L%intr_return, %r2 1109 ldil L%intr_return, %r2
1124 1110
1125 #ifdef CONFIG_64BIT 1111 #ifdef CONFIG_64BIT
1126 ldo -16(%r30),%r29 /* Reference param save area */ 1112 ldo -16(%r30),%r29 /* Reference param save area */
1127 #endif 1113 #endif
1128 1114
1129 b do_cpu_irq_mask 1115 b do_cpu_irq_mask
1130 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ 1116 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1131 1117
1132 1118
/* intr_save: common entry for non-TLB-miss interruptions; saves specials,
 * isr/ior (except for itlb misses), full context, then branches to
 * handle_interruption with %r2 set to intr_check_sig. */
1133 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */ 1119 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1134 1120
1135 .export intr_save, code /* for os_hpmc */ 1121 .export intr_save, code /* for os_hpmc */
1136 1122
1137 intr_save: 1123 intr_save:
1138 mfsp %sr7,%r16 1124 mfsp %sr7,%r16
1139 CMPIB=,n 0,%r16,1f 1125 CMPIB=,n 0,%r16,1f
1140 get_stack_use_cr30 1126 get_stack_use_cr30
1141 b 2f 1127 b 2f
1142 copy %r8,%r26 1128 copy %r8,%r26
1143 1129
1144 1: 1130 1:
1145 get_stack_use_r30 1131 get_stack_use_r30
1146 copy %r8,%r26 1132 copy %r8,%r26
1147 1133
1148 2: 1134 2:
1149 save_specials %r29 1135 save_specials %r29
1150 1136
1151 /* If this trap is an itlb miss, skip saving/adjusting isr/ior */ 1137 /* If this trap is an itlb miss, skip saving/adjusting isr/ior */
1152 1138
1153 /* 1139 /*
1154 * FIXME: 1) Use a #define for the hardwired "6" below (and in 1140 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1155 * traps.c. 1141 * traps.c.
1156 * 2) Once we start executing code above 4 Gb, we need 1142 * 2) Once we start executing code above 4 Gb, we need
1157 * to adjust iasq/iaoq here in the same way we 1143 * to adjust iasq/iaoq here in the same way we
1158 * adjust isr/ior below. 1144 * adjust isr/ior below.
1159 */ 1145 */
1160 1146
1161 CMPIB=,n 6,%r26,skip_save_ior 1147 CMPIB=,n 6,%r26,skip_save_ior
1162 1148
1163 1149
1164 mfctl %cr20, %r16 /* isr */ 1150 mfctl %cr20, %r16 /* isr */
1165 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */ 1151 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1166 mfctl %cr21, %r17 /* ior */ 1152 mfctl %cr21, %r17 /* ior */
1167 1153
1168 1154
1169 #ifdef CONFIG_64BIT 1155 #ifdef CONFIG_64BIT
1170 /* 1156 /*
1171 * If the interrupted code was running with W bit off (32 bit), 1157 * If the interrupted code was running with W bit off (32 bit),
1172 * clear the b bits (bits 0 & 1) in the ior. 1158 * clear the b bits (bits 0 & 1) in the ior.
1173 * save_specials left ipsw value in r8 for us to test. 1159 * save_specials left ipsw value in r8 for us to test.
1174 */ 1160 */
1175 extrd,u,*<> %r8,PSW_W_BIT,1,%r0 1161 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1176 depdi 0,1,2,%r17 1162 depdi 0,1,2,%r17
1177 1163
1178 /* 1164 /*
1179 * FIXME: This code has hardwired assumptions about the split 1165 * FIXME: This code has hardwired assumptions about the split
1180 * between space bits and offset bits. This will change 1166 * between space bits and offset bits. This will change
1181 * when we allow alternate page sizes. 1167 * when we allow alternate page sizes.
1182 */ 1168 */
1183 1169
1184 /* adjust isr/ior. */ 1170 /* adjust isr/ior. */
1185 1171
1186 extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */ 1172 extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */
1187 depd %r1,31,7,%r17 /* deposit them into ior */ 1173 depd %r1,31,7,%r17 /* deposit them into ior */
1188 depdi 0,63,7,%r16 /* clear them from isr */ 1174 depdi 0,63,7,%r16 /* clear them from isr */
1189 #endif 1175 #endif
1190 STREG %r16, PT_ISR(%r29) 1176 STREG %r16, PT_ISR(%r29)
1191 STREG %r17, PT_IOR(%r29) 1177 STREG %r17, PT_IOR(%r29)
1192 1178
1193 1179
1194 skip_save_ior: 1180 skip_save_ior:
1195 virt_map 1181 virt_map
1196 save_general %r29 1182 save_general %r29
1197 1183
1198 ldo PT_FR0(%r29), %r25 1184 ldo PT_FR0(%r29), %r25
1199 save_fp %r25 1185 save_fp %r25
1200 1186
1201 loadgp 1187 loadgp
1202 1188
1203 copy %r29, %r25 /* arg1 is pt_regs */ 1189 copy %r29, %r25 /* arg1 is pt_regs */
1204 #ifdef CONFIG_64BIT 1190 #ifdef CONFIG_64BIT
1205 ldo -16(%r30),%r29 /* Reference param save area */ 1191 ldo -16(%r30),%r29 /* Reference param save area */
1206 #endif 1192 #endif
1207 1193
1208 ldil L%intr_check_sig, %r2 1194 ldil L%intr_check_sig, %r2
1209 copy %r25, %r16 /* save pt_regs */ 1195 copy %r25, %r16 /* save pt_regs */
1210 1196
1211 b handle_interruption 1197 b handle_interruption
1212 ldo R%intr_check_sig(%r2), %r2 1198 ldo R%intr_check_sig(%r2), %r2
1213 1199
1214 1200
1215 /* 1201 /*
1216 * Note for all tlb miss handlers: 1202 * Note for all tlb miss handlers:
1217 * 1203 *
1218 * cr24 contains a pointer to the kernel address space 1204 * cr24 contains a pointer to the kernel address space
1219 * page directory. 1205 * page directory.
1220 * 1206 *
1221 * cr25 contains a pointer to the current user address 1207 * cr25 contains a pointer to the current user address
1222 * space page directory. 1208 * space page directory.
1223 * 1209 *
1224 * sr3 will contain the space id of the user address space 1210 * sr3 will contain the space id of the user address space
1225 * of the current running thread while that thread is 1211 * of the current running thread while that thread is
1226 * running in the kernel. 1212 * running in the kernel.
1227 */ 1213 */
1228 1214
1229 /* 1215 /*
1230 * register number allocations. Note that these are all 1216 * register number allocations. Note that these are all
1231 * in the shadowed registers 1217 * in the shadowed registers
1232 */ 1218 */
1233 1219
1234 t0 = r1 /* temporary register 0 */ 1220 t0 = r1 /* temporary register 0 */
1235 va = r8 /* virtual address for which the trap occurred */ 1221 va = r8 /* virtual address for which the trap occurred */
1236 t1 = r9 /* temporary register 1 */ 1222 t1 = r9 /* temporary register 1 */
1237 pte = r16 /* pte/phys page # */ 1223 pte = r16 /* pte/phys page # */
1238 prot = r17 /* prot bits */ 1224 prot = r17 /* prot bits */
1239 spc = r24 /* space for which the trap occurred */ 1225 spc = r24 /* space for which the trap occurred */
1240 ptp = r25 /* page directory/page table pointer */ 1226 ptp = r25 /* page directory/page table pointer */
1241 1227
1242 #ifdef CONFIG_64BIT 1228 #ifdef CONFIG_64BIT
1243 1229
1244 dtlb_miss_20w: 1230 dtlb_miss_20w:
1245 space_adjust spc,va,t0 1231 space_adjust spc,va,t0
1246 get_pgd spc,ptp 1232 get_pgd spc,ptp
1247 space_check spc,t0,dtlb_fault 1233 space_check spc,t0,dtlb_fault
1248 1234
1249 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w 1235 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1250 1236
1251 update_ptep ptp,pte,t0,t1 1237 update_ptep ptp,pte,t0,t1
1252 1238
1253 make_insert_tlb spc,pte,prot 1239 make_insert_tlb spc,pte,prot
1254 1240
1255 idtlbt pte,prot 1241 idtlbt pte,prot
1256 1242
1257 rfir 1243 rfir
1258 nop 1244 nop
1259 1245
1260 dtlb_check_alias_20w: 1246 dtlb_check_alias_20w:
1261 do_alias spc,t0,t1,va,pte,prot,dtlb_fault 1247 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1262 1248
1263 idtlbt pte,prot 1249 idtlbt pte,prot
1264 1250
1265 rfir 1251 rfir
1266 nop 1252 nop
1267 1253
1268 nadtlb_miss_20w: 1254 nadtlb_miss_20w:
1269 space_adjust spc,va,t0 1255 space_adjust spc,va,t0
1270 get_pgd spc,ptp 1256 get_pgd spc,ptp
1271 space_check spc,t0,nadtlb_fault 1257 space_check spc,t0,nadtlb_fault
1272 1258
1273 L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w 1259 L3_ptep ptp,pte,t0,va,nadtlb_check_flush_20w
1274 1260
1275 update_ptep ptp,pte,t0,t1 1261 update_ptep ptp,pte,t0,t1
1276 1262
1277 make_insert_tlb spc,pte,prot 1263 make_insert_tlb spc,pte,prot
1278 1264
1279 idtlbt pte,prot 1265 idtlbt pte,prot
1280 1266
1281 rfir 1267 rfir
1282 nop 1268 nop
1283 1269
1284 nadtlb_check_flush_20w: 1270 nadtlb_check_flush_20w:
1285 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate 1271 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1286 1272
1287 /* Insert a "flush only" translation */ 1273 /* Insert a "flush only" translation */
1288 1274
1289 depdi,z 7,7,3,prot 1275 depdi,z 7,7,3,prot
1290 depdi 1,10,1,prot 1276 depdi 1,10,1,prot
1291 1277
1292 /* Get rid of prot bits and convert to page addr for idtlbt */ 1278 /* Get rid of prot bits and convert to page addr for idtlbt */
1293 1279
1294 depdi 0,63,12,pte 1280 depdi 0,63,12,pte
1295 extrd,u pte,56,52,pte 1281 extrd,u pte,56,52,pte
1296 idtlbt pte,prot 1282 idtlbt pte,prot
1297 1283
1298 rfir 1284 rfir
1299 nop 1285 nop
1300 1286
1301 #else 1287 #else
1302 1288
1303 dtlb_miss_11: 1289 dtlb_miss_11:
1304 get_pgd spc,ptp 1290 get_pgd spc,ptp
1305 1291
1306 space_check spc,t0,dtlb_fault 1292 space_check spc,t0,dtlb_fault
1307 1293
1308 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 1294 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1309 1295
1310 update_ptep ptp,pte,t0,t1 1296 update_ptep ptp,pte,t0,t1
1311 1297
1312 make_insert_tlb_11 spc,pte,prot 1298 make_insert_tlb_11 spc,pte,prot
1313 1299
1314 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1300 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1315 mtsp spc,%sr1 1301 mtsp spc,%sr1
1316 1302
1317 idtlba pte,(%sr1,va) 1303 idtlba pte,(%sr1,va)
1318 idtlbp prot,(%sr1,va) 1304 idtlbp prot,(%sr1,va)
1319 1305
1320 mtsp t0, %sr1 /* Restore sr1 */ 1306 mtsp t0, %sr1 /* Restore sr1 */
1321 1307
1322 rfir 1308 rfir
1323 nop 1309 nop
1324 1310
1325 dtlb_check_alias_11: 1311 dtlb_check_alias_11:
1326 1312
1327 /* Check to see if fault is in the temporary alias region */ 1313 /* Check to see if fault is in the temporary alias region */
1328 1314
1329 cmpib,<>,n 0,spc,dtlb_fault /* forward */ 1315 cmpib,<>,n 0,spc,dtlb_fault /* forward */
1330 ldil L%(TMPALIAS_MAP_START),t0 1316 ldil L%(TMPALIAS_MAP_START),t0
1331 copy va,t1 1317 copy va,t1
1332 depwi 0,31,23,t1 1318 depwi 0,31,23,t1
1333 cmpb,<>,n t0,t1,dtlb_fault /* forward */ 1319 cmpb,<>,n t0,t1,dtlb_fault /* forward */
1334 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot 1320 ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
1335 depw,z prot,8,7,prot 1321 depw,z prot,8,7,prot
1336 1322
1337 /* 1323 /*
1338 * OK, it is in the temp alias region, check whether "from" or "to". 1324 * OK, it is in the temp alias region, check whether "from" or "to".
1339 * Check "subtle" note in pacache.S re: r23/r26. 1325 * Check "subtle" note in pacache.S re: r23/r26.
1340 */ 1326 */
1341 1327
1342 extrw,u,= va,9,1,r0 1328 extrw,u,= va,9,1,r0
1343 or,tr %r23,%r0,pte /* If "from" use "from" page */ 1329 or,tr %r23,%r0,pte /* If "from" use "from" page */
1344 or %r26,%r0,pte /* else "to", use "to" page */ 1330 or %r26,%r0,pte /* else "to", use "to" page */
1345 1331
1346 idtlba pte,(va) 1332 idtlba pte,(va)
1347 idtlbp prot,(va) 1333 idtlbp prot,(va)
1348 1334
1349 rfir 1335 rfir
1350 nop 1336 nop
1351 1337
1352 nadtlb_miss_11: 1338 nadtlb_miss_11:
1353 get_pgd spc,ptp 1339 get_pgd spc,ptp
1354 1340
1355 space_check spc,t0,nadtlb_fault 1341 space_check spc,t0,nadtlb_fault
1356 1342
1357 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11 1343 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_11
1358 1344
1359 update_ptep ptp,pte,t0,t1 1345 update_ptep ptp,pte,t0,t1
1360 1346
1361 make_insert_tlb_11 spc,pte,prot 1347 make_insert_tlb_11 spc,pte,prot
1362 1348
1363 1349
1364 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1350 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1365 mtsp spc,%sr1 1351 mtsp spc,%sr1
1366 1352
1367 idtlba pte,(%sr1,va) 1353 idtlba pte,(%sr1,va)
1368 idtlbp prot,(%sr1,va) 1354 idtlbp prot,(%sr1,va)
1369 1355
1370 mtsp t0, %sr1 /* Restore sr1 */ 1356 mtsp t0, %sr1 /* Restore sr1 */
1371 1357
1372 rfir 1358 rfir
1373 nop 1359 nop
1374 1360
1375 nadtlb_check_flush_11: 1361 nadtlb_check_flush_11:
1376 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate 1362 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1377 1363
1378 /* Insert a "flush only" translation */ 1364 /* Insert a "flush only" translation */
1379 1365
1380 zdepi 7,7,3,prot 1366 zdepi 7,7,3,prot
1381 depi 1,10,1,prot 1367 depi 1,10,1,prot
1382 1368
1383 /* Get rid of prot bits and convert to page addr for idtlba */ 1369 /* Get rid of prot bits and convert to page addr for idtlba */
1384 1370
1385 depi 0,31,12,pte 1371 depi 0,31,12,pte
1386 extru pte,24,25,pte 1372 extru pte,24,25,pte
1387 1373
1388 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1374 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1389 mtsp spc,%sr1 1375 mtsp spc,%sr1
1390 1376
1391 idtlba pte,(%sr1,va) 1377 idtlba pte,(%sr1,va)
1392 idtlbp prot,(%sr1,va) 1378 idtlbp prot,(%sr1,va)
1393 1379
1394 mtsp t0, %sr1 /* Restore sr1 */ 1380 mtsp t0, %sr1 /* Restore sr1 */
1395 1381
1396 rfir 1382 rfir
1397 nop 1383 nop
1398 1384
1399 dtlb_miss_20: 1385 dtlb_miss_20:
1400 space_adjust spc,va,t0 1386 space_adjust spc,va,t0
1401 get_pgd spc,ptp 1387 get_pgd spc,ptp
1402 space_check spc,t0,dtlb_fault 1388 space_check spc,t0,dtlb_fault
1403 1389
1404 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 1390 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1405 1391
1406 update_ptep ptp,pte,t0,t1 1392 update_ptep ptp,pte,t0,t1
1407 1393
1408 make_insert_tlb spc,pte,prot 1394 make_insert_tlb spc,pte,prot
1409 1395
1410 f_extend pte,t0 1396 f_extend pte,t0
1411 1397
1412 idtlbt pte,prot 1398 idtlbt pte,prot
1413 1399
1414 rfir 1400 rfir
1415 nop 1401 nop
1416 1402
1417 dtlb_check_alias_20: 1403 dtlb_check_alias_20:
1418 do_alias spc,t0,t1,va,pte,prot,dtlb_fault 1404 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1419 1405
1420 idtlbt pte,prot 1406 idtlbt pte,prot
1421 1407
1422 rfir 1408 rfir
1423 nop 1409 nop
1424 1410
1425 nadtlb_miss_20: 1411 nadtlb_miss_20:
1426 get_pgd spc,ptp 1412 get_pgd spc,ptp
1427 1413
1428 space_check spc,t0,nadtlb_fault 1414 space_check spc,t0,nadtlb_fault
1429 1415
1430 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20 1416 L2_ptep ptp,pte,t0,va,nadtlb_check_flush_20
1431 1417
1432 update_ptep ptp,pte,t0,t1 1418 update_ptep ptp,pte,t0,t1
1433 1419
1434 make_insert_tlb spc,pte,prot 1420 make_insert_tlb spc,pte,prot
1435 1421
1436 f_extend pte,t0 1422 f_extend pte,t0
1437 1423
1438 idtlbt pte,prot 1424 idtlbt pte,prot
1439 1425
1440 rfir 1426 rfir
1441 nop 1427 nop
1442 1428
1443 nadtlb_check_flush_20: 1429 nadtlb_check_flush_20:
1444 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate 1430 bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
1445 1431
1446 /* Insert a "flush only" translation */ 1432 /* Insert a "flush only" translation */
1447 1433
1448 depdi,z 7,7,3,prot 1434 depdi,z 7,7,3,prot
1449 depdi 1,10,1,prot 1435 depdi 1,10,1,prot
1450 1436
1451 /* Get rid of prot bits and convert to page addr for idtlbt */ 1437 /* Get rid of prot bits and convert to page addr for idtlbt */
1452 1438
1453 depdi 0,63,12,pte 1439 depdi 0,63,12,pte
1454 extrd,u pte,56,32,pte 1440 extrd,u pte,56,32,pte
1455 idtlbt pte,prot 1441 idtlbt pte,prot
1456 1442
1457 rfir 1443 rfir
1458 nop 1444 nop
1459 #endif 1445 #endif
1460 1446
1461 nadtlb_emulate: 1447 nadtlb_emulate:
1462 1448
1463 /* 1449 /*
1464 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and 1450 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1465 * probei instructions. We don't want to fault for these 1451 * probei instructions. We don't want to fault for these
1466 * instructions (not only does it not make sense, it can cause 1452 * instructions (not only does it not make sense, it can cause
1467 * deadlocks, since some flushes are done with the mmap 1453 * deadlocks, since some flushes are done with the mmap
1468 * semaphore held). If the translation doesn't exist, we can't 1454 * semaphore held). If the translation doesn't exist, we can't
1469 * insert a translation, so have to emulate the side effects 1455 * insert a translation, so have to emulate the side effects
1470 * of the instruction. Since we don't insert a translation 1456 * of the instruction. Since we don't insert a translation
1471 * we can get a lot of faults during a flush loop, so it makes 1457 * we can get a lot of faults during a flush loop, so it makes
1472 * sense to try to do it here with minimum overhead. We only 1458 * sense to try to do it here with minimum overhead. We only
1473 * emulate fdc,fic,pdc,probew,prober instructions whose base 1459 * emulate fdc,fic,pdc,probew,prober instructions whose base
1474 * and index registers are not shadowed. We defer everything 1460 * and index registers are not shadowed. We defer everything
1475 * else to the "slow" path. 1461 * else to the "slow" path.
1476 */ 1462 */
1477 1463
1478 mfctl %cr19,%r9 /* Get iir */ 1464 mfctl %cr19,%r9 /* Get iir */
1479 1465
1480 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits. 1466 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1481 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */ 1467 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1482 1468
1483 /* Checks for fdc,fdce,pdc,"fic,4f" only */ 1469 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1484 ldi 0x280,%r16 1470 ldi 0x280,%r16
1485 and %r9,%r16,%r17 1471 and %r9,%r16,%r17
1486 cmpb,<>,n %r16,%r17,nadtlb_probe_check 1472 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1487 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */ 1473 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
1488 BL get_register,%r25 1474 BL get_register,%r25
1489 extrw,u %r9,15,5,%r8 /* Get index register # */ 1475 extrw,u %r9,15,5,%r8 /* Get index register # */
1490 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */ 1476 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1491 copy %r1,%r24 1477 copy %r1,%r24
1492 BL get_register,%r25 1478 BL get_register,%r25
1493 extrw,u %r9,10,5,%r8 /* Get base register # */ 1479 extrw,u %r9,10,5,%r8 /* Get base register # */
1494 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */ 1480 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1495 BL set_register,%r25 1481 BL set_register,%r25
1496 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */ 1482 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1497 1483
1498 nadtlb_nullify: 1484 nadtlb_nullify:
1499 mfctl %ipsw,%r8 1485 mfctl %ipsw,%r8
1500 ldil L%PSW_N,%r9 1486 ldil L%PSW_N,%r9
1501 or %r8,%r9,%r8 /* Set PSW_N */ 1487 or %r8,%r9,%r8 /* Set PSW_N */
1502 mtctl %r8,%ipsw 1488 mtctl %r8,%ipsw
1503 1489
1504 rfir 1490 rfir
1505 nop 1491 nop
1506 1492
1507 /* 1493 /*
1508 When there is no translation for the probe address then we 1494 When there is no translation for the probe address then we
1509 must nullify the insn and return zero in the target regsiter. 1495 must nullify the insn and return zero in the target regsiter.
1510 This will indicate to the calling code that it does not have 1496 This will indicate to the calling code that it does not have
1511 write/read privileges to this address. 1497 write/read privileges to this address.
1512 1498
1513 This should technically work for prober and probew in PA 1.1, 1499 This should technically work for prober and probew in PA 1.1,
1514 and also probe,r and probe,w in PA 2.0 1500 and also probe,r and probe,w in PA 2.0
1515 1501
1516 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN! 1502 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1517 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET. 1503 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1518 1504
1519 */ 1505 */
1520 nadtlb_probe_check: 1506 nadtlb_probe_check:
1521 ldi 0x80,%r16 1507 ldi 0x80,%r16
1522 and %r9,%r16,%r17 1508 and %r9,%r16,%r17
1523 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/ 1509 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1524 BL get_register,%r25 /* Find the target register */ 1510 BL get_register,%r25 /* Find the target register */
1525 extrw,u %r9,31,5,%r8 /* Get target register */ 1511 extrw,u %r9,31,5,%r8 /* Get target register */
1526 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */ 1512 CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
1527 BL set_register,%r25 1513 BL set_register,%r25
1528 copy %r0,%r1 /* Write zero to target register */ 1514 copy %r0,%r1 /* Write zero to target register */
1529 b nadtlb_nullify /* Nullify return insn */ 1515 b nadtlb_nullify /* Nullify return insn */
1530 nop 1516 nop
1531 1517
1532 1518
1533 #ifdef CONFIG_64BIT 1519 #ifdef CONFIG_64BIT
1534 itlb_miss_20w: 1520 itlb_miss_20w:
1535 1521
1536 /* 1522 /*
1537 * I miss is a little different, since we allow users to fault 1523 * I miss is a little different, since we allow users to fault
1538 * on the gateway page which is in the kernel address space. 1524 * on the gateway page which is in the kernel address space.
1539 */ 1525 */
1540 1526
1541 space_adjust spc,va,t0 1527 space_adjust spc,va,t0
1542 get_pgd spc,ptp 1528 get_pgd spc,ptp
1543 space_check spc,t0,itlb_fault 1529 space_check spc,t0,itlb_fault
1544 1530
1545 L3_ptep ptp,pte,t0,va,itlb_fault 1531 L3_ptep ptp,pte,t0,va,itlb_fault
1546 1532
1547 update_ptep ptp,pte,t0,t1 1533 update_ptep ptp,pte,t0,t1
1548 1534
1549 make_insert_tlb spc,pte,prot 1535 make_insert_tlb spc,pte,prot
1550 1536
1551 iitlbt pte,prot 1537 iitlbt pte,prot
1552 1538
1553 rfir 1539 rfir
1554 nop 1540 nop
1555 1541
1556 #else 1542 #else
1557 1543
1558 itlb_miss_11: 1544 itlb_miss_11:
1559 get_pgd spc,ptp 1545 get_pgd spc,ptp
1560 1546
1561 space_check spc,t0,itlb_fault 1547 space_check spc,t0,itlb_fault
1562 1548
1563 L2_ptep ptp,pte,t0,va,itlb_fault 1549 L2_ptep ptp,pte,t0,va,itlb_fault
1564 1550
1565 update_ptep ptp,pte,t0,t1 1551 update_ptep ptp,pte,t0,t1
1566 1552
1567 make_insert_tlb_11 spc,pte,prot 1553 make_insert_tlb_11 spc,pte,prot
1568 1554
1569 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */ 1555 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1570 mtsp spc,%sr1 1556 mtsp spc,%sr1
1571 1557
1572 iitlba pte,(%sr1,va) 1558 iitlba pte,(%sr1,va)
1573 iitlbp prot,(%sr1,va) 1559 iitlbp prot,(%sr1,va)
1574 1560
1575 mtsp t0, %sr1 /* Restore sr1 */ 1561 mtsp t0, %sr1 /* Restore sr1 */
1576 1562
1577 rfir 1563 rfir
1578 nop 1564 nop
1579 1565
1580 itlb_miss_20: 1566 itlb_miss_20:
1581 get_pgd spc,ptp 1567 get_pgd spc,ptp
1582 1568
1583 space_check spc,t0,itlb_fault 1569 space_check spc,t0,itlb_fault
1584 1570
1585 L2_ptep ptp,pte,t0,va,itlb_fault 1571 L2_ptep ptp,pte,t0,va,itlb_fault
1586 1572
1587 update_ptep ptp,pte,t0,t1 1573 update_ptep ptp,pte,t0,t1
1588 1574
1589 make_insert_tlb spc,pte,prot 1575 make_insert_tlb spc,pte,prot
1590 1576
1591 f_extend pte,t0 1577 f_extend pte,t0
1592 1578
1593 iitlbt pte,prot 1579 iitlbt pte,prot
1594 1580
1595 rfir 1581 rfir
1596 nop 1582 nop
1597 1583
1598 #endif 1584 #endif
1599 1585
1600 #ifdef CONFIG_64BIT 1586 #ifdef CONFIG_64BIT
1601 1587
1602 dbit_trap_20w: 1588 dbit_trap_20w:
1603 space_adjust spc,va,t0 1589 space_adjust spc,va,t0
1604 get_pgd spc,ptp 1590 get_pgd spc,ptp
1605 space_check spc,t0,dbit_fault 1591 space_check spc,t0,dbit_fault
1606 1592
1607 L3_ptep ptp,pte,t0,va,dbit_fault 1593 L3_ptep ptp,pte,t0,va,dbit_fault
1608 1594
1609 #ifdef CONFIG_SMP 1595 #ifdef CONFIG_SMP
1610 CMPIB=,n 0,spc,dbit_nolock_20w 1596 CMPIB=,n 0,spc,dbit_nolock_20w
1611 load32 PA(pa_dbit_lock),t0 1597 load32 PA(pa_dbit_lock),t0
1612 1598
1613 dbit_spin_20w: 1599 dbit_spin_20w:
1614 ldcw 0(t0),t1 1600 ldcw 0(t0),t1
1615 cmpib,= 0,t1,dbit_spin_20w 1601 cmpib,= 0,t1,dbit_spin_20w
1616 nop 1602 nop
1617 1603
1618 dbit_nolock_20w: 1604 dbit_nolock_20w:
1619 #endif 1605 #endif
1620 update_dirty ptp,pte,t1 1606 update_dirty ptp,pte,t1
1621 1607
1622 make_insert_tlb spc,pte,prot 1608 make_insert_tlb spc,pte,prot
1623 1609
1624 idtlbt pte,prot 1610 idtlbt pte,prot
1625 #ifdef CONFIG_SMP 1611 #ifdef CONFIG_SMP
1626 CMPIB=,n 0,spc,dbit_nounlock_20w 1612 CMPIB=,n 0,spc,dbit_nounlock_20w
1627 ldi 1,t1 1613 ldi 1,t1
1628 stw t1,0(t0) 1614 stw t1,0(t0)
1629 1615
1630 dbit_nounlock_20w: 1616 dbit_nounlock_20w:
1631 #endif 1617 #endif
1632 1618
1633 rfir 1619 rfir
1634 nop 1620 nop
1635 #else 1621 #else
1636 1622
1637 dbit_trap_11: 1623 dbit_trap_11:
1638 1624
1639 get_pgd spc,ptp 1625 get_pgd spc,ptp
1640 1626
1641 space_check spc,t0,dbit_fault 1627 space_check spc,t0,dbit_fault
1642 1628
1643 L2_ptep ptp,pte,t0,va,dbit_fault 1629 L2_ptep ptp,pte,t0,va,dbit_fault
1644 1630
1645 #ifdef CONFIG_SMP 1631 #ifdef CONFIG_SMP
1646 CMPIB=,n 0,spc,dbit_nolock_11 1632 CMPIB=,n 0,spc,dbit_nolock_11
1647 load32 PA(pa_dbit_lock),t0 1633 load32 PA(pa_dbit_lock),t0
1648 1634
1649 dbit_spin_11: 1635 dbit_spin_11:
1650 ldcw 0(t0),t1 1636 ldcw 0(t0),t1
1651 cmpib,= 0,t1,dbit_spin_11 1637 cmpib,= 0,t1,dbit_spin_11
1652 nop 1638 nop
1653 1639
1654 dbit_nolock_11: 1640 dbit_nolock_11:
1655 #endif 1641 #endif
1656 update_dirty ptp,pte,t1 1642 update_dirty ptp,pte,t1
1657 1643
1658 make_insert_tlb_11 spc,pte,prot 1644 make_insert_tlb_11 spc,pte,prot
1659 1645
1660 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */ 1646 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1661 mtsp spc,%sr1 1647 mtsp spc,%sr1
1662 1648
1663 idtlba pte,(%sr1,va) 1649 idtlba pte,(%sr1,va)
1664 idtlbp prot,(%sr1,va) 1650 idtlbp prot,(%sr1,va)
1665 1651
1666 mtsp t1, %sr1 /* Restore sr1 */ 1652 mtsp t1, %sr1 /* Restore sr1 */
1667 #ifdef CONFIG_SMP 1653 #ifdef CONFIG_SMP
1668 CMPIB=,n 0,spc,dbit_nounlock_11 1654 CMPIB=,n 0,spc,dbit_nounlock_11
1669 ldi 1,t1 1655 ldi 1,t1
1670 stw t1,0(t0) 1656 stw t1,0(t0)
1671 1657
1672 dbit_nounlock_11: 1658 dbit_nounlock_11:
1673 #endif 1659 #endif
1674 1660
1675 rfir 1661 rfir
1676 nop 1662 nop
1677 1663
1678 dbit_trap_20: 1664 dbit_trap_20:
1679 get_pgd spc,ptp 1665 get_pgd spc,ptp
1680 1666
1681 space_check spc,t0,dbit_fault 1667 space_check spc,t0,dbit_fault
1682 1668
1683 L2_ptep ptp,pte,t0,va,dbit_fault 1669 L2_ptep ptp,pte,t0,va,dbit_fault
1684 1670
1685 #ifdef CONFIG_SMP 1671 #ifdef CONFIG_SMP
1686 CMPIB=,n 0,spc,dbit_nolock_20 1672 CMPIB=,n 0,spc,dbit_nolock_20
1687 load32 PA(pa_dbit_lock),t0 1673 load32 PA(pa_dbit_lock),t0
1688 1674
1689 dbit_spin_20: 1675 dbit_spin_20:
1690 ldcw 0(t0),t1 1676 ldcw 0(t0),t1
1691 cmpib,= 0,t1,dbit_spin_20 1677 cmpib,= 0,t1,dbit_spin_20
1692 nop 1678 nop
1693 1679
1694 dbit_nolock_20: 1680 dbit_nolock_20:
1695 #endif 1681 #endif
1696 update_dirty ptp,pte,t1 1682 update_dirty ptp,pte,t1
1697 1683
1698 make_insert_tlb spc,pte,prot 1684 make_insert_tlb spc,pte,prot
1699 1685
1700 f_extend pte,t1 1686 f_extend pte,t1
1701 1687
1702 idtlbt pte,prot 1688 idtlbt pte,prot
1703 1689
1704 #ifdef CONFIG_SMP 1690 #ifdef CONFIG_SMP
1705 CMPIB=,n 0,spc,dbit_nounlock_20 1691 CMPIB=,n 0,spc,dbit_nounlock_20
1706 ldi 1,t1 1692 ldi 1,t1
1707 stw t1,0(t0) 1693 stw t1,0(t0)
1708 1694
1709 dbit_nounlock_20: 1695 dbit_nounlock_20:
1710 #endif 1696 #endif
1711 1697
1712 rfir 1698 rfir
1713 nop 1699 nop
1714 #endif 1700 #endif
1715 1701
1716 .import handle_interruption,code 1702 .import handle_interruption,code
1717 1703
1718 kernel_bad_space: 1704 kernel_bad_space:
1719 b intr_save 1705 b intr_save
1720 ldi 31,%r8 /* Use an unused code */ 1706 ldi 31,%r8 /* Use an unused code */
1721 1707
1722 dbit_fault: 1708 dbit_fault:
1723 b intr_save 1709 b intr_save
1724 ldi 20,%r8 1710 ldi 20,%r8
1725 1711
1726 itlb_fault: 1712 itlb_fault:
1727 b intr_save 1713 b intr_save
1728 ldi 6,%r8 1714 ldi 6,%r8
1729 1715
1730 nadtlb_fault: 1716 nadtlb_fault:
1731 b intr_save 1717 b intr_save
1732 ldi 17,%r8 1718 ldi 17,%r8
1733 1719
1734 dtlb_fault: 1720 dtlb_fault:
1735 b intr_save 1721 b intr_save
1736 ldi 15,%r8 1722 ldi 15,%r8
1737 1723
1738 /* Register saving semantics for system calls: 1724 /* Register saving semantics for system calls:
1739 1725
1740 %r1 clobbered by system call macro in userspace 1726 %r1 clobbered by system call macro in userspace
1741 %r2 saved in PT_REGS by gateway page 1727 %r2 saved in PT_REGS by gateway page
1742 %r3 - %r18 preserved by C code (saved by signal code) 1728 %r3 - %r18 preserved by C code (saved by signal code)
1743 %r19 - %r20 saved in PT_REGS by gateway page 1729 %r19 - %r20 saved in PT_REGS by gateway page
1744 %r21 - %r22 non-standard syscall args 1730 %r21 - %r22 non-standard syscall args
1745 stored in kernel stack by gateway page 1731 stored in kernel stack by gateway page
1746 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page 1732 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1747 %r27 - %r30 saved in PT_REGS by gateway page 1733 %r27 - %r30 saved in PT_REGS by gateway page
1748 %r31 syscall return pointer 1734 %r31 syscall return pointer
1749 */ 1735 */
1750 1736
1751 /* Floating point registers (FIXME: what do we do with these?) 1737 /* Floating point registers (FIXME: what do we do with these?)
1752 1738
1753 %fr0 - %fr3 status/exception, not preserved 1739 %fr0 - %fr3 status/exception, not preserved
1754 %fr4 - %fr7 arguments 1740 %fr4 - %fr7 arguments
1755 %fr8 - %fr11 not preserved by C code 1741 %fr8 - %fr11 not preserved by C code
1756 %fr12 - %fr21 preserved by C code 1742 %fr12 - %fr21 preserved by C code
1757 %fr22 - %fr31 not preserved by C code 1743 %fr22 - %fr31 not preserved by C code
1758 */ 1744 */
1759 1745
1760 .macro reg_save regs 1746 .macro reg_save regs
1761 STREG %r3, PT_GR3(\regs) 1747 STREG %r3, PT_GR3(\regs)
1762 STREG %r4, PT_GR4(\regs) 1748 STREG %r4, PT_GR4(\regs)
1763 STREG %r5, PT_GR5(\regs) 1749 STREG %r5, PT_GR5(\regs)
1764 STREG %r6, PT_GR6(\regs) 1750 STREG %r6, PT_GR6(\regs)
1765 STREG %r7, PT_GR7(\regs) 1751 STREG %r7, PT_GR7(\regs)
1766 STREG %r8, PT_GR8(\regs) 1752 STREG %r8, PT_GR8(\regs)
1767 STREG %r9, PT_GR9(\regs) 1753 STREG %r9, PT_GR9(\regs)
1768 STREG %r10,PT_GR10(\regs) 1754 STREG %r10,PT_GR10(\regs)
1769 STREG %r11,PT_GR11(\regs) 1755 STREG %r11,PT_GR11(\regs)
1770 STREG %r12,PT_GR12(\regs) 1756 STREG %r12,PT_GR12(\regs)
1771 STREG %r13,PT_GR13(\regs) 1757 STREG %r13,PT_GR13(\regs)
1772 STREG %r14,PT_GR14(\regs) 1758 STREG %r14,PT_GR14(\regs)
1773 STREG %r15,PT_GR15(\regs) 1759 STREG %r15,PT_GR15(\regs)
1774 STREG %r16,PT_GR16(\regs) 1760 STREG %r16,PT_GR16(\regs)
1775 STREG %r17,PT_GR17(\regs) 1761 STREG %r17,PT_GR17(\regs)
1776 STREG %r18,PT_GR18(\regs) 1762 STREG %r18,PT_GR18(\regs)
1777 .endm 1763 .endm
1778 1764
1779 .macro reg_restore regs 1765 .macro reg_restore regs
1780 LDREG PT_GR3(\regs), %r3 1766 LDREG PT_GR3(\regs), %r3
1781 LDREG PT_GR4(\regs), %r4 1767 LDREG PT_GR4(\regs), %r4
1782 LDREG PT_GR5(\regs), %r5 1768 LDREG PT_GR5(\regs), %r5
1783 LDREG PT_GR6(\regs), %r6 1769 LDREG PT_GR6(\regs), %r6
1784 LDREG PT_GR7(\regs), %r7 1770 LDREG PT_GR7(\regs), %r7
1785 LDREG PT_GR8(\regs), %r8 1771 LDREG PT_GR8(\regs), %r8
1786 LDREG PT_GR9(\regs), %r9 1772 LDREG PT_GR9(\regs), %r9
1787 LDREG PT_GR10(\regs),%r10 1773 LDREG PT_GR10(\regs),%r10
1788 LDREG PT_GR11(\regs),%r11 1774 LDREG PT_GR11(\regs),%r11
1789 LDREG PT_GR12(\regs),%r12 1775 LDREG PT_GR12(\regs),%r12
1790 LDREG PT_GR13(\regs),%r13 1776 LDREG PT_GR13(\regs),%r13
1791 LDREG PT_GR14(\regs),%r14 1777 LDREG PT_GR14(\regs),%r14
1792 LDREG PT_GR15(\regs),%r15 1778 LDREG PT_GR15(\regs),%r15
1793 LDREG PT_GR16(\regs),%r16 1779 LDREG PT_GR16(\regs),%r16
1794 LDREG PT_GR17(\regs),%r17 1780 LDREG PT_GR17(\regs),%r17
1795 LDREG PT_GR18(\regs),%r18 1781 LDREG PT_GR18(\regs),%r18
1796 .endm 1782 .endm
1797 1783
1798 .export sys_fork_wrapper 1784 .export sys_fork_wrapper
1799 .export child_return 1785 .export child_return
1800 sys_fork_wrapper: 1786 sys_fork_wrapper:
1801 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1 1787 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1802 ldo TASK_REGS(%r1),%r1 1788 ldo TASK_REGS(%r1),%r1
1803 reg_save %r1 1789 reg_save %r1
1804 mfctl %cr27, %r3 1790 mfctl %cr27, %r3
1805 STREG %r3, PT_CR27(%r1) 1791 STREG %r3, PT_CR27(%r1)
1806 1792
1807 STREG %r2,-RP_OFFSET(%r30) 1793 STREG %r2,-RP_OFFSET(%r30)
1808 ldo FRAME_SIZE(%r30),%r30 1794 ldo FRAME_SIZE(%r30),%r30
1809 #ifdef CONFIG_64BIT 1795 #ifdef CONFIG_64BIT
1810 ldo -16(%r30),%r29 /* Reference param save area */ 1796 ldo -16(%r30),%r29 /* Reference param save area */
1811 #endif 1797 #endif
1812 1798
1813 /* These are call-clobbered registers and therefore 1799 /* These are call-clobbered registers and therefore
1814 also syscall-clobbered (we hope). */ 1800 also syscall-clobbered (we hope). */
1815 STREG %r2,PT_GR19(%r1) /* save for child */ 1801 STREG %r2,PT_GR19(%r1) /* save for child */
1816 STREG %r30,PT_GR21(%r1) 1802 STREG %r30,PT_GR21(%r1)
1817 1803
1818 LDREG PT_GR30(%r1),%r25 1804 LDREG PT_GR30(%r1),%r25
1819 copy %r1,%r24 1805 copy %r1,%r24
1820 BL sys_clone,%r2 1806 BL sys_clone,%r2
1821 ldi SIGCHLD,%r26 1807 ldi SIGCHLD,%r26
1822 1808
1823 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2 1809 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1824 wrapper_exit: 1810 wrapper_exit:
1825 ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */ 1811 ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
1826 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1812 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1827 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1813 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1828 1814
1829 LDREG PT_CR27(%r1), %r3 1815 LDREG PT_CR27(%r1), %r3
1830 mtctl %r3, %cr27 1816 mtctl %r3, %cr27
1831 reg_restore %r1 1817 reg_restore %r1
1832 1818
1833 /* strace expects syscall # to be preserved in r20 */ 1819 /* strace expects syscall # to be preserved in r20 */
1834 ldi __NR_fork,%r20 1820 ldi __NR_fork,%r20
1835 bv %r0(%r2) 1821 bv %r0(%r2)
1836 STREG %r20,PT_GR20(%r1) 1822 STREG %r20,PT_GR20(%r1)
1837 1823
1838 /* Set the return value for the child */ 1824 /* Set the return value for the child */
1839 child_return: 1825 child_return:
1840 BL schedule_tail, %r2 1826 BL schedule_tail, %r2
1841 nop 1827 nop
1842 1828
1843 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1 1829 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
1844 LDREG TASK_PT_GR19(%r1),%r2 1830 LDREG TASK_PT_GR19(%r1),%r2
1845 b wrapper_exit 1831 b wrapper_exit
1846 copy %r0,%r28 1832 copy %r0,%r28
1847 1833
1848 1834
1849 .export sys_clone_wrapper 1835 .export sys_clone_wrapper
1850 sys_clone_wrapper: 1836 sys_clone_wrapper:
1851 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1837 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1852 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1838 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1853 reg_save %r1 1839 reg_save %r1
1854 mfctl %cr27, %r3 1840 mfctl %cr27, %r3
1855 STREG %r3, PT_CR27(%r1) 1841 STREG %r3, PT_CR27(%r1)
1856 1842
1857 STREG %r2,-RP_OFFSET(%r30) 1843 STREG %r2,-RP_OFFSET(%r30)
1858 ldo FRAME_SIZE(%r30),%r30 1844 ldo FRAME_SIZE(%r30),%r30
1859 #ifdef CONFIG_64BIT 1845 #ifdef CONFIG_64BIT
1860 ldo -16(%r30),%r29 /* Reference param save area */ 1846 ldo -16(%r30),%r29 /* Reference param save area */
1861 #endif 1847 #endif
1862 1848
1863 STREG %r2,PT_GR19(%r1) /* save for child */ 1849 STREG %r2,PT_GR19(%r1) /* save for child */
1864 STREG %r30,PT_GR21(%r1) 1850 STREG %r30,PT_GR21(%r1)
1865 BL sys_clone,%r2 1851 BL sys_clone,%r2
1866 copy %r1,%r24 1852 copy %r1,%r24
1867 1853
1868 b wrapper_exit 1854 b wrapper_exit
1869 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2 1855 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1870 1856
1871 .export sys_vfork_wrapper 1857 .export sys_vfork_wrapper
1872 sys_vfork_wrapper: 1858 sys_vfork_wrapper:
1873 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1859 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1874 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1860 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1875 reg_save %r1 1861 reg_save %r1
1876 mfctl %cr27, %r3 1862 mfctl %cr27, %r3
1877 STREG %r3, PT_CR27(%r1) 1863 STREG %r3, PT_CR27(%r1)
1878 1864
1879 STREG %r2,-RP_OFFSET(%r30) 1865 STREG %r2,-RP_OFFSET(%r30)
1880 ldo FRAME_SIZE(%r30),%r30 1866 ldo FRAME_SIZE(%r30),%r30
1881 #ifdef CONFIG_64BIT 1867 #ifdef CONFIG_64BIT
1882 ldo -16(%r30),%r29 /* Reference param save area */ 1868 ldo -16(%r30),%r29 /* Reference param save area */
1883 #endif 1869 #endif
1884 1870
1885 STREG %r2,PT_GR19(%r1) /* save for child */ 1871 STREG %r2,PT_GR19(%r1) /* save for child */
1886 STREG %r30,PT_GR21(%r1) 1872 STREG %r30,PT_GR21(%r1)
1887 1873
1888 BL sys_vfork,%r2 1874 BL sys_vfork,%r2
1889 copy %r1,%r26 1875 copy %r1,%r26
1890 1876
1891 b wrapper_exit 1877 b wrapper_exit
1892 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2 1878 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1893 1879
1894 1880
1895 .macro execve_wrapper execve 1881 .macro execve_wrapper execve
1896 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1882 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1897 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1883 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1898 1884
1899 /* 1885 /*
1900 * Do we need to save/restore r3-r18 here? 1886 * Do we need to save/restore r3-r18 here?
1901 * I don't think so. why would new thread need old 1887 * I don't think so. why would new thread need old
1902 * threads registers? 1888 * threads registers?
1903 */ 1889 */
1904 1890
1905 /* %arg0 - %arg3 are already saved for us. */ 1891 /* %arg0 - %arg3 are already saved for us. */
1906 1892
1907 STREG %r2,-RP_OFFSET(%r30) 1893 STREG %r2,-RP_OFFSET(%r30)
1908 ldo FRAME_SIZE(%r30),%r30 1894 ldo FRAME_SIZE(%r30),%r30
1909 #ifdef CONFIG_64BIT 1895 #ifdef CONFIG_64BIT
1910 ldo -16(%r30),%r29 /* Reference param save area */ 1896 ldo -16(%r30),%r29 /* Reference param save area */
1911 #endif 1897 #endif
1912 BL \execve,%r2 1898 BL \execve,%r2
1913 copy %r1,%arg0 1899 copy %r1,%arg0
1914 1900
1915 ldo -FRAME_SIZE(%r30),%r30 1901 ldo -FRAME_SIZE(%r30),%r30
1916 LDREG -RP_OFFSET(%r30),%r2 1902 LDREG -RP_OFFSET(%r30),%r2
1917 1903
1918 /* If exec succeeded we need to load the args */ 1904 /* If exec succeeded we need to load the args */
1919 1905
1920 ldo -1024(%r0),%r1 1906 ldo -1024(%r0),%r1
1921 cmpb,>>= %r28,%r1,error_\execve 1907 cmpb,>>= %r28,%r1,error_\execve
1922 copy %r2,%r19 1908 copy %r2,%r19
1923 1909
1924 error_\execve: 1910 error_\execve:
1925 bv %r0(%r19) 1911 bv %r0(%r19)
1926 nop 1912 nop
1927 .endm 1913 .endm
1928 1914
1929 .export sys_execve_wrapper 1915 .export sys_execve_wrapper
1930 .import sys_execve 1916 .import sys_execve
1931 1917
1932 sys_execve_wrapper: 1918 sys_execve_wrapper:
1933 execve_wrapper sys_execve 1919 execve_wrapper sys_execve
1934 1920
1935 #ifdef CONFIG_64BIT 1921 #ifdef CONFIG_64BIT
1936 .export sys32_execve_wrapper 1922 .export sys32_execve_wrapper
1937 .import sys32_execve 1923 .import sys32_execve
1938 1924
1939 sys32_execve_wrapper: 1925 sys32_execve_wrapper:
1940 execve_wrapper sys32_execve 1926 execve_wrapper sys32_execve
1941 #endif 1927 #endif
1942 1928
1943 .export sys_rt_sigreturn_wrapper 1929 .export sys_rt_sigreturn_wrapper
1944 sys_rt_sigreturn_wrapper: 1930 sys_rt_sigreturn_wrapper:
1945 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 1931 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1946 ldo TASK_REGS(%r26),%r26 /* get pt regs */ 1932 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1947 /* Don't save regs, we are going to restore them from sigcontext. */ 1933 /* Don't save regs, we are going to restore them from sigcontext. */
1948 STREG %r2, -RP_OFFSET(%r30) 1934 STREG %r2, -RP_OFFSET(%r30)
1949 #ifdef CONFIG_64BIT 1935 #ifdef CONFIG_64BIT
1950 ldo FRAME_SIZE(%r30), %r30 1936 ldo FRAME_SIZE(%r30), %r30
1951 BL sys_rt_sigreturn,%r2 1937 BL sys_rt_sigreturn,%r2
1952 ldo -16(%r30),%r29 /* Reference param save area */ 1938 ldo -16(%r30),%r29 /* Reference param save area */
1953 #else 1939 #else
1954 BL sys_rt_sigreturn,%r2 1940 BL sys_rt_sigreturn,%r2
1955 ldo FRAME_SIZE(%r30), %r30 1941 ldo FRAME_SIZE(%r30), %r30
1956 #endif 1942 #endif
1957 1943
1958 ldo -FRAME_SIZE(%r30), %r30 1944 ldo -FRAME_SIZE(%r30), %r30
1959 LDREG -RP_OFFSET(%r30), %r2 1945 LDREG -RP_OFFSET(%r30), %r2
1960 1946
1961 /* FIXME: I think we need to restore a few more things here. */ 1947 /* FIXME: I think we need to restore a few more things here. */
1962 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1948 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1963 ldo TASK_REGS(%r1),%r1 /* get pt regs */ 1949 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1964 reg_restore %r1 1950 reg_restore %r1
1965 1951
1966 /* If the signal was received while the process was blocked on a 1952 /* If the signal was received while the process was blocked on a
1967 * syscall, then r2 will take us to syscall_exit; otherwise r2 will 1953 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1968 * take us to syscall_exit_rfi and on to intr_return. 1954 * take us to syscall_exit_rfi and on to intr_return.
1969 */ 1955 */
1970 bv %r0(%r2) 1956 bv %r0(%r2)
1971 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */ 1957 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1972 1958
1973 .export sys_sigaltstack_wrapper 1959 .export sys_sigaltstack_wrapper
1974 sys_sigaltstack_wrapper: 1960 sys_sigaltstack_wrapper:
1975 /* Get the user stack pointer */ 1961 /* Get the user stack pointer */
1976 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 1962 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1977 ldo TASK_REGS(%r1),%r24 /* get pt regs */ 1963 ldo TASK_REGS(%r1),%r24 /* get pt regs */
1978 LDREG TASK_PT_GR30(%r24),%r24 1964 LDREG TASK_PT_GR30(%r24),%r24
1979 STREG %r2, -RP_OFFSET(%r30) 1965 STREG %r2, -RP_OFFSET(%r30)
1980 #ifdef CONFIG_64BIT 1966 #ifdef CONFIG_64BIT
1981 ldo FRAME_SIZE(%r30), %r30 1967 ldo FRAME_SIZE(%r30), %r30
1982 b,l do_sigaltstack,%r2 1968 b,l do_sigaltstack,%r2
1983 ldo -16(%r30),%r29 /* Reference param save area */ 1969 ldo -16(%r30),%r29 /* Reference param save area */
1984 #else 1970 #else
1985 bl do_sigaltstack,%r2 1971 bl do_sigaltstack,%r2
1986 ldo FRAME_SIZE(%r30), %r30 1972 ldo FRAME_SIZE(%r30), %r30
1987 #endif 1973 #endif
1988 1974
1989 ldo -FRAME_SIZE(%r30), %r30 1975 ldo -FRAME_SIZE(%r30), %r30
1990 LDREG -RP_OFFSET(%r30), %r2 1976 LDREG -RP_OFFSET(%r30), %r2
1991 bv %r0(%r2) 1977 bv %r0(%r2)
1992 nop 1978 nop
1993 1979
1994 #ifdef CONFIG_64BIT 1980 #ifdef CONFIG_64BIT
1995 .export sys32_sigaltstack_wrapper 1981 .export sys32_sigaltstack_wrapper
1996 sys32_sigaltstack_wrapper: 1982 sys32_sigaltstack_wrapper:
1997 /* Get the user stack pointer */ 1983 /* Get the user stack pointer */
1998 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24 1984 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
1999 LDREG TASK_PT_GR30(%r24),%r24 1985 LDREG TASK_PT_GR30(%r24),%r24
2000 STREG %r2, -RP_OFFSET(%r30) 1986 STREG %r2, -RP_OFFSET(%r30)
2001 ldo FRAME_SIZE(%r30), %r30 1987 ldo FRAME_SIZE(%r30), %r30
2002 b,l do_sigaltstack32,%r2 1988 b,l do_sigaltstack32,%r2
2003 ldo -16(%r30),%r29 /* Reference param save area */ 1989 ldo -16(%r30),%r29 /* Reference param save area */
2004 1990
2005 ldo -FRAME_SIZE(%r30), %r30 1991 ldo -FRAME_SIZE(%r30), %r30
2006 LDREG -RP_OFFSET(%r30), %r2 1992 LDREG -RP_OFFSET(%r30), %r2
2007 bv %r0(%r2) 1993 bv %r0(%r2)
2008 nop 1994 nop
2009 #endif 1995 #endif
2010 1996
2011 .export sys_rt_sigsuspend_wrapper 1997 .export sys_rt_sigsuspend_wrapper
2012 sys_rt_sigsuspend_wrapper: 1998 sys_rt_sigsuspend_wrapper:
2013 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1 1999 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
2014 ldo TASK_REGS(%r1),%r24 2000 ldo TASK_REGS(%r1),%r24
2015 reg_save %r24 2001 reg_save %r24
2016 2002
2017 STREG %r2, -RP_OFFSET(%r30) 2003 STREG %r2, -RP_OFFSET(%r30)
2018 #ifdef CONFIG_64BIT 2004 #ifdef CONFIG_64BIT
2019 ldo FRAME_SIZE(%r30), %r30 2005 ldo FRAME_SIZE(%r30), %r30
2020 b,l sys_rt_sigsuspend,%r2 2006 b,l sys_rt_sigsuspend,%r2
2021 ldo -16(%r30),%r29 /* Reference param save area */ 2007 ldo -16(%r30),%r29 /* Reference param save area */
2022 #else 2008 #else
2023 bl sys_rt_sigsuspend,%r2 2009 bl sys_rt_sigsuspend,%r2
2024 ldo FRAME_SIZE(%r30), %r30 2010 ldo FRAME_SIZE(%r30), %r30
2025 #endif 2011 #endif
2026 2012
2027 ldo -FRAME_SIZE(%r30), %r30 2013 ldo -FRAME_SIZE(%r30), %r30
2028 LDREG -RP_OFFSET(%r30), %r2 2014 LDREG -RP_OFFSET(%r30), %r2
2029 2015
2030 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1 2016 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
2031 ldo TASK_REGS(%r1),%r1 2017 ldo TASK_REGS(%r1),%r1
2032 reg_restore %r1 2018 reg_restore %r1
2033 2019
2034 bv %r0(%r2) 2020 bv %r0(%r2)
2035 nop 2021 nop
2036 2022
2037 .export syscall_exit 2023 .export syscall_exit
2038 syscall_exit: 2024 syscall_exit:
2039 2025
2040 /* NOTE: HP-UX syscalls also come through here 2026 /* NOTE: HP-UX syscalls also come through here
2041 * after hpux_syscall_exit fixes up return 2027 * after hpux_syscall_exit fixes up return
2042 * values. */ 2028 * values. */
2043 2029
2044 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit 2030 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
2045 * via syscall_exit_rfi if the signal was received while the process 2031 * via syscall_exit_rfi if the signal was received while the process
2046 * was running. 2032 * was running.
2047 */ 2033 */
2048 2034
2049 /* save return value now */ 2035 /* save return value now */
2050 2036
2051 mfctl %cr30, %r1 2037 mfctl %cr30, %r1
2052 LDREG TI_TASK(%r1),%r1 2038 LDREG TI_TASK(%r1),%r1
2053 STREG %r28,TASK_PT_GR28(%r1) 2039 STREG %r28,TASK_PT_GR28(%r1)
2054 2040
2055 #ifdef CONFIG_HPUX 2041 #ifdef CONFIG_HPUX
2056 2042
2057 /* <linux/personality.h> cannot be easily included */ 2043 /* <linux/personality.h> cannot be easily included */
2058 #define PER_HPUX 0x10 2044 #define PER_HPUX 0x10
2059 LDREG TASK_PERSONALITY(%r1),%r19 2045 LDREG TASK_PERSONALITY(%r1),%r19
2060 2046
2061 /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */ 2047 /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
2062 ldo -PER_HPUX(%r19), %r19 2048 ldo -PER_HPUX(%r19), %r19
2063 CMPIB<>,n 0,%r19,1f 2049 CMPIB<>,n 0,%r19,1f
2064 2050
2065 /* Save other hpux returns if personality is PER_HPUX */ 2051 /* Save other hpux returns if personality is PER_HPUX */
2066 STREG %r22,TASK_PT_GR22(%r1) 2052 STREG %r22,TASK_PT_GR22(%r1)
2067 STREG %r29,TASK_PT_GR29(%r1) 2053 STREG %r29,TASK_PT_GR29(%r1)
2068 1: 2054 1:
2069 2055
2070 #endif /* CONFIG_HPUX */ 2056 #endif /* CONFIG_HPUX */
2071 2057
2072 /* Seems to me that dp could be wrong here, if the syscall involved 2058 /* Seems to me that dp could be wrong here, if the syscall involved
2073 * calling a module, and nothing got round to restoring dp on return. 2059 * calling a module, and nothing got round to restoring dp on return.
2074 */ 2060 */
2075 loadgp 2061 loadgp
2076 2062
2077 syscall_check_bh: 2063 syscall_check_bh:
2078 2064
2079 /* Check for software interrupts */ 2065 /* Check for software interrupts */
2080 2066
2081 .import irq_stat,data 2067 .import irq_stat,data
2082 2068
2083 load32 irq_stat,%r19 2069 load32 irq_stat,%r19
2084 2070
2085 #ifdef CONFIG_SMP 2071 #ifdef CONFIG_SMP
2086 /* sched.h: int processor */ 2072 /* sched.h: int processor */
2087 /* %r26 is used as scratch register to index into irq_stat[] */ 2073 /* %r26 is used as scratch register to index into irq_stat[] */
2088 ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */ 2074 ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
2089 2075
2090 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */ 2076 /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
2091 #ifdef CONFIG_64BIT 2077 #ifdef CONFIG_64BIT
2092 shld %r26, 6, %r20 2078 shld %r26, 6, %r20
2093 #else 2079 #else
2094 shlw %r26, 5, %r20 2080 shlw %r26, 5, %r20
2095 #endif 2081 #endif
2096 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */ 2082 add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
2097 #endif /* CONFIG_SMP */ 2083 #endif /* CONFIG_SMP */
2098 2084
2099 LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
2100 cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */
2101
2102 syscall_check_resched: 2085 syscall_check_resched:
2103 2086
2104 /* check for reschedule */ 2087 /* check for reschedule */
2105 2088
2106 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */ 2089 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
2107 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */ 2090 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
2108 2091
2109 syscall_check_sig: 2092 syscall_check_sig:
2110 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* get ti flags */ 2093 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* get ti flags */
2111 bb,<,n %r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */ 2094 bb,<,n %r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */
2112 2095
2113 syscall_restore: 2096 syscall_restore:
2114 /* Are we being ptraced? */ 2097 /* Are we being ptraced? */
2115 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 2098 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2116 2099
2117 LDREG TASK_PTRACE(%r1), %r19 2100 LDREG TASK_PTRACE(%r1), %r19
2118 bb,< %r19,31,syscall_restore_rfi 2101 bb,< %r19,31,syscall_restore_rfi
2119 nop 2102 nop
2120 2103
2121 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ 2104 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
2122 rest_fp %r19 2105 rest_fp %r19
2123 2106
2124 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */ 2107 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
2125 mtsar %r19 2108 mtsar %r19
2126 2109
2127 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */ 2110 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
2128 LDREG TASK_PT_GR19(%r1),%r19 2111 LDREG TASK_PT_GR19(%r1),%r19
2129 LDREG TASK_PT_GR20(%r1),%r20 2112 LDREG TASK_PT_GR20(%r1),%r20
2130 LDREG TASK_PT_GR21(%r1),%r21 2113 LDREG TASK_PT_GR21(%r1),%r21
2131 LDREG TASK_PT_GR22(%r1),%r22 2114 LDREG TASK_PT_GR22(%r1),%r22
2132 LDREG TASK_PT_GR23(%r1),%r23 2115 LDREG TASK_PT_GR23(%r1),%r23
2133 LDREG TASK_PT_GR24(%r1),%r24 2116 LDREG TASK_PT_GR24(%r1),%r24
2134 LDREG TASK_PT_GR25(%r1),%r25 2117 LDREG TASK_PT_GR25(%r1),%r25
2135 LDREG TASK_PT_GR26(%r1),%r26 2118 LDREG TASK_PT_GR26(%r1),%r26
2136 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */ 2119 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
2137 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */ 2120 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
2138 LDREG TASK_PT_GR29(%r1),%r29 2121 LDREG TASK_PT_GR29(%r1),%r29
2139 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */ 2122 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
2140 2123
2141 /* NOTE: We use rsm/ssm pair to make this operation atomic */ 2124 /* NOTE: We use rsm/ssm pair to make this operation atomic */
2142 rsm PSW_SM_I, %r0 2125 rsm PSW_SM_I, %r0
2143 LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */ 2126 LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
2144 mfsp %sr3,%r1 /* Get users space id */ 2127 mfsp %sr3,%r1 /* Get users space id */
2145 mtsp %r1,%sr7 /* Restore sr7 */ 2128 mtsp %r1,%sr7 /* Restore sr7 */
2146 ssm PSW_SM_I, %r0 2129 ssm PSW_SM_I, %r0
2147 2130
2148 /* Set sr2 to zero for userspace syscalls to work. */ 2131 /* Set sr2 to zero for userspace syscalls to work. */
2149 mtsp %r0,%sr2 2132 mtsp %r0,%sr2
2150 mtsp %r1,%sr4 /* Restore sr4 */ 2133 mtsp %r1,%sr4 /* Restore sr4 */
2151 mtsp %r1,%sr5 /* Restore sr5 */ 2134 mtsp %r1,%sr5 /* Restore sr5 */
2152 mtsp %r1,%sr6 /* Restore sr6 */ 2135 mtsp %r1,%sr6 /* Restore sr6 */
2153 2136
2154 depi 3,31,2,%r31 /* ensure return to user mode. */ 2137 depi 3,31,2,%r31 /* ensure return to user mode. */
2155 2138
2156 #ifdef CONFIG_64BIT 2139 #ifdef CONFIG_64BIT
2157 /* decide whether to reset the wide mode bit 2140 /* decide whether to reset the wide mode bit
2158 * 2141 *
2159 * For a syscall, the W bit is stored in the lowest bit 2142 * For a syscall, the W bit is stored in the lowest bit
2160 * of sp. Extract it and reset W if it is zero */ 2143 * of sp. Extract it and reset W if it is zero */
2161 extrd,u,*<> %r30,63,1,%r1 2144 extrd,u,*<> %r30,63,1,%r1
2162 rsm PSW_SM_W, %r0 2145 rsm PSW_SM_W, %r0
2163 /* now reset the lowest bit of sp if it was set */ 2146 /* now reset the lowest bit of sp if it was set */
2164 xor %r30,%r1,%r30 2147 xor %r30,%r1,%r30
2165 #endif 2148 #endif
2166 be,n 0(%sr3,%r31) /* return to user space */ 2149 be,n 0(%sr3,%r31) /* return to user space */
2167 2150
2168 /* We have to return via an RFI, so that PSW T and R bits can be set 2151 /* We have to return via an RFI, so that PSW T and R bits can be set
2169 * appropriately. 2152 * appropriately.
2170 * This sets up pt_regs so we can return via intr_restore, which is not 2153 * This sets up pt_regs so we can return via intr_restore, which is not
2171 * the most efficient way of doing things, but it works. 2154 * the most efficient way of doing things, but it works.
2172 */ 2155 */
2173 syscall_restore_rfi: 2156 syscall_restore_rfi:
2174 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */ 2157 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
2175 mtctl %r2,%cr0 /* for immediate trap */ 2158 mtctl %r2,%cr0 /* for immediate trap */
2176 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */ 2159 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
2177 ldi 0x0b,%r20 /* Create new PSW */ 2160 ldi 0x0b,%r20 /* Create new PSW */
2178 depi -1,13,1,%r20 /* C, Q, D, and I bits */ 2161 depi -1,13,1,%r20 /* C, Q, D, and I bits */
2179 2162
2180 /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are 2163 /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are
2181 * set in include/linux/ptrace.h and converted to PA bitmap 2164 * set in include/linux/ptrace.h and converted to PA bitmap
2182 * numbers in asm-offsets.c */ 2165 * numbers in asm-offsets.c */
2183 2166
2184 /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */ 2167 /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */
2185 extru,= %r19,PA_SINGLESTEP_BIT,1,%r0 2168 extru,= %r19,PA_SINGLESTEP_BIT,1,%r0
2186 depi -1,27,1,%r20 /* R bit */ 2169 depi -1,27,1,%r20 /* R bit */
2187 2170
2188 /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */ 2171 /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */
2189 extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0 2172 extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0
2190 depi -1,7,1,%r20 /* T bit */ 2173 depi -1,7,1,%r20 /* T bit */
2191 2174
2192 STREG %r20,TASK_PT_PSW(%r1) 2175 STREG %r20,TASK_PT_PSW(%r1)
2193 2176
2194 /* Always store space registers, since sr3 can be changed (e.g. fork) */ 2177 /* Always store space registers, since sr3 can be changed (e.g. fork) */
2195 2178
2196 mfsp %sr3,%r25 2179 mfsp %sr3,%r25
2197 STREG %r25,TASK_PT_SR3(%r1) 2180 STREG %r25,TASK_PT_SR3(%r1)
2198 STREG %r25,TASK_PT_SR4(%r1) 2181 STREG %r25,TASK_PT_SR4(%r1)
2199 STREG %r25,TASK_PT_SR5(%r1) 2182 STREG %r25,TASK_PT_SR5(%r1)
2200 STREG %r25,TASK_PT_SR6(%r1) 2183 STREG %r25,TASK_PT_SR6(%r1)
2201 STREG %r25,TASK_PT_SR7(%r1) 2184 STREG %r25,TASK_PT_SR7(%r1)
2202 STREG %r25,TASK_PT_IASQ0(%r1) 2185 STREG %r25,TASK_PT_IASQ0(%r1)
2203 STREG %r25,TASK_PT_IASQ1(%r1) 2186 STREG %r25,TASK_PT_IASQ1(%r1)
2204 2187
2205 /* XXX W bit??? */ 2188 /* XXX W bit??? */
2206 /* Now if old D bit is clear, it means we didn't save all registers 2189 /* Now if old D bit is clear, it means we didn't save all registers
2207 * on syscall entry, so do that now. This only happens on TRACEME 2190 * on syscall entry, so do that now. This only happens on TRACEME
2208 * calls, or if someone attached to us while we were on a syscall. 2191 * calls, or if someone attached to us while we were on a syscall.
2209 * We could make this more efficient by not saving r3-r18, but 2192 * We could make this more efficient by not saving r3-r18, but
2210 * then we wouldn't be able to use the common intr_restore path. 2193 * then we wouldn't be able to use the common intr_restore path.
2211 * It is only for traced processes anyway, so performance is not 2194 * It is only for traced processes anyway, so performance is not
2212 * an issue. 2195 * an issue.
2213 */ 2196 */
2214 bb,< %r2,30,pt_regs_ok /* Branch if D set */ 2197 bb,< %r2,30,pt_regs_ok /* Branch if D set */
2215 ldo TASK_REGS(%r1),%r25 2198 ldo TASK_REGS(%r1),%r25
2216 reg_save %r25 /* Save r3 to r18 */ 2199 reg_save %r25 /* Save r3 to r18 */
2217 2200
2218 /* Save the current sr */ 2201 /* Save the current sr */
2219 mfsp %sr0,%r2 2202 mfsp %sr0,%r2
2220 STREG %r2,TASK_PT_SR0(%r1) 2203 STREG %r2,TASK_PT_SR0(%r1)
2221 2204
2222 /* Save the scratch sr */ 2205 /* Save the scratch sr */
2223 mfsp %sr1,%r2 2206 mfsp %sr1,%r2
2224 STREG %r2,TASK_PT_SR1(%r1) 2207 STREG %r2,TASK_PT_SR1(%r1)
2225 2208
2226 /* sr2 should be set to zero for userspace syscalls */ 2209 /* sr2 should be set to zero for userspace syscalls */
2227 STREG %r0,TASK_PT_SR2(%r1) 2210 STREG %r0,TASK_PT_SR2(%r1)
2228 2211
2229 pt_regs_ok: 2212 pt_regs_ok:
2230 LDREG TASK_PT_GR31(%r1),%r2 2213 LDREG TASK_PT_GR31(%r1),%r2
2231 depi 3,31,2,%r2 /* ensure return to user mode. */ 2214 depi 3,31,2,%r2 /* ensure return to user mode. */
2232 STREG %r2,TASK_PT_IAOQ0(%r1) 2215 STREG %r2,TASK_PT_IAOQ0(%r1)
2233 ldo 4(%r2),%r2 2216 ldo 4(%r2),%r2
2234 STREG %r2,TASK_PT_IAOQ1(%r1) 2217 STREG %r2,TASK_PT_IAOQ1(%r1)
2235 copy %r25,%r16 2218 copy %r25,%r16
2236 b intr_restore 2219 b intr_restore
2237 nop 2220 nop
2238
2239 .import do_softirq,code
2240 syscall_do_softirq:
2241 BL do_softirq,%r2
2242 nop
2243 /* NOTE: We enable I-bit incase we schedule later,
2244 * and we might be going back to userspace if we were
2245 * traced. */
2246 b syscall_check_resched
2247 ssm PSW_SM_I, %r0 /* do_softirq returns with I bit off */
2248 2221
2249 .import schedule,code 2222 .import schedule,code
2250 syscall_do_resched: 2223 syscall_do_resched:
2251 BL schedule,%r2 2224 BL schedule,%r2
2252 #ifdef CONFIG_64BIT 2225 #ifdef CONFIG_64BIT
2253 ldo -16(%r30),%r29 /* Reference param save area */ 2226 ldo -16(%r30),%r29 /* Reference param save area */
2254 #else 2227 #else
2255 nop 2228 nop
2256 #endif 2229 #endif
2257 b syscall_check_bh /* if resched, we start over again */ 2230 b syscall_check_bh /* if resched, we start over again */
2258 nop 2231 nop
2259 2232
2260 .import do_signal,code 2233 .import do_signal,code
2261 syscall_do_signal: 2234 syscall_do_signal:
2262 /* Save callee-save registers (for sigcontext). 2235 /* Save callee-save registers (for sigcontext).
2263 FIXME: After this point the process structure should be 2236 FIXME: After this point the process structure should be
2264 consistent with all the relevant state of the process 2237 consistent with all the relevant state of the process
2265 before the syscall. We need to verify this. */ 2238 before the syscall. We need to verify this. */
2266 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 2239 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2267 ldo TASK_REGS(%r1), %r25 /* struct pt_regs *regs */ 2240 ldo TASK_REGS(%r1), %r25 /* struct pt_regs *regs */
2268 reg_save %r25 2241 reg_save %r25
2269 2242
2270 ldi 1, %r24 /* unsigned long in_syscall */ 2243 ldi 1, %r24 /* unsigned long in_syscall */
2271 2244
2272 #ifdef CONFIG_64BIT 2245 #ifdef CONFIG_64BIT
2273 ldo -16(%r30),%r29 /* Reference param save area */ 2246 ldo -16(%r30),%r29 /* Reference param save area */
2274 #endif 2247 #endif
2275 BL do_signal,%r2 2248 BL do_signal,%r2
2276 copy %r0, %r26 /* sigset_t *oldset = NULL */ 2249 copy %r0, %r26 /* sigset_t *oldset = NULL */
2277 2250
2278 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 2251 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
2279 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */ 2252 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
2280 reg_restore %r20 2253 reg_restore %r20
2281 2254
2282 b,n syscall_check_sig 2255 b,n syscall_check_sig
2283 2256
2284 /* 2257 /*
2285 * get_register is used by the non access tlb miss handlers to 2258 * get_register is used by the non access tlb miss handlers to
2286 * copy the value of the general register specified in r8 into 2259 * copy the value of the general register specified in r8 into
2287 * r1. This routine can't be used for shadowed registers, since 2260 * r1. This routine can't be used for shadowed registers, since
2288 * the rfir will restore the original value. So, for the shadowed 2261 * the rfir will restore the original value. So, for the shadowed
2289 * registers we put a -1 into r1 to indicate that the register 2262 * registers we put a -1 into r1 to indicate that the register
2290 * should not be used (the register being copied could also have 2263 * should not be used (the register being copied could also have
2291 * a -1 in it, but that is OK, it just means that we will have 2264 * a -1 in it, but that is OK, it just means that we will have
2292 * to use the slow path instead). 2265 * to use the slow path instead).
2293 */ 2266 */
2294 2267
2295 get_register: 2268 get_register:
2296 blr %r8,%r0 2269 blr %r8,%r0
2297 nop 2270 nop
2298 bv %r0(%r25) /* r0 */ 2271 bv %r0(%r25) /* r0 */
2299 copy %r0,%r1 2272 copy %r0,%r1
2300 bv %r0(%r25) /* r1 - shadowed */ 2273 bv %r0(%r25) /* r1 - shadowed */
2301 ldi -1,%r1 2274 ldi -1,%r1
2302 bv %r0(%r25) /* r2 */ 2275 bv %r0(%r25) /* r2 */
2303 copy %r2,%r1 2276 copy %r2,%r1
2304 bv %r0(%r25) /* r3 */ 2277 bv %r0(%r25) /* r3 */
2305 copy %r3,%r1 2278 copy %r3,%r1
2306 bv %r0(%r25) /* r4 */ 2279 bv %r0(%r25) /* r4 */
2307 copy %r4,%r1 2280 copy %r4,%r1
2308 bv %r0(%r25) /* r5 */ 2281 bv %r0(%r25) /* r5 */
2309 copy %r5,%r1 2282 copy %r5,%r1
2310 bv %r0(%r25) /* r6 */ 2283 bv %r0(%r25) /* r6 */
2311 copy %r6,%r1 2284 copy %r6,%r1
2312 bv %r0(%r25) /* r7 */ 2285 bv %r0(%r25) /* r7 */
2313 copy %r7,%r1 2286 copy %r7,%r1
2314 bv %r0(%r25) /* r8 - shadowed */ 2287 bv %r0(%r25) /* r8 - shadowed */
2315 ldi -1,%r1 2288 ldi -1,%r1
2316 bv %r0(%r25) /* r9 - shadowed */ 2289 bv %r0(%r25) /* r9 - shadowed */
2317 ldi -1,%r1 2290 ldi -1,%r1
2318 bv %r0(%r25) /* r10 */ 2291 bv %r0(%r25) /* r10 */
2319 copy %r10,%r1 2292 copy %r10,%r1
2320 bv %r0(%r25) /* r11 */ 2293 bv %r0(%r25) /* r11 */
2321 copy %r11,%r1 2294 copy %r11,%r1
2322 bv %r0(%r25) /* r12 */ 2295 bv %r0(%r25) /* r12 */
2323 copy %r12,%r1 2296 copy %r12,%r1
2324 bv %r0(%r25) /* r13 */ 2297 bv %r0(%r25) /* r13 */
2325 copy %r13,%r1 2298 copy %r13,%r1
2326 bv %r0(%r25) /* r14 */ 2299 bv %r0(%r25) /* r14 */
2327 copy %r14,%r1 2300 copy %r14,%r1
2328 bv %r0(%r25) /* r15 */ 2301 bv %r0(%r25) /* r15 */
2329 copy %r15,%r1 2302 copy %r15,%r1
2330 bv %r0(%r25) /* r16 - shadowed */ 2303 bv %r0(%r25) /* r16 - shadowed */
2331 ldi -1,%r1 2304 ldi -1,%r1
2332 bv %r0(%r25) /* r17 - shadowed */ 2305 bv %r0(%r25) /* r17 - shadowed */
2333 ldi -1,%r1 2306 ldi -1,%r1
2334 bv %r0(%r25) /* r18 */ 2307 bv %r0(%r25) /* r18 */
2335 copy %r18,%r1 2308 copy %r18,%r1
2336 bv %r0(%r25) /* r19 */ 2309 bv %r0(%r25) /* r19 */
2337 copy %r19,%r1 2310 copy %r19,%r1
2338 bv %r0(%r25) /* r20 */ 2311 bv %r0(%r25) /* r20 */
2339 copy %r20,%r1 2312 copy %r20,%r1
2340 bv %r0(%r25) /* r21 */ 2313 bv %r0(%r25) /* r21 */
2341 copy %r21,%r1 2314 copy %r21,%r1
2342 bv %r0(%r25) /* r22 */ 2315 bv %r0(%r25) /* r22 */
2343 copy %r22,%r1 2316 copy %r22,%r1
2344 bv %r0(%r25) /* r23 */ 2317 bv %r0(%r25) /* r23 */
2345 copy %r23,%r1 2318 copy %r23,%r1
2346 bv %r0(%r25) /* r24 - shadowed */ 2319 bv %r0(%r25) /* r24 - shadowed */
2347 ldi -1,%r1 2320 ldi -1,%r1
2348 bv %r0(%r25) /* r25 - shadowed */ 2321 bv %r0(%r25) /* r25 - shadowed */
2349 ldi -1,%r1 2322 ldi -1,%r1
2350 bv %r0(%r25) /* r26 */ 2323 bv %r0(%r25) /* r26 */
2351 copy %r26,%r1 2324 copy %r26,%r1
2352 bv %r0(%r25) /* r27 */ 2325 bv %r0(%r25) /* r27 */
2353 copy %r27,%r1 2326 copy %r27,%r1
2354 bv %r0(%r25) /* r28 */ 2327 bv %r0(%r25) /* r28 */
2355 copy %r28,%r1 2328 copy %r28,%r1
2356 bv %r0(%r25) /* r29 */ 2329 bv %r0(%r25) /* r29 */
2357 copy %r29,%r1 2330 copy %r29,%r1
2358 bv %r0(%r25) /* r30 */ 2331 bv %r0(%r25) /* r30 */
2359 copy %r30,%r1 2332 copy %r30,%r1
2360 bv %r0(%r25) /* r31 */ 2333 bv %r0(%r25) /* r31 */
2361 copy %r31,%r1 2334 copy %r31,%r1
2362 2335
2363 /* 2336 /*
2364 * set_register is used by the non access tlb miss handlers to 2337 * set_register is used by the non access tlb miss handlers to
2365 * copy the value of r1 into the general register specified in 2338 * copy the value of r1 into the general register specified in
2366 * r8. 2339 * r8.
2367 */ 2340 */
2368 2341
2369 set_register: 2342 set_register:
2370 blr %r8,%r0 2343 blr %r8,%r0
2371 nop 2344 nop
2372 bv %r0(%r25) /* r0 (silly, but it is a place holder) */ 2345 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
2373 copy %r1,%r0 2346 copy %r1,%r0
2374 bv %r0(%r25) /* r1 */ 2347 bv %r0(%r25) /* r1 */
2375 copy %r1,%r1 2348 copy %r1,%r1
2376 bv %r0(%r25) /* r2 */ 2349 bv %r0(%r25) /* r2 */
2377 copy %r1,%r2 2350 copy %r1,%r2
2378 bv %r0(%r25) /* r3 */ 2351 bv %r0(%r25) /* r3 */
2379 copy %r1,%r3 2352 copy %r1,%r3
2380 bv %r0(%r25) /* r4 */ 2353 bv %r0(%r25) /* r4 */
2381 copy %r1,%r4 2354 copy %r1,%r4
2382 bv %r0(%r25) /* r5 */ 2355 bv %r0(%r25) /* r5 */
2383 copy %r1,%r5 2356 copy %r1,%r5
2384 bv %r0(%r25) /* r6 */ 2357 bv %r0(%r25) /* r6 */
2385 copy %r1,%r6 2358 copy %r1,%r6
2386 bv %r0(%r25) /* r7 */ 2359 bv %r0(%r25) /* r7 */
2387 copy %r1,%r7 2360 copy %r1,%r7
2388 bv %r0(%r25) /* r8 */ 2361 bv %r0(%r25) /* r8 */
2389 copy %r1,%r8 2362 copy %r1,%r8
2390 bv %r0(%r25) /* r9 */ 2363 bv %r0(%r25) /* r9 */
2391 copy %r1,%r9 2364 copy %r1,%r9
2392 bv %r0(%r25) /* r10 */ 2365 bv %r0(%r25) /* r10 */
2393 copy %r1,%r10 2366 copy %r1,%r10
2394 bv %r0(%r25) /* r11 */ 2367 bv %r0(%r25) /* r11 */
2395 copy %r1,%r11 2368 copy %r1,%r11
2396 bv %r0(%r25) /* r12 */ 2369 bv %r0(%r25) /* r12 */
2397 copy %r1,%r12 2370 copy %r1,%r12
2398 bv %r0(%r25) /* r13 */ 2371 bv %r0(%r25) /* r13 */
2399 copy %r1,%r13 2372 copy %r1,%r13
2400 bv %r0(%r25) /* r14 */ 2373 bv %r0(%r25) /* r14 */
2401 copy %r1,%r14 2374 copy %r1,%r14
2402 bv %r0(%r25) /* r15 */ 2375 bv %r0(%r25) /* r15 */
2403 copy %r1,%r15 2376 copy %r1,%r15
2404 bv %r0(%r25) /* r16 */ 2377 bv %r0(%r25) /* r16 */
2405 copy %r1,%r16 2378 copy %r1,%r16
2406 bv %r0(%r25) /* r17 */ 2379 bv %r0(%r25) /* r17 */
2407 copy %r1,%r17 2380 copy %r1,%r17
2408 bv %r0(%r25) /* r18 */ 2381 bv %r0(%r25) /* r18 */
2409 copy %r1,%r18 2382 copy %r1,%r18
2410 bv %r0(%r25) /* r19 */ 2383 bv %r0(%r25) /* r19 */
2411 copy %r1,%r19 2384 copy %r1,%r19
2412 bv %r0(%r25) /* r20 */ 2385 bv %r0(%r25) /* r20 */
2413 copy %r1,%r20 2386 copy %r1,%r20
2414 bv %r0(%r25) /* r21 */ 2387 bv %r0(%r25) /* r21 */
2415 copy %r1,%r21 2388 copy %r1,%r21
2416 bv %r0(%r25) /* r22 */ 2389 bv %r0(%r25) /* r22 */
2417 copy %r1,%r22 2390 copy %r1,%r22
2418 bv %r0(%r25) /* r23 */ 2391 bv %r0(%r25) /* r23 */
2419 copy %r1,%r23 2392 copy %r1,%r23
2420 bv %r0(%r25) /* r24 */ 2393 bv %r0(%r25) /* r24 */
2421 copy %r1,%r24 2394 copy %r1,%r24
2422 bv %r0(%r25) /* r25 */ 2395 bv %r0(%r25) /* r25 */
2423 copy %r1,%r25 2396 copy %r1,%r25
2424 bv %r0(%r25) /* r26 */ 2397 bv %r0(%r25) /* r26 */
2425 copy %r1,%r26 2398 copy %r1,%r26
2426 bv %r0(%r25) /* r27 */ 2399 bv %r0(%r25) /* r27 */
2427 copy %r1,%r27 2400 copy %r1,%r27
2428 bv %r0(%r25) /* r28 */ 2401 bv %r0(%r25) /* r28 */
2429 copy %r1,%r28 2402 copy %r1,%r28
2430 bv %r0(%r25) /* r29 */ 2403 bv %r0(%r25) /* r29 */
2431 copy %r1,%r29 2404 copy %r1,%r29
2432 bv %r0(%r25) /* r30 */ 2405 bv %r0(%r25) /* r30 */
2433 copy %r1,%r30 2406 copy %r1,%r30
2434 bv %r0(%r25) /* r31 */ 2407 bv %r0(%r25) /* r31 */
2435 copy %r1,%r31 2408 copy %r1,%r31
2436 2409