/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	ldr	r1, [r1]
	adr	lr, BSYM(9997f)
	teq	r1, #0
	movne	pc, r1
#endif
	arch_irq_handler_default
9997:
	.endm
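
/*
 * With CONFIG_MULTI_IRQ_HANDLER the irq_handler macro above indirects
 * through the handle_arch_irq pointer (normally installed by the machine
 * code at boot); if that pointer is still zero, or on kernels without
 * multiple IRQ handlers, it falls back to the platform's
 * arch_irq_handler_default from <asm/entry-macro-multi.S>.
 */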
	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm

	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@ r2 - pt_regs
	@ r4 - aborted context pc
	@ r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm

#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm

__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
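
/*
 * SPFIX() code is only built into EABI kernels (see above): it nudges the
 * SVC stack pointer down by one word when required so that the saved
 * pt_regs frame ends up 64-bit aligned, matching the EABI stack alignment
 * note for S_FRAME_SIZE further below.
 */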
	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack
	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm

	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__dabt_svc)

	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__irq_svc)

	.ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif

	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]
#else
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
	ldrhhs	r9, [r4]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
	adr	r9, BSYM(1f)
	mov	r2, r4
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__und_svc)

	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	.endm

	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm

	.align	5
__dabt_usr:
	usr_entry
	kuser_cmpxchg_check
	mov	r2, sp
	dabt_helper
	b	ret_from_exception
 UNWIND(.fnend		)
ENDPROC(__dabt_usr)

	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
 UNWIND(.fnend		)
ENDPROC(__irq_usr)

	.ltorg

	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_unknown
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	cmp	r5, #0xe800			@ 32bit instruction if xx != 0
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	orr	r0, r0, r5, lsl #16

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_unknown
#endif
 UNWIND(.fnend		)
ENDPROC(__und_usr)

	@
	@ fallthrough to call_fpe
	@

/*
 * The out of line fixup for the ldrt above.
 */
	.pushsection .fixup, "ax"
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this threads thread_info structure.
 *  lr  = unrecognised instruction return address
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes
2:
	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end mask?
	beq	1f
	and	r8, r0, r7
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
#endif
	moveq	pc, lr
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
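	@ Computed branch into the table that follows: r8 holds the
	@ coprocessor number << 8, so "r8, lsr #6" is CP# * 4.  The ARM pc
	@ reads two instructions ahead of the add, i.e. the CP#0 slot, so
	@ each coprocessor gets exactly one word-sized entry below.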
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop
	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)

#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif
#ifdef CONFIG_NEON
	.align	6
.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif

do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	mov	pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)

	.align	5
__pabt_usr:
	usr_entry
	mov	r2, sp				@ regs
	pabt_helper
 UNWIND(.fnend		)
	/* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
 UNWIND(.fnend		)
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)

	__INIT

/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
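
/*
 * User space reaches these helpers simply by calling the fixed addresses
 * noted beside each entry point below, as if they were ordinary functions
 * (for example via "mov lr, pc; mov pc, rX", or blx on newer cores).  The
 * document referenced above spells out the exact per-helper ABI; the
 * sketches next to __kuser_cmpxchg and __kuser_get_tls below only
 * illustrate that convention.
 */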
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm

	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64

#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	bx	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	/* pad to next slot */
	.rept	(16 - (. - __kuser_cmpxchg64)/4)
	.word	0
	.endr

	.align	5
__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	.align	5

__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif
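
/*
 * Illustrative sketch only (not a helper itself): a user-space atomic
 * increment built on __kuser_cmpxchg above.  The helper takes r0 = oldval,
 * r1 = newval, r2 = ptr, and returns with r0 = 0 and the C flag set when
 * the store succeeded:
 *
 *	atomic_inc:				@ r2 = address of counter
 *		stmfd	sp!, {r4, lr}
 *		ldr	r4, =0xffff0fc0		@ __kuser_cmpxchg
 *	1:	ldr	r0, [r2]		@ current value = oldval
 *		add	r1, r0, #1		@ newval
 *		mov	lr, pc
 *		mov	pc, r4			@ call the helper
 *		bcc	1b			@ C clear: store failed, retry
 *		ldmfd	sp!, {r4, pc}
 */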

	.align	5

__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
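
/*
 * Illustrative sketch only: reading the thread pointer from user space
 * through the __kuser_get_tls entry point above:
 *
 *	get_tls:
 *		stmfd	sp!, {r4, lr}
 *		ldr	r4, =0xffff0fe0		@ __kuser_get_tls
 *		mov	lr, pc
 *		mov	pc, r4			@ returns TLS value in r0
 *		ldmfd	sp!, {r4, pc}
 */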

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

 THUMB(	.thumb	)

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr
	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
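
/*
 * Each vector_stub expansion must be followed immediately by a table of
 * sixteen handler addresses, indexed by the mode field (bits 3..0) of the
 * interrupted context's CPSR: 0 = USR, 1 = FIQ, 2 = IRQ, 3 = SVC, and so
 * on, as annotated in the tables below.
 */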
	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0 (USR_26 / USR_32)
	.long	__pabt_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3 (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0 (USR_26 / USR_32)
	.long	__und_invalid			@  1 (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2 (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3 (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f

	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	disable_fiq
	subs	pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5
.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:

	.data
	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif