Blame view
arch/x86/kernel/entry_64.S
37.5 KB
1da177e4c Linux-2.6.12-rc2 |
1 2 3 4 5 6 |
/* * linux/arch/x86_64/entry.S * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> |
1da177e4c Linux-2.6.12-rc2 |
7 8 9 10 11 |
*/ /* * entry.S contains the system-call and fault low-level handling routines. * |
8b4777a4b x86-64: Document ... |
12 13 |
* Some of this is documented in Documentation/x86/entry_64.txt * |
1da177e4c Linux-2.6.12-rc2 |
14 15 |
* NOTE: This code handles signal-recognition, which happens every time * after an interrupt and after each system call. |
0bd7b7985 x86: entry_64.S: ... |
16 17 |
* * Normal syscalls and interrupts don't save a full stack frame, this is |
1da177e4c Linux-2.6.12-rc2 |
18 |
* only done for syscall tracing, signals or fork/exec et.al. |
0bd7b7985 x86: entry_64.S: ... |
19 20 21 22 |
* * A note on terminology: * - top of stack: Architecture defined interrupt frame from SS to RIP * at the top of the kernel process stack. |
0d2eb44f6 x86: Fix common m... |
23 |
* - partial stack frame: partially saved registers up to R11. |
0bd7b7985 x86: entry_64.S: ... |
24 |
* - full stack frame: Like partial stack frame, but all register saved. |
2e91a17b3 [PATCH] Add some ... |
25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 |
* * Some macro usage: * - CFI macros are used to generate dwarf2 unwind information for better * backtraces. They don't change any code. * - SAVE_ALL/RESTORE_ALL - Save/restore all registers * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify. * There are unfortunately lots of special cases where some registers * not touched. The macro is a big mess that should be cleaned up. * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS. * Gives a full stack frame. * - ENTRY/END Define functions in the symbol table. * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack * frame that is otherwise undefined after a SYSCALL * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging. * - errorentry/paranoidentry/zeroentry - Define exception entry points. |
1da177e4c Linux-2.6.12-rc2 |
40 |
*/ |
1da177e4c Linux-2.6.12-rc2 |
41 42 |
#include <linux/linkage.h> #include <asm/segment.h> |
1da177e4c Linux-2.6.12-rc2 |
43 44 45 46 |
#include <asm/cache.h> #include <asm/errno.h> #include <asm/dwarf2.h> #include <asm/calling.h> |
e2d5df935 kbuild: alpha,x86... |
47 |
#include <asm/asm-offsets.h> |
1da177e4c Linux-2.6.12-rc2 |
48 49 50 51 |
#include <asm/msr.h> #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/hw_irq.h> |
0341c14da x86: use _types.h... |
52 |
#include <asm/page_types.h> |
2601e64d2 [PATCH] lockdep: ... |
53 |
#include <asm/irqflags.h> |
72fe48585 x86: replace priv... |
54 |
#include <asm/paravirt.h> |
395a59d0f ftrace: store mco... |
55 |
#include <asm/ftrace.h> |
9939ddaff x86: merge 64 and... |
56 |
#include <asm/percpu.h> |
1da177e4c Linux-2.6.12-rc2 |
57 |
|
86a1c34a9 x86_64 syscall au... |
58 59 60 61 62 |
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ #include <linux/elf-em.h> #define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define __AUDIT_ARCH_64BIT 0x80000000 #define __AUDIT_ARCH_LE 0x40000000 |
1da177e4c Linux-2.6.12-rc2 |
63 |
.code64 |
ea7145477 x86: Separate out... |
64 |
.section .entry.text, "ax" |
606576ce8 ftrace: rename FT... |
65 |
#ifdef CONFIG_FUNCTION_TRACER |
d61f82d06 ftrace: use dynam... |
66 67 |
#ifdef CONFIG_DYNAMIC_FTRACE ENTRY(mcount) |
d61f82d06 ftrace: use dynam... |
68 69 70 71 |
retq END(mcount) ENTRY(ftrace_caller) |
60a7ecf42 ftrace: add quick... |
72 73 |
cmpl $0, function_trace_stop jne ftrace_stub |
d61f82d06 ftrace: use dynam... |
74 |
|
d680fe447 x86: entry_64 - i... |
75 |
MCOUNT_SAVE_FRAME |
d61f82d06 ftrace: use dynam... |
76 77 78 |
movq 0x38(%rsp), %rdi movq 8(%rbp), %rsi |
395a59d0f ftrace: store mco... |
79 |
subq $MCOUNT_INSN_SIZE, %rdi |
d61f82d06 ftrace: use dynam... |
80 |
|
bc8b2b925 x86: head_64.S - ... |
81 |
GLOBAL(ftrace_call) |
d61f82d06 ftrace: use dynam... |
82 |
call ftrace_stub |
d680fe447 x86: entry_64 - i... |
83 |
MCOUNT_RESTORE_FRAME |
d61f82d06 ftrace: use dynam... |
84 |
|
48d68b20d tracing/function-... |
85 |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER |
bc8b2b925 x86: head_64.S - ... |
86 |
GLOBAL(ftrace_graph_call) |
48d68b20d tracing/function-... |
87 88 |
jmp ftrace_stub #endif |
d61f82d06 ftrace: use dynam... |
89 |
|
bc8b2b925 x86: head_64.S - ... |
90 |
GLOBAL(ftrace_stub) |
d61f82d06 ftrace: use dynam... |
91 92 93 94 |
retq END(ftrace_caller) #else /* ! CONFIG_DYNAMIC_FTRACE */ |
16444a8a4 ftrace: add basic... |
95 |
ENTRY(mcount) |
60a7ecf42 ftrace: add quick... |
96 97 |
cmpl $0, function_trace_stop jne ftrace_stub |
16444a8a4 ftrace: add basic... |
98 99 |
cmpq $ftrace_stub, ftrace_trace_function jnz trace |
48d68b20d tracing/function-... |
100 101 102 103 |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER cmpq $ftrace_stub, ftrace_graph_return jnz ftrace_graph_caller |
e49dc19c6 ftrace: function ... |
104 105 106 |
cmpq $ftrace_graph_entry_stub, ftrace_graph_entry jnz ftrace_graph_caller |
48d68b20d tracing/function-... |
107 |
#endif |
bc8b2b925 x86: head_64.S - ... |
108 |
GLOBAL(ftrace_stub) |
16444a8a4 ftrace: add basic... |
109 110 111 |
retq trace: |
d680fe447 x86: entry_64 - i... |
112 |
MCOUNT_SAVE_FRAME |
16444a8a4 ftrace: add basic... |
113 114 115 |
movq 0x38(%rsp), %rdi movq 8(%rbp), %rsi |
395a59d0f ftrace: store mco... |
116 |
subq $MCOUNT_INSN_SIZE, %rdi |
16444a8a4 ftrace: add basic... |
117 118 |
call *ftrace_trace_function |
d680fe447 x86: entry_64 - i... |
119 |
MCOUNT_RESTORE_FRAME |
16444a8a4 ftrace: add basic... |
120 121 122 |
jmp ftrace_stub END(mcount) |
d61f82d06 ftrace: use dynam... |
123 |
#endif /* CONFIG_DYNAMIC_FTRACE */ |
606576ce8 ftrace: rename FT... |
124 |
#endif /* CONFIG_FUNCTION_TRACER */ |
16444a8a4 ftrace: add basic... |
125 |
|
48d68b20d tracing/function-... |
126 127 128 129 |
#ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) cmpl $0, function_trace_stop jne ftrace_stub |
d680fe447 x86: entry_64 - i... |
130 |
MCOUNT_SAVE_FRAME |
48d68b20d tracing/function-... |
131 132 133 |
leaq 8(%rbp), %rdi movq 0x38(%rsp), %rsi |
71e308a23 function-graph: a... |
134 |
movq (%rbp), %rdx |
bb4304c71 ftrace: have func... |
135 |
subq $MCOUNT_INSN_SIZE, %rsi |
48d68b20d tracing/function-... |
136 137 |
call prepare_ftrace_return |
d680fe447 x86: entry_64 - i... |
138 |
MCOUNT_RESTORE_FRAME |
48d68b20d tracing/function-... |
139 140 |
retq END(ftrace_graph_caller) |
bc8b2b925 x86: head_64.S - ... |
141 |
GLOBAL(return_to_handler) |
4818d8094 tracing/function-... |
142 |
subq $24, %rsp |
48d68b20d tracing/function-... |
143 |
|
e71e99c29 x86, function-gra... |
144 |
/* Save the return values */ |
16444a8a4 ftrace: add basic... |
145 |
movq %rax, (%rsp) |
e71e99c29 x86, function-gra... |
146 |
movq %rdx, 8(%rsp) |
71e308a23 function-graph: a... |
147 |
movq %rbp, %rdi |
16444a8a4 ftrace: add basic... |
148 |
|
48d68b20d tracing/function-... |
149 |
call ftrace_return_to_handler |
16444a8a4 ftrace: add basic... |
150 |
|
194ec3418 function-graph/x8... |
151 |
movq %rax, %rdi |
e71e99c29 x86, function-gra... |
152 |
movq 8(%rsp), %rdx |
16444a8a4 ftrace: add basic... |
153 |
movq (%rsp), %rax |
194ec3418 function-graph/x8... |
154 155 |
addq $24, %rsp jmp *%rdi |
48d68b20d tracing/function-... |
156 |
#endif |
16444a8a4 ftrace: add basic... |
157 |
|
16444a8a4 ftrace: add basic... |
158 |
|
dc37db4d8 [PATCH] x86_64: R... |
159 |
#ifndef CONFIG_PREEMPT |
1da177e4c Linux-2.6.12-rc2 |
160 |
#define retint_kernel retint_restore_args |
0bd7b7985 x86: entry_64.S: ... |
161 |
#endif |
2601e64d2 [PATCH] lockdep: ... |
162 |
|
72fe48585 x86: replace priv... |
163 |
#ifdef CONFIG_PARAVIRT |
2be29982a x86/paravirt: add... |
164 |
ENTRY(native_usergs_sysret64) |
72fe48585 x86: replace priv... |
165 166 |
swapgs sysretq |
b3baaa138 x86: entry_64.S -... |
167 |
ENDPROC(native_usergs_sysret64) |
72fe48585 x86: replace priv... |
168 |
#endif /* CONFIG_PARAVIRT */ |
2601e64d2 [PATCH] lockdep: ... |
169 170 171 172 173 174 175 176 177 |
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET #ifdef CONFIG_TRACE_IRQFLAGS bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */ jnc 1f TRACE_IRQS_ON 1: #endif .endm |
1da177e4c Linux-2.6.12-rc2 |
178 |
/* |
0bd7b7985 x86: entry_64.S: ... |
179 180 |
* C code is not supposed to know about undefined top of stack. Every time * a C function with an pt_regs argument is called from the SYSCALL based |
1da177e4c Linux-2.6.12-rc2 |
181 182 183 |
* fast path FIXUP_TOP_OF_STACK is needed. * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs * manipulation. |
0bd7b7985 x86: entry_64.S: ... |
184 185 186 |
*/ /* %rsp:at FRAMEEND */ |
c002a1e6b x86: introduce sa... |
187 |
.macro FIXUP_TOP_OF_STACK tmp offset=0 |
3d1e42a7c x86-64: Move oldr... |
188 |
movq PER_CPU_VAR(old_rsp),\tmp |
c002a1e6b x86: introduce sa... |
189 190 191 192 193 194 |
movq \tmp,RSP+\offset(%rsp) movq $__USER_DS,SS+\offset(%rsp) movq $__USER_CS,CS+\offset(%rsp) movq $-1,RCX+\offset(%rsp) movq R11+\offset(%rsp),\tmp /* get eflags */ movq \tmp,EFLAGS+\offset(%rsp) |
1da177e4c Linux-2.6.12-rc2 |
195 |
.endm |
c002a1e6b x86: introduce sa... |
196 197 |
.macro RESTORE_TOP_OF_STACK tmp offset=0 movq RSP+\offset(%rsp),\tmp |
3d1e42a7c x86-64: Move oldr... |
198 |
movq \tmp,PER_CPU_VAR(old_rsp) |
c002a1e6b x86: introduce sa... |
199 200 |
movq EFLAGS+\offset(%rsp),\tmp movq \tmp,R11+\offset(%rsp) |
1da177e4c Linux-2.6.12-rc2 |
201 202 203 204 |
.endm .macro FAKE_STACK_FRAME child_rip /* push in order ss, rsp, eflags, cs, rip */ |
3829ee6b1 [PATCH] x86_64: S... |
205 |
xorl %eax, %eax |
df5d1874c x86: Use {push,po... |
206 |
pushq_cfi $__KERNEL_DS /* ss */ |
7effaa882 [PATCH] x86-64: F... |
207 |
/*CFI_REL_OFFSET ss,0*/ |
df5d1874c x86: Use {push,po... |
208 |
pushq_cfi %rax /* rsp */ |
7effaa882 [PATCH] x86-64: F... |
209 |
CFI_REL_OFFSET rsp,0 |
1cf8343f5 x86: Fix rflags i... |
210 |
pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_BIT1) /* eflags - interrupts on */ |
7effaa882 [PATCH] x86-64: F... |
211 |
/*CFI_REL_OFFSET rflags,0*/ |
df5d1874c x86: Use {push,po... |
212 |
pushq_cfi $__KERNEL_CS /* cs */ |
7effaa882 [PATCH] x86-64: F... |
213 |
/*CFI_REL_OFFSET cs,0*/ |
df5d1874c x86: Use {push,po... |
214 |
pushq_cfi \child_rip /* rip */ |
7effaa882 [PATCH] x86-64: F... |
215 |
CFI_REL_OFFSET rip,0 |
df5d1874c x86: Use {push,po... |
216 |
pushq_cfi %rax /* orig rax */ |
1da177e4c Linux-2.6.12-rc2 |
217 218 219 220 221 222 |
.endm .macro UNFAKE_STACK_FRAME addq $8*6, %rsp CFI_ADJUST_CFA_OFFSET -(6*8) .endm |
dcd072e26 x86: clean up aft... |
223 224 225 226 |
/* * initial frame state for interrupts (and exceptions without error code) */ .macro EMPTY_FRAME start=1 offset=0 |
7effaa882 [PATCH] x86-64: F... |
227 |
.if \start |
dcd072e26 x86: clean up aft... |
228 |
CFI_STARTPROC simple |
adf142369 [PATCH] i386/x86-... |
229 |
CFI_SIGNAL_FRAME |
dcd072e26 x86: clean up aft... |
230 |
CFI_DEF_CFA rsp,8+\offset |
7effaa882 [PATCH] x86-64: F... |
231 |
.else |
dcd072e26 x86: clean up aft... |
232 |
CFI_DEF_CFA_OFFSET 8+\offset |
7effaa882 [PATCH] x86-64: F... |
233 |
.endif |
1da177e4c Linux-2.6.12-rc2 |
234 |
.endm |
d99015b1a x86: move entry_6... |
235 236 |
/* |
dcd072e26 x86: clean up aft... |
237 |
* initial frame state for interrupts (and exceptions without error code) |
d99015b1a x86: move entry_6... |
238 |
*/ |
dcd072e26 x86: clean up aft... |
239 |
.macro INTR_FRAME start=1 offset=0 |
e8a0e2766 x86: clean up aft... |
240 241 242 243 244 245 |
EMPTY_FRAME \start, SS+8+\offset-RIP /*CFI_REL_OFFSET ss, SS+\offset-RIP*/ CFI_REL_OFFSET rsp, RSP+\offset-RIP /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/ /*CFI_REL_OFFSET cs, CS+\offset-RIP*/ CFI_REL_OFFSET rip, RIP+\offset-RIP |
d99015b1a x86: move entry_6... |
246 247 248 |
.endm /* |
d99015b1a x86: move entry_6... |
249 250 251 |
* initial frame state for exceptions with error code (and interrupts * with vector already pushed) */ |
dcd072e26 x86: clean up aft... |
252 |
.macro XCPT_FRAME start=1 offset=0 |
e8a0e2766 x86: clean up aft... |
253 |
INTR_FRAME \start, RIP+\offset-ORIG_RAX |
dcd072e26 x86: clean up aft... |
254 255 256 257 258 259 260 |
/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/ .endm /* * frame that enables calling into C. */ .macro PARTIAL_FRAME start=1 offset=0 |
e8a0e2766 x86: clean up aft... |
261 262 263 264 265 266 267 268 269 270 |
XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET |
dcd072e26 x86: clean up aft... |
271 272 273 274 275 276 |
.endm /* * frame that enables passing a complete pt_regs to a C function. */ .macro DEFAULT_FRAME start=1 offset=0 |
e8a0e2766 x86: clean up aft... |
277 |
PARTIAL_FRAME \start, R11+\offset-R15 |
dcd072e26 x86: clean up aft... |
278 279 280 281 282 283 284 |
CFI_REL_OFFSET rbx, RBX+\offset CFI_REL_OFFSET rbp, RBP+\offset CFI_REL_OFFSET r12, R12+\offset CFI_REL_OFFSET r13, R13+\offset CFI_REL_OFFSET r14, R14+\offset CFI_REL_OFFSET r15, R15+\offset .endm |
d99015b1a x86: move entry_6... |
285 286 |
/* save partial stack frame */ |
1871853f7 x86,64: Simplify ... |
287 |
.macro SAVE_ARGS_IRQ |
d99015b1a x86: move entry_6... |
288 |
cld |
1871853f7 x86,64: Simplify ... |
289 290 291 292 293 294 295 296 297 298 |
/* start from rbp in pt_regs and jump over */ movq_cfi rdi, RDI-RBP movq_cfi rsi, RSI-RBP movq_cfi rdx, RDX-RBP movq_cfi rcx, RCX-RBP movq_cfi rax, RAX-RBP movq_cfi r8, R8-RBP movq_cfi r9, R9-RBP movq_cfi r10, R10-RBP movq_cfi r11, R11-RBP |
a2bbe7508 x86: Don't use fr... |
299 300 301 302 303 |
/* Save rbp so that we can unwind from get_irq_regs() */ movq_cfi rbp, 0 /* Save previous stack value */ movq %rsp, %rsi |
3b99a3ef5 x86,64: Separate ... |
304 305 |
leaq -RBP(%rsp),%rdi /* arg1 for handler */ |
d99015b1a x86: move entry_6... |
306 307 308 309 |
testl $3, CS(%rdi) je 1f SWAPGS /* |
568955307 x86-64: Move irqc... |
310 |
* irq_count is used to check if a CPU is already on an interrupt stack |
d99015b1a x86: move entry_6... |
311 312 313 314 |
* or not. While this is essentially redundant with preempt_count it is * a little cheaper to use a separate counter in the PDA (short of * moving irq_enter into assembly, which would be too much work) */ |
568955307 x86-64: Move irqc... |
315 |
1: incl PER_CPU_VAR(irq_count) |
d99015b1a x86: move entry_6... |
316 |
jne 2f |
26f80bd6a x86-64: Convert i... |
317 |
mov PER_CPU_VAR(irq_stack_ptr),%rsp |
eab9e6137 x86-64: Fix CFI d... |
318 |
CFI_DEF_CFA_REGISTER rsi |
a2bbe7508 x86: Don't use fr... |
319 320 321 |
2: /* Store previous stack value */ pushq %rsi |
eab9e6137 x86-64: Fix CFI d... |
322 323 324 325 326 |
CFI_ESCAPE 0x0f /* DW_CFA_def_cfa_expression */, 6, \ 0x77 /* DW_OP_breg7 */, 0, \ 0x06 /* DW_OP_deref */, \ 0x08 /* DW_OP_const1u */, SS+8-RBP, \ 0x22 /* DW_OP_plus */ |
a2bbe7508 x86: Don't use fr... |
327 328 |
/* We entered an interrupt context - irqs are off: */ TRACE_IRQS_OFF |
1871853f7 x86,64: Simplify ... |
329 |
.endm |
d99015b1a x86: move entry_6... |
330 |
|
c002a1e6b x86: introduce sa... |
331 332 333 334 335 336 337 338 339 340 341 342 343 344 |
ENTRY(save_rest) PARTIAL_FRAME 1 REST_SKIP+8 movq 5*8+16(%rsp), %r11 /* save return address */ movq_cfi rbx, RBX+16 movq_cfi rbp, RBP+16 movq_cfi r12, R12+16 movq_cfi r13, R13+16 movq_cfi r14, R14+16 movq_cfi r15, R15+16 movq %r11, 8(%rsp) /* return address */ FIXUP_TOP_OF_STACK %r11, 16 ret CFI_ENDPROC END(save_rest) |
e2f6bc25b x86: entry_64.S: ... |
345 |
/* save complete stack frame */ |
c2810188c x86-64: move save... |
346 |
.pushsection .kprobes.text, "ax" |
e2f6bc25b x86: entry_64.S: ... |
347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 |
ENTRY(save_paranoid) XCPT_FRAME 1 RDI+8 cld movq_cfi rdi, RDI+8 movq_cfi rsi, RSI+8 movq_cfi rdx, RDX+8 movq_cfi rcx, RCX+8 movq_cfi rax, RAX+8 movq_cfi r8, R8+8 movq_cfi r9, R9+8 movq_cfi r10, R10+8 movq_cfi r11, R11+8 movq_cfi rbx, RBX+8 movq_cfi rbp, RBP+8 movq_cfi r12, R12+8 movq_cfi r13, R13+8 movq_cfi r14, R14+8 movq_cfi r15, R15+8 movl $1,%ebx movl $MSR_GS_BASE,%ecx rdmsr testl %edx,%edx js 1f /* negative -> in kernel */ SWAPGS xorl %ebx,%ebx 1: ret CFI_ENDPROC END(save_paranoid) |
c2810188c x86-64: move save... |
375 |
.popsection |
e2f6bc25b x86: entry_64.S: ... |
376 |
|
1da177e4c Linux-2.6.12-rc2 |
377 |
/* |
5b3eec0c8 x86: ret_from_for... |
378 379 380 |
* A newly forked process directly context switches into this address. * * rdi: prev task we switched from |
0bd7b7985 x86: entry_64.S: ... |
381 |
*/ |
1da177e4c Linux-2.6.12-rc2 |
382 |
ENTRY(ret_from_fork) |
dcd072e26 x86: clean up aft... |
383 |
DEFAULT_FRAME |
5b3eec0c8 x86: ret_from_for... |
384 |
|
7106a5ab8 x86-64: remove lo... |
385 |
LOCK ; btr $TIF_FORK,TI_flags(%r8) |
df5d1874c x86: Use {push,po... |
386 387 |
pushq_cfi kernel_eflags(%rip) popfq_cfi # reset kernel eflags |
5b3eec0c8 x86: ret_from_for... |
388 389 |
call schedule_tail # rdi: 'prev' task parameter |
1da177e4c Linux-2.6.12-rc2 |
390 |
GET_THREAD_INFO(%rcx) |
5b3eec0c8 x86: ret_from_for... |
391 |
|
1da177e4c Linux-2.6.12-rc2 |
392 |
RESTORE_REST |
5b3eec0c8 x86: ret_from_for... |
393 394 |
testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? |
70ea6855d x86-64: Slightly ... |
395 |
jz retint_restore_args |
5b3eec0c8 x86: ret_from_for... |
396 397 |
testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET |
1da177e4c Linux-2.6.12-rc2 |
398 |
jnz int_ret_from_sys_call |
5b3eec0c8 x86: ret_from_for... |
399 |
|
c002a1e6b x86: introduce sa... |
400 |
RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET |
5b3eec0c8 x86: ret_from_for... |
401 |
jmp ret_from_sys_call # go to the SYSRET fastpath |
1da177e4c Linux-2.6.12-rc2 |
402 |
CFI_ENDPROC |
4b787e0b8 [PATCH] x86_64: a... |
403 |
END(ret_from_fork) |
1da177e4c Linux-2.6.12-rc2 |
404 405 |
/* |
0d2eb44f6 x86: Fix common m... |
406 |
* System call entry. Up to 6 arguments in registers are supported. |
1da177e4c Linux-2.6.12-rc2 |
407 408 409 410 |
* * SYSCALL does not save anything on the stack and does not change the * stack pointer. */ |
0bd7b7985 x86: entry_64.S: ... |
411 |
|
1da177e4c Linux-2.6.12-rc2 |
412 |
/* |
0bd7b7985 x86: entry_64.S: ... |
413 |
* Register setup: |
1da177e4c Linux-2.6.12-rc2 |
414 415 |
* rax system call number * rdi arg0 |
0bd7b7985 x86: entry_64.S: ... |
416 |
* rcx return address for syscall/sysret, C arg3 |
1da177e4c Linux-2.6.12-rc2 |
417 |
* rsi arg1 |
0bd7b7985 x86: entry_64.S: ... |
418 |
* rdx arg2 |
1da177e4c Linux-2.6.12-rc2 |
419 420 421 422 |
* r10 arg3 (--> moved to rcx for C) * r8 arg4 * r9 arg5 * r11 eflags for syscall/sysret, temporary for C |
0bd7b7985 x86: entry_64.S: ... |
423 424 |
* r12-r15,rbp,rbx saved by C code, not touched. * |
1da177e4c Linux-2.6.12-rc2 |
425 426 427 428 429 |
* Interrupts are off on entry. * Only called from user space. * * XXX if we had a free scratch register we could save the RSP into the stack frame * and report it properly in ps. Unfortunately we haven't. |
7bf36bbc5 [PATCH] x86_64: W... |
430 431 432 433 |
* * When user can change the frames always force IRET. That is because * it deals with uncanonical addresses better. SYSRET has trouble * with them due to bugs in both AMD and Intel CPUs. |
0bd7b7985 x86: entry_64.S: ... |
434 |
*/ |
1da177e4c Linux-2.6.12-rc2 |
435 436 |
ENTRY(system_call) |
7effaa882 [PATCH] x86-64: F... |
437 |
CFI_STARTPROC simple |
adf142369 [PATCH] i386/x86-... |
438 |
CFI_SIGNAL_FRAME |
9af45651f x86-64: Move kern... |
439 |
CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET |
7effaa882 [PATCH] x86-64: F... |
440 441 |
CFI_REGISTER rip,rcx /*CFI_REGISTER rflags,r11*/ |
72fe48585 x86: replace priv... |
442 443 444 445 446 447 |
SWAPGS_UNSAFE_STACK /* * A hypervisor implementation might want to use a label * after the swapgs, so that it can do the swapgs * for the guest and jump here on syscall. */ |
f6b2bc847 x86-64: Cleanup s... |
448 |
GLOBAL(system_call_after_swapgs) |
72fe48585 x86: replace priv... |
449 |
|
3d1e42a7c x86-64: Move oldr... |
450 |
movq %rsp,PER_CPU_VAR(old_rsp) |
9af45651f x86-64: Move kern... |
451 |
movq PER_CPU_VAR(kernel_stack),%rsp |
2601e64d2 [PATCH] lockdep: ... |
452 453 454 455 |
/* * No need to follow this irqs off/on section - it's straight * and short: */ |
72fe48585 x86: replace priv... |
456 |
ENABLE_INTERRUPTS(CLBR_NONE) |
cac0e0a78 x86, asm: Flip SA... |
457 |
SAVE_ARGS 8,0 |
0bd7b7985 x86: entry_64.S: ... |
458 |
movq %rax,ORIG_RAX-ARGOFFSET(%rsp) |
7effaa882 [PATCH] x86-64: F... |
459 460 |
movq %rcx,RIP-ARGOFFSET(%rsp) CFI_REL_OFFSET rip,RIP-ARGOFFSET |
46db09d3f x86-64: Slightly ... |
461 |
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
1da177e4c Linux-2.6.12-rc2 |
462 |
jnz tracesys |
86a1c34a9 x86_64 syscall au... |
463 |
system_call_fastpath: |
1da177e4c Linux-2.6.12-rc2 |
464 465 466 467 468 469 470 |
cmpq $__NR_syscall_max,%rax ja badsys movq %r10,%rcx call *sys_call_table(,%rax,8) # XXX: rip relative movq %rax,RAX-ARGOFFSET(%rsp) /* * Syscall return path ending with SYSRET (fast path) |
0bd7b7985 x86: entry_64.S: ... |
471 472 |
* Has incomplete stack frame and undefined top of stack. */ |
1da177e4c Linux-2.6.12-rc2 |
473 |
ret_from_sys_call: |
11b854b2f [PATCH] x86_64: C... |
474 |
movl $_TIF_ALLWORK_MASK,%edi |
1da177e4c Linux-2.6.12-rc2 |
475 |
/* edi: flagmask */ |
0bd7b7985 x86: entry_64.S: ... |
476 |
sysret_check: |
10cd706d1 lockdep: x86_64: ... |
477 |
LOCKDEP_SYS_EXIT |
72fe48585 x86: replace priv... |
478 |
DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d2 [PATCH] lockdep: ... |
479 |
TRACE_IRQS_OFF |
46db09d3f x86-64: Slightly ... |
480 |
movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx |
1da177e4c Linux-2.6.12-rc2 |
481 |
andl %edi,%edx |
0bd7b7985 x86: entry_64.S: ... |
482 |
jnz sysret_careful |
bcddc0155 [PATCH] x86-64: m... |
483 |
CFI_REMEMBER_STATE |
2601e64d2 [PATCH] lockdep: ... |
484 485 486 487 |
/* * sysretq will re-enable interrupts: */ TRACE_IRQS_ON |
1da177e4c Linux-2.6.12-rc2 |
488 |
movq RIP-ARGOFFSET(%rsp),%rcx |
7effaa882 [PATCH] x86-64: F... |
489 |
CFI_REGISTER rip,rcx |
838feb475 x86, asm: Flip RE... |
490 |
RESTORE_ARGS 1,-ARG_SKIP,0 |
7effaa882 [PATCH] x86-64: F... |
491 |
/*CFI_REGISTER rflags,r11*/ |
3d1e42a7c x86-64: Move oldr... |
492 |
movq PER_CPU_VAR(old_rsp), %rsp |
2be29982a x86/paravirt: add... |
493 |
USERGS_SYSRET64 |
1da177e4c Linux-2.6.12-rc2 |
494 |
|
bcddc0155 [PATCH] x86-64: m... |
495 |
CFI_RESTORE_STATE |
1da177e4c Linux-2.6.12-rc2 |
496 |
/* Handle reschedules */ |
0bd7b7985 x86: entry_64.S: ... |
497 |
/* edx: work, edi: workmask */ |
1da177e4c Linux-2.6.12-rc2 |
498 499 500 |
sysret_careful: bt $TIF_NEED_RESCHED,%edx jnc sysret_signal |
2601e64d2 [PATCH] lockdep: ... |
501 |
TRACE_IRQS_ON |
72fe48585 x86: replace priv... |
502 |
ENABLE_INTERRUPTS(CLBR_NONE) |
df5d1874c x86: Use {push,po... |
503 |
pushq_cfi %rdi |
1da177e4c Linux-2.6.12-rc2 |
504 |
call schedule |
df5d1874c x86: Use {push,po... |
505 |
popq_cfi %rdi |
1da177e4c Linux-2.6.12-rc2 |
506 |
jmp sysret_check |
0bd7b7985 x86: entry_64.S: ... |
507 |
/* Handle a signal */ |
1da177e4c Linux-2.6.12-rc2 |
508 |
sysret_signal: |
2601e64d2 [PATCH] lockdep: ... |
509 |
TRACE_IRQS_ON |
72fe48585 x86: replace priv... |
510 |
ENABLE_INTERRUPTS(CLBR_NONE) |
86a1c34a9 x86_64 syscall au... |
511 512 513 514 |
#ifdef CONFIG_AUDITSYSCALL bt $TIF_SYSCALL_AUDIT,%edx jc sysret_audit #endif |
b60e714dc x86: ptrace: sysr... |
515 516 517 518 519 520 521 |
/* * We have a signal, or exit tracing or single-step. * These all wind up with the iret return path anyway, * so just join that path right now. */ FIXUP_TOP_OF_STACK %r11, -ARGOFFSET jmp int_check_syscall_exit_work |
0bd7b7985 x86: entry_64.S: ... |
522 |
|
7effaa882 [PATCH] x86-64: F... |
523 524 525 |
badsys: movq $-ENOSYS,RAX-ARGOFFSET(%rsp) jmp ret_from_sys_call |
86a1c34a9 x86_64 syscall au... |
526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 |
#ifdef CONFIG_AUDITSYSCALL /* * Fast path for syscall audit without full syscall trace. * We just call audit_syscall_entry() directly, and then * jump back to the normal fast path. */ auditsys: movq %r10,%r9 /* 6th arg: 4th syscall arg */ movq %rdx,%r8 /* 5th arg: 3rd syscall arg */ movq %rsi,%rcx /* 4th arg: 2nd syscall arg */ movq %rdi,%rdx /* 3rd arg: 1st syscall arg */ movq %rax,%rsi /* 2nd arg: syscall number */ movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */ call audit_syscall_entry LOAD_ARGS 0 /* reload call-clobbered registers */ jmp system_call_fastpath /* * Return fast path for syscall audit. Call audit_syscall_exit() * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT * masked off. */ sysret_audit: |
032755915 x86: auditsyscall... |
549 550 |
movq RAX-ARGOFFSET(%rsp),%rsi /* second arg, syscall return value */ cmpq $0,%rsi /* is it < 0? */ |
86a1c34a9 x86_64 syscall au... |
551 552 553 554 555 556 557 |
setl %al /* 1 if so, 0 if not */ movzbl %al,%edi /* zero-extend that into %edi */ inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */ call audit_syscall_exit movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi jmp sysret_check #endif /* CONFIG_AUDITSYSCALL */ |
1da177e4c Linux-2.6.12-rc2 |
558 |
/* Do syscall tracing */ |
0bd7b7985 x86: entry_64.S: ... |
559 |
tracesys: |
86a1c34a9 x86_64 syscall au... |
560 |
#ifdef CONFIG_AUDITSYSCALL |
46db09d3f x86-64: Slightly ... |
561 |
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) |
86a1c34a9 x86_64 syscall au... |
562 563 |
jz auditsys #endif |
1da177e4c Linux-2.6.12-rc2 |
564 |
SAVE_REST |
a31f8dd7e x86: ptrace vs -E... |
565 |
movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ |
1da177e4c Linux-2.6.12-rc2 |
566 567 568 |
FIXUP_TOP_OF_STACK %rdi movq %rsp,%rdi call syscall_trace_enter |
d4d671501 x86 ptrace: unify... |
569 570 571 572 573 574 |
/* * Reload arg registers from stack in case ptrace changed them. * We don't reload %rax because syscall_trace_enter() returned * the value it wants us to use in the table lookup. */ LOAD_ARGS ARGOFFSET, 1 |
1da177e4c Linux-2.6.12-rc2 |
575 576 |
RESTORE_REST cmpq $__NR_syscall_max,%rax |
a31f8dd7e x86: ptrace vs -E... |
577 |
ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */ |
1da177e4c Linux-2.6.12-rc2 |
578 579 |
movq %r10,%rcx /* fixup for C */ call *sys_call_table(,%rax,8) |
a31f8dd7e x86: ptrace vs -E... |
580 |
movq %rax,RAX-ARGOFFSET(%rsp) |
7bf36bbc5 [PATCH] x86_64: W... |
581 |
/* Use IRET because user could have changed frame */ |
0bd7b7985 x86: entry_64.S: ... |
582 583 |
/* |
1da177e4c Linux-2.6.12-rc2 |
584 585 |
* Syscall return path ending with IRET. * Has correct top of stack, but partial stack frame. |
bcddc0155 [PATCH] x86-64: m... |
586 |
*/ |
bc8b2b925 x86: head_64.S - ... |
587 |
GLOBAL(int_ret_from_sys_call) |
72fe48585 x86: replace priv... |
588 |
DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d2 [PATCH] lockdep: ... |
589 |
TRACE_IRQS_OFF |
1da177e4c Linux-2.6.12-rc2 |
590 591 |
movl $_TIF_ALLWORK_MASK,%edi /* edi: mask to check */ |
bc8b2b925 x86: head_64.S - ... |
592 |
GLOBAL(int_with_check) |
10cd706d1 lockdep: x86_64: ... |
593 |
LOCKDEP_SYS_EXIT_IRQ |
1da177e4c Linux-2.6.12-rc2 |
594 |
GET_THREAD_INFO(%rcx) |
26ccb8a71 x86: rename threa... |
595 |
movl TI_flags(%rcx),%edx |
1da177e4c Linux-2.6.12-rc2 |
596 597 |
andl %edi,%edx jnz int_careful |
26ccb8a71 x86: rename threa... |
598 |
andl $~TS_COMPAT,TI_status(%rcx) |
1da177e4c Linux-2.6.12-rc2 |
599 600 601 602 603 604 605 606 |
jmp retint_swapgs /* Either reschedule or signal or syscall exit tracking needed. */ /* First do a reschedule test. */ /* edx: work, edi: workmask */ int_careful: bt $TIF_NEED_RESCHED,%edx jnc int_very_careful |
2601e64d2 [PATCH] lockdep: ... |
607 |
TRACE_IRQS_ON |
72fe48585 x86: replace priv... |
608 |
ENABLE_INTERRUPTS(CLBR_NONE) |
df5d1874c x86: Use {push,po... |
609 |
pushq_cfi %rdi |
1da177e4c Linux-2.6.12-rc2 |
610 |
call schedule |
df5d1874c x86: Use {push,po... |
611 |
popq_cfi %rdi |
72fe48585 x86: replace priv... |
612 |
DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d2 [PATCH] lockdep: ... |
613 |
TRACE_IRQS_OFF |
1da177e4c Linux-2.6.12-rc2 |
614 615 616 617 |
jmp int_with_check /* handle signals and tracing -- both require a full stack frame */ int_very_careful: |
2601e64d2 [PATCH] lockdep: ... |
618 |
TRACE_IRQS_ON |
72fe48585 x86: replace priv... |
619 |
ENABLE_INTERRUPTS(CLBR_NONE) |
b60e714dc x86: ptrace: sysr... |
620 |
int_check_syscall_exit_work: |
1da177e4c Linux-2.6.12-rc2 |
621 |
SAVE_REST |
0bd7b7985 x86: entry_64.S: ... |
622 |
/* Check for syscall exit trace */ |
d4d671501 x86 ptrace: unify... |
623 |
testl $_TIF_WORK_SYSCALL_EXIT,%edx |
1da177e4c Linux-2.6.12-rc2 |
624 |
jz int_signal |
df5d1874c x86: Use {push,po... |
625 |
pushq_cfi %rdi |
0bd7b7985 x86: entry_64.S: ... |
626 |
leaq 8(%rsp),%rdi # &ptregs -> arg1 |
1da177e4c Linux-2.6.12-rc2 |
627 |
call syscall_trace_leave |
df5d1874c x86: Use {push,po... |
628 |
popq_cfi %rdi |
d4d671501 x86 ptrace: unify... |
629 |
andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi |
1da177e4c Linux-2.6.12-rc2 |
630 |
jmp int_restore_rest |
0bd7b7985 x86: entry_64.S: ... |
631 |
|
1da177e4c Linux-2.6.12-rc2 |
632 |
int_signal: |
8f4d37ec0 sched: high-res p... |
633 |
testl $_TIF_DO_NOTIFY_MASK,%edx |
1da177e4c Linux-2.6.12-rc2 |
634 635 636 637 |
jz 1f movq %rsp,%rdi # &ptregs -> arg1 xorl %esi,%esi # oldset -> arg2 call do_notify_resume |
eca91e783 x86_64: fix delay... |
638 |
1: movl $_TIF_WORK_MASK,%edi |
1da177e4c Linux-2.6.12-rc2 |
639 640 |
int_restore_rest: RESTORE_REST |
72fe48585 x86: replace priv... |
641 |
DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d2 [PATCH] lockdep: ... |
642 |
TRACE_IRQS_OFF |
1da177e4c Linux-2.6.12-rc2 |
643 644 |
jmp int_with_check CFI_ENDPROC |
bcddc0155 [PATCH] x86-64: m... |
645 |
END(system_call) |
0bd7b7985 x86: entry_64.S: ... |
646 647 |
/* |
1da177e4c Linux-2.6.12-rc2 |
648 |
* Certain special system calls that need to save a complete full stack frame. |
0bd7b7985 x86: entry_64.S: ... |
649 |
*/ |
1da177e4c Linux-2.6.12-rc2 |
650 |
.macro PTREGSCALL label,func,arg |
c002a1e6b x86: introduce sa... |
651 652 653 654 655 656 657 658 659 660 |
ENTRY(\label) PARTIAL_FRAME 1 8 /* offset 8: return address */ subq $REST_SKIP, %rsp CFI_ADJUST_CFA_OFFSET REST_SKIP call save_rest DEFAULT_FRAME 0 8 /* offset 8: return address */ leaq 8(%rsp), \arg /* pt_regs pointer */ call \func jmp ptregscall_common CFI_ENDPROC |
4b787e0b8 [PATCH] x86_64: a... |
661 |
END(\label) |
1da177e4c Linux-2.6.12-rc2 |
662 663 664 665 666 |
.endm PTREGSCALL stub_clone, sys_clone, %r8 PTREGSCALL stub_fork, sys_fork, %rdi PTREGSCALL stub_vfork, sys_vfork, %rdi |
1da177e4c Linux-2.6.12-rc2 |
667 668 669 670 |
PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx PTREGSCALL stub_iopl, sys_iopl, %rsi ENTRY(ptregscall_common) |
c002a1e6b x86: introduce sa... |
671 672 673 674 675 676 677 678 679 |
DEFAULT_FRAME 1 8 /* offset 8: return address */ RESTORE_TOP_OF_STACK %r11, 8 movq_cfi_restore R15+8, r15 movq_cfi_restore R14+8, r14 movq_cfi_restore R13+8, r13 movq_cfi_restore R12+8, r12 movq_cfi_restore RBP+8, rbp movq_cfi_restore RBX+8, rbx ret $REST_SKIP /* pop extended registers */ |
1da177e4c Linux-2.6.12-rc2 |
680 |
CFI_ENDPROC |
4b787e0b8 [PATCH] x86_64: a... |
681 |
END(ptregscall_common) |
0bd7b7985 x86: entry_64.S: ... |
682 |
|
1da177e4c Linux-2.6.12-rc2 |
683 684 |
ENTRY(stub_execve) CFI_STARTPROC |
e6b04b6b5 x86-64: Fix unwin... |
685 686 |
addq $8, %rsp PARTIAL_FRAME 0 |
1da177e4c Linux-2.6.12-rc2 |
687 |
SAVE_REST |
1da177e4c Linux-2.6.12-rc2 |
688 |
FIXUP_TOP_OF_STACK %r11 |
5d119b2c9 x86: fix execve w... |
689 |
movq %rsp, %rcx |
1da177e4c Linux-2.6.12-rc2 |
690 |
call sys_execve |
1da177e4c Linux-2.6.12-rc2 |
691 |
RESTORE_TOP_OF_STACK %r11 |
1da177e4c Linux-2.6.12-rc2 |
692 693 694 695 |
movq %rax,RAX(%rsp) RESTORE_REST jmp int_ret_from_sys_call CFI_ENDPROC |
4b787e0b8 [PATCH] x86_64: a... |
696 |
END(stub_execve) |
0bd7b7985 x86: entry_64.S: ... |
697 |
|
1da177e4c Linux-2.6.12-rc2 |
698 699 700 |
/* * sigreturn is special because it needs to restore all registers on return. * This cannot be done with SYSRET, so use the IRET return path instead. |
0bd7b7985 x86: entry_64.S: ... |
701 |
*/ |
1da177e4c Linux-2.6.12-rc2 |
702 703 |
ENTRY(stub_rt_sigreturn) CFI_STARTPROC |
7effaa882 [PATCH] x86-64: F... |
704 |
addq $8, %rsp |
e6b04b6b5 x86-64: Fix unwin... |
705 |
PARTIAL_FRAME 0 |
1da177e4c Linux-2.6.12-rc2 |
706 707 708 709 710 711 712 713 |
SAVE_REST movq %rsp,%rdi FIXUP_TOP_OF_STACK %r11 call sys_rt_sigreturn movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer RESTORE_REST jmp int_ret_from_sys_call CFI_ENDPROC |
4b787e0b8 [PATCH] x86_64: a... |
714 |
END(stub_rt_sigreturn) |
1da177e4c Linux-2.6.12-rc2 |
715 |
|
7effaa882 [PATCH] x86-64: F... |
716 |
/* |
939b78713 x86: 64 bits: shr... |
717 718 719 720 721 722 |
* Build the entry stubs and pointer table with some assembler magic. * We pack 7 stubs into a single 32-byte chunk, which will fit in a * single cache line on all modern x86 implementations. */ .section .init.rodata,"a" ENTRY(interrupt) |
ea7145477 x86: Separate out... |
723 |
.section .entry.text |
939b78713 x86: 64 bits: shr... |
724 725 726 727 728 729 730 731 732 |
.p2align 5 .p2align CONFIG_X86_L1_CACHE_SHIFT ENTRY(irq_entries_start) INTR_FRAME vector=FIRST_EXTERNAL_VECTOR .rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7 .balign 32 .rept 7 .if vector < NR_VECTORS |
8665596ec x86: fix up the n... |
733 |
.if vector <> FIRST_EXTERNAL_VECTOR |
939b78713 x86: 64 bits: shr... |
734 735 |
CFI_ADJUST_CFA_OFFSET -8 .endif |
df5d1874c x86: Use {push,po... |
736 |
1: pushq_cfi $(~vector+0x80) /* Note: always in signed byte range */ |
8665596ec x86: fix up the n... |
737 |
.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6 |
939b78713 x86: 64 bits: shr... |
738 739 740 741 |
jmp 2f .endif .previous .quad 1b |
ea7145477 x86: Separate out... |
742 |
.section .entry.text |
939b78713 x86: 64 bits: shr... |
743 744 745 746 747 748 749 750 751 752 753 |
vector=vector+1 .endif .endr 2: jmp common_interrupt .endr CFI_ENDPROC END(irq_entries_start) .previous END(interrupt) .previous |
d99015b1a x86: move entry_6... |
754 |
/* |
1da177e4c Linux-2.6.12-rc2 |
755 756 757 |
* Interrupt entry/exit. * * Interrupt entry points save only callee clobbered registers in fast path. |
d99015b1a x86: move entry_6... |
758 759 760 |
* * Entry runs with interrupts off. */ |
1da177e4c Linux-2.6.12-rc2 |
761 |
|
722024dbb x86: irq: fix api... |
762 |
/* 0(%rsp): ~(interrupt number) */ |
1da177e4c Linux-2.6.12-rc2 |
763 |
.macro interrupt func |
625dbc3b8 x86: Save rbp in ... |
764 765 766 |
/* reserve pt_regs for scratch regs and rbp */ subq $ORIG_RAX-RBP, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP |
1871853f7 x86,64: Simplify ... |
767 |
SAVE_ARGS_IRQ |
1da177e4c Linux-2.6.12-rc2 |
768 769 |
call \func .endm |
8222d718b kprobes/x86-64: F... |
770 771 772 773 |
/* * Interrupt entry/exit should be protected against kprobes */ .pushsection .kprobes.text, "ax" |
722024dbb x86: irq: fix api... |
774 775 776 777 |
/* * The interrupt stubs push (~vector+0x80) onto the stack and * then jump to common_interrupt. */ |
939b78713 x86: 64 bits: shr... |
778 779 |
.p2align CONFIG_X86_L1_CACHE_SHIFT common_interrupt: |
7effaa882 [PATCH] x86-64: F... |
780 |
XCPT_FRAME |
722024dbb x86: irq: fix api... |
781 |
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ |
1da177e4c Linux-2.6.12-rc2 |
782 |
interrupt do_IRQ |
3d1e42a7c x86-64: Move oldr... |
783 |
/* 0(%rsp): old_rsp-ARGOFFSET */ |
7effaa882 [PATCH] x86-64: F... |
784 |
ret_from_intr: |
72fe48585 x86: replace priv... |
785 |
DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d2 [PATCH] lockdep: ... |
786 |
TRACE_IRQS_OFF |
568955307 x86-64: Move irqc... |
787 |
decl PER_CPU_VAR(irq_count) |
625dbc3b8 x86: Save rbp in ... |
788 |
|
a2bbe7508 x86: Don't use fr... |
789 790 |
/* Restore saved previous stack */ popq %rsi |
eab9e6137 x86-64: Fix CFI d... |
791 792 |
CFI_DEF_CFA_REGISTER rsi leaq ARGOFFSET-RBP(%rsi), %rsp |
7effaa882 [PATCH] x86-64: F... |
793 |
CFI_DEF_CFA_REGISTER rsp |
eab9e6137 x86-64: Fix CFI d... |
794 |
CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET |
625dbc3b8 x86: Save rbp in ... |
795 |
|
7effaa882 [PATCH] x86-64: F... |
796 |
exit_intr: |
1da177e4c Linux-2.6.12-rc2 |
797 798 799 |
GET_THREAD_INFO(%rcx) testl $3,CS-ARGOFFSET(%rsp) je retint_kernel |
0bd7b7985 x86: entry_64.S: ... |
800 |
|
1da177e4c Linux-2.6.12-rc2 |
801 802 803 804 |
/* Interrupt came from user space */ /* * Has a correct top of stack, but a partial stack frame * %rcx: thread info. Interrupts off. |
0bd7b7985 x86: entry_64.S: ... |
805 |
*/ |
1da177e4c Linux-2.6.12-rc2 |
806 807 |
retint_with_reschedule: movl $_TIF_WORK_MASK,%edi |
7effaa882 [PATCH] x86-64: F... |
808 |
retint_check: |
10cd706d1 lockdep: x86_64: ... |
809 |
LOCKDEP_SYS_EXIT_IRQ |
26ccb8a71 x86: rename threa... |
810 |
movl TI_flags(%rcx),%edx |
1da177e4c Linux-2.6.12-rc2 |
811 |
andl %edi,%edx |
7effaa882 [PATCH] x86-64: F... |
812 |
CFI_REMEMBER_STATE |
1da177e4c Linux-2.6.12-rc2 |
813 |
jnz retint_careful |
10cd706d1 lockdep: x86_64: ... |
814 815 |
retint_swapgs: /* return to user-space */ |
2601e64d2 [PATCH] lockdep: ... |
816 817 818 |
/* * The iretq could re-enable interrupts: */ |
72fe48585 x86: replace priv... |
819 |
DISABLE_INTERRUPTS(CLBR_ANY) |
2601e64d2 [PATCH] lockdep: ... |
820 |
TRACE_IRQS_IRETQ |
72fe48585 x86: replace priv... |
821 |
SWAPGS |
2601e64d2 [PATCH] lockdep: ... |
822 |
jmp restore_args |
10cd706d1 lockdep: x86_64: ... |
823 |
retint_restore_args: /* return to kernel space */ |
72fe48585 x86: replace priv... |
824 |
DISABLE_INTERRUPTS(CLBR_ANY) |
2601e64d2 [PATCH] lockdep: ... |
825 826 827 828 829 |
/* * The iretq could re-enable interrupts: */ TRACE_IRQS_IRETQ restore_args: |
838feb475 x86, asm: Flip RE... |
830 |
RESTORE_ARGS 1,8,1 |
3701d863b x86: fixup more p... |
831 |
|
f7f3d791e x86: don't make i... |
832 |
irq_return: |
72fe48585 x86: replace priv... |
833 |
INTERRUPT_RETURN |
3701d863b x86: fixup more p... |
834 835 836 837 838 839 |
.section __ex_table, "a" .quad irq_return, bad_iret .previous #ifdef CONFIG_PARAVIRT |
72fe48585 x86: replace priv... |
840 |
ENTRY(native_iret) |
1da177e4c Linux-2.6.12-rc2 |
841 842 843 |
iretq .section __ex_table,"a" |
72fe48585 x86: replace priv... |
844 |
.quad native_iret, bad_iret |
1da177e4c Linux-2.6.12-rc2 |
845 |
.previous |
3701d863b x86: fixup more p... |
846 |
#endif |
1da177e4c Linux-2.6.12-rc2 |
847 |
.section .fixup,"ax" |
1da177e4c Linux-2.6.12-rc2 |
848 |
bad_iret: |
3aa4b37d3 x86: make traps o... |
849 850 851 852 853 854 855 856 857 858 859 860 861 862 |
/* * The iret traps when the %cs or %ss being restored is bogus. * We've lost the original trap vector and error code. * #GPF is the most likely one to get for an invalid selector. * So pretend we completed the iret and took the #GPF in user mode. * * We are now running with the kernel GS after exception recovery. * But error_entry expects us to have user GS to match the user %cs, * so swap back. */ pushq $0 SWAPGS jmp general_protection |
72fe48585 x86: replace priv... |
863 |
.previous |
7effaa882 [PATCH] x86-64: F... |
864 |
/* edi: workmask, edx: work */ |
1da177e4c Linux-2.6.12-rc2 |
865 |
retint_careful: |
7effaa882 [PATCH] x86-64: F... |
866 |
CFI_RESTORE_STATE |
1da177e4c Linux-2.6.12-rc2 |
867 868 |
bt $TIF_NEED_RESCHED,%edx jnc retint_signal |
2601e64d2 [PATCH] lockdep: ... |
869 |
TRACE_IRQS_ON |
72fe48585 x86: replace priv... |
870 |
ENABLE_INTERRUPTS(CLBR_NONE) |
df5d1874c x86: Use {push,po... |
871 |
pushq_cfi %rdi |
1da177e4c Linux-2.6.12-rc2 |
872 |
call schedule |
df5d1874c x86: Use {push,po... |
873 |
popq_cfi %rdi |
1da177e4c Linux-2.6.12-rc2 |
874 |
GET_THREAD_INFO(%rcx) |
72fe48585 x86: replace priv... |
875 |
DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d2 [PATCH] lockdep: ... |
876 |
TRACE_IRQS_OFF |
1da177e4c Linux-2.6.12-rc2 |
877 |
jmp retint_check |
0bd7b7985 x86: entry_64.S: ... |
878 |
|
1da177e4c Linux-2.6.12-rc2 |
879 |
retint_signal: |
8f4d37ec0 sched: high-res p... |
880 |
testl $_TIF_DO_NOTIFY_MASK,%edx |
10ffdbb8d [PATCH] x86_64: R... |
881 |
jz retint_swapgs |
2601e64d2 [PATCH] lockdep: ... |
882 |
TRACE_IRQS_ON |
72fe48585 x86: replace priv... |
883 |
ENABLE_INTERRUPTS(CLBR_NONE) |
1da177e4c Linux-2.6.12-rc2 |
884 |
SAVE_REST |
0bd7b7985 x86: entry_64.S: ... |
885 |
movq $-1,ORIG_RAX(%rsp) |
3829ee6b1 [PATCH] x86_64: S... |
886 |
xorl %esi,%esi # oldset |
1da177e4c Linux-2.6.12-rc2 |
887 888 889 |
movq %rsp,%rdi # &pt_regs call do_notify_resume RESTORE_REST |
72fe48585 x86: replace priv... |
890 |
DISABLE_INTERRUPTS(CLBR_NONE) |
2601e64d2 [PATCH] lockdep: ... |
891 |
TRACE_IRQS_OFF |
be9e68703 [PATCH] x86_64: i... |
892 |
GET_THREAD_INFO(%rcx) |
eca91e783 x86_64: fix delay... |
893 |
jmp retint_with_reschedule |
1da177e4c Linux-2.6.12-rc2 |
894 895 896 897 |
#ifdef CONFIG_PREEMPT /* Returning to kernel space. Check if we need preemption */ /* rcx: threadinfo. interrupts off. */ |
b06babac4 [PATCH] Add prope... |
898 |
ENTRY(retint_kernel) |
26ccb8a71 x86: rename threa... |
899 |
cmpl $0,TI_preempt_count(%rcx) |
1da177e4c Linux-2.6.12-rc2 |
900 |
jnz retint_restore_args |
26ccb8a71 x86: rename threa... |
901 |
bt $TIF_NEED_RESCHED,TI_flags(%rcx) |
1da177e4c Linux-2.6.12-rc2 |
902 903 904 905 906 |
jnc retint_restore_args bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */ jnc retint_restore_args call preempt_schedule_irq jmp exit_intr |
0bd7b7985 x86: entry_64.S: ... |
907 |
#endif |
4b787e0b8 [PATCH] x86_64: a... |
908 |
|
1da177e4c Linux-2.6.12-rc2 |
909 |
CFI_ENDPROC |
4b787e0b8 [PATCH] x86_64: a... |
910 |
END(common_interrupt) |
8222d718b kprobes/x86-64: F... |
911 912 913 914 |
/* * End of kprobes section */ .popsection |
0bd7b7985 x86: entry_64.S: ... |
915 |
|
1da177e4c Linux-2.6.12-rc2 |
916 917 |
/* * APIC interrupts. |
0bd7b7985 x86: entry_64.S: ... |
918 |
*/ |
322648d1b x86: include ENTR... |
919 920 |
.macro apicinterrupt num sym do_sym ENTRY(\sym) |
7effaa882 [PATCH] x86-64: F... |
921 |
INTR_FRAME |
df5d1874c x86: Use {push,po... |
922 923 |
pushq_cfi $~(\num)
39e954334 x86-64: Reduce am... |
924 |
.Lcommon_\sym: |
322648d1b x86: include ENTR... |
925 |
interrupt \do_sym |
1da177e4c Linux-2.6.12-rc2 |
926 927 |
jmp ret_from_intr CFI_ENDPROC |
322648d1b x86: include ENTR... |
928 929 |
END(\sym) .endm |
1da177e4c Linux-2.6.12-rc2 |
930 |
|
322648d1b x86: include ENTR... |
931 932 933 |
#ifdef CONFIG_SMP apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \ irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt |
4ef702c10 x86: fix panic wi... |
934 935 |
apicinterrupt REBOOT_VECTOR \ reboot_interrupt smp_reboot_interrupt |
322648d1b x86: include ENTR... |
936 |
#endif |
1da177e4c Linux-2.6.12-rc2 |
937 |
|
03b486322 x86: make UV supp... |
938 |
#ifdef CONFIG_X86_UV |
5ae3a139c x86: uv bau inter... |
939 |
apicinterrupt UV_BAU_MESSAGE \ |
322648d1b x86: include ENTR... |
940 |
uv_bau_message_intr1 uv_bau_message_interrupt |
03b486322 x86: make UV supp... |
941 |
#endif |
322648d1b x86: include ENTR... |
942 943 |
apicinterrupt LOCAL_TIMER_VECTOR \ apic_timer_interrupt smp_apic_timer_interrupt |
4a4de9c7d x86: UV RTC: Rena... |
944 945 |
apicinterrupt X86_PLATFORM_IPI_VECTOR \ x86_platform_ipi smp_x86_platform_ipi |
89b831ef8 [PATCH] x86_64: S... |
946 |
|
0bd7b7985 x86: entry_64.S: ... |
947 |
#ifdef CONFIG_SMP |
39e954334 x86-64: Reduce am... |
948 949 950 |
ALIGN INTR_FRAME .irp idx,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \ |
3a09fb457 x86: Allocate 32 ... |
951 952 |
16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 .if NUM_INVALIDATE_TLB_VECTORS > \idx |
39e954334 x86-64: Reduce am... |
953 954 955 956 957 |
ENTRY(invalidate_interrupt\idx) pushq_cfi $~(INVALIDATE_TLB_VECTOR_START+\idx) jmp .Lcommon_invalidate_interrupt0 CFI_ADJUST_CFA_OFFSET -8 END(invalidate_interrupt\idx) |
3a09fb457 x86: Allocate 32 ... |
958 |
.endif |
3234282f3 x86, asm: Fix CFI... |
959 |
.endr |
39e954334 x86-64: Reduce am... |
960 961 962 |
CFI_ENDPROC apicinterrupt INVALIDATE_TLB_VECTOR_START, \ invalidate_interrupt0, smp_invalidate_interrupt |
1da177e4c Linux-2.6.12-rc2 |
963 |
#endif |
322648d1b x86: include ENTR... |
964 |
apicinterrupt THRESHOLD_APIC_VECTOR \ |
7856f6cce x86, mce: enable ... |
965 |
threshold_interrupt smp_threshold_interrupt |
322648d1b x86: include ENTR... |
966 967 |
apicinterrupt THERMAL_APIC_VECTOR \ thermal_interrupt smp_thermal_interrupt |
1812924bb x86, SGI UV: TLB ... |
968 |
|
322648d1b x86: include ENTR... |
969 970 971 972 973 974 975 976 |
#ifdef CONFIG_SMP apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \ call_function_single_interrupt smp_call_function_single_interrupt apicinterrupt CALL_FUNCTION_VECTOR \ call_function_interrupt smp_call_function_interrupt apicinterrupt RESCHEDULE_VECTOR \ reschedule_interrupt smp_reschedule_interrupt #endif |
1da177e4c Linux-2.6.12-rc2 |
977 |
|
322648d1b x86: include ENTR... |
978 979 980 981 |
apicinterrupt ERROR_APIC_VECTOR \ error_interrupt smp_error_interrupt apicinterrupt SPURIOUS_APIC_VECTOR \ spurious_interrupt smp_spurious_interrupt |
0bd7b7985 x86: entry_64.S: ... |
982 |
|
e360adbe2 irq_work: Add gen... |
983 984 985 |
#ifdef CONFIG_IRQ_WORK apicinterrupt IRQ_WORK_VECTOR \ irq_work_interrupt smp_irq_work_interrupt |
241771ef0 performance count... |
986 |
#endif |
1da177e4c Linux-2.6.12-rc2 |
987 988 |
/* * Exception entry points. |
0bd7b7985 x86: entry_64.S: ... |
989 |
*/ |
322648d1b x86: include ENTR... |
990 991 |
.macro zeroentry sym do_sym ENTRY(\sym) |
7effaa882 [PATCH] x86-64: F... |
992 |
INTR_FRAME |
fab58420a x86/paravirt, 64-... |
993 |
PARAVIRT_ADJUST_EXCEPTION_FRAME |
14ae22ba2 x86: entry_64.S: ... |
994 |
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ |
b1cccb1bb x86-64: Use symbo... |
995 996 |
subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
d99015b1a x86: move entry_6... |
997 |
call error_entry |
dcd072e26 x86: clean up aft... |
998 |
DEFAULT_FRAME 0 |
d99015b1a x86: move entry_6... |
999 1000 |
movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ |
322648d1b x86: include ENTR... |
1001 |
call \do_sym |
d99015b1a x86: move entry_6... |
1002 |
jmp error_exit /* %ebx: no swapgs flag */ |
7effaa882 [PATCH] x86-64: F... |
1003 |
CFI_ENDPROC |
322648d1b x86: include ENTR... |
1004 1005 |
END(\sym) .endm |
1da177e4c Linux-2.6.12-rc2 |
1006 |
|
322648d1b x86: include ENTR... |
1007 |
.macro paranoidzeroentry sym do_sym |
ddeb8f214 x86_64: get rid o... |
1008 |
ENTRY(\sym) |
b8b1d08bf x86: entry_64.S: ... |
1009 1010 |
INTR_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME |
b1cccb1bb x86-64: Use symbo... |
1011 1012 1013 |
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
b8b1d08bf x86: entry_64.S: ... |
1014 1015 1016 1017 |
call save_paranoid TRACE_IRQS_OFF movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ |
322648d1b x86: include ENTR... |
1018 |
call \do_sym |
b8b1d08bf x86: entry_64.S: ... |
1019 1020 |
jmp paranoid_exit /* %ebx: no swapgs flag */ CFI_ENDPROC |
ddeb8f214 x86_64: get rid o... |
1021 |
END(\sym) |
322648d1b x86: include ENTR... |
1022 |
.endm |
b8b1d08bf x86: entry_64.S: ... |
1023 |
|
c15a5958a x86-64, asm: Dire... |
1024 |
#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) |
322648d1b x86: include ENTR... |
1025 |
.macro paranoidzeroentry_ist sym do_sym ist |
ddeb8f214 x86_64: get rid o... |
1026 |
ENTRY(\sym) |
9f1e87ea3 x86: entry_64.S -... |
1027 |
INTR_FRAME |
b8b1d08bf x86: entry_64.S: ... |
1028 |
PARAVIRT_ADJUST_EXCEPTION_FRAME |
b1cccb1bb x86-64: Use symbo... |
1029 1030 1031 |
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
b8b1d08bf x86: entry_64.S: ... |
1032 1033 1034 1035 |
call save_paranoid TRACE_IRQS_OFF movq %rsp,%rdi /* pt_regs pointer */ xorl %esi,%esi /* no error code */ |
c15a5958a x86-64, asm: Dire... |
1036 |
subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) |
322648d1b x86: include ENTR... |
1037 |
call \do_sym |
c15a5958a x86-64, asm: Dire... |
1038 |
addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) |
b8b1d08bf x86: entry_64.S: ... |
1039 1040 |
jmp paranoid_exit /* %ebx: no swapgs flag */ CFI_ENDPROC |
ddeb8f214 x86_64: get rid o... |
1041 |
END(\sym) |
322648d1b x86: include ENTR... |
1042 |
.endm |
b8b1d08bf x86: entry_64.S: ... |
1043 |
|
ddeb8f214 x86_64: get rid o... |
1044 |
.macro errorentry sym do_sym |
322648d1b x86: include ENTR... |
1045 |
ENTRY(\sym) |
7effaa882 [PATCH] x86-64: F... |
1046 |
XCPT_FRAME |
fab58420a x86/paravirt, 64-... |
1047 |
PARAVIRT_ADJUST_EXCEPTION_FRAME |
b1cccb1bb x86-64: Use symbo... |
1048 1049 |
subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
d99015b1a x86: move entry_6... |
1050 |
call error_entry |
dcd072e26 x86: clean up aft... |
1051 |
DEFAULT_FRAME 0 |
d99015b1a x86: move entry_6... |
1052 1053 1054 |
movq %rsp,%rdi /* pt_regs pointer */ movq ORIG_RAX(%rsp),%rsi /* get error code */ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ |
322648d1b x86: include ENTR... |
1055 |
call \do_sym |
d99015b1a x86: move entry_6... |
1056 |
jmp error_exit /* %ebx: no swapgs flag */ |
7effaa882 [PATCH] x86-64: F... |
1057 |
CFI_ENDPROC |
322648d1b x86: include ENTR... |
1058 |
END(\sym) |
322648d1b x86: include ENTR... |
1059 |
.endm |
1da177e4c Linux-2.6.12-rc2 |
1060 1061 |
/* error code is on the stack already */ |
ddeb8f214 x86_64: get rid o... |
1062 |
.macro paranoiderrorentry sym do_sym |
322648d1b x86: include ENTR... |
1063 |
ENTRY(\sym) |
b8b1d08bf x86: entry_64.S: ... |
1064 1065 |
XCPT_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME |
b1cccb1bb x86-64: Use symbo... |
1066 1067 |
subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 |
e2f6bc25b x86: entry_64.S: ... |
1068 1069 |
call save_paranoid DEFAULT_FRAME 0 |
7e61a7932 traps: x86_64: ad... |
1070 |
TRACE_IRQS_OFF |
b8b1d08bf x86: entry_64.S: ... |
1071 1072 1073 |
movq %rsp,%rdi /* pt_regs pointer */ movq ORIG_RAX(%rsp),%rsi /* get error code */ movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ |
322648d1b x86: include ENTR... |
1074 |
call \do_sym |
b8b1d08bf x86: entry_64.S: ... |
1075 1076 |
jmp paranoid_exit /* %ebx: no swapgs flag */ CFI_ENDPROC |
322648d1b x86: include ENTR... |
1077 |
END(\sym) |
322648d1b x86: include ENTR... |
1078 1079 1080 |
.endm zeroentry divide_error do_divide_error |
322648d1b x86: include ENTR... |
1081 1082 1083 1084 |
zeroentry overflow do_overflow zeroentry bounds do_bounds zeroentry invalid_op do_invalid_op zeroentry device_not_available do_device_not_available |
ddeb8f214 x86_64: get rid o... |
1085 |
paranoiderrorentry double_fault do_double_fault |
322648d1b x86: include ENTR... |
1086 1087 1088 |
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun errorentry invalid_TSS do_invalid_TSS errorentry segment_not_present do_segment_not_present |
322648d1b x86: include ENTR... |
1089 1090 1091 |
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug zeroentry coprocessor_error do_coprocessor_error errorentry alignment_check do_alignment_check |
322648d1b x86: include ENTR... |
1092 |
zeroentry simd_coprocessor_error do_simd_coprocessor_error |
5cec93c21 x86-64: Emulate l... |
1093 |
|
2601e64d2 [PATCH] lockdep: ... |
1094 |
|
9f1e87ea3 x86: entry_64.S -... |
1095 1096 |
/* Reload gs selector with exception handling */ /* edi: new selector */ |
9f9d489a3 x86/paravirt, 64-... |
1097 |
ENTRY(native_load_gs_index) |
7effaa882 [PATCH] x86-64: F... |
1098 |
CFI_STARTPROC |
df5d1874c x86: Use {push,po... |
1099 |
pushfq_cfi |
b8aa287f7 x86: fix paravirt... |
1100 |
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI) |
9f1e87ea3 x86: entry_64.S -... |
1101 |
SWAPGS |
0bd7b7985 x86: entry_64.S: ... |
1102 |
gs_change: |
9f1e87ea3 x86: entry_64.S -... |
1103 |
movl %edi,%gs |
1da177e4c Linux-2.6.12-rc2 |
1104 |
2: mfence /* workaround */ |
72fe48585 x86: replace priv... |
1105 |
SWAPGS |
df5d1874c x86: Use {push,po... |
1106 |
popfq_cfi |
9f1e87ea3 x86: entry_64.S -... |
1107 |
ret |
7effaa882 [PATCH] x86-64: F... |
1108 |
CFI_ENDPROC |
6efdcfaf1 x86: KPROBE_ENTRY... |
1109 |
END(native_load_gs_index) |
0bd7b7985 x86: entry_64.S: ... |
1110 |
|
9f1e87ea3 x86: entry_64.S -... |
1111 1112 1113 1114 1115 |
.section __ex_table,"a" .align 8 .quad gs_change,bad_gs .previous .section .fixup,"ax" |
1da177e4c Linux-2.6.12-rc2 |
1116 |
/* running with kernelgs */ |
0bd7b7985 x86: entry_64.S: ... |
1117 |
bad_gs: |
72fe48585 x86: replace priv... |
1118 |
SWAPGS /* switch back to user gs */ |
1da177e4c Linux-2.6.12-rc2 |
1119 |
xorl %eax,%eax |
9f1e87ea3 x86: entry_64.S -... |
1120 1121 1122 |
movl %eax,%gs jmp 2b .previous |
0bd7b7985 x86: entry_64.S: ... |
1123 |
|
3bd95dfb1 x86, 64-bit: Move... |
1124 |
ENTRY(kernel_thread_helper) |
c05991ed1 [PATCH] x86_64: A... |
1125 1126 |
pushq $0 # fake return address CFI_STARTPROC |
1da177e4c Linux-2.6.12-rc2 |
1127 1128 1129 1130 |
/* * Here we are in the child and the registers are set as they were * at kernel_thread() invocation in the parent. */ |
3bd95dfb1 x86, 64-bit: Move... |
1131 |
call *%rsi |
1da177e4c Linux-2.6.12-rc2 |
1132 |
# exit |
1c5b5cfd2 x86: return corre... |
1133 |
mov %eax, %edi |
1da177e4c Linux-2.6.12-rc2 |
1134 |
call do_exit |
5f5db5913 x86, debug: remov... |
1135 |
ud2 # padding for call trace |
c05991ed1 [PATCH] x86_64: A... |
1136 |
CFI_ENDPROC |
3bd95dfb1 x86, 64-bit: Move... |
1137 |
END(kernel_thread_helper) |
1da177e4c Linux-2.6.12-rc2 |
1138 1139 1140 1141 1142 |
/* * execve(). This function needs to use IRET, not SYSRET, to set up all state properly. * * C extern interface: |
c78873252 Mark arguments to... |
1143 |
* extern long execve(const char *name, char **argv, char **envp) |
1da177e4c Linux-2.6.12-rc2 |
1144 1145 1146 1147 1148 |
* * asm input arguments: * rdi: name, rsi: argv, rdx: envp * * We want to fallback into: |
c78873252 Mark arguments to... |
1149 |
* extern long sys_execve(const char *name, char **argv,char **envp, struct pt_regs *regs) |
1da177e4c Linux-2.6.12-rc2 |
1150 1151 |
* * do_sys_execve asm fallback arguments: |
5d119b2c9 x86: fix execve w... |
1152 |
* rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack |
1da177e4c Linux-2.6.12-rc2 |
1153 |
*/ |
3db03b4af [PATCH] rename th... |
1154 |
ENTRY(kernel_execve) |
1da177e4c Linux-2.6.12-rc2 |
1155 1156 |
CFI_STARTPROC FAKE_STACK_FRAME $0 |
0bd7b7985 x86: entry_64.S: ... |
1157 |
SAVE_ALL |
5d119b2c9 x86: fix execve w... |
1158 |
movq %rsp,%rcx |
1da177e4c Linux-2.6.12-rc2 |
1159 |
call sys_execve |
0bd7b7985 x86: entry_64.S: ... |
1160 |
movq %rax, RAX(%rsp) |
1da177e4c Linux-2.6.12-rc2 |
1161 1162 1163 1164 1165 1166 1167 |
RESTORE_REST testq %rax,%rax je int_ret_from_sys_call RESTORE_ARGS UNFAKE_STACK_FRAME ret CFI_ENDPROC |
6efdcfaf1 x86: KPROBE_ENTRY... |
1168 |
END(kernel_execve) |
1da177e4c Linux-2.6.12-rc2 |
1169 |
|
2699500b3 [PATCH] x86_64: F... |
1170 |
/* Call softirq on interrupt stack. Interrupts are off. */ |
ed6b676ca [PATCH] x86_64: S... |
1171 |
ENTRY(call_softirq) |
7effaa882 [PATCH] x86-64: F... |
1172 |
CFI_STARTPROC |
df5d1874c x86: Use {push,po... |
1173 |
pushq_cfi %rbp |
2699500b3 [PATCH] x86_64: F... |
1174 1175 1176 |
CFI_REL_OFFSET rbp,0 mov %rsp,%rbp CFI_DEF_CFA_REGISTER rbp |
568955307 x86-64: Move irqc... |
1177 |
incl PER_CPU_VAR(irq_count) |
26f80bd6a x86-64: Convert i... |
1178 |
cmove PER_CPU_VAR(irq_stack_ptr),%rsp |
2699500b3 [PATCH] x86_64: F... |
1179 |
push %rbp # backlink for old unwinder |
ed6b676ca [PATCH] x86_64: S... |
1180 |
call __do_softirq |
2699500b3 [PATCH] x86_64: F... |
1181 |
leaveq |
df5d1874c x86: Use {push,po... |
1182 |
CFI_RESTORE rbp |
7effaa882 [PATCH] x86-64: F... |
1183 |
CFI_DEF_CFA_REGISTER rsp |
2699500b3 [PATCH] x86_64: F... |
1184 |
CFI_ADJUST_CFA_OFFSET -8 |
568955307 x86-64: Move irqc... |
1185 |
decl PER_CPU_VAR(irq_count) |
ed6b676ca [PATCH] x86_64: S... |
1186 |
ret |
7effaa882 [PATCH] x86-64: F... |
1187 |
CFI_ENDPROC |
6efdcfaf1 x86: KPROBE_ENTRY... |
1188 |
END(call_softirq) |
75154f402 x86_64: Ignore co... |
1189 |
|
3d75e1b8e xen64: add hyperv... |
1190 |
#ifdef CONFIG_XEN |
322648d1b x86: include ENTR... |
1191 |
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback |
3d75e1b8e xen64: add hyperv... |
1192 1193 |
/* |
9f1e87ea3 x86: entry_64.S -... |
1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 |
* A note on the "critical region" in our callback handler. * We want to avoid stacking callback handlers due to events occurring * during handling of the last event. To do this, we keep events disabled * until we've done all processing. HOWEVER, we must enable events before * popping the stack frame (can't be done atomically) and so it would still * be possible to get enough handler activations to overflow the stack. * Although unlikely, bugs of that kind are hard to track down, so we'd * like to avoid the possibility. * So, on entry to the handler we detect whether we interrupted an * existing activation in its critical region -- if so, we pop the current * activation and restart the handler using the previous one. */ |
3d75e1b8e xen64: add hyperv... |
1206 1207 |
ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) CFI_STARTPROC |
9f1e87ea3 x86: entry_64.S -... |
1208 1209 1210 1211 |
/* * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will * see the correct pointer to the pt_regs */ |
3d75e1b8e xen64: add hyperv... |
1212 1213 |
movq %rdi, %rsp # we don't return, adjust the stack frame CFI_ENDPROC |
dcd072e26 x86: clean up aft... |
1214 |
DEFAULT_FRAME |
568955307 x86-64: Move irqc... |
1215 |
11: incl PER_CPU_VAR(irq_count) |
3d75e1b8e xen64: add hyperv... |
1216 1217 |
movq %rsp,%rbp CFI_DEF_CFA_REGISTER rbp |
26f80bd6a x86-64: Convert i... |
1218 |
cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp |
3d75e1b8e xen64: add hyperv... |
1219 1220 1221 1222 |
pushq %rbp # backlink for old unwinder call xen_evtchn_do_upcall popq %rsp CFI_DEF_CFA_REGISTER rsp |
568955307 x86-64: Move irqc... |
1223 |
decl PER_CPU_VAR(irq_count) |
3d75e1b8e xen64: add hyperv... |
1224 1225 |
jmp error_exit CFI_ENDPROC |
371c394af x86, binutils, xe... |
1226 |
END(xen_do_hypervisor_callback) |
3d75e1b8e xen64: add hyperv... |
1227 1228 |
/* |
9f1e87ea3 x86: entry_64.S -... |
1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 |
* Hypervisor uses this for application faults while it executes. * We get here for two reasons: * 1. Fault while reloading DS, ES, FS or GS * 2. Fault while executing IRET * Category 1 we do not need to fix up as Xen has already reloaded all segment * registers that could be reloaded and zeroed the others. * Category 2 we fix up by killing the current process. We cannot use the * normal Linux return path in this case because if we use the IRET hypercall * to pop the stack frame we end up in an infinite loop of failsafe callbacks. * We distinguish between categories by comparing each saved segment register * with its current contents: any discrepancy means we in category 1. */ |
3d75e1b8e xen64: add hyperv... |
1241 |
ENTRY(xen_failsafe_callback) |
dcd072e26 x86: clean up aft... |
1242 1243 1244 1245 1246 1247 1248 |
INTR_FRAME 1 (6*8) /*CFI_REL_OFFSET gs,GS*/ /*CFI_REL_OFFSET fs,FS*/ /*CFI_REL_OFFSET es,ES*/ /*CFI_REL_OFFSET ds,DS*/ CFI_REL_OFFSET r11,8 CFI_REL_OFFSET rcx,0 |
3d75e1b8e xen64: add hyperv... |
1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 |
movw %ds,%cx cmpw %cx,0x10(%rsp) CFI_REMEMBER_STATE jne 1f movw %es,%cx cmpw %cx,0x18(%rsp) jne 1f movw %fs,%cx cmpw %cx,0x20(%rsp) jne 1f movw %gs,%cx cmpw %cx,0x28(%rsp) jne 1f /* All segments match their saved values => Category 2 (Bad IRET). */ movq (%rsp),%rcx CFI_RESTORE rcx movq 8(%rsp),%r11 CFI_RESTORE r11 addq $0x30,%rsp CFI_ADJUST_CFA_OFFSET -0x30 |
14ae22ba2 x86: entry_64.S: ... |
1269 1270 1271 |
pushq_cfi $0 /* RIP */ pushq_cfi %r11 pushq_cfi %rcx |
4a5c3e77f xen64: implement ... |
1272 |
jmp general_protection |
3d75e1b8e xen64: add hyperv... |
1273 1274 1275 1276 1277 1278 1279 1280 |
CFI_RESTORE_STATE 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ movq (%rsp),%rcx CFI_RESTORE rcx movq 8(%rsp),%r11 CFI_RESTORE r11 addq $0x30,%rsp CFI_ADJUST_CFA_OFFSET -0x30 |
14ae22ba2 x86: entry_64.S: ... |
1281 |
pushq_cfi $0 |
3d75e1b8e xen64: add hyperv... |
1282 1283 1284 |
SAVE_ALL jmp error_exit CFI_ENDPROC |
3d75e1b8e xen64: add hyperv... |
1285 |
END(xen_failsafe_callback) |
38e20b07e x86/xen: event ch... |
1286 1287 |
apicinterrupt XEN_HVM_EVTCHN_CALLBACK \ xen_hvm_callback_vector xen_evtchn_do_upcall |
3d75e1b8e xen64: add hyperv... |
1288 |
#endif /* CONFIG_XEN */ |
ddeb8f214 x86_64: get rid o... |
1289 1290 1291 1292 1293 1294 1295 1296 1297 |
/* * Some functions should be protected against kprobes */ .pushsection .kprobes.text, "ax" paranoidzeroentry_ist debug do_debug DEBUG_STACK paranoidzeroentry_ist int3 do_int3 DEBUG_STACK paranoiderrorentry stack_segment do_stack_segment |
6cac5a924 xen/x86-64: fix b... |
1298 1299 1300 1301 1302 |
#ifdef CONFIG_XEN zeroentry xen_debug do_debug zeroentry xen_int3 do_int3 errorentry xen_stack_segment do_stack_segment #endif |
ddeb8f214 x86_64: get rid o... |
1303 1304 |
errorentry general_protection do_general_protection errorentry page_fault do_page_fault |
631bc4878 KVM: Handle async... |
1305 1306 1307 |
#ifdef CONFIG_KVM_GUEST errorentry async_page_fault do_async_page_fault #endif |
ddeb8f214 x86_64: get rid o... |
1308 |
#ifdef CONFIG_X86_MCE |
5d7279268 x86, mce: use a c... |
1309 |
paranoidzeroentry machine_check *machine_check_vector(%rip) |
ddeb8f214 x86_64: get rid o... |
1310 1311 1312 |
#endif /* |
9f1e87ea3 x86: entry_64.S -... |
1313 1314 |
* "Paranoid" exit path from exception stack. * Paranoid because this is used by NMIs and cannot take |
ddeb8f214 x86_64: get rid o... |
1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 |
* any kernel state for granted. * We don't do kernel preemption checks here, because only * NMI should be common and it does not enable IRQs and * cannot get reschedule ticks. * * "trace" is 0 for the NMI handler only, because irq-tracing * is fundamentally NMI-unsafe. (we cannot change the soft and * hard flags at once, atomically) */ /* ebx: no swapgs flag */ ENTRY(paranoid_exit)
/* On entry %ebx carries the "no swapgs" flag set by the paranoid entry path. */
1f130a783 x86-64: Adjust fr... |
1327 |
/* CFI annotation only: declare the full saved-register frame for unwinders. */
DEFAULT_FRAME
ddeb8f214 x86_64: get rid o... |
1328 1329 1330 1331 1332 1333 1334 1335 1336 |
/*
 * Dispatch with IRQs off: %ebx != 0 means GS is already the kernel's, so
 * skip SWAPGS (paranoid_restore).  A user-mode CS on the frame goes to
 * paranoid_userspace to handle pending work; the kernel-return
 * fall-through (paranoid_swapgs) does SWAPGS before restoring.
 */
DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl %ebx,%ebx /* swapgs needed? */ jnz paranoid_restore testl $3,CS(%rsp) jnz paranoid_userspace paranoid_swapgs: TRACE_IRQS_IRETQ 0 SWAPGS_UNSAFE_STACK
0300e7f1a lockdep, x86: acc... |
1337 1338 |
/* Restore registers, drop 8 extra stack bytes, and leave via irq_return. */
RESTORE_ALL 8 jmp irq_return
ddeb8f214 x86_64: get rid o... |
1339 |
/* Taken when %ebx was set: kernel GS is already active, no SWAPGS needed. */
paranoid_restore:
0300e7f1a lockdep, x86: acc... |
1340 |
/* Irq-flag tracing bookkeeping for lock debugging before the iret path. */
TRACE_IRQS_IRETQ 0
ddeb8f214 x86_64: get rid o... |
1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 |
/*
 * paranoid_userspace loop: while _TIF_WORK_MASK bits remain, switch to the
 * process stack via sync_regs, then either schedule() (on
 * _TIF_NEED_RESCHED, via paranoid_schedule) or deliver signals through
 * do_notify_resume; flags are re-checked each iteration.  Then
 * error_entry: completes the register save into the pt_regs frame and
 * returns with %ebx = 1 for a kernel-mode fault (GS already kernel's) or
 * %ebx = 0 after doing SWAPGS for a user-mode fault.
 */
RESTORE_ALL 8 jmp irq_return paranoid_userspace: GET_THREAD_INFO(%rcx) movl TI_flags(%rcx),%ebx andl $_TIF_WORK_MASK,%ebx jz paranoid_swapgs movq %rsp,%rdi /* &pt_regs */ call sync_regs movq %rax,%rsp /* switch stack for scheduling */ testl $_TIF_NEED_RESCHED,%ebx jnz paranoid_schedule movl %ebx,%edx /* arg3: thread flags */ TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) xorl %esi,%esi /* arg2: oldset */ movq %rsp,%rdi /* arg1: &pt_regs */ call do_notify_resume DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF jmp paranoid_userspace paranoid_schedule: TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_ANY) call schedule DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF jmp paranoid_userspace CFI_ENDPROC END(paranoid_exit) /* * Exception entry point. This expects an error code/orig_rax on the stack. * returns in "no swapgs flag" in %ebx. */ ENTRY(error_entry) XCPT_FRAME CFI_ADJUST_CFA_OFFSET 15*8 /* oldrax contains error code */ cld movq_cfi rdi, RDI+8 movq_cfi rsi, RSI+8 movq_cfi rdx, RDX+8 movq_cfi rcx, RCX+8 movq_cfi rax, RAX+8 movq_cfi r8, R8+8 movq_cfi r9, R9+8 movq_cfi r10, R10+8 movq_cfi r11, R11+8 movq_cfi rbx, RBX+8 movq_cfi rbp, RBP+8 movq_cfi r12, R12+8 movq_cfi r13, R13+8 movq_cfi r14, R14+8 movq_cfi r15, R15+8 xorl %ebx,%ebx testl $3,CS+8(%rsp) je error_kernelspace error_swapgs: SWAPGS error_sti: TRACE_IRQS_OFF ret
ddeb8f214 x86_64: get rid o... |
1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 |
/* * There are two places in the kernel that can potentially fault with * usergs. Handle them here. The exception handlers after iret run with * kernel gs again, so don't set the user space flag. B stepping K8s * sometimes report a truncated RIP for IRET exceptions returning to * compat mode. Check for these here too. */ error_kernelspace: incl %ebx leaq irq_return(%rip),%rcx cmpq %rcx,RIP+8(%rsp) je error_swapgs
/* A fault at irq_return itself still runs with user GS => swapgs path. */
ae24ffe5e x86, 64-bit: Move... |
1417 1418 1419 |
/* K8 B-step case: also compare against the zero-extended 32-bit irq_return address. */
movl %ecx,%eax /* zero extend */ cmpq %rax,RIP+8(%rsp) je bstep_iret
ddeb8f214 x86_64: get rid o... |
1420 |
/* Second usergs fault site: did we fault at gs_change? */
cmpq $gs_change,RIP+8(%rsp)
9f1e87ea3 x86: entry_64.S -... |
1421 |
/* gs_change fault: user GS still loaded, go through SWAPGS. */
je error_swapgs
ddeb8f214 x86_64: get rid o... |
1422 |
/* Ordinary kernel-mode fault: GS already the kernel's, skip SWAPGS. */
jmp error_sti
ae24ffe5e x86, 64-bit: Move... |
1423 1424 1425 1426 |
/* Write the full 64-bit irq_return address (%rcx) over the truncated RIP. */
bstep_iret: /* Fix truncated RIP */ movq %rcx,RIP+8(%rsp)
97829de5a x86, 64-bit: Fix ... |
1427 |
/* Truncated-RIP IRET fault also ran with user GS: take the swapgs path. */
jmp error_swapgs
e6b04b6b5 x86-64: Fix unwin... |
1428 |
/* Close the dwarf CFI region for error_entry (END follows). */
CFI_ENDPROC
ddeb8f214 x86_64: get rid o... |
1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 |
/*
 * error_exit: %ebx from error_entry picks the return path — nonzero means
 * a kernel-mode fault (retint_kernel); zero means return to user, which
 * checks _TIF_WORK_MASK and leaves via retint_careful or retint_swapgs.
 * Then the NMI entry begins on the exception stack: push -1 into the
 * orig_rax slot and start building a full frame.
 */
END(error_entry) /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */ ENTRY(error_exit) DEFAULT_FRAME movl %ebx,%eax RESTORE_REST DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF GET_THREAD_INFO(%rcx) testl %eax,%eax jne retint_kernel LOCKDEP_SYS_EXIT_IRQ movl TI_flags(%rcx),%edx movl $_TIF_WORK_MASK,%edi andl %edi,%edx jnz retint_careful jmp retint_swapgs CFI_ENDPROC END(error_exit) /* runs on exception stack */ ENTRY(nmi) INTR_FRAME PARAVIRT_ADJUST_EXCEPTION_FRAME pushq_cfi $-1
b1cccb1bb x86-64: Use symbo... |
1457 1458 |
/* Allocate the rest of the pt_regs frame down to R15; save_paranoid fills it. */
subq $ORIG_RAX-R15, %rsp CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ddeb8f214 x86_64: get rid o... |
1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 |
/*
 * NMI body: call do_nmi(pt_regs, -1) with the paranoid frame saved.  With
 * CONFIG_TRACE_IRQFLAGS the paranoid exit is open-coded here without any
 * TRACE_IRQS_* calls (irq-tracing is fundamentally NMI-unsafe, see the
 * header comment); the nmi_userspace loop mirrors paranoid_userspace:
 * sync_regs to the process stack, then schedule() or do_notify_resume
 * until _TIF_WORK_MASK is clear.  Without irq-flag tracing, tail-jump to
 * the shared paranoid_exit instead.
 */
call save_paranoid DEFAULT_FRAME 0 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ movq %rsp,%rdi movq $-1,%rsi call do_nmi #ifdef CONFIG_TRACE_IRQFLAGS /* paranoidexit; without TRACE_IRQS_OFF */ /* ebx: no swapgs flag */ DISABLE_INTERRUPTS(CLBR_NONE) testl %ebx,%ebx /* swapgs needed? */ jnz nmi_restore testl $3,CS(%rsp) jnz nmi_userspace nmi_swapgs: SWAPGS_UNSAFE_STACK nmi_restore: RESTORE_ALL 8 jmp irq_return nmi_userspace: GET_THREAD_INFO(%rcx) movl TI_flags(%rcx),%ebx andl $_TIF_WORK_MASK,%ebx jz nmi_swapgs movq %rsp,%rdi /* &pt_regs */ call sync_regs movq %rax,%rsp /* switch stack for scheduling */ testl $_TIF_NEED_RESCHED,%ebx jnz nmi_schedule movl %ebx,%edx /* arg3: thread flags */ ENABLE_INTERRUPTS(CLBR_NONE) xorl %esi,%esi /* arg2: oldset */ movq %rsp,%rdi /* arg1: &pt_regs */ call do_notify_resume DISABLE_INTERRUPTS(CLBR_NONE) jmp nmi_userspace nmi_schedule: ENABLE_INTERRUPTS(CLBR_ANY) call schedule DISABLE_INTERRUPTS(CLBR_ANY) jmp nmi_userspace CFI_ENDPROC #else jmp paranoid_exit
9f1e87ea3 x86: entry_64.S -... |
1503 |
/* Close the CFI region for the #else (shared paranoid_exit) path. */
CFI_ENDPROC
ddeb8f214 x86_64: get rid o... |
1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 |
/*
 * ignore_sysret: load -ENOSYS into %eax and sysret straight back to user
 * mode — NOTE(review): presumably wired to syscall entry points that must
 * be rejected outright; verify against the MSR/IDT setup code.
 * .popsection closes the .kprobes.text section opened above.
 */
#endif END(nmi) ENTRY(ignore_sysret) CFI_STARTPROC mov $-ENOSYS,%eax sysret CFI_ENDPROC END(ignore_sysret) /* * End of kprobes section */ .popsection