Blame view
arch/x86/kernel/head_64.S
10.8 KB
1da177e4c Linux-2.6.12-rc2 |
1 2 3 4 5 6 7 |
/* * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit * * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE * Copyright (C) 2000 Pavel Machek <pavel@suse.cz> * Copyright (C) 2000 Karsten Keil <kkeil@suse.de> * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de> |
1ab60e0f7 [PATCH] x86-64: R... |
8 |
* Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com> |
1da177e4c Linux-2.6.12-rc2 |
9 10 11 12 13 |
*/ #include <linux/linkage.h> #include <linux/threads.h> |
f6c2e3330 [PATCH] x86_64: U... |
14 |
#include <linux/init.h> |
1da177e4c Linux-2.6.12-rc2 |
15 |
#include <asm/segment.h> |
67dcbb6bc [PATCH] x86-64: C... |
16 |
#include <asm/pgtable.h> |
1da177e4c Linux-2.6.12-rc2 |
17 18 19 |
#include <asm/page.h> #include <asm/msr.h> #include <asm/cache.h> |
369101da7 x86: head_64.S cl... |
20 |
#include <asm/processor-flags.h> |
b12d8db8f x86: make pda a p... |
21 |
#include <asm/percpu.h> |
1ab60e0f7 [PATCH] x86-64: R... |
22 |
|
49a697871 x86: turn privile... |
23 24 25 26 27 28 |
#ifdef CONFIG_PARAVIRT #include <asm/asm-offsets.h> #include <asm/paravirt.h> #else #define GET_CR2_INTO_RCX movq %cr2, %rcx #endif |
3ad2f3fbb tree-wide: Assort... |
29 |
/* we are not able to switch in one step to the final KERNEL ADDRESS SPACE |
1ab60e0f7 [PATCH] x86-64: R... |
30 31 |
* because we need identity-mapped pages. * |
1da177e4c Linux-2.6.12-rc2 |
32 |
*/ |
a6523748b paravirt/x86, 64-... |
33 34 35 36 37 38 |
#define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET) L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET) L4_START_KERNEL = pgd_index(__START_KERNEL_map) L3_START_KERNEL = pud_index(__START_KERNEL_map) |
1da177e4c Linux-2.6.12-rc2 |
39 |
.text |
4ae59b916 x86: convert to u... |
40 |
__HEAD |
1ab60e0f7 [PATCH] x86-64: R... |
41 42 43 |
.code64 .globl startup_64 startup_64: |
1da177e4c Linux-2.6.12-rc2 |
44 |
/* |
1ab60e0f7 [PATCH] x86-64: R... |
45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
* At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded an identity mapped page table * for us. These identity mapped page tables map all of the * kernel pages and possibly all of memory. * * %esi holds a physical pointer to real_mode_data. * * We come here either directly from a 64bit bootloader, or from * arch/x86_64/boot/compressed/head.S. * * We only come here initially at boot; nothing else comes here. * * Since we may be loaded at an address different from what we were * compiled to run at, we first fix up the physical addresses in our page * tables and then reload them. |
1da177e4c Linux-2.6.12-rc2 |
60 |
*/ |
1ab60e0f7 [PATCH] x86-64: R... |
61 62 |
/* Compute the delta between the address I am compiled to run at and the * address I am actually running at. |
1da177e4c Linux-2.6.12-rc2 |
63 |
*/ |
1ab60e0f7 [PATCH] x86-64: R... |
64 65 66 67 68 |
leaq _text(%rip), %rbp subq $_text - __START_KERNEL_map, %rbp /* Is the address not 2M aligned? */ movq %rbp, %rax |
31422c51e x86: rename LARGE... |
69 |
andl $~PMD_PAGE_MASK, %eax |
1ab60e0f7 [PATCH] x86-64: R... |
70 71 72 73 74 75 76 77 78 79 |
testl %eax, %eax jnz bad_address /* Is the address too large? */ leaq _text(%rip), %rdx movq $PGDIR_SIZE, %rax cmpq %rax, %rdx jae bad_address /* Fixup the physical addresses in the page table |
1da177e4c Linux-2.6.12-rc2 |
80 |
*/ |
1ab60e0f7 [PATCH] x86-64: R... |
81 |
addq %rbp, init_level4_pgt + 0(%rip) |
a6523748b paravirt/x86, 64-... |
82 83 |
addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip) addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip) |
1ab60e0f7 [PATCH] x86-64: R... |
84 85 |
addq %rbp, level3_ident_pgt + 0(%rip) |
b1c931e39 x86: initial fixm... |
86 |
|
1ab60e0f7 [PATCH] x86-64: R... |
87 |
addq %rbp, level3_kernel_pgt + (510*8)(%rip) |
b1c931e39 x86: initial fixm... |
88 89 90 |
addq %rbp, level3_kernel_pgt + (511*8)(%rip) addq %rbp, level2_fixmap_pgt + (506*8)(%rip) |
1ab60e0f7 [PATCH] x86-64: R... |
91 92 93 |
/* Add an Identity mapping if I am above 1G */ leaq _text(%rip), %rdi |
31422c51e x86: rename LARGE... |
94 |
andq $PMD_PAGE_MASK, %rdi |
1ab60e0f7 [PATCH] x86-64: R... |
95 96 97 98 99 100 101 102 103 104 105 106 107 |
movq %rdi, %rax shrq $PUD_SHIFT, %rax andq $(PTRS_PER_PUD - 1), %rax jz ident_complete leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx leaq level3_ident_pgt(%rip), %rbx movq %rdx, 0(%rbx, %rax, 8) movq %rdi, %rax shrq $PMD_SHIFT, %rax andq $(PTRS_PER_PMD - 1), %rax |
b2bc27314 x86, cpa: rename ... |
108 |
leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx |
1ab60e0f7 [PATCH] x86-64: R... |
109 110 111 |
leaq level2_spare_pgt(%rip), %rbx movq %rdx, 0(%rbx, %rax, 8) ident_complete: |
31eedd823 x86: zap invalid ... |
112 113 114 115 116 |
/* * Fixup the kernel text+data virtual addresses. Note that * we might write invalid pmds, when the kernel is relocated * cleanup_highmap() fixes this up along with the mappings * beyond _end. |
1ab60e0f7 [PATCH] x86-64: R... |
117 |
*/ |
31eedd823 x86: zap invalid ... |
118 |
|
1ab60e0f7 [PATCH] x86-64: R... |
119 120 121 122 123 124 125 126 127 128 129 130 131 |
leaq level2_kernel_pgt(%rip), %rdi leaq 4096(%rdi), %r8 /* See if it is a valid page table entry */ 1: testq $1, 0(%rdi) jz 2f addq %rbp, 0(%rdi) /* Go to the next page */ 2: addq $8, %rdi cmp %r8, %rdi jne 1b /* Fixup phys_base */ addq %rbp, phys_base(%rip) |
1da177e4c Linux-2.6.12-rc2 |
132 |
|
4822b7fc6 x86, trampoline: ... |
133 |
/* Fixup trampoline */ |
1ab60e0f7 [PATCH] x86-64: R... |
134 135 |
addq %rbp, trampoline_level4_pgt + 0(%rip) addq %rbp, trampoline_level4_pgt + (511*8)(%rip) |
1da177e4c Linux-2.6.12-rc2 |
136 |
|
1ab60e0f7 [PATCH] x86-64: R... |
137 138 139 |
/* Due to ENTRY(), sometimes the empty space gets filled with * zeros. Better to take a jmp than to rely on empty space being * filled with 0x90 (nop) |
1da177e4c Linux-2.6.12-rc2 |
140 |
*/ |
1ab60e0f7 [PATCH] x86-64: R... |
141 |
jmp secondary_startup_64 |
90b1c2085 [PATCH] x86-64: 6... |
142 |
ENTRY(secondary_startup_64) |
1ab60e0f7 [PATCH] x86-64: R... |
143 144 145 146 147 148 149 150 151 152 153 154 |
/* * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0, * and someone has loaded a mapped page table. * * %esi holds a physical pointer to real_mode_data. * * We come here either from startup_64 (using physical addresses) * or from trampoline.S (using virtual addresses). * * Using virtual addresses from trampoline.S removes the need * to have any identity mapped pages in the kernel page table * after the boot processor executes this code. |
1da177e4c Linux-2.6.12-rc2 |
155 156 157 |
*/ /* Enable PAE mode and PGE */ |
05139d8fb x86: head_64.S cl... |
158 |
movl $(X86_CR4_PAE | X86_CR4_PGE), %eax |
1da177e4c Linux-2.6.12-rc2 |
159 160 161 |
movq %rax, %cr4 /* Setup early boot stage 4 level pagetables. */ |
cfd243d4a [PATCH] x86-64: R... |
162 |
movq $(init_level4_pgt - __START_KERNEL_map), %rax |
1ab60e0f7 [PATCH] x86-64: R... |
163 |
addq phys_base(%rip), %rax |
1da177e4c Linux-2.6.12-rc2 |
164 |
movq %rax, %cr3 |
1ab60e0f7 [PATCH] x86-64: R... |
165 166 167 168 |
/* Ensure I am executing from virtual addresses */ movq $1f, %rax jmp *%rax 1: |
1da177e4c Linux-2.6.12-rc2 |
169 170 171 172 173 174 175 176 |
/* Check if nx is implemented */ movl $0x80000001, %eax cpuid movl %edx,%edi /* Setup EFER (Extended Feature Enable Register) */ movl $MSR_EFER, %ecx rdmsr |
1ab60e0f7 [PATCH] x86-64: R... |
177 178 |
btsl $_EFER_SCE, %eax /* Enable System Call */ btl $20,%edi /* No Execute supported? */ |
1da177e4c Linux-2.6.12-rc2 |
179 180 |
jnc 1f btsl $_EFER_NX, %eax |
1ab60e0f7 [PATCH] x86-64: R... |
181 |
1: wrmsr /* Make changes effective */ |
1da177e4c Linux-2.6.12-rc2 |
182 183 |
/* Setup cr0 */ |
369101da7 x86: head_64.S cl... |
184 185 186 187 |
#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \ X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \ X86_CR0_PG) movl $CR0_STATE, %eax |
1da177e4c Linux-2.6.12-rc2 |
188 189 190 191 |
/* Make changes effective */ movq %rax, %cr0 /* Setup a boot time stack */ |
9cf4f298e x86: use stack_st... |
192 |
movq stack_start(%rip),%rsp |
1da177e4c Linux-2.6.12-rc2 |
193 194 195 196 197 198 199 200 201 202 203 |
/* zero EFLAGS after setting rsp */ pushq $0 popfq /* * We must switch to a new descriptor in kernel space for the GDT * because soon the kernel won't have access anymore to the userspace * addresses where we're currently running on. We have to do that here * because in 32bit we couldn't load a 64bit linear address. */ |
a939098af x86: move x86_64 ... |
204 |
lgdt early_gdt_descr(%rip) |
1da177e4c Linux-2.6.12-rc2 |
205 |
|
8ec6993d9 x86, 64-bit: Set ... |
206 207 |
/* set up data segments */ xorl %eax,%eax |
ffb601756 [PATCH] x86-64: x... |
208 209 210 211 212 213 214 215 216 217 218 |
movl %eax,%ds movl %eax,%ss movl %eax,%es /* * We don't really need to load %fs or %gs, but load them anyway * to kill any stale realmode selectors. This allows execution * under VT hardware. */ movl %eax,%fs movl %eax,%gs |
f32ff5388 x86: load pointer... |
219 220 |
/* Set up %gs. * |
947e76cdc x86: move stack_c... |
221 222 223 224 |
* The base of %gs always points to the bottom of the irqstack * union. If the stack protector canary is enabled, it is * located at %gs:40. Note that, on SMP, the boot cpu uses * init data section till per cpu areas are set up. |
f32ff5388 x86: load pointer... |
225 |
*/ |
1da177e4c Linux-2.6.12-rc2 |
226 |
movl $MSR_GS_BASE,%ecx |
650fb4393 x86-64: Simplify ... |
227 228 |
movl initial_gs(%rip),%eax movl initial_gs+4(%rip),%edx |
1da177e4c Linux-2.6.12-rc2 |
229 |
wrmsr |
1da177e4c Linux-2.6.12-rc2 |
230 231 232 233 234 235 |
/* esi is pointer to real mode structure with interesting info. pass it to C */ movl %esi, %edi /* Finally jump to run C code and to be on real kernel address * Since we are running on identity-mapped space we have to jump |
26374c7b7 [PATCH] Reload CS... |
236 237 238 |
* to the full 64bit address, this is only possible as indirect * jump. In addition we need to ensure %cs is set so we make this * a far return. |
1da177e4c Linux-2.6.12-rc2 |
239 240 |
*/ movq initial_code(%rip),%rax |
26374c7b7 [PATCH] Reload CS... |
241 242 243 244 |
pushq $0 # fake return address to stop unwinder pushq $__KERNEL_CS # set correct cs pushq %rax # target address in negative space lretq |
1da177e4c Linux-2.6.12-rc2 |
245 |
|
e57113bc1 [PATCH] x86_64: m... |
246 |
/* SMP bootup changes these two */ |
da5968ae3 x86: fix section ... |
247 |
__REFDATA |
e57113bc1 [PATCH] x86_64: m... |
248 |
.align 8 |
f1fbabb31 x86: fix 64-bit s... |
249 |
ENTRY(initial_code) |
1da177e4c Linux-2.6.12-rc2 |
250 |
.quad x86_64_start_kernel |
f32ff5388 x86: load pointer... |
251 |
ENTRY(initial_gs) |
2add8e235 x86: use linker t... |
252 |
.quad INIT_PER_CPU_VAR(irq_stack_union) |
f1fbabb31 x86: fix 64-bit s... |
253 |
|
9cf4f298e x86: use stack_st... |
254 |
ENTRY(stack_start) |
1da177e4c Linux-2.6.12-rc2 |
255 |
.quad init_thread_union+THREAD_SIZE-8 |
9cf4f298e x86: use stack_st... |
256 |
.word 0 |
b9af7c0d4 x86-64: preserve ... |
257 |
__FINITDATA |
1da177e4c Linux-2.6.12-rc2 |
258 |
|
1ab60e0f7 [PATCH] x86-64: R... |
259 260 |
bad_address: jmp bad_address |
41bd4eac7 x86: move early e... |
261 |
.section ".init.text","ax" |
076f9776f x86: make early p... |
262 |
#ifdef CONFIG_EARLY_PRINTK |
8866cd9dc x86: early_idt_ha... |
263 264 |
.globl early_idt_handlers early_idt_handlers: |
749c970ae x86: replace earl... |
265 266 267 268 269 270 |
i = 0 .rept NUM_EXCEPTION_VECTORS movl $i, %esi jmp early_idt_handler i = i + 1 .endr |
076f9776f x86: make early p... |
271 |
#endif |
8866cd9dc x86: early_idt_ha... |
272 |
|
1da177e4c Linux-2.6.12-rc2 |
273 |
ENTRY(early_idt_handler) |
076f9776f x86: make early p... |
274 |
#ifdef CONFIG_EARLY_PRINTK |
b957591fe [PATCH] x86_64: D... |
275 276 277 |
cmpl $2,early_recursion_flag(%rip) jz 1f incl early_recursion_flag(%rip) |
49a697871 x86: turn privile... |
278 |
GET_CR2_INTO_RCX |
8866cd9dc x86: early_idt_ha... |
279 280 281 282 283 284 285 286 287 288 289 290 291 292 |
movq %rcx,%r9 xorl %r8d,%r8d # zero for error code movl %esi,%ecx # get vector number # Test %ecx against mask of vectors that push error code. cmpl $31,%ecx ja 0f movl $1,%eax salq %cl,%rax testl $0x27d00,%eax je 0f popq %r8 # get error code 0: movq 0(%rsp),%rcx # get ip movq 8(%rsp),%rdx # get cs xorl %eax,%eax |
1da177e4c Linux-2.6.12-rc2 |
293 294 |
leaq early_idt_msg(%rip),%rdi call early_printk |
b957591fe [PATCH] x86_64: D... |
295 296 297 |
cmpl $2,early_recursion_flag(%rip) jz 1f call dump_stack |
6574ffd74 [PATCH] x86_64: R... |
298 299 |
#ifdef CONFIG_KALLSYMS leaq early_idt_ripmsg(%rip),%rdi |
7aed55d10 x86: fix RIP prin... |
300 |
movq 0(%rsp),%rsi # get rip again |
6574ffd74 [PATCH] x86_64: R... |
301 302 |
call __print_symbol #endif |
076f9776f x86: make early p... |
303 |
#endif /* EARLY_PRINTK */ |
1da177e4c Linux-2.6.12-rc2 |
304 305 |
1: hlt jmp 1b |
076f9776f x86: make early p... |
306 307 |
#ifdef CONFIG_EARLY_PRINTK |
b957591fe [PATCH] x86_64: D... |
308 309 |
early_recursion_flag: .long 0 |
1da177e4c Linux-2.6.12-rc2 |
310 311 |
early_idt_msg: |
8866cd9dc x86: early_idt_ha... |
312 313 |
.asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx " |
6574ffd74 [PATCH] x86_64: R... |
314 315 316 |
early_idt_ripmsg: .asciz "RIP %s " |
076f9776f x86: make early p... |
317 |
#endif /* CONFIG_EARLY_PRINTK */ |
41bd4eac7 x86: move early e... |
318 |
.previous |
1da177e4c Linux-2.6.12-rc2 |
319 |
|
f0cf5d1a3 [PATCH] x86_64: e... |
320 |
#define NEXT_PAGE(name) \ |
67dcbb6bc [PATCH] x86-64: C... |
321 |
.balign PAGE_SIZE; \ |
f0cf5d1a3 [PATCH] x86_64: e... |
322 |
ENTRY(name) |
67dcbb6bc [PATCH] x86-64: C... |
323 |
/* Automate the creation of 1 to 1 mapping pmd entries */ |
0e192b99d x86: head_64.S cl... |
324 325 326 327 328 |
#define PMDS(START, PERM, COUNT) \ i = 0 ; \ .rept (COUNT) ; \ .quad (START) + (i << PMD_SHIFT) + (PERM) ; \ i = i + 1 ; \ |
67dcbb6bc [PATCH] x86-64: C... |
329 |
.endr |
b9af7c0d4 x86-64: preserve ... |
330 |
.data |
cfd243d4a [PATCH] x86-64: R... |
331 332 333 334 335 336 |
/* * This default setting generates an ident mapping at address 0x100000 * and a mapping for the kernel that precisely maps virtual address * 0xffffffff80000000 to physical address 0x000000. (always using * 2Mbyte large pages provided by PAE mode) */ |
f0cf5d1a3 [PATCH] x86_64: e... |
337 |
NEXT_PAGE(init_level4_pgt) |
cfd243d4a [PATCH] x86-64: R... |
338 |
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE |
a6523748b paravirt/x86, 64-... |
339 |
.org init_level4_pgt + L4_PAGE_OFFSET*8, 0 |
cfd243d4a [PATCH] x86-64: R... |
340 |
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE |
a6523748b paravirt/x86, 64-... |
341 |
.org init_level4_pgt + L4_START_KERNEL*8, 0 |
cfd243d4a [PATCH] x86-64: R... |
342 343 |
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE |
1da177e4c Linux-2.6.12-rc2 |
344 |
|
f0cf5d1a3 [PATCH] x86_64: e... |
345 |
NEXT_PAGE(level3_ident_pgt) |
67dcbb6bc [PATCH] x86-64: C... |
346 |
.quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE |
1da177e4c Linux-2.6.12-rc2 |
347 |
.fill 511,8,0 |
f0cf5d1a3 [PATCH] x86_64: e... |
348 |
NEXT_PAGE(level3_kernel_pgt) |
a6523748b paravirt/x86, 64-... |
349 |
.fill L3_START_KERNEL,8,0 |
1da177e4c Linux-2.6.12-rc2 |
350 |
/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */ |
67dcbb6bc [PATCH] x86-64: C... |
351 |
.quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE |
b1c931e39 x86: initial fixm... |
352 353 354 |
.quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE NEXT_PAGE(level2_fixmap_pgt) |
6596f2422 Revert "x86_64: t... |
355 356 357 358 359 360 |
.fill 506,8,0 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ .fill 5,8,0 NEXT_PAGE(level1_fixmap_pgt) |
b1c931e39 x86: initial fixm... |
361 |
.fill 512,8,0 |
1da177e4c Linux-2.6.12-rc2 |
362 |
|
f0cf5d1a3 [PATCH] x86_64: e... |
363 |
NEXT_PAGE(level2_ident_pgt) |
67dcbb6bc [PATCH] x86-64: C... |
364 365 366 |
/* Since I easily can, map the first 1G. * Don't set NX because code runs from these pages. */ |
b2bc27314 x86, cpa: rename ... |
367 |
PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) |
1ab60e0f7 [PATCH] x86-64: R... |
368 |
|
f0cf5d1a3 [PATCH] x86_64: e... |
369 |
NEXT_PAGE(level2_kernel_pgt) |
88f3aec7a x86: fix spontane... |
370 |
/* |
85eb69a16 x86: increase the... |
371 |
* 512 MB kernel mapping. We spend a full page on this pagetable |
88f3aec7a x86: fix spontane... |
372 373 374 375 |
* anyway. * * The kernel code+data+bss must not be bigger than that. * |
85eb69a16 x86: increase the... |
376 |
* (NOTE: at +512MB starts the module area, see MODULES_VADDR. |
88f3aec7a x86: fix spontane... |
377 378 379 |
* If you want to increase this then increase MODULES_VADDR * too.) */ |
8490638cf x86: always set _... |
380 |
PMDS(0, __PAGE_KERNEL_LARGE_EXEC, |
d4afe4141 x86: rename KERNE... |
381 |
KERNEL_IMAGE_SIZE/PMD_SIZE) |
1da177e4c Linux-2.6.12-rc2 |
382 |
|
1ab60e0f7 [PATCH] x86-64: R... |
383 |
NEXT_PAGE(level2_spare_pgt) |
88f3aec7a x86: fix spontane... |
384 |
.fill 512, 8, 0 |
1ab60e0f7 [PATCH] x86-64: R... |
385 |
|
67dcbb6bc [PATCH] x86-64: C... |
386 |
#undef PMDS |
f0cf5d1a3 [PATCH] x86_64: e... |
387 |
#undef NEXT_PAGE |
1da177e4c Linux-2.6.12-rc2 |
388 |
|
f0cf5d1a3 [PATCH] x86_64: e... |
389 |
.data |
1da177e4c Linux-2.6.12-rc2 |
390 |
.align 16 |
a939098af x86: move x86_64 ... |
391 392 393 |
.globl early_gdt_descr early_gdt_descr: .word GDT_ENTRIES*8-1 |
3e5d8f978 x86: make percpu ... |
394 |
early_gdt_descr_base: |
2add8e235 x86: use linker t... |
395 |
.quad INIT_PER_CPU_VAR(gdt_page) |
1da177e4c Linux-2.6.12-rc2 |
396 |
|
1ab60e0f7 [PATCH] x86-64: R... |
397 398 399 |
ENTRY(phys_base) /* This must match the first entry in level2_kernel_pgt */ .quad 0x0000000000000000 |
8c5e5ac32 xen64: add xen-he... |
400 |
#include "../../x86/xen/xen-head.S" |
1da177e4c Linux-2.6.12-rc2 |
401 |
|
e57113bc1 [PATCH] x86_64: m... |
402 403 404 |
.section .bss, "aw", @nobits .align L1_CACHE_BYTES ENTRY(idt_table) |
5e112ae23 x86: head_64.S - ... |
405 |
.skip IDT_ENTRIES * 16 |
1da177e4c Linux-2.6.12-rc2 |
406 |
|
228bdaa95 x86: Keep current... |
407 408 409 |
.align L1_CACHE_BYTES ENTRY(nmi_idt_table) .skip IDT_ENTRIES * 16 |
02b7da37f Use macros for .b... |
410 |
__PAGE_ALIGNED_BSS |
e57113bc1 [PATCH] x86_64: m... |
411 412 413 |
.align PAGE_SIZE ENTRY(empty_zero_page) .skip PAGE_SIZE |