Commit cb600d2f83c854ec3d6660063e4466431999489b

Authored by Linus Torvalds

Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, mm: Initialize initial_page_table before paravirt jumps
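
This merge brings in a single change: the CONFIG_PARAVIRT hardware_subarch dispatch in arch/x86/kernel/head_32.S is moved so that it runs only after initial_page_table has been built. Paravirt guests therefore enter with the initial page table already populated, and arch/x86/lguest/i386_head.S can drop its duplicated copy of the page-table setup code. As a reading aid, the dispatch logic from the diff below is restated here in plain C. This is a sketch, not kernel code: dispatch_subarch, the stub entry functions, and the printed messages are hypothetical names; only the table layout and the 0x207 boot-protocol check come from the diff.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the real entry points in head_32.S. */
    static void default_entry(void) { puts("default_entry: normal x86/PC"); }
    static void lguest_entry(void)  { puts("lguest_entry: lguest hypervisor"); }
    static void xen_entry(void)     { puts("xen_entry: Xen hypervisor"); }

    /* Mirrors the subarch_entries table in the diff. */
    static void (*const subarch_entries[])(void) = {
        default_entry,      /* 0: normal x86/PC     */
        lguest_entry,       /* 1: lguest hypervisor */
        xen_entry,          /* 2: Xen hypervisor    */
        default_entry,      /* 3: Moorestown MID    */
    };
    #define NUM_SUBARCH_ENTRIES \
        (sizeof(subarch_entries) / sizeof(subarch_entries[0]))

    /* What the CONFIG_PARAVIRT block decides; after this commit it runs
     * only once the initial page table has been set up. */
    static void dispatch_subarch(uint16_t boot_version, uint32_t subarch)
    {
        /* Boot protocol < 2.07 has no hardware_subarch field; only a
         * broken bootloader reports that here, so take the normal path. */
        if (boot_version < 0x207) {
            default_entry();
        } else if (subarch >= NUM_SUBARCH_ENTRIES) {
            /* bad_subarch: the assembly executes ud2a here. */
            puts("bad_subarch: unknown implementation");
        } else {
            subarch_entries[subarch]();
        }
    }

    int main(void)
    {
        dispatch_subarch(0x020a, 2);    /* e.g. booting as a Xen guest */
        return 0;
    }

Note that the commit does not change what the dispatch does, only when it happens: default_entry now sits after the page-table setup, and the non-paravirt build simply jumps to it directly (the new "jmp default_entry" in the #else branch).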

Showing 2 changed files

arch/x86/kernel/head_32.S
... ... @@ -139,39 +139,6 @@
139 139 movl %eax, pa(olpc_ofw_pgd)
140 140 #endif
141 141  
142   -#ifdef CONFIG_PARAVIRT
143   - /* This is can only trip for a broken bootloader... */
144   - cmpw $0x207, pa(boot_params + BP_version)
145   - jb default_entry
146   -
147   - /* Paravirt-compatible boot parameters. Look to see what architecture
148   - we're booting under. */
149   - movl pa(boot_params + BP_hardware_subarch), %eax
150   - cmpl $num_subarch_entries, %eax
151   - jae bad_subarch
152   -
153   - movl pa(subarch_entries)(,%eax,4), %eax
154   - subl $__PAGE_OFFSET, %eax
155   - jmp *%eax
156   -
157   -bad_subarch:
158   -WEAK(lguest_entry)
159   -WEAK(xen_entry)
160   - /* Unknown implementation; there's really
161   - nothing we can do at this point. */
162   - ud2a
163   -
164   - __INITDATA
165   -
166   -subarch_entries:
167   - .long default_entry /* normal x86/PC */
168   - .long lguest_entry /* lguest hypervisor */
169   - .long xen_entry /* Xen hypervisor */
170   - .long default_entry /* Moorestown MID */
171   -num_subarch_entries = (. - subarch_entries) / 4
172   -.previous
173   -#endif /* CONFIG_PARAVIRT */
174   -
175 142 /*
176 143 * Initialize page tables. This creates a PDE and a set of page
177 144 * tables, which are located immediately beyond __brk_base. The variable
... ... @@ -181,7 +148,6 @@
181 148 *
182 149 * Note that the stack is not yet set up!
183 150 */
184   -default_entry:
185 151 #ifdef CONFIG_X86_PAE
186 152  
187 153 /*
... ... @@ -261,7 +227,42 @@
261 227 movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
262 228 movl %eax,pa(initial_page_table+0xffc)
263 229 #endif
264   - jmp 3f
  230 +
  231 +#ifdef CONFIG_PARAVIRT
  232 + /* This is can only trip for a broken bootloader... */
  233 + cmpw $0x207, pa(boot_params + BP_version)
  234 + jb default_entry
  235 +
  236 + /* Paravirt-compatible boot parameters. Look to see what architecture
  237 + we're booting under. */
  238 + movl pa(boot_params + BP_hardware_subarch), %eax
  239 + cmpl $num_subarch_entries, %eax
  240 + jae bad_subarch
  241 +
  242 + movl pa(subarch_entries)(,%eax,4), %eax
  243 + subl $__PAGE_OFFSET, %eax
  244 + jmp *%eax
  245 +
  246 +bad_subarch:
  247 +WEAK(lguest_entry)
  248 +WEAK(xen_entry)
  249 + /* Unknown implementation; there's really
  250 + nothing we can do at this point. */
  251 + ud2a
  252 +
  253 + __INITDATA
  254 +
  255 +subarch_entries:
  256 + .long default_entry /* normal x86/PC */
  257 + .long lguest_entry /* lguest hypervisor */
  258 + .long xen_entry /* Xen hypervisor */
  259 + .long default_entry /* Moorestown MID */
  260 +num_subarch_entries = (. - subarch_entries) / 4
  261 +.previous
  262 +#else
  263 + jmp default_entry
  264 +#endif /* CONFIG_PARAVIRT */
  265 +
265 266 /*
266 267 * Non-boot CPU entry point; entered from trampoline.S
267 268 * We can't lgdt here, because lgdt itself uses a data segment, but
... ... @@ -282,7 +283,7 @@
282 283 movl %eax,%fs
283 284 movl %eax,%gs
284 285 #endif /* CONFIG_SMP */
285   -3:
  286 +default_entry:
286 287  
287 288 /*
288 289 * New page tables may be in 4Mbyte page mode and may
289 290  
... ... @@ -628,13 +629,13 @@
628 629 __PAGE_ALIGNED_BSS
629 630 .align PAGE_SIZE_asm
630 631 #ifdef CONFIG_X86_PAE
631   -ENTRY(initial_pg_pmd)
  632 +initial_pg_pmd:
632 633 .fill 1024*KPMDS,4,0
633 634 #else
634 635 ENTRY(initial_page_table)
635 636 .fill 1024,4,0
636 637 #endif
637   -ENTRY(initial_pg_fixmap)
  638 +initial_pg_fixmap:
638 639 .fill 1024,4,0
639 640 ENTRY(empty_zero_page)
640 641 .fill 4096,1,0
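
The head_32.S hunks above show only the comment and tail of the page-table setup (the fixmap PDE and the relocated dispatch); the full loop is the same code that the lguest diff below deletes. Starting at __brk_base it builds one page table per 4 MiB, stores each PDE at both the identity slot and the PAGE_OFFSET slot of initial_page_table, and stops once _end + MAPPING_BEYOND_END is covered. The non-PAE variant is restated below as a self-contained C simulation; the *_sim names, BRK_BASE_PA, and the attribute values are assumptions for illustration, not kernel symbols.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1u << PAGE_SHIFT)
    #define PAGE_OFFSET     0xC0000000u     /* assumed 3G/1G split            */
    #define PTE_IDENT_ATTR  0x003u          /* present | rw (assumed value)   */
    #define PDE_IDENT_ATTR  0x063u          /* + accessed | dirty (assumed)   */
    #define BRK_BASE_PA     0x01000000u     /* stand-in for pa(__brk_base)    */

    static uint32_t page_dir_sim[1024];         /* models initial_page_table  */
    static uint32_t page_tables_sim[256][1024]; /* models the __brk_base area */

    /* Map physical [0, end_pa) at virtual 0 and at PAGE_OFFSET; end_pa
     * stands for pa(_end) + MAPPING_BEYOND_END. Returns max_pfn_mapped. */
    static uint32_t build_initial_page_table_sim(uint32_t end_pa)
    {
        uint32_t kernel_slot = PAGE_OFFSET >> 22;   /* page_pde_offset / 4    */
        uint32_t pte = PTE_IDENT_ATTR;              /* %eax: phys 0 | attrs   */
        uint32_t i = 0;

        do {
            /* One PDE per page table, stored twice: identity + kernel map.  */
            uint32_t pde = (BRK_BASE_PA + i * PAGE_SIZE) | PDE_IDENT_ATTR;
            page_dir_sim[i] = pde;
            page_dir_sim[kernel_slot + i] = pde;

            /* The "movl $1024,%ecx; 11: stosl" loop: 1024 PTEs = 4 MiB.      */
            for (int j = 0; j < 1024; j++) {
                page_tables_sim[i][j] = pte;
                pte += PAGE_SIZE;
            }
            i++;
        } while (pte < end_pa + PTE_IDENT_ATTR);    /* cmpl %ebp,%eax; jb 10b */

        return pte >> PAGE_SHIFT;                   /* becomes max_pfn_mapped */
    }

    int main(void)
    {
        /* Assume a 16 MiB kernel image plus 1 MiB of page-table slack. */
        uint32_t max_pfn = build_initial_page_table_sim(17u << 20);
        printf("mapped %u pages (up to %u MiB)\n",
               (unsigned)max_pfn, (unsigned)(max_pfn >> 8));
        return 0;
    }

The PAE branch of the real code is structurally the same but writes 8-byte PMD entries into initial_pg_pmd and, as its comment notes, handles the identity mapping by pointing two PGD entries at the first kernel PMD.
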
arch/x86/lguest/i386_head.S
... ... @@ -4,7 +4,6 @@
4 4 #include <asm/asm-offsets.h>
5 5 #include <asm/thread_info.h>
6 6 #include <asm/processor-flags.h>
7   -#include <asm/pgtable.h>
8 7  
9 8 /*G:020
10 9 * Our story starts with the kernel booting into startup_32 in
11 10  
... ... @@ -38,112 +37,8 @@
38 37 /* Set up the initial stack so we can run C code. */
39 38 movl $(init_thread_union+THREAD_SIZE),%esp
40 39  
41   - call init_pagetables
42   -
43 40 /* Jumps are relative: we're running __PAGE_OFFSET too low. */
44 41 jmp lguest_init+__PAGE_OFFSET
45   -
46   -/*
47   - * Initialize page tables. This creates a PDE and a set of page
48   - * tables, which are located immediately beyond __brk_base. The variable
49   - * _brk_end is set up to point to the first "safe" location.
50   - * Mappings are created both at virtual address 0 (identity mapping)
51   - * and PAGE_OFFSET for up to _end.
52   - *
53   - * FIXME: This code is taken verbatim from arch/x86/kernel/head_32.S: they
54   - * don't have a stack at this point, so we can't just use call and ret.
55   - */
56   -init_pagetables:
57   -#if PTRS_PER_PMD > 1
58   -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
59   -#else
60   -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
61   -#endif
62   -#define pa(X) ((X) - __PAGE_OFFSET)
63   -
64   -/* Enough space to fit pagetables for the low memory linear map */
65   -MAPPING_BEYOND_END = \
66   - PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
67   -#ifdef CONFIG_X86_PAE
68   -
69   - /*
70   - * In PAE mode initial_page_table is statically defined to contain
71   - * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
72   - * entries). The identity mapping is handled by pointing two PGD entries
73   - * to the first kernel PMD.
74   - *
75   - * Note the upper half of each PMD or PTE are always zero at this stage.
76   - */
77   -
78   -#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
79   -
80   - xorl %ebx,%ebx /* %ebx is kept at zero */
81   -
82   - movl $pa(__brk_base), %edi
83   - movl $pa(initial_pg_pmd), %edx
84   - movl $PTE_IDENT_ATTR, %eax
85   -10:
86   - leal PDE_IDENT_ATTR(%edi),%ecx /* Create PMD entry */
87   - movl %ecx,(%edx) /* Store PMD entry */
88   - /* Upper half already zero */
89   - addl $8,%edx
90   - movl $512,%ecx
91   -11:
92   - stosl
93   - xchgl %eax,%ebx
94   - stosl
95   - xchgl %eax,%ebx
96   - addl $0x1000,%eax
97   - loop 11b
98   -
99   - /*
100   - * End condition: we must map up to the end + MAPPING_BEYOND_END.
101   - */
102   - movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
103   - cmpl %ebp,%eax
104   - jb 10b
105   -1:
106   - addl $__PAGE_OFFSET, %edi
107   - movl %edi, pa(_brk_end)
108   - shrl $12, %eax
109   - movl %eax, pa(max_pfn_mapped)
110   -
111   - /* Do early initialization of the fixmap area */
112   - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
113   - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
114   -#else /* Not PAE */
115   -
116   -page_pde_offset = (__PAGE_OFFSET >> 20);
117   -
118   - movl $pa(__brk_base), %edi
119   - movl $pa(initial_page_table), %edx
120   - movl $PTE_IDENT_ATTR, %eax
121   -10:
122   - leal PDE_IDENT_ATTR(%edi),%ecx /* Create PDE entry */
123   - movl %ecx,(%edx) /* Store identity PDE entry */
124   - movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */
125   - addl $4,%edx
126   - movl $1024, %ecx
127   -11:
128   - stosl
129   - addl $0x1000,%eax
130   - loop 11b
131   - /*
132   - * End condition: we must map up to the end + MAPPING_BEYOND_END.
133   - */
134   - movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
135   - cmpl %ebp,%eax
136   - jb 10b
137   - addl $__PAGE_OFFSET, %edi
138   - movl %edi, pa(_brk_end)
139   - shrl $12, %eax
140   - movl %eax, pa(max_pfn_mapped)
141   -
142   - /* Do early initialization of the fixmap area */
143   - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
144   - movl %eax,pa(initial_page_table+0xffc)
145   -#endif
146   - ret
147 42  
148 43 /*G:055
149 44 * We create a macro which puts the assembler code between lgstart_ and lgend_
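
For reference, the MAPPING_BEYOND_END arithmetic spelled out in the deleted lguest copy above works out as follows for the usual 3G/1G split: (1<<32) - __PAGE_OFFSET leaves 1 GiB of lowmem to cover, i.e. 262144 pages. Without PAE that needs 262144 / PTRS_PER_PGD(1024) = 256 page-table pages, so 1 MiB is reserved beyond _end; with PAE it is 262144 / PTRS_PER_PMD(512) + PTRS_PER_PGD(4) = 516 pages, roughly 2 MiB. The snippet below merely evaluates that arithmetic; the constants are assumptions for a VMSPLIT_3G configuration, not read from any kernel config.

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PAGE_OFFSET  0xC0000000u        /* assumed VMSPLIT_3G */

    /* PAGE_TABLE_SIZE(pages), as defined in the deleted block above. */
    static unsigned page_table_size(unsigned pages, int pae)
    {
        return pae ? pages / 512 + 4    /* pages/PTRS_PER_PMD + PTRS_PER_PGD */
                   : pages / 1024;      /* pages/PTRS_PER_PGD                */
    }

    int main(void)
    {
        /* ((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT, in 32-bit arithmetic. */
        unsigned lowmem_pages = (0u - PAGE_OFFSET) >> PAGE_SHIFT;

        printf("lowmem pages to map:         %u\n", lowmem_pages);
        printf("MAPPING_BEYOND_END, non-PAE: %u KiB\n",
               (page_table_size(lowmem_pages, 0) << PAGE_SHIFT) >> 10);
        printf("MAPPING_BEYOND_END, PAE:     %u KiB\n",
               (page_table_size(lowmem_pages, 1) << PAGE_SHIFT) >> 10);
        return 0;
    }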