Commit 6ad916581181a105d7832a7dec9e1eb58f7a1621

Authored by Keith Mannthey
Committed by Andi Kleen
1 parent abf0f10948

[PATCH] x86_64 kernel mapping fix

Fix for the x86_64 kernel mapping code.  Without this patch the update path
only initializes one pmd_page worth of memory and tramples any existing
entries on it.  The calling convention for phys_pmd_init and phys_pud_init is
now to always pass a [pmd/pud] page, not an offset within a page.

Signed-off-by: Keith Mannthey <kmannth@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>

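For illustration only (not part of the commit): a minimal userspace sketch of
why the new calling convention matters.  The constants and the fake entry
format are stand-ins, not the kernel's.  The point is that phys_pmd_init now
receives the whole pmd page and computes its own starting slot from the
address, skipping slots that are already populated, rather than looping from
slot 0 of a pre-offset pointer and overwriting whatever is there.

    #include <stdio.h>

    #define PTRS_PER_PMD 8                  /* stand-in; 512 on real x86_64 */
    #define PMD_SHIFT 21                    /* 2MB per pmd entry, as on x86_64 */
    #define PMD_SIZE (1UL << PMD_SHIFT)

    /* stand-in for the kernel's pmd_index(): which slot covers this address */
    static unsigned long pmd_index(unsigned long address)
    {
            return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
    }

    /* new convention: takes the whole pmd page, derives the index itself */
    static void phys_pmd_init(unsigned long *pmd_page, unsigned long address,
                              unsigned long end)
    {
            unsigned long i = pmd_index(address);

            for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                    unsigned long *pmd = pmd_page + pmd_index(address);

                    if (address >= end)
                            break;
                    if (*pmd)               /* slot already mapped: keep it */
                            continue;
                    *pmd = address | 1;     /* fake "present" large-page entry */
            }
    }

    int main(void)
    {
            unsigned long page[PTRS_PER_PMD] = { 0x1 }; /* slot 0 already in use */
            int i;

            /* map [2*PMD_SIZE, 5*PMD_SIZE); slot 0 must survive */
            phys_pmd_init(page, 2 * PMD_SIZE, 5 * PMD_SIZE);
            for (i = 0; i < PTRS_PER_PMD; i++)
                    printf("pmd[%d] = %#lx\n", i, page[i]);
            return 0;
    }

Running this leaves pmd[0] untouched and fills only pmd[2]..pmd[4].  Under the
old convention the loop ran i = 0..PTRS_PER_PMD-1 from whatever pointer it was
handed and wrote every slot unconditionally.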
Showing 1 changed file with 26 additions and 25 deletions

arch/x86_64/mm/init.c
@@ -250,12 +250,13 @@
 }
 
 static void __meminit
-phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 {
-        int i;
+        int i = pmd_index(address);
 
-        for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
+        for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
                 unsigned long entry;
+                pmd_t *pmd = pmd_page + pmd_index(address);
 
                 if (address >= end) {
                         if (!after_bootmem)
@@ -263,6 +264,10 @@
                                 set_pmd(pmd, __pmd(0));
                         break;
                 }
+
+                if (pmd_val(*pmd))
+                        continue;
+
                 entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
                 entry &= __supported_pte_mask;
                 set_pmd(pmd, __pmd(entry));
@@ -272,45 +277,41 @@
 static void __meminit
 phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
 {
-        pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
-
-        if (pmd_none(*pmd)) {
-                spin_lock(&init_mm.page_table_lock);
-                phys_pmd_init(pmd, address, end);
-                spin_unlock(&init_mm.page_table_lock);
-                __flush_tlb_all();
-        }
+        pmd_t *pmd = pmd_offset(pud,0);
+        spin_lock(&init_mm.page_table_lock);
+        phys_pmd_init(pmd, address, end);
+        spin_unlock(&init_mm.page_table_lock);
+        __flush_tlb_all();
 }
 
-static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 {
-        long i = pud_index(address);
+        int i = pud_index(addr);
 
-        pud = pud + i;
 
-        if (after_bootmem && pud_val(*pud)) {
-                phys_pmd_update(pud, address, end);
-                return;
-        }
-
-        for (; i < PTRS_PER_PUD; pud++, i++) {
+        for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
                 int map;
-                unsigned long paddr, pmd_phys;
+                unsigned long pmd_phys;
+                pud_t *pud = pud_page + pud_index(addr);
                 pmd_t *pmd;
 
-                paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
-                if (paddr >= end)
+                if (addr >= end)
                         break;
 
-                if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
+                if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
                         set_pud(pud, __pud(0));
                         continue;
                 }
 
+                if (pud_val(*pud)) {
+                        phys_pmd_update(pud, addr, end);
+                        continue;
+                }
+
                 pmd = alloc_low_page(&map, &pmd_phys);
                 spin_lock(&init_mm.page_table_lock);
                 set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-                phys_pmd_init(pmd, paddr, end);
+                phys_pmd_init(pmd, addr, end);
                 spin_unlock(&init_mm.page_table_lock);
                 unmap_low_page(map);
         }
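One detail of the rewritten phys_pud_init loop worth calling out: the
increment addr = (addr & PUD_MASK) + PUD_SIZE first rounds addr down to its
pud (1GB) boundary and then steps one pud forward, so an unaligned start
address only affects the first pass and every later pass lands exactly on a
boundary.  A small sketch of just that arithmetic (assumes a 64-bit unsigned
long; the start and end values are arbitrary examples):

    #include <stdio.h>

    #define PUD_SHIFT 30                    /* 1GB per pud entry on x86_64 */
    #define PUD_SIZE (1UL << PUD_SHIFT)
    #define PUD_MASK (~(PUD_SIZE - 1))

    int main(void)
    {
            /* example only: start 1MB past the 1GB boundary, end at 4GB */
            unsigned long addr = (1UL << PUD_SHIFT) + 0x100000;
            unsigned long end = 4UL << PUD_SHIFT;

            for (; addr < end; addr = (addr & PUD_MASK) + PUD_SIZE)
                    printf("this pass covers the pud at %#lx\n", addr & PUD_MASK);
            return 0;
    }

This prints the pud bases 0x40000000, 0x80000000 and 0xc0000000: three
iterations, one per 1GB region, despite the unaligned starting address.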