Commit 1c083eb2cbdd917149f6acaa55efca129d05c2a9

Authored by Huang, Ying
Committed by Ingo Molnar
1 parent f56d005d30

x86: fix EFI mapping

The patch updates the EFI runtime memory mapping code, making the EFI
areas explicitly executable.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
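
For reference, runtime_code_page_mkexec() after this patch reads roughly as
follows. This is reconstructed from the efi.c hunks below; the early return
when NX is unsupported sits between the two hunks and is assumed unchanged:

static void __init runtime_code_page_mkexec(void)
{
        efi_memory_desc_t *md;
        void *p;

        /* Nothing to do if the CPU does not support the NX bit. */
        if (!(__supported_pte_mask & _PAGE_NX))
                return;

        /* Make the EFI runtime service code area executable. */
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                md = p;

                if (md->type != EFI_RUNTIME_SERVICES_CODE)
                        continue;

                set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
        }
}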

Showing 3 changed files with 43 additions and 40 deletions

arch/x86/kernel/efi.c
@@ -379,11 +379,9 @@
 #endif
 }
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static void __init runtime_code_page_mkexec(void)
 {
         efi_memory_desc_t *md;
-        unsigned long end;
         void *p;
 
         if (!(__supported_pte_mask & _PAGE_NX))
@@ -392,18 +390,13 @@
         /* Make EFI runtime service code area executable */
         for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                 md = p;
-                end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-                if (md->type == EFI_RUNTIME_SERVICES_CODE &&
-                    (end >> PAGE_SHIFT) <= max_pfn_mapped) {
-                        set_memory_x(md->virt_addr, md->num_pages);
-                        set_memory_uc(md->virt_addr, md->num_pages);
-                }
+
+                if (md->type != EFI_RUNTIME_SERVICES_CODE)
+                        continue;
+
+                set_memory_x(md->virt_addr, md->num_pages << EFI_PAGE_SHIFT);
         }
-        __flush_tlb_all();
 }
-#else
-static inline void __init runtime_code_page_mkexec(void) { }
-#endif
 
 /*
  * This function will switch the EFI runtime services to virtual mode.
@@ -417,30 +410,40 @@
 {
         efi_memory_desc_t *md;
         efi_status_t status;
-        unsigned long end;
-        void *p;
+        unsigned long size;
+        u64 end, systab;
+        void *p, *va;
 
         efi.systab = NULL;
         for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
                 md = p;
                 if (!(md->attribute & EFI_MEMORY_RUNTIME))
                         continue;
-                end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-                if ((md->attribute & EFI_MEMORY_WB) &&
-                    ((end >> PAGE_SHIFT) <= max_pfn_mapped))
-                        md->virt_addr = (unsigned long)__va(md->phys_addr);
+
+                size = md->num_pages << EFI_PAGE_SHIFT;
+                end = md->phys_addr + size;
+
+                if ((end >> PAGE_SHIFT) <= max_pfn_mapped)
+                        va = __va(md->phys_addr);
                 else
-                        md->virt_addr = (unsigned long)
-                                efi_ioremap(md->phys_addr,
-                                            md->num_pages << EFI_PAGE_SHIFT);
-                if (!md->virt_addr)
+                        va = efi_ioremap(md->phys_addr, size);
+
+                if (md->attribute & EFI_MEMORY_WB)
+                        set_memory_uc(md->virt_addr, size);
+
+                md->virt_addr = (u64) (unsigned long) va;
+
+                if (!va) {
                         printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
                                (unsigned long long)md->phys_addr);
-                if ((md->phys_addr <= (unsigned long)efi_phys.systab) &&
-                    ((unsigned long)efi_phys.systab < end))
-                        efi.systab = (efi_system_table_t *)(unsigned long)
-                                (md->virt_addr - md->phys_addr +
-                                 (unsigned long)efi_phys.systab);
+                        continue;
+                }
+
+                systab = (u64) (unsigned long) efi_phys.systab;
+                if (md->phys_addr <= systab && systab < end) {
+                        systab += md->virt_addr - md->phys_addr;
+                        efi.systab = (efi_system_table_t *) (unsigned long) systab;
+                }
         }
 
         BUG_ON(!efi.systab);
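
The efi_enter_virtual_mode() rework above splits the old combined test: which
mapping to use now depends only on whether the region ends inside the kernel
direct mapping, while cacheability is adjusted separately with
set_memory_uc(). A minimal sketch of that decision for a single descriptor;
the address and page count are made-up example values, not from the commit:

        /* Hypothetical descriptor: 16 EFI pages (64 KB) at 0x7ff00000. */
        u64 phys = 0x7ff00000ULL;
        unsigned long size = 16UL << EFI_PAGE_SHIFT;
        u64 end = phys + size;
        void *va;

        if ((end >> PAGE_SHIFT) <= max_pfn_mapped)
                va = __va(phys);                /* already direct-mapped */
        else
                va = efi_ioremap(phys, size);   /* use the EFI fixmap slots */
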
arch/x86/kernel/efi_64.c
@@ -54,10 +54,10 @@
                 else
                         set_pte(kpte, __pte((pte_val(*kpte) | _PAGE_NX) & \
                                             __supported_pte_mask));
-                if (level == 4)
-                        start = (start + PMD_SIZE) & PMD_MASK;
-                else
+                if (level == PG_LEVEL_4K)
                         start = (start + PAGE_SIZE) & PAGE_MASK;
+                else
+                        start = (start + PMD_SIZE) & PMD_MASK;
         }
 }
 
@@ -109,23 +109,23 @@
                                 memmap.nr_map * memmap.desc_size);
 }
 
-void __iomem * __init efi_ioremap(unsigned long offset,
-                                  unsigned long size)
+void __iomem * __init efi_ioremap(unsigned long phys_addr, unsigned long size)
 {
         static unsigned pages_mapped;
-        unsigned long last_addr;
         unsigned i, pages;
 
-        last_addr = offset + size - 1;
-        offset &= PAGE_MASK;
-        pages = (PAGE_ALIGN(last_addr) - offset) >> PAGE_SHIFT;
+        /* phys_addr and size must be page aligned */
+        if ((phys_addr & ~PAGE_MASK) || (size & ~PAGE_MASK))
+                return NULL;
+
+        pages = size >> PAGE_SHIFT;
         if (pages_mapped + pages > MAX_EFI_IO_PAGES)
                 return NULL;
 
         for (i = 0; i < pages; i++) {
                 __set_fixmap(FIX_EFI_IO_MAP_FIRST_PAGE - pages_mapped,
-                             offset, PAGE_KERNEL_EXEC_NOCACHE);
-                offset += PAGE_SIZE;
+                             phys_addr, PAGE_KERNEL);
+                phys_addr += PAGE_SIZE;
                 pages_mapped++;
         }
 
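
Note that the reworked efi_ioremap() no longer rounds an arbitrary request to
page boundaries: unaligned input now fails with NULL, as does exhausting the
MAX_EFI_IO_PAGES fixmap budget. A hedged caller sketch, reusing the descriptor
fields and error message from the efi.c hunk above:

        /* num_pages counts EFI_PAGE_SIZE (4 KB) units, so on x86 this size
         * is page aligned by construction. */
        unsigned long size = md->num_pages << EFI_PAGE_SHIFT;
        void __iomem *va = efi_ioremap(md->phys_addr, size);

        if (!va)        /* unaligned request or fixmap slots exhausted */
                printk(KERN_ERR PFX "ioremap of 0x%llX failed!\n",
                       (unsigned long long)md->phys_addr);
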
include/asm-x86/efi.h
@@ -33,7 +33,7 @@
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
         efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 
-#define efi_ioremap(addr, size) ioremap(addr, size)
+#define efi_ioremap(addr, size) ioremap_cache(addr, size)
 
 #else /* !CONFIG_X86_32 */
 
@@ -86,7 +86,7 @@
         efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
                   (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
-extern void *efi_ioremap(unsigned long offset, unsigned long size);
+extern void *efi_ioremap(unsigned long addr, unsigned long size);
 
 #endif /* CONFIG_X86_32 */
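
Taken together, the three files converge on one policy: the 32-bit path
(ioremap_cache) and the 64-bit path (PAGE_KERNEL fixmap entries) both map EFI
areas cacheable and, where NX is supported, non-executable by default, and
execute permission is then granted explicitly, only for
EFI_RUNTIME_SERVICES_CODE, by runtime_code_page_mkexec(). That explicit grant
is what the subject line refers to.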