Commit fc8d782677f163dee76427fdd8a92bebd2b50b23
Committed by: H. Peter Anvin
Parent: 05a476b6e3
x86: Use __pa_symbol instead of __pa on C visible symbols
When I made an attempt at separating __pa_symbol and __pa I found a number of cases where __pa was used on an obvious symbol. I also caught one non-obvious case: _brk_start and _brk_end are based on the address of __brk_base, which is a C visible symbol.

In mark_rodata_ro I was able to reduce the overhead of kernel-symbol-to-virtual-memory translation by using __va(__pa_symbol()) in place of page_address(virt_to_page()).

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Link: http://lkml.kernel.org/r/20121116215640.8521.80483.stgit@ahduyck-cp1.jf.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
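For context on why this matters, here is a sketch of the two translations on x86_64. This is an illustrative simplification, not the literal kernel source: __START_KERNEL_map, PAGE_OFFSET and phys_base are the real x86_64 symbols, but the bodies are condensed. __pa() has to accept addresses from either the direct (linear) mapping or the kernel text mapping, so it must test which range it was given; __pa_symbol() may assume the kernel text mapping, and the range test disappears.

/*
 * Illustrative sketch, not the literal kernel source; bodies are
 * simplified, only the range symbols are the real x86_64 ones.
 */
static inline unsigned long pa_generic(unsigned long x)
{
        if (x >= __START_KERNEL_map)    /* kernel text mapping */
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;         /* direct (linear) mapping */
}

static inline unsigned long pa_symbol(unsigned long x)
{
        /* Linker symbols always live in the kernel text mapping,
         * so no range test is needed: a single subtraction. */
        return x - __START_KERNEL_map + phys_base;
}

A follow-up patch in the same series builds on this by letting the CONFIG_DEBUG_VIRTUAL checks verify that __pa_symbol() is only ever handed kernel-image addresses.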
6 changed files with 27 additions and 29 deletions
arch/x86/kernel/cpu/intel.c
@@ -168,7 +168,7 @@
 #ifdef CONFIG_X86_F00F_BUG
 static void __cpuinit trap_init_f00f_bug(void)
 {
-        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
+        __set_fixmap(FIX_F00F_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
 
         /*
          * Update the IDT descriptor and reload the IDT so that
arch/x86/kernel/setup.c
@@ -300,8 +300,8 @@
 static void __init reserve_brk(void)
 {
         if (_brk_end > _brk_start)
-                memblock_reserve(__pa(_brk_start),
-                                 __pa(_brk_end) - __pa(_brk_start));
+                memblock_reserve(__pa_symbol(_brk_start),
+                                 _brk_end - _brk_start);
 
         /* Mark brk area as locked down and no longer taking any
            new allocations */
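Note that the size argument changed as well, not just the start: the translation is linear over a contiguous region, so the offsets cancel when two translated addresses are subtracted, and the byte count needs no translation at all. A minimal sketch of the identity, assuming both addresses sit in the same mapping:

/* pa = va - OFFSET at both endpoints, so OFFSET cancels out: */
unsigned long size_old = __pa(_brk_end) - __pa(_brk_start);
unsigned long size_new = _brk_end - _brk_start;  /* same value, no translation */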
@@ -761,12 +761,12 @@
         init_mm.end_data = (unsigned long) _edata;
         init_mm.brk = _brk_end;
 
-        code_resource.start = virt_to_phys(_text);
-        code_resource.end = virt_to_phys(_etext)-1;
-        data_resource.start = virt_to_phys(_etext);
-        data_resource.end = virt_to_phys(_edata)-1;
-        bss_resource.start = virt_to_phys(&__bss_start);
-        bss_resource.end = virt_to_phys(&__bss_stop)-1;
+        code_resource.start = __pa_symbol(_text);
+        code_resource.end = __pa_symbol(_etext)-1;
+        data_resource.start = __pa_symbol(_etext);
+        data_resource.end = __pa_symbol(_edata)-1;
+        bss_resource.start = __pa_symbol(__bss_start);
+        bss_resource.end = __pa_symbol(__bss_stop)-1;
 
 #ifdef CONFIG_CMDLINE_BOOL
 #ifdef CONFIG_CMDLINE_OVERRIDE
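On x86, virt_to_phys() is essentially a thin wrapper around __pa() and is meant for direct-mapped addresses (e.g. kmalloc() results); handing it linker symbols only worked because __pa() special-cases the kernel text range. Roughly, from memory of arch/x86/include/asm/io.h (treat as approximate):

static inline phys_addr_t virt_to_phys(volatile void *address)
{
        return __pa(address);   /* direct-map translation */
}

Switching to __pa_symbol() picks the macro whose contract actually matches linker symbols such as _text and __bss_start.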
arch/x86/mm/init_64.c
@@ -770,12 +770,10 @@
 void mark_rodata_ro(void)
 {
         unsigned long start = PFN_ALIGN(_text);
-        unsigned long rodata_start =
-                ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+        unsigned long rodata_start = PFN_ALIGN(__start_rodata);
         unsigned long end = (unsigned long) &__end_rodata_hpage_align;
-        unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
-        unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
-        unsigned long data_start = (unsigned long) &_sdata;
+        unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
+        unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
 
         printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                (end - start) >> 10);
 
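PFN_ALIGN() is the stock helper for exactly the round-up-to-page computation that was open-coded above. Reproduced from memory of include/linux/pfn.h (treat as approximate):

/* Round x up to the next page boundary: */
#define PFN_ALIGN(x)    (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)

which is the deleted ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK expression verbatim. data_start is dropped because the second free_init_pages() call in the next hunk now passes _sdata directly.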
@@ -800,12 +800,12 @@
 #endif
 
         free_init_pages("unused kernel memory",
-                        (unsigned long) page_address(virt_to_page(text_end)),
-                        (unsigned long)
-                                page_address(virt_to_page(rodata_start)));
+                        (unsigned long) __va(__pa_symbol(text_end)),
+                        (unsigned long) __va(__pa_symbol(rodata_start)));
+
         free_init_pages("unused kernel memory",
-                        (unsigned long) page_address(virt_to_page(rodata_end)),
-                        (unsigned long) page_address(virt_to_page(data_start)));
+                        (unsigned long) __va(__pa_symbol(rodata_end)),
+                        (unsigned long) __va(__pa_symbol(_sdata)));
 }
 
 #endif
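This is the mark_rodata_ro() optimization highlighted in the commit message. Both forms yield the direct-map alias of the page containing a kernel-image address, but the old one makes a round trip through struct page. A conceptual sketch of the two paths (not literal source):

/* Old: VA -> phys -> pfn -> struct page -> direct-map VA.
 * On x86, virt_to_page(x) is pfn_to_page(__pa(x) >> PAGE_SHIFT),
 * and page_address() then maps the struct page back to a VA. */
unsigned long old = (unsigned long) page_address(virt_to_page(text_end));

/* New: VA -> phys -> direct-map VA; no struct page involved. */
unsigned long new = (unsigned long) __va(__pa_symbol(text_end));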
arch/x86/mm/pageattr.c
@@ -94,12 +94,12 @@
 
 static inline unsigned long highmap_start_pfn(void)
 {
-        return __pa(_text) >> PAGE_SHIFT;
+        return __pa_symbol(_text) >> PAGE_SHIFT;
 }
 
 static inline unsigned long highmap_end_pfn(void)
 {
-        return __pa(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
+        return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
 }
 
 #endif
@@ -276,8 +276,8 @@
          * The .rodata section needs to be read-only. Using the pfn
          * catches all aliases.
          */
-        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
-                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
+        if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
+                   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
                 pgprot_val(forbidden) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
arch/x86/platform/efi/efi.c
@@ -410,8 +410,8 @@
          * - Not within any part of the kernel
          * - Not the bios reserved area
          */
-        if ((start+size >= virt_to_phys(_text)
-             && start <= virt_to_phys(_end)) ||
+        if ((start+size >= __pa_symbol(_text)
+             && start <= __pa_symbol(_end)) ||
             !e820_all_mapped(start, start+size, E820_RAM) ||
             memblock_is_region_reserved(start, size)) {
                 /* Could not reserve, skip it */
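The kernel-overlap condition above is an interval-overlap test between the candidate region and the kernel image's physical footprint [__pa_symbol(_text), __pa_symbol(_end)]. Written as a generic helper for comparison (hypothetical, not a kernel function):

/* Hypothetical helper: do the closed ranges [a0,a1] and [b0,b1] overlap? */
static inline int ranges_overlap(unsigned long a0, unsigned long a1,
                                 unsigned long b0, unsigned long b1)
{
        return a1 >= b0 && a0 <= b1;
}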
arch/x86/realmode/init.c
@@ -62,9 +62,9 @@
                 __va(real_mode_header->trampoline_header);
 
 #ifdef CONFIG_X86_32
-        trampoline_header->start = __pa(startup_32_smp);
+        trampoline_header->start = __pa_symbol(startup_32_smp);
         trampoline_header->gdt_limit = __BOOT_DS + 7;
-        trampoline_header->gdt_base = __pa(boot_gdt);
+        trampoline_header->gdt_base = __pa_symbol(boot_gdt);
 #else
         /*
          * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
@@ -78,8 +78,8 @@
         *trampoline_cr4_features = read_cr4();
 
         trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
-        trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
-        trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
+        trampoline_pgd[0] = __pa_symbol(level3_ident_pgt) + _KERNPG_TABLE;
+        trampoline_pgd[511] = __pa_symbol(level3_kernel_pgt) + _KERNPG_TABLE;
 #endif
 }
 
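Why indices 0 and 511: with x86_64 4-level paging each PGD entry covers 512 GB (bits 47..39 of the address select the entry). Entry 0 covers the identity-mapped low range the trampoline starts in, and the kernel text mapping at 0xffffffff80000000 (__START_KERNEL_map) lands in the last entry. A standalone sketch of the index arithmetic:

#include <stdio.h>

#define PGDIR_SHIFT     39
#define PTRS_PER_PGD    512

/* Which PGD slot does a virtual address fall into? */
static unsigned int pgd_index(unsigned long long va)
{
        return (va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
        printf("%u\n", pgd_index(0x0ULL));                /* prints 0   */
        printf("%u\n", pgd_index(0xffffffff80000000ULL)); /* prints 511 */
        return 0;
}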