Commit b5666f70395016a55cc9d57826508b8a346398d0
Committed by Paul Mackerras
1 parent 51fae6de24
Exists in master and in 7 other branches
[PATCH] powerpc: Separate usage of KERNELBASE and PAGE_OFFSET
This patch separates the usage of KERNELBASE and PAGE_OFFSET. I haven't looked at any of the PPC32 code; if we ever want to support kdump on PPC we'll have to do another audit, ditto for iSeries.

This patch makes PAGE_OFFSET the constant; it'll always be 0xC * 1 gazillion for 64-bit.

To get a physical address from a virtual one you subtract PAGE_OFFSET, _not_ KERNELBASE.

KERNELBASE is the virtual address of the start of the kernel; it's often the same as PAGE_OFFSET, but _might not be_.

If you want to know something's offset from the start of the kernel you should subtract KERNELBASE.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
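To illustrate the rule the commit message states, here is a minimal C sketch. It is not part of the patch; the helpers phys_of() and offset_in_kernel() are hypothetical, and the values assume the usual 64-bit layout where both constants are 0xC000000000000000 unless a relocated (e.g. kdump) kernel moves KERNELBASE:

    /* Minimal sketch, not from the patch: phys_of() and offset_in_kernel()
     * are hypothetical helpers showing which constant each conversion uses.
     * PAGE_OFFSET is the fixed base of the linear mapping; KERNELBASE is
     * where the kernel image starts, usually but not always the same. */
    #define PAGE_OFFSET	0xC000000000000000UL
    #define KERNELBASE	PAGE_OFFSET		/* may differ in a kdump kernel */

    static unsigned long phys_of(unsigned long vaddr)
    {
    	return vaddr - PAGE_OFFSET;	/* virtual -> physical */
    }

    static unsigned long offset_in_kernel(unsigned long vaddr)
    {
    	return vaddr - KERNELBASE;	/* offset from start of kernel image */
    }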
Showing 9 changed files with 37 additions and 24 deletions
arch/powerpc/kernel/btext.c
... | ... | @@ -60,7 +60,7 @@ |
60 | 60 | * |
61 | 61 | * The display is mapped to virtual address 0xD0000000, rather |
62 | 62 | * than 1:1, because some CHRP machines put the frame buffer |
63 | - * in the region starting at 0xC0000000 (KERNELBASE). | |
63 | + * in the region starting at 0xC0000000 (PAGE_OFFSET). | |
64 | 64 | * This mapping is temporary and will disappear as soon as the |
65 | 65 | * setup done by MMU_Init() is applied. |
66 | 66 | * |
... | ... | @@ -71,7 +71,7 @@ |
71 | 71 | */ |
72 | 72 | void __init btext_prepare_BAT(void) |
73 | 73 | { |
74 | - unsigned long vaddr = KERNELBASE + 0x10000000; | |
74 | + unsigned long vaddr = PAGE_OFFSET + 0x10000000; | |
75 | 75 | unsigned long addr; |
76 | 76 | unsigned long lowbits; |
77 | 77 |
arch/powerpc/kernel/entry_64.S
... | ... | @@ -690,7 +690,7 @@ |
690 | 690 | |
691 | 691 | /* Setup our real return addr */ |
692 | 692 | SET_REG_TO_LABEL(r4,.rtas_return_loc) |
693 | - SET_REG_TO_CONST(r9,KERNELBASE) | |
693 | + SET_REG_TO_CONST(r9,PAGE_OFFSET) | |
694 | 694 | sub r4,r4,r9 |
695 | 695 | mtlr r4 |
696 | 696 | |
... | ... | @@ -718,7 +718,7 @@ |
718 | 718 | _STATIC(rtas_return_loc) |
719 | 719 | /* relocation is off at this point */ |
720 | 720 | mfspr r4,SPRN_SPRG3 /* Get PACA */ |
721 | - SET_REG_TO_CONST(r5, KERNELBASE) | |
721 | + SET_REG_TO_CONST(r5, PAGE_OFFSET) | |
722 | 722 | sub r4,r4,r5 /* RELOC the PACA base pointer */ |
723 | 723 | |
724 | 724 | mfmsr r6 |
arch/powerpc/kernel/lparmap.c
... | ... | @@ -16,8 +16,8 @@ |
16 | 16 | .xSegmentTableOffs = STAB0_PAGE, |
17 | 17 | |
18 | 18 | .xEsids = { |
19 | - { .xKernelEsid = GET_ESID(KERNELBASE), | |
20 | - .xKernelVsid = KERNEL_VSID(KERNELBASE), }, | |
19 | + { .xKernelEsid = GET_ESID(PAGE_OFFSET), | |
20 | + .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), }, | |
21 | 21 | { .xKernelEsid = GET_ESID(VMALLOCBASE), |
22 | 22 | .xKernelVsid = KERNEL_VSID(VMALLOCBASE), }, |
23 | 23 | }, |
... | ... | @@ -25,7 +25,7 @@ |
25 | 25 | .xRanges = { |
26 | 26 | { .xPages = HvPagesToMap, |
27 | 27 | .xOffset = 0, |
28 | - .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT), | |
28 | + .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT), | |
29 | 29 | }, |
30 | 30 | }, |
31 | 31 | }; |
arch/powerpc/kernel/machine_kexec_64.c
... | ... | @@ -153,9 +153,8 @@ |
153 | 153 | * including ones that were in place on the original copy |
154 | 154 | */ |
155 | 155 | for (i = 0; i < nr_segments; i++) |
156 | - flush_icache_range(ranges[i].mem + KERNELBASE, | |
157 | - ranges[i].mem + KERNELBASE + | |
158 | - ranges[i].memsz); | |
156 | + flush_icache_range((unsigned long)__va(ranges[i].mem), | |
157 | + (unsigned long)__va(ranges[i].mem + ranges[i].memsz)); | |
159 | 158 | } |
160 | 159 | |
161 | 160 | #ifdef CONFIG_SMP |
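The machine_kexec_64.c hunk above shows the rule in practice: ranges[i].mem holds physical addresses, so building the virtual range for flush_icache_range() is a physical-to-virtual conversion and goes through __va() (i.e. adding PAGE_OFFSET), not KERNELBASE. A hedged sketch of the same conversion pulled out into a hypothetical helper:

    /* Sketch only, not part of the patch: flush_segment_icache() is a
     * hypothetical helper.  A physical range becomes a linear-mapping
     * virtual range by way of __va(), which adds PAGE_OFFSET. */
    static void flush_segment_icache(unsigned long phys, unsigned long size)
    {
    	unsigned long start = (unsigned long)__va(phys);
    	unsigned long end = (unsigned long)__va(phys + size);

    	flush_icache_range(start, end);
    }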
arch/powerpc/mm/hash_utils_64.c
... | ... | @@ -456,7 +456,7 @@ |
456 | 456 | |
457 | 457 | /* create bolted the linear mapping in the hash table */ |
458 | 458 | for (i=0; i < lmb.memory.cnt; i++) { |
459 | - base = lmb.memory.region[i].base + KERNELBASE; | |
459 | + base = (unsigned long)__va(lmb.memory.region[i].base); | |
460 | 460 | size = lmb.memory.region[i].size; |
461 | 461 | |
462 | 462 | DBG("creating mapping for region: %lx : %lx\n", base, size); |
... | ... | @@ -498,8 +498,8 @@ |
498 | 498 | * for either 4K or 16MB pages. |
499 | 499 | */ |
500 | 500 | if (tce_alloc_start) { |
501 | - tce_alloc_start += KERNELBASE; | |
502 | - tce_alloc_end += KERNELBASE; | |
501 | + tce_alloc_start = (unsigned long)__va(tce_alloc_start); | |
502 | + tce_alloc_end = (unsigned long)__va(tce_alloc_end); | |
503 | 503 | |
504 | 504 | if (base + size >= tce_alloc_start) |
505 | 505 | tce_alloc_start = base + size + 1; |
arch/powerpc/mm/slb.c
... | ... | @@ -75,7 +75,7 @@ |
75 | 75 | vflags = SLB_VSID_KERNEL | virtual_llp; |
76 | 76 | |
77 | 77 | ksp_esid_data = mk_esid_data(get_paca()->kstack, 2); |
78 | - if ((ksp_esid_data & ESID_MASK) == KERNELBASE) | |
78 | + if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) | |
79 | 79 | ksp_esid_data &= ~SLB_ESID_V; |
80 | 80 | |
81 | 81 | /* We need to do this all in asm, so we're sure we don't touch |
... | ... | @@ -213,7 +213,7 @@ |
213 | 213 | asm volatile("isync":::"memory"); |
214 | 214 | asm volatile("slbmte %0,%0"::"r" (0) : "memory"); |
215 | 215 | asm volatile("isync; slbia; isync":::"memory"); |
216 | - create_slbe(KERNELBASE, lflags, 0); | |
216 | + create_slbe(PAGE_OFFSET, lflags, 0); | |
217 | 217 | |
218 | 218 | /* VMALLOC space has 4K pages always for now */ |
219 | 219 | create_slbe(VMALLOCBASE, vflags, 1); |
arch/powerpc/mm/slb_low.S
... | ... | @@ -37,9 +37,9 @@ |
37 | 37 | |
38 | 38 | srdi r9,r3,60 /* get region */ |
39 | 39 | srdi r10,r3,28 /* get esid */ |
40 | - cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */ | |
40 | + cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ | |
41 | 41 | |
42 | - /* r3 = address, r10 = esid, cr7 = <>KERNELBASE */ | |
42 | + /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ | |
43 | 43 | blt cr7,0f /* user or kernel? */ |
44 | 44 | |
45 | 45 | /* kernel address: proto-VSID = ESID */ |
... | ... | @@ -166,7 +166,7 @@ |
166 | 166 | /* |
167 | 167 | * Finish loading of an SLB entry and return |
168 | 168 | * |
169 | - * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE | |
169 | + * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET | |
170 | 170 | */ |
171 | 171 | slb_finish_load: |
172 | 172 | ASM_VSID_SCRAMBLE(r10,r9) |
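The comparison against 0xc in slb_low.S works because bits 63-60 of an effective address select the region, and the linear mapping that starts at PAGE_OFFSET (0xC000000000000000) occupies region 0xc. A C-level sketch of the same test, for illustration only (is_linear_map_region() is a hypothetical name):

    /* Sketch only: C equivalent of the srdi r9,r3,60 / cmpldi cr7,r9,0xc
     * sequence above.  The region is the top nibble of the effective
     * address; PAGE_OFFSET places the kernel linear mapping in region 0xc. */
    static int is_linear_map_region(unsigned long ea)
    {
    	return (ea >> 60) == 0xc;
    }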
arch/powerpc/mm/stab.c
... | ... | @@ -40,7 +40,7 @@ |
40 | 40 | unsigned long entry, group, old_esid, castout_entry, i; |
41 | 41 | unsigned int global_entry; |
42 | 42 | struct stab_entry *ste, *castout_ste; |
43 | - unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE; | |
43 | + unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET; | |
44 | 44 | |
45 | 45 | vsid_data = vsid << STE_VSID_SHIFT; |
46 | 46 | esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V; |
... | ... | @@ -83,7 +83,7 @@ |
83 | 83 | } |
84 | 84 | |
85 | 85 | /* Dont cast out the first kernel segment */ |
86 | - if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE) | |
86 | + if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET) | |
87 | 87 | break; |
88 | 88 | |
89 | 89 | castout_entry = (castout_entry + 1) & 0xf; |
... | ... | @@ -251,7 +251,7 @@ |
251 | 251 | panic("Unable to allocate segment table for CPU %d.\n", |
252 | 252 | cpu); |
253 | 253 | |
254 | - newstab += KERNELBASE; | |
254 | + newstab = (unsigned long)__va(newstab); | |
255 | 255 | |
256 | 256 | memset((void *)newstab, 0, HW_PAGE_SIZE); |
257 | 257 | |
258 | 258 | |
... | ... | @@ -270,11 +270,11 @@ |
270 | 270 | */ |
271 | 271 | void stab_initialize(unsigned long stab) |
272 | 272 | { |
273 | - unsigned long vsid = get_kernel_vsid(KERNELBASE); | |
273 | + unsigned long vsid = get_kernel_vsid(PAGE_OFFSET); | |
274 | 274 | unsigned long stabreal; |
275 | 275 | |
276 | 276 | asm volatile("isync; slbia; isync":::"memory"); |
277 | - make_ste(stab, GET_ESID(KERNELBASE), vsid); | |
277 | + make_ste(stab, GET_ESID(PAGE_OFFSET), vsid); | |
278 | 278 | |
279 | 279 | /* Order update */ |
280 | 280 | asm volatile("sync":::"memory"); |
include/asm-powerpc/page.h
... | ... | @@ -37,6 +37,20 @@ |
37 | 37 | */ |
38 | 38 | #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1)) |
39 | 39 | |
40 | +/* | |
41 | + * KERNELBASE is the virtual address of the start of the kernel, it's often | |
42 | + * the same as PAGE_OFFSET, but _might not be_. | |
43 | + * | |
44 | + * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET. | |
45 | + * | |
46 | + * To get a physical address from a virtual one you subtract PAGE_OFFSET, | |
47 | + * _not_ KERNELBASE. | |
48 | + * | |
49 | + * If you want to know something's offset from the start of the kernel you | |
50 | + * should subtract KERNELBASE. | |
51 | + * | |
52 | + * If you want to test if something's a kernel address, use is_kernel_addr(). | |
53 | + */ | |
40 | 54 | #define PAGE_OFFSET ASM_CONST(CONFIG_KERNEL_START) |
41 | 55 | #define KERNELBASE PAGE_OFFSET |
42 | 56 | |
... | ... | @@ -56,7 +70,7 @@ |
56 | 70 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
57 | 71 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
58 | 72 | |
59 | -#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE)) | |
73 | +#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET)) | |
60 | 74 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) |
61 | 75 | |
62 | 76 | /* |