Commit 1d5cfcdff793e2f34ec61d902fa5ee0c7e4a2208
1 parent: efd54ea315
Exists in master and in 7 other branches
sh: Kill off some superfluous legacy PMB special casing.
The __va()/__pa() offsets and the boot memory offsets are consistent for all PMB users, so there is no need to special case these for legacy PMB. Kill the special casing off and depend on CONFIG_PMB across the board. This also fixes up yet another addressing bug for sh64. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Showing 2 changed files with 4 additions and 10 deletions (side-by-side diff)
arch/sh/include/asm/page.h
... | ... | @@ -127,12 +127,7 @@ |
127 | 127 | * is not visible (it is part of the PMB mapping) and so needs to be |
128 | 128 | * added or subtracted as required. |
129 | 129 | */ |
130 | -#if defined(CONFIG_PMB_LEGACY) | |
131 | -/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */ | |
132 | -#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START)) | |
133 | -#define __pa(x) ((unsigned long)(x) - PMB_OFFSET) | |
134 | -#define __va(x) ((void *)((unsigned long)(x) + PMB_OFFSET)) | |
135 | -#elif defined(CONFIG_32BIT) | |
130 | +#ifdef CONFIG_PMB | |
136 | 131 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START) |
137 | 132 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START)) |
138 | 133 | #else |
arch/sh/kernel/vmlinux.lds.S
... | ... | @@ -14,11 +14,10 @@ |
14 | 14 | #include <asm/cache.h> |
15 | 15 | #include <asm/vmlinux.lds.h> |
16 | 16 | |
17 | -#if defined(CONFIG_29BIT) || defined(CONFIG_SUPERH64) || \ | |
18 | - defined(CONFIG_PMB_LEGACY) | |
19 | - #define MEMORY_OFFSET __MEMORY_START | |
17 | +#ifdef CONFIG_PMB | |
18 | + #define MEMORY_OFFSET 0 | |
20 | 19 | #else |
21 | - #define MEMORY_OFFSET 0 | |
20 | + #define MEMORY_OFFSET __MEMORY_START | |
22 | 21 | #endif |
23 | 22 | |
24 | 23 | ENTRY(_start) |