Commit ccd805874198c248498b5f269656ec14397eeede

Authored by Paul Mundt
1 parent 9141d30a48

sh64: Fixup the nommu build.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Showing 7 changed files with 55 additions and 10 deletions Side-by-side Diff

arch/sh/kernel/cpu/sh5/entry.S
... ... @@ -143,12 +143,22 @@
143 143 trap_jtable:
144 144 .long do_exception_error /* 0x000 */
145 145 .long do_exception_error /* 0x020 */
  146 +#ifdef CONFIG_MMU
146 147 .long tlb_miss_load /* 0x040 */
147 148 .long tlb_miss_store /* 0x060 */
  149 +#else
  150 + .long do_exception_error
  151 + .long do_exception_error
  152 +#endif
148 153 ! ARTIFICIAL pseudo-EXPEVT setting
149 154 .long do_debug_interrupt /* 0x080 */
  155 +#ifdef CONFIG_MMU
150 156 .long tlb_miss_load /* 0x0A0 */
151 157 .long tlb_miss_store /* 0x0C0 */
  158 +#else
  159 + .long do_exception_error
  160 + .long do_exception_error
  161 +#endif
152 162 .long do_address_error_load /* 0x0E0 */
153 163 .long do_address_error_store /* 0x100 */
154 164 #ifdef CONFIG_SH_FPU
155 165  
156 166  
157 167  
... ... @@ -185,10 +195,18 @@
185 195 .endr
186 196 .long do_IRQ /* 0xA00 */
187 197 .long do_IRQ /* 0xA20 */
  198 +#ifdef CONFIG_MMU
188 199 .long itlb_miss_or_IRQ /* 0xA40 */
  200 +#else
  201 + .long do_IRQ
  202 +#endif
189 203 .long do_IRQ /* 0xA60 */
190 204 .long do_IRQ /* 0xA80 */
  205 +#ifdef CONFIG_MMU
191 206 .long itlb_miss_or_IRQ /* 0xAA0 */
  207 +#else
  208 + .long do_IRQ
  209 +#endif
192 210 .long do_exception_error /* 0xAC0 */
193 211 .long do_address_error_exec /* 0xAE0 */
194 212 .rept 8
... ... @@ -274,6 +292,7 @@
274 292 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
275 293 * block making sure the final alignment is correct.
276 294 */
  295 +#ifdef CONFIG_MMU
277 296 tlb_miss:
278 297 synco /* TAKum03020 (but probably a good idea anyway.) */
279 298 putcon SP, KCR1
... ... @@ -377,6 +396,9 @@
377 396 getcon KCR1, SP
378 397 pta handle_exception, tr0
379 398 blink tr0, ZERO
  399 +#else /* CONFIG_MMU */
  400 + .balign 256
  401 +#endif
380 402  
381 403 /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
382 404 DOES END UP AT VBR+0x600 */
... ... @@ -1103,6 +1125,7 @@
1103 1125 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1104 1126 *
1105 1127 */
  1128 +#ifdef CONFIG_MMU
1106 1129 tlb_miss_load:
1107 1130 or SP, ZERO, r2
1108 1131 or ZERO, ZERO, r3 /* Read */
... ... @@ -1132,6 +1155,7 @@
1132 1155 movi do_page_fault, r6
1133 1156 ptabs r6, tr0
1134 1157 blink tr0, ZERO
  1158 +#endif /* CONFIG_MMU */
1135 1159  
1136 1160 fpu_error_or_IRQA:
1137 1161 pta its_IRQ, tr0
... ... @@ -1481,6 +1505,7 @@
1481 1505 ptabs LINK, tr0
1482 1506 blink tr0, r63
1483 1507  
  1508 +#ifdef CONFIG_MMU
1484 1509 /*
1485 1510 * --- User Access Handling Section
1486 1511 */
... ... @@ -1604,6 +1629,7 @@
1604 1629 ptabs LINK, tr0
1605 1630 blink tr0, ZERO
1606 1631  
  1632 +#endif /* CONFIG_MMU */
1607 1633  
1608 1634 /*
1609 1635 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1610 1636  
... ... @@ -2014,9 +2040,11 @@
2014 2040 .global asm_uaccess_start /* Just a marker */
2015 2041 asm_uaccess_start:
2016 2042  
  2043 +#ifdef CONFIG_MMU
2017 2044 .long ___copy_user1, ___copy_user_exit
2018 2045 .long ___copy_user2, ___copy_user_exit
2019 2046 .long ___clear_user1, ___clear_user_exit
  2047 +#endif
2020 2048 .long ___strncpy_from_user1, ___strncpy_from_user_exit
2021 2049 .long ___strnlen_user1, ___strnlen_user_exit
2022 2050 .long ___get_user_asm_b1, ___get_user_asm_b_exit
arch/sh/mm/Makefile_64
... ... @@ -2,10 +2,11 @@
2 2 # Makefile for the Linux SuperH-specific parts of the memory manager.
3 3 #
4 4  
5   -obj-y := init.o extable_64.o consistent.o
  5 +obj-y := init.o consistent.o
6 6  
7   -mmu-y := tlb-nommu.o pg-nommu.o
8   -mmu-$(CONFIG_MMU) := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o
  7 +mmu-y := tlb-nommu.o pg-nommu.o extable_32.o
  8 +mmu-$(CONFIG_MMU) := fault_64.o ioremap_64.o tlbflush_64.o tlb-sh5.o \
  9 + extable_64.o
9 10  
10 11 ifndef CONFIG_CACHE_OFF
11 12 obj-y += cache-sh5.o
arch/sh/mm/cache-sh5.c
... ... @@ -714,6 +714,7 @@
714 714 sh64_icache_inv_current_user_range(vaddr, end);
715 715 }
716 716  
  717 +#ifdef CONFIG_MMU
717 718 /*
718 719 * These *MUST* lie in an area of virtual address space that's otherwise
719 720 * unused.
... ... @@ -830,4 +831,5 @@
830 831 else
831 832 sh64_clear_user_page_coloured(to, address);
832 833 }
  834 +#endif
include/asm-sh/io.h
... ... @@ -268,11 +268,6 @@
268 268 unsigned long long poke_real_address_q(unsigned long long addr,
269 269 unsigned long long val);
270 270  
271   -/* arch/sh/mm/ioremap_64.c */
272   -unsigned long onchip_remap(unsigned long addr, unsigned long size,
273   - const char *name);
274   -extern void onchip_unmap(unsigned long vaddr);
275   -
276 271 #if !defined(CONFIG_MMU)
277 272 #define virt_to_phys(address) ((unsigned long)(address))
278 273 #define phys_to_virt(address) ((void *)(address))
279 274  
... ... @@ -302,9 +297,16 @@
302 297 void __iomem *__ioremap(unsigned long offset, unsigned long size,
303 298 unsigned long flags);
304 299 void __iounmap(void __iomem *addr);
  300 +
  301 +/* arch/sh/mm/ioremap_64.c */
  302 +unsigned long onchip_remap(unsigned long addr, unsigned long size,
  303 + const char *name);
  304 +extern void onchip_unmap(unsigned long vaddr);
305 305 #else
306 306 #define __ioremap(offset, size, flags) ((void __iomem *)(offset))
307 307 #define __iounmap(addr) do { } while (0)
  308 +#define onchip_remap(addr, size, name) (addr)
  309 +#define onchip_unmap(addr) do { } while (0)
308 310 #endif /* CONFIG_MMU */
309 311  
310 312 static inline void __iomem *
include/asm-sh/mmu_context.h
... ... @@ -27,6 +27,7 @@
27 27 /* ASID is 8-bit value, so it can't be 0x100 */
28 28 #define MMU_NO_ASID 0x100
29 29  
  30 +#ifdef CONFIG_MMU
30 31 #define asid_cache(cpu) (cpu_data[cpu].asid_cache)
31 32 #define cpu_context(cpu, mm) ((mm)->context.id[cpu])
32 33  
... ... @@ -38,7 +39,6 @@
38 39 */
39 40 #define MMU_VPN_MASK 0xfffff000
40 41  
41   -#ifdef CONFIG_MMU
42 42 #if defined(CONFIG_SUPERH32)
43 43 #include "mmu_context_32.h"
44 44 #else
... ... @@ -129,6 +129,8 @@
129 129 #define destroy_context(mm) do { } while (0)
130 130 #define set_asid(asid) do { } while (0)
131 131 #define get_asid() (0)
  132 +#define cpu_asid(cpu, mm) ({ (void)cpu; 0; })
  133 +#define switch_and_save_asid(asid) (0)
132 134 #define set_TTB(pgd) do { } while (0)
133 135 #define get_TTB() (0)
134 136 #define activate_context(mm,cpu) do { } while (0)
include/asm-sh/tlb_64.h
... ... @@ -56,6 +56,7 @@
56 56 __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
57 57 }
58 58  
  59 +#ifdef CONFIG_MMU
59 60 /* arch/sh64/mm/tlb.c */
60 61 int sh64_tlb_init(void);
61 62 unsigned long long sh64_next_free_dtlb_entry(void);
... ... @@ -64,7 +65,14 @@
64 65 void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
65 66 unsigned long asid, unsigned long paddr);
66 67 void sh64_teardown_tlb_slot(unsigned long long config_addr);
67   -
  68 +#else
  69 +#define sh64_tlb_init() do { } while (0)
  70 +#define sh64_next_free_dtlb_entry() (0)
  71 +#define sh64_get_wired_dtlb_entry() (0)
  72 +#define sh64_put_wired_dtlb_entry(entry) do { } while (0)
  73 +#define sh64_setup_tlb_slot(conf, virt, asid, phys) do { } while (0)
  74 +#define sh64_teardown_tlb_slot(addr) do { } while (0)
  75 +#endif /* CONFIG_MMU */
68 76 #endif /* __ASSEMBLY__ */
69 77 #endif /* __ASM_SH_TLB_64_H */
include/asm-sh/uaccess_64.h
... ... @@ -274,7 +274,9 @@
274 274 unsigned long insn, fixup;
275 275 };
276 276  
  277 +#ifdef CONFIG_MMU
277 278 #define ARCH_HAS_SEARCH_EXTABLE
  279 +#endif
278 280  
279 281 /* Returns 0 if exception not found and fixup.unit otherwise. */
280 282 extern unsigned long search_exception_table(unsigned long addr);