Commit edc9a958fd31ef1d89f9eaee82b2a3882c8e34c9

Authored by Paul Mundt
1 parent e2fcf74f3d

sh: nommu: Support building without an uncached mapping.

Now that nommu selects 32BIT we run into the situation where SH-2A
supports an uncached identity mapping by way of the BSC, while the SH-2
does not. This provides stubs for the PC manglers and tidies up some of
the system*.h mess in the process.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
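
A minimal sketch (not part of this commit) of what the stubs buy callers: with
jump_to_uncached()/back_to_cached() defined as no-ops when CONFIG_UNCACHED_MAPPING
is not set, generic cache/TLB handling code builds unchanged on SH-2. The function
name and body below are hypothetical, for illustration only.

	#include <asm/uncached.h>

	/* Hypothetical caller: no #ifdef CONFIG_UNCACHED_MAPPING needed. */
	static void example_cache_op(void)
	{
		jump_to_uncached();	/* jump via cached_to_uncached, or no-op */
		/* ... touch cache/TLB control registers here ... */
		back_to_cached();	/* return to the cached mapping, or no-op */
	}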

Showing 4 changed files with 41 additions and 42 deletions

arch/sh/include/asm/system.h
... ... @@ -10,6 +10,7 @@
10 10 #include <linux/compiler.h>
11 11 #include <linux/linkage.h>
12 12 #include <asm/types.h>
  13 +#include <asm/uncached.h>
13 14  
14 15 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
15 16  
... ... @@ -136,9 +137,6 @@
136 137 #else
137 138 #define instruction_size(insn) (4)
138 139 #endif
139   -
140   -extern unsigned long cached_to_uncached;
141   -extern unsigned long uncached_size;
142 140  
143 141 void per_cpu_trap_init(void);
144 142 void default_idle(void);
arch/sh/include/asm/system_32.h
... ... @@ -145,42 +145,6 @@
145 145 __restore_dsp(prev); \
146 146 } while (0)
147 147  
148   -/*
149   - * Jump to uncached area.
150   - * When handling TLB or caches, we need to do it from an uncached area.
151   - */
152   -#define jump_to_uncached() \
153   -do { \
154   - unsigned long __dummy; \
155   - \
156   - __asm__ __volatile__( \
157   - "mova 1f, %0\n\t" \
158   - "add %1, %0\n\t" \
159   - "jmp @%0\n\t" \
160   - " nop\n\t" \
161   - ".balign 4\n" \
162   - "1:" \
163   - : "=&z" (__dummy) \
164   - : "r" (cached_to_uncached)); \
165   -} while (0)
166   -
167   -/*
168   - * Back to cached area.
169   - */
170   -#define back_to_cached() \
171   -do { \
172   - unsigned long __dummy; \
173   - ctrl_barrier(); \
174   - __asm__ __volatile__( \
175   - "mov.l 1f, %0\n\t" \
176   - "jmp @%0\n\t" \
177   - " nop\n\t" \
178   - ".balign 4\n" \
179   - "1: .long 2f\n" \
180   - "2:" \
181   - : "=&r" (__dummy)); \
182   -} while (0)
183   -
184 148 #ifdef CONFIG_CPU_HAS_SR_RB
185 149 #define lookup_exception_vector() \
186 150 ({ \
arch/sh/include/asm/system_64.h
... ... @@ -34,9 +34,6 @@
34 34 &next->thread); \
35 35 } while (0)
36 36  
37   -#define jump_to_uncached() do { } while (0)
38   -#define back_to_cached() do { } while (0)
39   -
40 37 #define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
41 38 #define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
42 39 #define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
arch/sh/include/asm/uncached.h
... ... @@ -4,15 +4,55 @@
4 4 #include <linux/bug.h>
5 5  
6 6 #ifdef CONFIG_UNCACHED_MAPPING
  7 +extern unsigned long cached_to_uncached;
  8 +extern unsigned long uncached_size;
7 9 extern unsigned long uncached_start, uncached_end;
8 10  
9 11 extern int virt_addr_uncached(unsigned long kaddr);
10 12 extern void uncached_init(void);
11 13 extern void uncached_resize(unsigned long size);
  14 +
  15 +/*
  16 + * Jump to uncached area.
  17 + * When handling TLB or caches, we need to do it from an uncached area.
  18 + */
  19 +#define jump_to_uncached() \
  20 +do { \
  21 + unsigned long __dummy; \
  22 + \
  23 + __asm__ __volatile__( \
  24 + "mova 1f, %0\n\t" \
  25 + "add %1, %0\n\t" \
  26 + "jmp @%0\n\t" \
  27 + " nop\n\t" \
  28 + ".balign 4\n" \
  29 + "1:" \
  30 + : "=&z" (__dummy) \
  31 + : "r" (cached_to_uncached)); \
  32 +} while (0)
  33 +
  34 +/*
  35 + * Back to cached area.
  36 + */
  37 +#define back_to_cached() \
  38 +do { \
  39 + unsigned long __dummy; \
  40 + ctrl_barrier(); \
  41 + __asm__ __volatile__( \
  42 + "mov.l 1f, %0\n\t" \
  43 + "jmp @%0\n\t" \
  44 + " nop\n\t" \
  45 + ".balign 4\n" \
  46 + "1: .long 2f\n" \
  47 + "2:" \
  48 + : "=&r" (__dummy)); \
  49 +} while (0)
12 50 #else
13 51 #define virt_addr_uncached(kaddr) (0)
14 52 #define uncached_init() do { } while (0)
15 53 #define uncached_resize(size) BUG()
  54 +#define jump_to_uncached() do { } while (0)
  55 +#define back_to_cached() do { } while (0)
16 56 #endif
17 57  
18 58 #endif /* __ASM_SH_UNCACHED_H */