Commit f00ec48fadf5e37e7889f14cff900aa70d18b644

Authored by Russell King
1 parent 067173526c

ARM: Allow SMP kernels to boot on UP systems

UP systems do not implement all the instructions that SMP systems have,
so in order to boot an SMP kernel on a UP system, we need to rewrite
parts of the kernel.

Do this using an 'alternatives' scheme, where the kernel code and data
are modified prior to initialization to replace the SMP instructions,
thereby rendering the problematic code harmless.  We use the linker
to generate a list of 32-bit word locations and their replacement values,
and run through these replacements when we detect a UP system.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
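
In outline: the linker collects (location, replacement-value) pairs into a
dedicated init section, and early boot code walks that table when it finds
itself on a UP CPU. A minimal C sketch of the idea (the real fixup is
pre-MMU assembly in head.S, and the __smpalt_begin/__smpalt_end bounds come
from the vmlinux.lds.S change, both below):

    #include <stdint.h>

    /* Each ALT_UP() pairing emits one 8-byte record into .alt.smp.init:
     * the address of the SMP word, then the UP word to put there. */
    struct smp_alt {
            uint32_t *loc;          /* address of the SMP instruction/word */
            uint32_t up_word;       /* replacement value for UP */
    };

    extern struct smp_alt __smpalt_begin[], __smpalt_end[];

    static void fixup_smp_on_up(void)
    {
            struct smp_alt *a;

            for (a = __smpalt_begin; a < __smpalt_end; a++)
                    *a->loc = a->up_word;   /* patch in place */
    }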

Showing 14 changed files with 237 additions and 102 deletions

... ... @@ -1191,6 +1191,19 @@
1191 1191  
1192 1192 If you don't know what to do here, say N.
1193 1193  
  1194 +config SMP_ON_UP
  1195 + bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
  1196 + depends on EXPERIMENTAL
  1197 + depends on SMP && !XIP_KERNEL && !THUMB2_KERNEL
  1198 + default y
  1199 + help
  1200 + SMP kernels contain instructions which fail on non-SMP processors.
  1201 + Enabling this option allows the kernel to modify itself to make
  1202 + these instructions safe. Disabling it allows about 1K of space
  1203 + savings.
  1204 +
  1205 + If you don't know what to do here, say Y.
  1206 +
1194 1207 config HAVE_ARM_SCU
1195 1208 bool
1196 1209 depends on SMP
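
The "about 1K" is the size of the fixup table plus replacement words: every
ALT_UP() pairing adds an 8-byte record to .alt.smp.init. The section lives
among the init sections, so enabling the option costs image size rather than
steady-state memory; disabling it lets the linker discard the section
outright (see the vmlinux.lds.S hunk below). A sketch of where the number
comes from, using the symbols this patch adds:

    #include <stddef.h>

    extern char __smpalt_begin[], __smpalt_end[];   /* from vmlinux.lds.S */

    static size_t smp_alt_table_size(void)
    {
            return __smpalt_end - __smpalt_begin;   /* roughly the 1K cited */
    }
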
arch/arm/include/asm/assembler.h
... ... @@ -154,16 +154,39 @@
154 154 .long 9999b,9001f; \
155 155 .popsection
156 156  
  157 +#ifdef CONFIG_SMP
  158 +#define ALT_SMP(instr...) \
  159 +9998: instr
  160 +#define ALT_UP(instr...) \
  161 + .pushsection ".alt.smp.init", "a" ;\
  162 + .long 9998b ;\
  163 + instr ;\
  164 + .popsection
  165 +#define ALT_UP_B(label) \
  166 + .equ up_b_offset, label - 9998b ;\
  167 + .pushsection ".alt.smp.init", "a" ;\
  168 + .long 9998b ;\
  169 + b . + up_b_offset ;\
  170 + .popsection
  171 +#else
  172 +#define ALT_SMP(instr...)
  173 +#define ALT_UP(instr...) instr
  174 +#define ALT_UP_B(label) b label
  175 +#endif
  176 +
157 177 /*
158 178 * SMP data memory barrier
159 179 */
160 180 .macro smp_dmb
161 181 #ifdef CONFIG_SMP
162 182 #if __LINUX_ARM_ARCH__ >= 7
163   - dmb
  183 + ALT_SMP(dmb)
164 184 #elif __LINUX_ARM_ARCH__ == 6
165   - mcr p15, 0, r0, c7, c10, 5 @ dmb
  185 + ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
  186 +#else
  187 +#error Incompatible SMP platform
166 188 #endif
  189 + ALT_UP(nop)
167 190 #endif
168 191 .endm
169 192  
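
The pairing rules: ALT_SMP() tags its instruction with local label 9998, and
the ALT_UP() that follows records that address plus exactly one replacement
word, assembled inside .alt.smp.init. Since the replacement is assembled at
a different address than it will finally occupy, anything PC-relative needs
care, which is what ALT_UP_B() handles: it assembles 'b . + up_b_offset',
whose encoded displacement is exactly what the branch needs once copied to
9998b. A sketch of why that works, using the ARM branch encoding (the
offset is taken from PC, i.e. the instruction address plus 8):

    #include <stdint.h>

    /* ARM 'B' (always): imm24 = (target - addr - 8) / 4. Assembled in the
     * alt section at address A with target A + up_b_offset, this yields
     * imm24 = (up_b_offset - 8) / 4 -- the same value 'b label' would get
     * at 9998b, so the copied word branches to the intended label. */
    static uint32_t encode_b(int32_t target_minus_addr)
    {
            return 0xea000000 |
                   (((target_minus_addr - 8) >> 2) & 0x00ffffff);
    }
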
arch/arm/include/asm/smp_mpidr.h
... ... @@ -4,7 +4,12 @@
4 4 #define hard_smp_processor_id() \
5 5 ({ \
6 6 unsigned int cpunum; \
7   - __asm__("mrc p15, 0, %0, c0, c0, 5\n" \
  7 + __asm__("\n" \
  8 + "1: mrc p15, 0, %0, c0, c0, 5\n" \
  9 + " .pushsection \".alt.smp.init\", \"a\"\n"\
  10 + " .long 1b\n" \
  11 + " mov %0, #0\n" \
  12 + " .popsection" \
8 13 : "=r" (cpunum)); \
9 14 cpunum &= 0x0F; \
10 15 })
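
This hunk open-codes the same pattern from C inline assembly: label the SMP
instruction, then push an (address, replacement) record into .alt.smp.init.
On UP the MRC that reads MPIDR becomes 'mov %0, #0', so
hard_smp_processor_id() reports CPU 0. A standalone sketch of the pattern
(the function name is illustrative):

    /* Returns the MPIDR-based CPU number on SMP hardware; the read is
     * patched to 'mov %0, #0' on UP, mirroring the hunk above. */
    static inline unsigned int cpu_number(void)
    {
            unsigned int cpu;

            asm("1:     mrc     p15, 0, %0, c0, c0, 5\n"    /* read MPIDR */
                "       .pushsection \".alt.smp.init\", \"a\"\n"
                "       .long   1b\n"                       /* patch site */
                "       mov     %0, #0\n"                   /* UP word */
                "       .popsection"
                : "=r" (cpu));
            return cpu & 0x0f;
    }
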
arch/arm/include/asm/smp_plat.h
... ... @@ -18,5 +18,20 @@
18 18 return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
19 19 }
20 20  
  21 +/*
  22 + * Return true if we are running on a SMP platform
  23 + */
  24 +static inline bool is_smp(void)
  25 +{
  26 +#ifndef CONFIG_SMP
  27 + return false;
  28 +#elif defined(CONFIG_SMP_ON_UP)
  29 + extern unsigned int smp_on_up;
  30 + return !!smp_on_up;
  31 +#else
  32 + return true;
  33 +#endif
  34 +}
  35 +
21 36 #endif
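
is_smp() lets C code trade compile-time #ifdef CONFIG_SMP blocks for runtime
branches, as the setup.c and mmu.c hunks below do. In the CONFIG_SMP_ON_UP
case the backing variable is itself one of the patched words: head.S defines
smp_on_up as ALT_SMP(.long 1) / ALT_UP(.long 0), so the same fixup that
rewrites instructions also sets this flag. A typical call site, taken from
the mmu.c hunk below:

    if (is_smp())
            cachepolicy = CPOLICY_WRITEALLOC;   /* SMP needs write-allocate */
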
arch/arm/include/asm/tlbflush.h
... ... @@ -70,6 +70,10 @@
70 70 #undef _TLB
71 71 #undef MULTI_TLB
72 72  
  73 +#ifdef CONFIG_SMP_ON_UP
  74 +#define MULTI_TLB 1
  75 +#endif
  76 +
73 77 #define v3_tlb_flags (TLB_V3_FULL | TLB_V3_PAGE)
74 78  
75 79 #ifdef CONFIG_CPU_TLB_V3
76 80  
77 81  
78 82  
... ... @@ -185,17 +189,23 @@
185 189 # define v6wbi_always_flags (-1UL)
186 190 #endif
187 191  
188   -#ifdef CONFIG_SMP
189   -#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
  192 +#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
190 193 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
191   -#else
192   -#define v7wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
  194 +#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BTB | \
193 195 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
194   -#endif
195 196  
196 197 #ifdef CONFIG_CPU_TLB_V7
197   -# define v7wbi_possible_flags v7wbi_tlb_flags
198   -# define v7wbi_always_flags v7wbi_tlb_flags
  198 +
  199 +# ifdef CONFIG_SMP_ON_UP
  200 +# define v7wbi_possible_flags (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
  201 +# define v7wbi_always_flags (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
  202 +# elif defined(CONFIG_SMP)
  203 +# define v7wbi_possible_flags v7wbi_tlb_flags_smp
  204 +# define v7wbi_always_flags v7wbi_tlb_flags_smp
  205 +# else
  206 +# define v7wbi_possible_flags v7wbi_tlb_flags_up
  207 +# define v7wbi_always_flags v7wbi_tlb_flags_up
  208 +# endif
199 209 # ifdef _TLB
200 210 # define MULTI_TLB 1
201 211 # else
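
With both variants possible in one binary, the capability masks widen: a
flag is possible if either variant uses it, and guaranteed only if both do,
hence the OR/AND pair above (and the unconditional MULTI_TLB, since the
flags are no longer a single compile-time constant). A toy example of the
algebra, with hypothetical flag values:

    enum { FLAG_A = 1, FLAG_B = 2, FLAG_C = 4 };     /* hypothetical */

    #define SMP_FLAGS (FLAG_A | FLAG_B)
    #define UP_FLAGS  (FLAG_A | FLAG_C)

    #define POSSIBLE_FLAGS (SMP_FLAGS | UP_FLAGS)    /* A|B|C: may be used */
    #define ALWAYS_FLAGS   (SMP_FLAGS & UP_FLAGS)    /* A: safe to assume */
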
arch/arm/kernel/entry-armv.S
... ... @@ -46,7 +46,8 @@
46 46 * this macro assumes that irqstat (r6) and base (r5) are
47 47 * preserved from get_irqnr_and_base above
48 48 */
49   - test_for_ipi r0, r6, r5, lr
  49 + ALT_SMP(test_for_ipi r0, r6, r5, lr)
  50 + ALT_UP_B(9997f)
50 51 movne r0, sp
51 52 adrne lr, BSYM(1b)
52 53 bne do_IPI
... ... @@ -57,6 +58,7 @@
57 58 adrne lr, BSYM(1b)
58 59 bne do_local_timer
59 60 #endif
  61 +9997:
60 62 #endif
61 63  
62 64 .endm
... ... @@ -965,11 +967,8 @@
965 967 beq 1b
966 968 rsbs r0, r3, #0
967 969 /* beware -- each __kuser slot must be 8 instructions max */
968   -#ifdef CONFIG_SMP
969   - b __kuser_memory_barrier
970   -#else
971   - usr_ret lr
972   -#endif
  970 + ALT_SMP(b __kuser_memory_barrier)
  971 + ALT_UP(usr_ret lr)
973 972  
974 973 #endif
975 974  
arch/arm/kernel/head.S
... ... @@ -86,6 +86,9 @@
86 86 movs r8, r5 @ invalid machine (r5=0)?
87 87 beq __error_a @ yes, error 'a'
88 88 bl __vet_atags
  89 +#ifdef CONFIG_SMP_ON_UP
  90 + bl __fixup_smp
  91 +#endif
89 92 bl __create_page_tables
90 93  
91 94 /*
... ... @@ -332,6 +335,53 @@
332 335 mov pc, lr
333 336 ENDPROC(__create_page_tables)
334 337 .ltorg
  338 +
  339 +#ifdef CONFIG_SMP_ON_UP
  340 +__fixup_smp:
  341 + mov r7, #0x00070000
  342 + orr r6, r7, #0xff000000 @ mask 0xff070000
  343 + orr r7, r7, #0x41000000 @ val 0x41070000
  344 + and r0, r9, r6
  345 + teq r0, r7 @ ARM CPU and ARMv6/v7?
  346 + bne __fixup_smp_on_up @ no, assume UP
  347 +
  348 + orr r6, r6, #0x0000ff00
  349 + orr r6, r6, #0x000000f0 @ mask 0xff07fff0
  350 + orr r7, r7, #0x0000b000
  351 + orr r7, r7, #0x00000020 @ val 0x4107b020
  352 + and r0, r9, r6
  353 + teq r0, r7 @ ARM 11MPCore?
  354 + moveq pc, lr @ yes, assume SMP
  355 +
  356 + mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
  357 + tst r0, #1 << 31
  358 + movne pc, lr @ bit 31 => SMP
  359 +
  360 +__fixup_smp_on_up:
  361 + adr r0, 1f
  362 + ldmia r0, {r3, r6, r7}
  363 + sub r3, r0, r3
  364 + add r6, r6, r3
  365 + add r7, r7, r3
  366 +2: cmp r6, r7
  367 + ldmia r6!, {r0, r4}
  368 + strlo r4, [r0, r3]
  369 + blo 2b
  370 + mov pc, lr
  371 +ENDPROC(__fixup_smp)
  372 +
  373 +1: .word .
  374 + .word __smpalt_begin
  375 + .word __smpalt_end
  376 +
  377 + .pushsection .data
  378 + .globl smp_on_up
  379 +smp_on_up:
  380 + ALT_SMP(.long 1)
  381 + ALT_UP(.long 0)
  382 + .popsection
  383 +
  384 +#endif
335 385  
336 386 #include "head-common.S"
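
__fixup_smp_on_up runs from physical addresses before the MMU is enabled, so
the link-time values in the table cannot be used as-is. The '1: .word .'
anchor lets it compute the runtime-minus-link-time delta (kept in r3), which
it applies both to the table bounds and to every recorded location. A rough
C rendering of the loop (illustrative only; the real code must stay
position-independent assembly):

    #include <stdint.h>

    extern char __smpalt_begin[], __smpalt_end[];   /* link-time bounds */

    static void fixup_smp_on_up(long delta)         /* runtime - link-time */
    {
            uint32_t *p   = (uint32_t *)(__smpalt_begin + delta);
            uint32_t *end = (uint32_t *)(__smpalt_end + delta);

            while (p < end) {
                    uint32_t loc = *p++;            /* recorded address */
                    uint32_t val = *p++;            /* UP replacement */

                    *(uint32_t *)(loc + delta) = val;
            }
    }
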
arch/arm/kernel/setup.c
... ... @@ -36,6 +36,7 @@
36 36 #include <asm/procinfo.h>
37 37 #include <asm/sections.h>
38 38 #include <asm/setup.h>
  39 +#include <asm/smp_plat.h>
39 40 #include <asm/mach-types.h>
40 41 #include <asm/cacheflush.h>
41 42 #include <asm/cachetype.h>
... ... @@ -825,7 +826,8 @@
825 826 request_standard_resources(&meminfo, mdesc);
826 827  
827 828 #ifdef CONFIG_SMP
828   - smp_init_cpus();
  829 + if (is_smp())
  830 + smp_init_cpus();
829 831 #endif
830 832 reserve_crashkernel();
831 833  
arch/arm/kernel/vmlinux.lds.S
... ... @@ -40,6 +40,11 @@
40 40 __tagtable_begin = .;
41 41 *(.taglist.init)
42 42 __tagtable_end = .;
  43 +#ifdef CONFIG_SMP_ON_UP
  44 + __smpalt_begin = .;
  45 + *(.alt.smp.init)
  46 + __smpalt_end = .;
  47 +#endif
43 48  
44 49 INIT_SETUP(16)
45 50  
... ... @@ -237,6 +242,12 @@
237 242  
238 243 /* Default discards */
239 244 DISCARDS
  245 +
  246 +#ifndef CONFIG_SMP_ON_UP
  247 + /DISCARD/ : {
  248 + *(.alt.smp.init)
  249 + }
  250 +#endif
240 251 }
241 252  
242 253 /*
arch/arm/mm/cache-v7.S
... ... @@ -91,11 +91,8 @@
91 91 THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
92 92 bl v7_flush_dcache_all
93 93 mov r0, #0
94   -#ifdef CONFIG_SMP
95   - mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable
96   -#else
97   - mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
98   -#endif
  94 + ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
  95 + ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
99 96 ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
100 97 THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
101 98 mov pc, lr
... ... @@ -171,11 +168,8 @@
171 168 cmp r0, r1
172 169 blo 1b
173 170 mov r0, #0
174   -#ifdef CONFIG_SMP
175   - mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable
176   -#else
177   - mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
178   -#endif
  171 + ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
  172 + ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
179 173 dsb
180 174 isb
181 175 mov pc, lr
... ... @@ -310,9 +310,8 @@
310 310 cachepolicy = CPOLICY_WRITEBACK;
311 311 ecc_mask = 0;
312 312 }
313   -#ifdef CONFIG_SMP
314   - cachepolicy = CPOLICY_WRITEALLOC;
315   -#endif
  313 + if (is_smp())
  314 + cachepolicy = CPOLICY_WRITEALLOC;
316 315  
317 316 /*
318 317 * Strip out features not present on earlier architectures.
319 318  
320 319  
... ... @@ -406,13 +405,11 @@
406 405 cp = &cache_policies[cachepolicy];
407 406 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
408 407  
409   -#ifndef CONFIG_SMP
410 408 /*
411 409 * Only use write-through for non-SMP systems
412 410 */
413   - if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
  411 + if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
414 412 vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
415   -#endif
416 413  
417 414 /*
418 415 * Enable CPU-specific coherency if supported.
... ... @@ -436,22 +433,23 @@
436 433 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
437 434 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
438 435  
439   -#ifdef CONFIG_SMP
440   - /*
441   - * Mark memory with the "shared" attribute for SMP systems
442   - */
443   - user_pgprot |= L_PTE_SHARED;
444   - kern_pgprot |= L_PTE_SHARED;
445   - vecs_pgprot |= L_PTE_SHARED;
446   - mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
447   - mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
448   - mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
449   - mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
450   - mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
451   - mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
452   - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
453   - mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
454   -#endif
  436 + if (is_smp()) {
  437 + /*
  438 + * Mark memory with the "shared" attribute
  439 + * for SMP systems
  440 + */
  441 + user_pgprot |= L_PTE_SHARED;
  442 + kern_pgprot |= L_PTE_SHARED;
  443 + vecs_pgprot |= L_PTE_SHARED;
  444 + mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
  445 + mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
  446 + mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
  447 + mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
  448 + mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
  449 + mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
  450 + mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
  451 + mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
  452 + }
455 453 }
456 454  
457 455 /*
... ... @@ -829,8 +827,7 @@
829 827 * rather difficult.
830 828 */
831 829 reason = "with VIPT aliasing cache";
832   -#ifdef CONFIG_SMP
833   - } else if (tlb_ops_need_broadcast()) {
  830 + } else if (is_smp() && tlb_ops_need_broadcast()) {
834 831 /*
835 832 * kmap_high needs to occasionally flush TLB entries,
836 833 * however, if the TLB entries need to be broadcast
... ... @@ -840,7 +837,6 @@
840 837 * (must not be called with irqs off)
841 838 */
842 839 reason = "without hardware TLB ops broadcasting";
843   -#endif
844 840 }
845 841 if (reason) {
846 842 printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
arch/arm/mm/proc-v6.S
... ... @@ -30,13 +30,10 @@
30 30 #define TTB_RGN_WT (2 << 3)
31 31 #define TTB_RGN_WB (3 << 3)
32 32  
33   -#ifndef CONFIG_SMP
34   -#define TTB_FLAGS TTB_RGN_WBWA
35   -#define PMD_FLAGS PMD_SECT_WB
36   -#else
37   -#define TTB_FLAGS TTB_RGN_WBWA|TTB_S
38   -#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
39   -#endif
  33 +#define TTB_FLAGS_UP TTB_RGN_WBWA
  34 +#define PMD_FLAGS_UP PMD_SECT_WB
  35 +#define TTB_FLAGS_SMP TTB_RGN_WBWA|TTB_S
  36 +#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
40 37  
41 38 ENTRY(cpu_v6_proc_init)
42 39 mov pc, lr
... ... @@ -97,7 +94,8 @@
97 94 #ifdef CONFIG_MMU
98 95 mov r2, #0
99 96 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
100   - orr r0, r0, #TTB_FLAGS
  97 + ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
  98 + ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
101 99 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
102 100 mcr p15, 0, r2, c7, c10, 4 @ drain write buffer
103 101 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
104 102  
... ... @@ -156,9 +154,11 @@
156 154 */
157 155 __v6_setup:
158 156 #ifdef CONFIG_SMP
159   - mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode
  157 + ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @ Enable SMP/nAMP mode
  158 + ALT_UP(nop)
160 159 orr r0, r0, #0x20
161   - mcr p15, 0, r0, c1, c0, 1
  160 + ALT_SMP(mcr p15, 0, r0, c1, c0, 1)
  161 + ALT_UP(nop)
162 162 #endif
163 163  
164 164 mov r0, #0
... ... @@ -169,7 +169,8 @@
169 169 #ifdef CONFIG_MMU
170 170 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
171 171 mcr p15, 0, r0, c2, c0, 2 @ TTB control register
172   - orr r4, r4, #TTB_FLAGS
  172 + ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
  173 + ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
173 174 mcr p15, 0, r4, c2, c0, 1 @ load TTB1
174 175 #endif /* CONFIG_MMU */
175 176 adr r5, v6_crval
176 177  
... ... @@ -225,10 +226,16 @@
225 226 __v6_proc_info:
226 227 .long 0x0007b000
227 228 .long 0x0007f000
228   - .long PMD_TYPE_SECT | \
  229 + ALT_SMP(.long \
  230 + PMD_TYPE_SECT | \
229 231 PMD_SECT_AP_WRITE | \
230 232 PMD_SECT_AP_READ | \
231   - PMD_FLAGS
  233 + PMD_FLAGS_SMP)
  234 + ALT_UP(.long \
  235 + PMD_TYPE_SECT | \
  236 + PMD_SECT_AP_WRITE | \
  237 + PMD_SECT_AP_READ | \
  238 + PMD_FLAGS_UP)
232 239 .long PMD_TYPE_SECT | \
233 240 PMD_SECT_XN | \
234 241 PMD_SECT_AP_WRITE | \
235 242  
... ... @@ -249,10 +256,16 @@
249 256 __pj4_v6_proc_info:
250 257 .long 0x560f5810
251 258 .long 0xff0ffff0
252   - .long PMD_TYPE_SECT | \
  259 + ALT_SMP(.long \
  260 + PMD_TYPE_SECT | \
253 261 PMD_SECT_AP_WRITE | \
254 262 PMD_SECT_AP_READ | \
255   - PMD_FLAGS
  263 + PMD_FLAGS_SMP)
  264 + ALT_UP(.long \
  265 + PMD_TYPE_SECT | \
  266 + PMD_SECT_AP_WRITE | \
  267 + PMD_SECT_AP_READ | \
  268 + PMD_FLAGS_UP)
256 269 .long PMD_TYPE_SECT | \
257 270 PMD_SECT_XN | \
258 271 PMD_SECT_AP_WRITE | \
arch/arm/mm/proc-v7.S
... ... @@ -30,15 +30,13 @@
30 30 #define TTB_IRGN_WT ((1 << 0) | (0 << 6))
31 31 #define TTB_IRGN_WB ((1 << 0) | (1 << 6))
32 32  
33   -#ifndef CONFIG_SMP
34 33 /* PTWs cacheable, inner WB not shareable, outer WB not shareable */
35   -#define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB
36   -#define PMD_FLAGS PMD_SECT_WB
37   -#else
  34 +#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
  35 +#define PMD_FLAGS_UP PMD_SECT_WB
  36 +
38 37 /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
39   -#define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
40   -#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
41   -#endif
  38 +#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
  39 +#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
42 40  
43 41 ENTRY(cpu_v7_proc_init)
44 42 mov pc, lr
... ... @@ -105,7 +103,8 @@
105 103 #ifdef CONFIG_MMU
106 104 mov r2, #0
107 105 ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
108   - orr r0, r0, #TTB_FLAGS
  106 + ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
  107 + ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
109 108 #ifdef CONFIG_ARM_ERRATA_430973
110 109 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
111 110 #endif
... ... @@ -188,7 +187,8 @@
188 187 */
189 188 __v7_ca9mp_setup:
190 189 #ifdef CONFIG_SMP
191   - mrc p15, 0, r0, c1, c0, 1
  190 + ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
  191 + ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
192 192 tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
193 193 orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
194 194 mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
... ... @@ -262,7 +262,8 @@
262 262 #ifdef CONFIG_MMU
263 263 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
264 264 mcr p15, 0, r10, c2, c0, 2 @ TTB control register
265   - orr r4, r4, #TTB_FLAGS
  265 + ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
  266 + ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
266 267 mcr p15, 0, r4, c2, c0, 1 @ load TTB1
267 268 mov r10, #0x1f @ domains 0, 1 = manager
268 269 mcr p15, 0, r10, c3, c0, 0 @ load domain access register
269 270  
... ... @@ -354,10 +355,16 @@
354 355 __v7_ca9mp_proc_info:
355 356 .long 0x410fc090 @ Required ID value
356 357 .long 0xff0ffff0 @ Mask for ID
357   - .long PMD_TYPE_SECT | \
  358 + ALT_SMP(.long \
  359 + PMD_TYPE_SECT | \
358 360 PMD_SECT_AP_WRITE | \
359 361 PMD_SECT_AP_READ | \
360   - PMD_FLAGS
  362 + PMD_FLAGS_SMP)
  363 + ALT_UP(.long \
  364 + PMD_TYPE_SECT | \
  365 + PMD_SECT_AP_WRITE | \
  366 + PMD_SECT_AP_READ | \
  367 + PMD_FLAGS_UP)
361 368 .long PMD_TYPE_SECT | \
362 369 PMD_SECT_XN | \
363 370 PMD_SECT_AP_WRITE | \
364 371  
... ... @@ -380,10 +387,16 @@
380 387 __v7_proc_info:
381 388 .long 0x000f0000 @ Required ID value
382 389 .long 0x000f0000 @ Mask for ID
383   - .long PMD_TYPE_SECT | \
  390 + ALT_SMP(.long \
  391 + PMD_TYPE_SECT | \
384 392 PMD_SECT_AP_WRITE | \
385 393 PMD_SECT_AP_READ | \
386   - PMD_FLAGS
  394 + PMD_FLAGS_SMP)
  395 + ALT_UP(.long \
  396 + PMD_TYPE_SECT | \
  397 + PMD_SECT_AP_WRITE | \
  398 + PMD_SECT_AP_READ | \
  399 + PMD_FLAGS_UP)
387 400 .long PMD_TYPE_SECT | \
388 401 PMD_SECT_XN | \
389 402 PMD_SECT_AP_WRITE | \
arch/arm/mm/tlb-v7.S
... ... @@ -13,6 +13,7 @@
13 13 */
14 14 #include <linux/init.h>
15 15 #include <linux/linkage.h>
  16 +#include <asm/assembler.h>
16 17 #include <asm/asm-offsets.h>
17 18 #include <asm/page.h>
18 19 #include <asm/tlbflush.h>
19 20  
... ... @@ -41,20 +42,15 @@
41 42 orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
42 43 mov r1, r1, lsl #PAGE_SHIFT
43 44 1:
44   -#ifdef CONFIG_SMP
45   - mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
46   -#else
47   - mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
48   -#endif
  45 + ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
  46 + ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
  47 +
49 48 add r0, r0, #PAGE_SZ
50 49 cmp r0, r1
51 50 blo 1b
52 51 mov ip, #0
53   -#ifdef CONFIG_SMP
54   - mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
55   -#else
56   - mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
57   -#endif
  52 + ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
  53 + ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB
58 54 dsb
59 55 mov pc, lr
60 56 ENDPROC(v7wbi_flush_user_tlb_range)
61 57  
... ... @@ -74,20 +70,14 @@
74 70 mov r0, r0, lsl #PAGE_SHIFT
75 71 mov r1, r1, lsl #PAGE_SHIFT
76 72 1:
77   -#ifdef CONFIG_SMP
78   - mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
79   -#else
80   - mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA
81   -#endif
  73 + ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
  74 + ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
82 75 add r0, r0, #PAGE_SZ
83 76 cmp r0, r1
84 77 blo 1b
85 78 mov r2, #0
86   -#ifdef CONFIG_SMP
87   - mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable
88   -#else
89   - mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
90   -#endif
  79 + ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
  80 + ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB
91 81 dsb
92 82 isb
93 83 mov pc, lr
... ... @@ -99,6 +89,7 @@
99 89 ENTRY(v7wbi_tlb_fns)
100 90 .long v7wbi_flush_user_tlb_range
101 91 .long v7wbi_flush_kern_tlb_range
102   - .long v7wbi_tlb_flags
  92 + ALT_SMP(.long v7wbi_tlb_flags_smp)
  93 + ALT_UP(.long v7wbi_tlb_flags_up)
103 94 .size v7wbi_tlb_fns, . - v7wbi_tlb_fns
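
As with the proc_info entries in proc-v6.S and proc-v7.S, the ALT pair here
patches a data word rather than an instruction: the flags slot of
v7wbi_tlb_fns is linked as v7wbi_tlb_flags_smp and overwritten with
v7wbi_tlb_flags_up by the UP fixup. The patcher is type-blind, so any
32-bit word can be switched this way. A layout sketch of the record above
(field types are illustrative, not the kernel's struct cpu_tlb_fns):

    #include <stdint.h>

    struct tlb_fns_sketch {
            void     *flush_user_range;     /* v7wbi_flush_user_tlb_range */
            void     *flush_kern_range;     /* v7wbi_flush_kern_tlb_range */
            uint32_t  tlb_flags;            /* smp value, patched to up */
    };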