Commit d532f3d26716a39dfd4b88d687bd344fbe77e390

Authored by Steven J. Hill
Committed by Ralf Baechle
1 parent 49bffbdc88

MIPS: Allow ASID size to be determined at boot time.

Original patch by Ralf Baechle and removed by Harold Koerfgen
with commit f67e4ffc79905482c3b9b8c8dd65197bac7eb508. This
allows for more generic kernels since the size of the ASID
and corresponding masks can be determined at run-time. This
patch is also required for the new Aptiv cores and has been
tested on Malta and Malta Aptiv platforms.

[ralf@linux-mips.org: Added relevant part of fix
https://patchwork.linux-mips.org/patch/5213/]

Signed-off-by: Steven J. Hill <Steven.Hill@imgtec.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Showing 10 changed files with 137 additions and 61 deletions
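
The technique used throughout this patch is boot-time instruction patching: each site that needs an ASID constant emits a marker instruction, records that instruction's address in a dedicated ELF section (__asid_inc, __asid_mask, __asid_version_mask, __asid_first_version), and setup_asid() later rewrites the 16-bit immediate field at every recorded site once the CPU type is known. The sketch below is a host-side analogue of that pattern, not kernel code; every name in it is invented, and it assumes a GNU toolchain on an ELF target (where the linker synthesizes __start_/__stop_ symbols for user-named sections):

	/* patch_demo.c -- illustration only; build with: gcc patch_demo.c */
	#include <stdio.h>
	#include <stdint.h>

	static uint32_t insn_a = 0x30420000;	/* stand-ins for 'andi' opcodes */
	static uint32_t insn_b = 0x30630000;

	/* Record each patch site's address in section "asid_sites"; the
	 * linker brackets it with __start_/__stop_ symbols automatically. */
	static uint32_t *site_a __attribute__((section("asid_sites"), used)) = &insn_a;
	static uint32_t *site_b __attribute__((section("asid_sites"), used)) = &insn_b;
	extern uint32_t *__start_asid_sites[], *__stop_asid_sites[];

	/* Same shape as the kernel's insn_fixup(): rewrite the low 16 bits
	 * (the immediate field of a MIPS I-type instruction) at each site. */
	static void insn_fixup(uint32_t **start, uint32_t **stop, uint32_t i_const)
	{
		for (uint32_t **p = start; p < stop; p++)
			**p = (**p & 0xffff0000) | i_const;
	}

	int main(void)
	{
		insn_fixup(__start_asid_sites, __stop_asid_sites, 0x00ff);
		printf("%08x %08x\n", insn_a, insn_b);	/* 304200ff 306300ff */
		return 0;
	}

The same idea lets one kernel image serve cores with different ASID widths, at the cost of patching (and icache-flushing) each marked instruction once at boot.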

arch/mips/include/asm/mmu_context.h
@@ -62,45 +62,68 @@
 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
-#define ASID_INC	0x40
-#define ASID_MASK	0xfc0
+#define ASID_INC(asid)						\
+({								\
+	unsigned long __asid = asid;				\
+	__asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t"		\
+	".section\t__asid_inc,\"a\"\n\t"			\
+	".word\t1b\n\t"						\
+	".previous"						\
+	:"=r" (__asid)						\
+	:"0" (__asid));						\
+	__asid;							\
+})
+#define ASID_MASK(asid)						\
+({								\
+	unsigned long __asid = asid;				\
+	__asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t"	\
+	".section\t__asid_mask,\"a\"\n\t"			\
+	".word\t1b\n\t"						\
+	".previous"						\
+	:"=r" (__asid)						\
+	:"r" (__asid));						\
+	__asid;							\
+})
+#define ASID_VERSION_MASK					\
+({								\
+	unsigned long __asid;					\
+	__asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t"	\
+	".section\t__asid_version_mask,\"a\"\n\t"		\
+	".word\t1b\n\t"						\
+	".previous"						\
+	:"=r" (__asid));					\
+	__asid;							\
+})
+#define ASID_FIRST_VERSION					\
+({								\
+	unsigned long __asid;					\
+	__asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t"		\
+	".section\t__asid_first_version,\"a\"\n\t"		\
+	".word\t1b\n\t"						\
+	".previous"						\
+	:"=r" (__asid));					\
+	__asid;							\
+})
 
-#elif defined(CONFIG_CPU_R8000)
+#define ASID_FIRST_VERSION_R3000	0x1000
+#define ASID_FIRST_VERSION_R4000	0x100
+#define ASID_FIRST_VERSION_R8000	0x1000
+#define ASID_FIRST_VERSION_RM9000	0x1000
 
-#define ASID_INC	0x10
-#define ASID_MASK	0xff0
-
-#elif defined(CONFIG_MIPS_MT_SMTC)
-
-#define ASID_INC	0x1
-extern unsigned long smtc_asid_mask;
-#define ASID_MASK	(smtc_asid_mask)
-#define HW_ASID_MASK	0xff
-/* End SMTC/34K debug hack */
-#else /* FIXME: not correct for R6000 */
-
-#define ASID_INC	0x1
-#define ASID_MASK	0xff
-
+#ifdef CONFIG_MIPS_MT_SMTC
+#define SMTC_HW_ASID_MASK	0xff
+extern unsigned int smtc_asid_mask;
 #endif
 
 #define cpu_context(cpu, mm)	((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
+#define cpu_asid(cpu, mm)	ASID_MASK(cpu_context((cpu), (mm)))
 #define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
 
 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-/*
- * All unused by hardware upper bits will be considered
- * as a software asid extension.
- */
-#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
-#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
-
 #ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
@@ -108,7 +131,7 @@
 {
 	unsigned long asid = asid_cache(cpu);
 
-	if (! ((asid += ASID_INC) & ASID_MASK) ) {
+	if (!ASID_MASK((asid = ASID_INC(asid)))) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
 		local_flush_tlb_all();	/* start new asid cycle */
@@ -166,7 +189,7 @@
 	 * free up the ASID value for use and flush any old
 	 * instances of it from the TLB.
 	 */
-	oldasid = (read_c0_entryhi() & ASID_MASK);
+	oldasid = ASID_MASK(read_c0_entryhi());
 	if(smtc_live_asid[mytlb][oldasid]) {
 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 		if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -177,7 +200,7 @@
 	 * having ASID_MASK smaller than the hardware maximum,
 	 * make sure no "soft" bits become "hard"...
 	 */
-	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
+	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
			 cpu_asid(cpu, next));
 	ehb();	/* Make sure it propagates to TCStatus */
 	evpe(mtflags);
 
@@ -230,15 +253,15 @@
 #ifdef CONFIG_MIPS_MT_SMTC
 	/* See comments for similar code above */
 	mtflags = dvpe();
-	oldasid = read_c0_entryhi() & ASID_MASK;
+	oldasid = ASID_MASK(read_c0_entryhi());
 	if(smtc_live_asid[mytlb][oldasid]) {
 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 		if(smtc_live_asid[mytlb][oldasid] == 0)
 			smtc_flush_tlb_asid(oldasid);
 	}
 	/* See comments for similar code above */
-	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
-			cpu_asid(cpu, next));
+	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+			 cpu_asid(cpu, next));
 	ehb();	/* Make sure it propagates to TCStatus */
 	evpe(mtflags);
 #else
 
@@ -275,14 +298,14 @@
 #ifdef CONFIG_MIPS_MT_SMTC
 	/* See comments for similar code above */
 	prevvpe = dvpe();
-	oldasid = (read_c0_entryhi() & ASID_MASK);
+	oldasid = ASID_MASK(read_c0_entryhi());
 	if (smtc_live_asid[mytlb][oldasid]) {
 		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 		if(smtc_live_asid[mytlb][oldasid] == 0)
 			smtc_flush_tlb_asid(oldasid);
 	}
 	/* See comments for similar code above */
-	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
+	write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
			 | cpu_asid(cpu, mm));
 	ehb();	/* Make sure it propagates to TCStatus */
 	evpe(prevvpe);
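
For reference, the deleted compile-time definitions above fix the relationship the patched constants must preserve: the software version bits are all bits above the hardware ASID field. A quick host-side check (illustration only, not part of the patch) reproduces the R4000-case constants; note that the patched "addiu %0,$0,0xff00" relies on the immediate being sign-extended to 0xffffff00 on a 32-bit kernel, just as 0xf000 sign-extends to 0xfffff000 for the R3000 case.

	/* asid_math.c -- illustration only; build with: gcc asid_math.c */
	#include <stdio.h>

	int main(void)
	{
		unsigned long asid_mask = 0xff;	/* 8-bit R4000-style ASID */
		unsigned long version_mask = ~(asid_mask | (asid_mask - 1));
		unsigned long first_version = ~version_mask + 1;

		/* On a 32-bit build this prints version_mask=0xffffff00
		 * first_version=0x100, matching ASID_FIRST_VERSION_R4000. */
		printf("version_mask=%#lx first_version=%#lx\n",
		       version_mask, first_version);
		return 0;
	}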
arch/mips/kernel/genex.S
@@ -480,7 +480,7 @@
 	.set	noreorder
 	/* check if TLB contains an entry for EPC */
 	MFC0	k1, CP0_ENTRYHI
-	andi	k1, 0xff	/* ASID_MASK */
+	andi	k1, 0xff	/* ASID_MASK patched at run-time!! */
 	MFC0	k0, CP0_EPC
 	PTR_SRL	k0, _PAGE_SHIFT + 1
 	PTR_SLL	k0, _PAGE_SHIFT + 1
arch/mips/kernel/smtc.c
@@ -111,7 +111,7 @@
 static int ipibuffers;
 static int nostlb;
 static int asidmask;
-unsigned long smtc_asid_mask = 0xff;
+unsigned int smtc_asid_mask = 0xff;
 
 static int __init vpe0tcs(char *str)
 {
@@ -1395,7 +1395,7 @@
 	asid = asid_cache(cpu);
 
 	do {
-		if (!((asid += ASID_INC) & ASID_MASK) ) {
+		if (!ASID_MASK((asid = ASID_INC(asid)))) {
 			if (cpu_has_vtag_icache)
 				flush_icache_all();
 			/* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1414,7 @@
 				mips_ihb();
 			}
 			tcstat = read_tc_c0_tcstatus();
-			smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
+			smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i);
 			if (!prevhalt)
 				write_tc_c0_tchalt(0);
 		}
@@ -1423,7 +1423,7 @@
 			asid = ASID_FIRST_VERSION;
 			local_flush_tlb_all();	/* start new asid cycle */
 		}
-	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
+	} while (smtc_live_asid[tlb][ASID_MASK(asid)]);
 
 	/*
 	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1461,7 @@
 		tlb_read();
 		ehb();
 		ehi = read_c0_entryhi();
-		if ((ehi & ASID_MASK) == asid) {
+		if (ASID_MASK(ehi) == asid) {
 			/*
 			 * Invalidate only entries with specified ASID,
 			 * making sure all entries differ.
arch/mips/kernel/traps.c
@@ -1547,6 +1547,7 @@
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
 	unsigned int hwrena = cpu_hwrena_impl_bits;
+	unsigned long asid = 0;
 #ifdef CONFIG_MIPS_MT_SMTC
 	int secondaryTC = 0;
 	int bootTC = (cpu == 0);
@@ -1630,8 +1631,9 @@
 	}
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-	if (!cpu_data[cpu].asid_cache)
-		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+	asid = ASID_FIRST_VERSION;
+	cpu_data[cpu].asid_cache = asid;
+	TLBMISS_HANDLER_SETUP();
 
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
arch/mips/lib/dump_tlb.c
@@ -11,6 +11,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
+#include <asm/mmu_context.h>
 
 static inline const char *msk2str(unsigned int mask)
 {
@@ -55,7 +56,7 @@
 	s_pagemask = read_c0_pagemask();
 	s_entryhi = read_c0_entryhi();
 	s_index = read_c0_index();
-	asid = s_entryhi & 0xff;
+	asid = ASID_MASK(s_entryhi);
 
 	for (i = first; i <= last; i++) {
 		write_c0_index(i);
@@ -85,7 +86,7 @@
 
 			printk("va=%0*lx asid=%02lx\n",
 			       width, (entryhi & ~0x1fffUL),
-			       entryhi & 0xff);
+			       ASID_MASK(entryhi));
 			printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
 			       width,
 			       (entrylo0 << 6) & PAGE_MASK, c0,
arch/mips/lib/r3k_dump_tlb.c
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 
 #include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbdebug.h>
@@ -21,7 +22,7 @@
 	unsigned int asid;
 	unsigned long entryhi, entrylo0;
 
-	asid = read_c0_entryhi() & 0xfc0;
+	asid = ASID_MASK(read_c0_entryhi());
 
 	for (i = first; i <= last; i++) {
 		write_c0_index(i<<8);
@@ -35,7 +36,7 @@
 
 		/* Unused entries have a virtual address of KSEG0. */
 		if ((entryhi & 0xffffe000) != 0x80000000
-		    && (entryhi & 0xfc0) == asid) {
+		    && (ASID_MASK(entryhi) == asid)) {
 			/*
 			 * Only print entries in use
 			 */
@@ -44,7 +45,7 @@
 			printk("va=%08lx asid=%08lx"
 			       " [pa=%06lx n=%d d=%d v=%d g=%d]",
 			       (entryhi & 0xffffe000),
-			       entryhi & 0xfc0,
+			       ASID_MASK(entryhi),
 			       entrylo0 & PAGE_MASK,
 			       (entrylo0 & (1 << 11)) ? 1 : 0,
 			       (entrylo0 & (1 << 10)) ? 1 : 0,
arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@
 #endif
 
 	local_irq_save(flags);
-	old_ctx = read_c0_entryhi() & ASID_MASK;
+	old_ctx = ASID_MASK(read_c0_entryhi());
 	write_c0_entrylo0(0);
 	entry = r3k_have_wired_reg ? read_c0_wired() : 8;
 	for (; entry < current_cpu_data.tlbsize; entry++) {
 
@@ -87,13 +87,13 @@
 
 #ifdef DEBUG_TLB
 	printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
-	       cpu_context(cpu, mm) & ASID_MASK, start, end);
+	       ASID_MASK(cpu_context(cpu, mm)), start, end);
 #endif
 	local_irq_save(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	if (size <= current_cpu_data.tlbsize) {
-		int oldpid = read_c0_entryhi() & ASID_MASK;
-		int newpid = cpu_context(cpu, mm) & ASID_MASK;
+		int oldpid = ASID_MASK(read_c0_entryhi());
+		int newpid = ASID_MASK(cpu_context(cpu, mm));
 
 		start &= PAGE_MASK;
 		end += PAGE_SIZE - 1;
 
@@ -166,10 +166,10 @@
 #ifdef DEBUG_TLB
 	printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
 #endif
-	newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+	newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm));
 	page &= PAGE_MASK;
 	local_irq_save(flags);
-	oldpid = read_c0_entryhi() & ASID_MASK;
+	oldpid = ASID_MASK(read_c0_entryhi());
 	write_c0_entryhi(page | newpid);
 	BARRIER;
 	tlb_probe();
 
@@ -197,10 +197,10 @@
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = ASID_MASK(read_c0_entryhi());
 
 #ifdef DEBUG_TLB
-	if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+	if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) {
 		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
 		       (cpu_context(cpu, vma->vm_mm)), pid);
 	}
@@ -241,7 +241,7 @@
 
 	local_irq_save(flags);
 	/* Save old context and create impossible VPN2 value */
-	old_ctx = read_c0_entryhi() & ASID_MASK;
+	old_ctx = ASID_MASK(read_c0_entryhi());
 	old_pagemask = read_c0_pagemask();
 	w = read_c0_wired();
 	write_c0_wired(w + 1);
@@ -264,7 +264,7 @@
 #endif
 
 	local_irq_save(flags);
-	old_ctx = read_c0_entryhi() & ASID_MASK;
+	old_ctx = ASID_MASK(read_c0_entryhi());
 	write_c0_entrylo0(entrylo0);
 	write_c0_entryhi(entryhi);
 	write_c0_index(wired);
arch/mips/mm/tlb-r4k.c
@@ -285,7 +285,7 @@
 
 	ENTER_CRITICAL(flags);
 
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = ASID_MASK(read_c0_entryhi());
 	address &= (PAGE_MASK << 1);
 	write_c0_entryhi(address | pid);
 	pgdp = pgd_offset(vma->vm_mm, address);
arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	pid = read_c0_entryhi() & ASID_MASK;
+	pid = ASID_MASK(read_c0_entryhi());
 
 	local_irq_save(flags);
 	address &= PAGE_MASK;
arch/mips/mm/tlbex.c
@@ -29,6 +29,7 @@
 #include <linux/init.h>
 #include <linux/cache.h>
 
+#include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
 #include <asm/war.h>
@@ -305,6 +306,48 @@
 static int check_for_high_segbits __cpuinitdata;
 #endif
 
+static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
+				 unsigned int i_const)
+{
+	unsigned int **p, *ip;
+
+	for (p = start; p < stop; p++) {
+		ip = *p;
+		/* Rewrite the 16-bit immediate field of the marked insn. */
+		*ip = (*ip & 0xffff0000) | i_const;
+		/* Flush each patched word while 'ip' is still in range. */
+		local_flush_icache_range((unsigned long)ip,
+					 (unsigned long)(ip + 1));
+	}
+}
+
+#define asid_insn_fixup(section, const)					\
+do {									\
+	extern unsigned int *__start_ ## section;			\
+	extern unsigned int *__stop_ ## section;			\
+	insn_fixup(&__start_ ## section, &__stop_ ## section, const);	\
+} while (0)
+
+/*
+ * Caller is assumed to flush the caches before the first context switch.
+ */
+static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
+				 unsigned int version_mask,
+				 unsigned int first_version)
+{
+	extern asmlinkage void handle_ri_rdhwr_vivt(void);
+	unsigned int *vivt_exc;
+
+	asid_insn_fixup(__asid_inc, inc);
+	asid_insn_fixup(__asid_mask, mask);
+	asid_insn_fixup(__asid_version_mask, version_mask);
+	asid_insn_fixup(__asid_first_version, first_version);
+
+	/*
+	 * Patch up the 'handle_ri_rdhwr_vivt' handler: its second
+	 * instruction is the 'andi' whose immediate field must become
+	 * the runtime ASID mask.
+	 */
+	vivt_exc = (unsigned int *) &handle_ri_rdhwr_vivt;
+	vivt_exc++;
+	*vivt_exc = (*vivt_exc & 0xffff0000) | mask;
+
+	current_cpu_data.asid_cache = first_version;
+}
+
 static int check_for_high_segbits __cpuinitdata;
 
 static unsigned int kscratch_used_mask __cpuinitdata;
@@ -2162,6 +2205,7 @@
 	case CPU_TX3922:
 	case CPU_TX3927:
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+		setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
 		build_r3000_tlb_refill_handler();
 		if (!run_once) {
 			build_r3000_tlb_load_handler();
@@ -2184,6 +2228,11 @@
 		break;
 
 	default:
+#ifndef CONFIG_MIPS_MT_SMTC
+		setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
+#else
+		setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
+#endif
 		if (!run_once) {
 			scratch_reg = allocate_kscratch();
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
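
To make the payoff concrete: supporting a core with a different ASID layout now means adding one setup_asid() call in this switch rather than a new compile-time branch in mmu_context.h. A hypothetical case for a core with a 10-bit ASID (the core name and all constants below are invented, not part of this patch) would look like:

	case CPU_NEWCORE:
		/* hypothetical: 10-bit ASID, version bits from 0x400 up;
		 * 0xfc00 sign-extends to the full 0xfffffc00 version mask */
		setup_asid(0x1, 0x3ff, 0xfc00, 0x400);
		build_r4000_tlb_refill_handler();
		break;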