Commit 483e9bcb01432ce66448c214bd0afc231da48b4b
Parent: d091fcb97f
Exists in smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
ARC: MMUv4 preps/3 - Abstract out TLB Insert/Delete
This reorganizes the current TLB operations into pseudo-ops to better pair
with MMUv4's native Insert/Delete operations.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
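At a glance, the patch factors the raw aux-register sequences in arch/arc/mm/tlb.c into three pseudo-ops (signatures as introduced in the diff below; the one-line summaries are editorial):

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid);
        /* TLBProbe for vaddr+ASID; returns TLBINDEX, with TLB_LKUP_ERR set on a miss */

static void tlb_entry_erase(unsigned int vaddr_n_asid);
        /* probe, then zero out the matching entry via TLBWrite, if one was found */

static void tlb_entry_insert(unsigned int pd0, unsigned int pd1);
        /* probe; reuse the matching slot or TLBGetIndex a free one; commit PD0/PD1 */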
Showing 2 changed files with 56 additions and 40 deletions
arch/arc/include/asm/mmu.h
arch/arc/mm/tlb.c
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -52,6 +52,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/bug.h>
 #include <asm/arcregs.h>
 #include <asm/setup.h>
 #include <asm/mmu_context.h>
@@ -109,38 +110,41 @@
 
 /*
  * Utility Routine to erase a J-TLB entry
- * The procedure is to look it up in the MMU. If found, ERASE it by
- * issuing a TlbWrite CMD with PD0 = PD1 = 0
+ * Caller needs to setup Index Reg (manually or via getIndex)
  */
-
-static void __tlb_entry_erase(void)
+static inline void __tlb_entry_erase(void)
 {
         write_aux_reg(ARC_REG_TLBPD1, 0);
         write_aux_reg(ARC_REG_TLBPD0, 0);
         write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
 }
 
-static void tlb_entry_erase(unsigned int vaddr_n_asid)
+static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
 {
         unsigned int idx;
 
-        /* Locate the TLB entry for this vaddr + ASID */
         write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
+
         write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
         idx = read_aux_reg(ARC_REG_TLBINDEX);
 
+        return idx;
+}
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+        unsigned int idx;
+
+        /* Locate the TLB entry for this vaddr + ASID */
+        idx = tlb_entry_lkup(vaddr_n_asid);
+
         /* No error means entry found, zero it out */
         if (likely(!(idx & TLB_LKUP_ERR))) {
                 __tlb_entry_erase();
-        } else {        /* Some sort of Error */
-
+        } else {
                 /* Duplicate entry error */
-                if (idx & 0x1) {
-                        /* TODO we need to handle this case too */
-                        pr_emerg("unhandled Duplicate flush for %x\n",
-                                 vaddr_n_asid);
-                }
-                /* else entry not found so nothing to do */
+                WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
+                     vaddr_n_asid);
         }
 }
 
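Background for the hunk above: PD0 and PD1 are the two halves of a J-TLB entry, PD0 the tag (vaddr + ASID) and PD1 the data (pfn, permissions); TLB commands operate implicitly on these registers plus INDEX. A minimal sketch of the probe/write protocol the helpers wrap (register and command names are the ones used above; the wrapper functions themselves are illustrative, not part of the patch):

static unsigned int probe(unsigned long vaddr_n_asid)
{
        write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);    /* tag to search for */
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);    /* result lands in TLBINDEX */
        return read_aux_reg(ARC_REG_TLBINDEX);          /* TLB_LKUP_ERR set on miss */
}

static void write_indexed_slot(unsigned int pd0, unsigned int pd1)
{
        /* TLBWrite commits the PD0/PD1 pair into the slot named by TLBINDEX */
        write_aux_reg(ARC_REG_TLBPD0, pd0);
        write_aux_reg(ARC_REG_TLBPD1, pd1);
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}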
@@ -159,7 +163,7 @@
 {
 #if (CONFIG_ARC_MMU_VER >= 2)
 
-#if (CONFIG_ARC_MMU_VER < 3)
+#if (CONFIG_ARC_MMU_VER == 2)
         /* MMU v2 introduced the uTLB Flush command.
          * There was however an obscure hardware bug, where uTLB flush would
          * fail when a prior probe for J-TLB (both totally unrelated) would
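Two notes on this hunk. First, inside the enclosing (>= 2) block the guard change from (< 3) to (== 2) is semantically identical; it simply states the intent directly as more MMU versions appear. Second, the comment is cut off by the hunk context; the workaround it describes (unchanged by this patch, hence not shown) essentially makes sure the INDEX register holds a valid value before issuing the uTLB flush. A sketch of what that pre-existing block plausibly looks like; treat the details, notably the 0xa dummy value, as assumptions:

        unsigned int idx;

        /* make sure INDEX Reg is valid */
        idx = read_aux_reg(ARC_REG_TLBINDEX);

        /* if not, write some dummy value so the subsequent flush takes effect */
        if (unlikely(idx & TLB_LKUP_ERR))
                write_aux_reg(ARC_REG_TLBINDEX, 0xa);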
@@ -182,6 +186,36 @@
 
 }
 
+static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+{
+        unsigned int idx;
+
+        /*
+         * First verify if entry for this vaddr+ASID already exists
+         * This also sets up PD0 (vaddr, ASID..) for final commit
+         */
+        idx = tlb_entry_lkup(pd0);
+
+        /*
+         * If Not already present get a free slot from MMU.
+         * Otherwise, Probe would have located the entry and set INDEX Reg
+         * with existing location. This will cause Write CMD to over-write
+         * existing entry with new PD0 and PD1
+         */
+        if (likely(idx & TLB_LKUP_ERR))
+                write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+
+        /* setup the other half of TLB entry (pfn, rwx..) */
+        write_aux_reg(ARC_REG_TLBPD1, pd1);
+
+        /*
+         * Commit the Entry to MMU
+         * It doesn't sound safe to use the TLBWriteNI cmd here
+         * which doesn't flush uTLBs. I'd rather be safe than sorry.
+         */
+        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
 /*
  * Un-conditionally (without lookup) erase the entire MMU contents
  */
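Note the symmetry with tlb_entry_erase() above: erase acts when the probe hits (!(idx & TLB_LKUP_ERR)), while insert requests a free slot via TLBGetIndex only when it misses. On a hit, INDEX already names the existing entry, so the final TLBWrite overwrites it in place; this is what keeps the J-TLB free of duplicate entries for a given vaddr+ASID. A hypothetical call site (the operand names are placeholders, not from this commit):

        unsigned int pd0 = vaddr_n_asid | ctl_bits;     /* tag half: vaddr + ASID + flags */
        unsigned int pd1 = pfn_bits | perm_bits;        /* data half: pfn + protection */

        tlb_entry_insert(pd0, pd1);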
@@ -341,7 +375,8 @@
 void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
         unsigned long flags;
-        unsigned int idx, asid_or_sasid, rwx;
+        unsigned int asid_or_sasid, rwx;
+        unsigned long pd0, pd1;
 
         /*
          * create_tlb() assumes that current->mm == vma->mm, since
@@ -385,8 +420,7 @@
         /* ASID for this task */
         asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
 
-        write_aux_reg(ARC_REG_TLBPD0, address | asid_or_sasid |
-                      (pte_val(*ptep) & PTE_BITS_IN_PD0));
+        pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
 
         /*
          * ARC MMU provides fully orthogonal access bits for K/U mode,
@@ -402,29 +436,9 @@
         else
                 rwx |= (rwx << 3);      /* r w x => Kr Kw Kx Ur Uw Ux */
 
-        /* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
-        write_aux_reg(ARC_REG_TLBPD1,
-                      rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1));
+        pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);
 
-        /* First verify if entry for this vaddr+ASID already exists */
-        write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
-        idx = read_aux_reg(ARC_REG_TLBINDEX);
-
-        /*
-         * If Not already present get a free slot from MMU.
-         * Otherwise, Probe would have located the entry and set INDEX Reg
-         * with existing location. This will cause Write CMD to over-write
-         * existing entry with new PD0 and PD1
-         */
-        if (likely(idx & TLB_LKUP_ERR))
-                write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
-
-        /*
-         * Commit the Entry to MMU
-         * It doesn't sound safe to use the TLBWriteNI cmd here
-         * which doesn't flush uTLBs. I'd rather be safe than sorry.
-         */
-        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+        tlb_entry_insert(pd0, pd1);
 
         local_irq_restore(flags);
 }
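Net effect: the tail of create_tlb() now reads as compose-then-insert, with all TLB command sequencing delegated to the helper (an abridged consolidation of the + lines above):

        pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);
        ...
        pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

        tlb_entry_insert(pd0, pd1);
        local_irq_restore(flags);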