Blame view
arch/powerpc/kvm/44x_tlb.c
13.9 KB
bbf45ba57 KVM: ppc: PowerPC... |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 |
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright IBM Corp. 2007 * * Authors: Hollis Blanchard <hollisb@us.ibm.com> */ #include <linux/types.h> #include <linux/string.h> |
31711f229 KVM: ppc: adds tr... |
22 |
#include <linux/kvm.h> |
bbf45ba57 KVM: ppc: PowerPC... |
23 24 |
#include <linux/kvm_host.h> #include <linux/highmem.h> |
7924bd410 KVM: ppc: directl... |
25 26 |
#include <asm/tlbflush.h> |
bbf45ba57 KVM: ppc: PowerPC... |
27 28 |
#include <asm/mmu-44x.h> #include <asm/kvm_ppc.h> |
db93f5745 KVM: ppc: create ... |
29 |
#include <asm/kvm_44x.h> |
73e75b416 KVM: ppc: Impleme... |
30 |
#include "timing.h" |
bbf45ba57 KVM: ppc: PowerPC... |
31 32 |
#include "44x_tlb.h" |
46f43c6ee KVM: powerpc: con... |
33 |
#include "trace.h" |
bbf45ba57 KVM: ppc: PowerPC... |
34 |
|
891686188 KVM: ppc: support... |
35 36 37 38 39 40 |
#ifndef PPC44x_TLBE_SIZE #define PPC44x_TLBE_SIZE PPC44x_TLB_4K #endif #define PAGE_SIZE_4K (1<<12) #define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1)) |
df9b856c4 KVM: ppc: use pre... |
41 42 |
#define PPC44x_TLB_UATTR_MASK \ (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3) |
bbf45ba57 KVM: ppc: PowerPC... |
43 44 |
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW) #define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW) |
a0d7b9f24 KVM: ppc: Move 44... |
45 46 47 |
#ifdef DEBUG
/* Dump all valid guest TLB entries to the console (debug builds only). */
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	int i;

	/* Format strings must end in '\n' or consecutive printk lines run
	 * together. */
	printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
	printk("| %2s | %3s | %8s | %8s | %8s |\n",
			"nr", "tid", "word0", "word1", "word2");

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		tlbe = &vcpu_44x->guest_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" G%2d |  %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}
}
#endif
7924bd410 KVM: ppc: directl... |
67 68 69 70 71 72 73 74 75 76 77 |
/* Invalidate hardware TLB entry 'index'.
 *
 * 0 <= index < 64, so the V bit is clear and we can use the index itself as
 * word0 (an invalid entry) for the tlbwe. */
static inline void kvmppc_44x_tlbie(unsigned int index)
{
	/* Restore the '\n' terminating the asm template (lost in transit);
	 * without it the template is not a well-formed assembly line. */
	asm volatile(
		"tlbwe %[index], %[index], 0\n"
	:
	: [index] "r"(index)
	);
}
c5fbdffbd KVM: ppc: save an... |
78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 |
/* Read hardware TLB entry 'index' (all three words plus the TID from MMUCR)
 * into *tlbe.
 *
 * Each asm instruction must be terminated with '\n'; without the separators
 * (lost in transit) the five instructions concatenate into one invalid
 * assembly line and the file does not build. */
static inline void kvmppc_44x_tlbre(unsigned int index,
                                    struct kvmppc_44x_tlbe *tlbe)
{
	asm volatile(
		"tlbre %[word0], %[index], 0\n"
		"mfspr %[tid], %[sprn_mmucr]\n"
		"andi. %[tid], %[tid], 0xff\n"	/* TID lives in MMUCR[STID] */
		"tlbre %[word1], %[index], 1\n"
		"tlbre %[word2], %[index], 2\n"
		: [word0] "=r"(tlbe->word0),
		  [word1] "=r"(tlbe->word1),
		  [word2] "=r"(tlbe->word2),
		  [tid]   "=r"(tlbe->tid)
		: [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
		: "cc"	/* andi. sets CR0 */
	);
}
7924bd410 KVM: ppc: directl... |
101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 |
/* Write *stlbe into hardware TLB entry 'index', first loading the entry's TID
 * into MMUCR[STID] (the tlbwe instruction takes the TID from there).
 *
 * Each asm instruction must be terminated with '\n'; the separators were lost
 * in transit, which would concatenate the instructions into one invalid
 * assembly line. */
static inline void kvmppc_44x_tlbwe(unsigned int index,
                                    struct kvmppc_44x_tlbe *stlbe)
{
	unsigned long tmp;

	asm volatile(
		"mfspr %[tmp], %[sprn_mmucr]\n"
		"rlwimi %[tmp], %[tid], 0, 0xff\n"	/* insert TID into MMUCR[STID] */
		"mtspr %[sprn_mmucr], %[tmp]\n"
		"tlbwe %[word0], %[index], 0\n"
		"tlbwe %[word1], %[index], 1\n"
		"tlbwe %[word2], %[index], 2\n"
		: [tmp]   "=&r"(tmp)	/* early-clobber: written before inputs consumed */
		: [word0] "r"(stlbe->word0),
		  [word1] "r"(stlbe->word1),
		  [word2] "r"(stlbe->word2),
		  [tid]   "r"(stlbe->tid),
		  [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
	);
}
bbf45ba57 KVM: ppc: PowerPC... |
128 129 |
/* Translate guest TLB attribute bits into the attributes we install in the
 * shadow (host) TLB entry for it. */
static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* We only care about the guest's permission and user bits. */
	u32 shadow = attrib & (PPC44x_TLB_PERM_MASK | PPC44x_TLB_UATTR_MASK);

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions (each S* bit is
		 * three positions below its U* counterpart). */
		u32 super_perms = shadow & PPC44x_TLB_SUPER_PERM_MASK;

		shadow &= ~PPC44x_TLB_USER_PERM_MASK;
		shadow |= super_perms << 3;
	}

	/* Make sure host can always access this memory. */
	shadow |= PPC44x_TLB_SX | PPC44x_TLB_SR | PPC44x_TLB_SW;

	/* WIMGE = 0b00100 */
	shadow |= PPC44x_TLB_M;

	return shadow;
}
c5fbdffbd KVM: ppc: save an... |
146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 |
/* Load shadow TLB back into hardware. */
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		/* Only restore valid guest mappings; all guest mappings are
		 * installed with TS=1 (see kvmppc_mmu_map). */
		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbwe(i, stlbe);
	}
}

/* Mark shadow entry i so the next kvmppc_44x_tlb_put() re-reads it from
 * hardware instead of trusting the cached copy. */
static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
                                         unsigned int i)
{
	vcpu_44x->shadow_tlb_mod[i] = 1;
}

/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		/* Refresh our copy of entries written since the last load. */
		if (vcpu_44x->shadow_tlb_mod[i])
			kvmppc_44x_tlbre(i, stlbe);

		/* Remove guest (TS=1) mappings from the hardware TLB. */
		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbie(i);
	}
}
bbf45ba57 KVM: ppc: PowerPC... |
182 183 184 185 |
/* Search the guest TLB for a matching entry; returns its index, or -1 if no
 * entry translates (eaddr, pid, as). */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
		unsigned int tid = get_tlb_tid(tlbe);

		/* A match requires: the entry covers eaddr, is either global
		 * (TID=0) or owned by this PID, is valid, and belongs to the
		 * requested address space. */
		if (eaddr >= get_tlb_eaddr(tlbe) &&
		    eaddr <= get_tlb_end(tlbe) &&
		    (tid == 0 || tid == pid) &&
		    get_tlb_v(tlbe) &&
		    get_tlb_ts(tlbe) == as)
			return i;
	}

	return -1;
}
be8d1cae0 KVM: ppc: turn tl... |
215 216 217 218 219 220 221 222 223 |
/* Translate eaddr through guest TLB entry gtlb_index to a guest physical
 * address. Caller must ensure the entry actually covers eaddr. */
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                       gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	unsigned int offset_mask = get_tlb_bytes(gtlbe) - 1;

	/* Real page number from the entry, in-page offset from eaddr. */
	return get_tlb_raddr(gtlbe) | (eaddr & offset_mask);
}
fa86b8dda KVM: ppc: rename ... |
224 |
/* Find the guest TLB entry that translates an instruction fetch at eaddr
 * under the current PID, or -1 if none. */
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	/* Instruction address space is selected by MSR[IS]. */
	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid,
				    !!(vcpu->arch.shared->msr & MSR_IS));
}
fa86b8dda KVM: ppc: rename ... |
230 |
/* Find the guest TLB entry that translates a data access at eaddr under the
 * current PID, or -1 if none. */
int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	/* Data address space is selected by MSR[DS]. */
	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid,
				    !!(vcpu->arch.shared->msr & MSR_DS));
}
b52a638c3 KVM: ppc: Add kvm... |
236 237 238 239 240 241 242 |
/* No-op on 44x: nothing to do when the guest takes an ITLB miss. */
void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
}

/* No-op on 44x: nothing to do when the guest takes a DTLB miss. */
void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
}
7924bd410 KVM: ppc: directl... |
243 244 |
/* Tear down shadow TLB slot stlb_index: invalidate the hardware entry and
 * drop the reference on the guest page backing it. Safe to call on an empty
 * slot. */
static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
                                      unsigned int stlb_index)
{
	struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];

	/* ref->page == NULL means this slot holds no guest mapping. */
	if (!ref->page)
		return;

	/* Discard from the TLB. */
	/* Note: we could actually invalidate a host mapping, if the host overwrote
	 * this TLB entry since we inserted a guest mapping. */
	kvmppc_44x_tlbie(stlb_index);

	/* Now release the page. A writeable mapping may have dirtied it, so
	 * tell KVM accordingly. */
	if (ref->writeable)
		kvm_release_page_dirty(ref->page);
	else
		kvm_release_page_clean(ref->page);

	/* Mark the slot empty so a repeat call is a no-op. */
	ref->page = NULL;

	/* XXX set tlb_44x_index to stlb_index? */

	trace_kvm_stlb_inval(stlb_index);
}
ecc0981ff KVM: ppc: cosmeti... |
267 |
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) |
83aae4a80 KVM: ppc: Write o... |
268 |
{ |
db93f5745 KVM: ppc: create ... |
269 |
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); |
7924bd410 KVM: ppc: directl... |
270 |
int i; |
db93f5745 KVM: ppc: create ... |
271 |
|
7924bd410 KVM: ppc: directl... |
272 273 |
for (i = 0; i <= tlb_44x_hwater; i++) kvmppc_44x_shadow_release(vcpu_44x, i); |
83aae4a80 KVM: ppc: Write o... |
274 |
} |
891686188 KVM: ppc: support... |
275 276 277 278 279 280 281 282 283 284 285 286 |
/** * kvmppc_mmu_map -- create a host mapping for guest memory * * If the guest wanted a larger page than the host supports, only the first * host page is mapped here and the rest are demand faulted. * * If the guest wanted a smaller page than the host page size, we map only the * guest-size page (i.e. not a full host page mapping). * * Caller must ensure that the specified guest TLB entry is safe to insert into * the shadow TLB. */ |
58a96214a KVM: ppc: change ... |
287 288 |
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, unsigned int gtlb_index) |
bbf45ba57 KVM: ppc: PowerPC... |
289 |
{ |
7924bd410 KVM: ppc: directl... |
290 |
struct kvmppc_44x_tlbe stlbe; |
db93f5745 KVM: ppc: create ... |
291 |
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); |
58a96214a KVM: ppc: change ... |
292 |
struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; |
7924bd410 KVM: ppc: directl... |
293 |
struct kvmppc_44x_shadow_ref *ref; |
bbf45ba57 KVM: ppc: PowerPC... |
294 |
struct page *new_page; |
bbf45ba57 KVM: ppc: PowerPC... |
295 |
hpa_t hpaddr; |
891686188 KVM: ppc: support... |
296 |
gfn_t gfn; |
58a96214a KVM: ppc: change ... |
297 298 299 |
u32 asid = gtlbe->tid; u32 flags = gtlbe->word2; u32 max_bytes = get_tlb_bytes(gtlbe); |
bbf45ba57 KVM: ppc: PowerPC... |
300 |
unsigned int victim; |
7924bd410 KVM: ppc: directl... |
301 302 303 304 305 306 307 308 |
/* Select TLB entry to clobber. Indirectly guard against races with the TLB * miss handler by disabling interrupts. */ local_irq_disable(); victim = ++tlb_44x_index; if (victim > tlb_44x_hwater) victim = 0; tlb_44x_index = victim; local_irq_enable(); |
bbf45ba57 KVM: ppc: PowerPC... |
309 310 |
/* Get reference to new page. */ |
891686188 KVM: ppc: support... |
311 |
gfn = gpaddr >> PAGE_SHIFT; |
bbf45ba57 KVM: ppc: PowerPC... |
312 313 |
new_page = gfn_to_page(vcpu->kvm, gfn); if (is_error_page(new_page)) { |
5689cc53f KVM: Use u64 for ... |
314 315 316 |
printk(KERN_ERR "Couldn't get guest page for gfn %llx! ", (unsigned long long)gfn); |
bbf45ba57 KVM: ppc: PowerPC... |
317 318 319 320 |
kvm_release_page_clean(new_page); return; } hpaddr = page_to_phys(new_page); |
7924bd410 KVM: ppc: directl... |
321 322 |
/* Invalidate any previous shadow mappings. */ kvmppc_44x_shadow_release(vcpu_44x, victim); |
bbf45ba57 KVM: ppc: PowerPC... |
323 324 325 326 327 328 |
/* XXX Make sure (va, size) doesn't overlap any other * entries. 440x6 user manual says the result would be * "undefined." */ /* XXX what about AS? */ |
bbf45ba57 KVM: ppc: PowerPC... |
329 |
/* Force TS=1 for all guest mappings. */ |
7924bd410 KVM: ppc: directl... |
330 |
stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS; |
891686188 KVM: ppc: support... |
331 332 333 334 |
if (max_bytes >= PAGE_SIZE) { /* Guest mapping is larger than or equal to host page size. We can use * a "native" host mapping. */ |
7924bd410 KVM: ppc: directl... |
335 |
stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE; |
891686188 KVM: ppc: support... |
336 337 338 339 340 |
} else { /* Guest mapping is smaller than host page size. We must restrict the * size of the mapping to be at most the smaller of the two, but for * simplicity we fall back to a 4K mapping (this is probably what the * guest is using anyways). */ |
7924bd410 KVM: ppc: directl... |
341 |
stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K; |
891686188 KVM: ppc: support... |
342 343 344 345 346 347 |
/* 'hpaddr' is a host page, which is larger than the mapping we're * inserting here. To compensate, we must add the in-page offset to the * sub-page. */ hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K); } |
7924bd410 KVM: ppc: directl... |
348 349 |
stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags, |
666e7252a KVM: PPC: Convert... |
350 |
vcpu->arch.shared->msr & MSR_PR); |
7924bd410 KVM: ppc: directl... |
351 352 353 354 355 356 357 358 359 360 |
stlbe.tid = !(asid & 0xff); /* Keep track of the reference so we can properly release it later. */ ref = &vcpu_44x->shadow_refs[victim]; ref->page = new_page; ref->gtlb_index = gtlb_index; ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW); ref->tid = stlbe.tid; /* Insert shadow mapping into hardware TLB. */ |
c5fbdffbd KVM: ppc: save an... |
361 |
kvmppc_44x_tlbe_set_modified(vcpu_44x, victim); |
7924bd410 KVM: ppc: directl... |
362 |
kvmppc_44x_tlbwe(victim, &stlbe); |
46f43c6ee KVM: powerpc: con... |
363 364 |
trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1, stlbe.word2); |
bbf45ba57 KVM: ppc: PowerPC... |
365 |
} |
7924bd410 KVM: ppc: directl... |
366 367 368 369 |
/* For a particular guest TLB entry, invalidate the corresponding host TLB * mappings and release the host pages. */ static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu, unsigned int gtlb_index) |
bbf45ba57 KVM: ppc: PowerPC... |
370 |
{ |
db93f5745 KVM: ppc: create ... |
371 |
struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); |
bbf45ba57 KVM: ppc: PowerPC... |
372 |
int i; |
7924bd410 KVM: ppc: directl... |
373 374 375 376 |
for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) { struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i]; if (ref->gtlb_index == gtlb_index) kvmppc_44x_shadow_release(vcpu_44x, i); |
bbf45ba57 KVM: ppc: PowerPC... |
377 |
} |
bbf45ba57 KVM: ppc: PowerPC... |
378 |
} |
dd9ebf1f9 KVM: PPC: e500: A... |
379 |
/* MSR changed: track guest privilege level in shadow_pid (1 in guest
 * supervisor mode, 0 in guest user mode, per the TID scheme described in
 * kvmppc_set_pid below). */
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	int usermode = vcpu->arch.shared->msr & MSR_PR;

	vcpu->arch.shadow_pid = !usermode;
}

/* Emulate a guest write to the PID register. */
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	if (unlikely(vcpu->arch.pid == new_pid))
		return;

	vcpu->arch.pid = new_pid;

	/* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
	 * can't access guest kernel mappings (TID=1). When we switch to a new
	 * guest PID, which will also use host PID=0, we must discard the old guest
	 * userspace mappings. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

		if (ref->tid == 0)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}
a0d7b9f24 KVM: ppc: Move 44... |
405 406 |
/* Returns nonzero if the guest TLB entry may be mapped into the shadow TLB
 * right away: it must be valid, match the current guest address space, and
 * its real address must fall inside a RAM memslot. */
static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}
75f74f0db KVM: ppc: refacto... |
426 |
/* Emulate the guest's tlbwe instruction: write GPR rs into word 'ws' of the
 * guest TLB entry indexed by GPR ra, dropping stale shadow mappings and
 * pre-mapping the updated entry when it is host-safe.
 *
 * Returns EMULATE_DONE, or EMULATE_FAIL on a bad index or word selector. */
int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	unsigned int gtlb_index;

	gtlb_index = kvmppc_get_gpr(vcpu, ra);
	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
		/* Restored the '\n' terminator dropped in transit. */
		printk("%s: index %d\n", __func__, gtlb_index);
		kvmppc_dump_vcpu(vcpu);
		return EMULATE_FAIL;
	}

	tlbe = &vcpu_44x->guest_tlb[gtlb_index];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
	if (tlbe->word0 & PPC44x_TLB_VALID)
		kvmppc_44x_invalidate(vcpu, gtlb_index);

	switch (ws) {
	case PPC44x_TLB_PAGEID:
		/* The TID comes from MMUCR[STID], not from the instruction. */
		tlbe->tid = get_mmucr_stid(vcpu);
		tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_XLAT:
		tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_ATTRIB:
		tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
		break;

	default:
		return EMULATE_FAIL;
	}

	if (tlbe_is_host_safe(vcpu, tlbe)) {
		gva_t eaddr;
		gpa_t gpaddr;
		u32 bytes;

		eaddr = get_tlb_eaddr(tlbe);
		gpaddr = get_tlb_raddr(tlbe);

		/* Use the advertised page size to mask effective and real addrs. */
		bytes = get_tlb_bytes(tlbe);
		eaddr &= ~(bytes - 1);
		gpaddr &= ~(bytes - 1);

		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
	}

	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
	                     tlbe->word2);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}
75f74f0db KVM: ppc: refacto... |
482 |
/* Emulate the guest's tlbsx instruction: search the guest TLB for the
 * effective address in GPR rb (plus GPR ra when ra != 0), under the
 * search PID/AS taken from MMUCR. The matching index (or -1) goes to GPR rt;
 * when the record bit rc is set, CR is updated to reflect hit/miss. */
int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
	u32 ea;
	int gtlb_index;
	unsigned int as = get_mmucr_sts(vcpu);
	unsigned int pid = get_mmucr_stid(vcpu);

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
	if (rc) {
		/* 0x20000000 is the CR0[EQ] bit: clear on miss, set on hit. */
		u32 cr = kvmppc_get_cr(vcpu);

		if (gtlb_index < 0)
			kvmppc_set_cr(vcpu, cr & ~0x20000000);
		else
			kvmppc_set_cr(vcpu, cr | 0x20000000);
	}
	kvmppc_set_gpr(vcpu, rt, gtlb_index);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}