Commit fef093bec0364ff5e6fd488cd81637f6bb3a2d0d

Authored by Alexander Graf
Committed by Avi Kivity
1 parent 7741909bf1

KVM: PPC: Make use of hash based Shadow MMU

We just introduced generic functions to handle shadow pages on PPC.
This patch makes the respective backends use them, getting rid of a
lot of duplicate code along the way.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Showing 6 changed files with 54 additions and 190 deletions

arch/powerpc/include/asm/kvm_book3s.h
... ... @@ -115,6 +115,15 @@
115 115 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
116 116 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
117 117 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
  118 +
  119 +extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
  120 +extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
  121 +extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
  122 +extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
  123 +extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
  124 +extern int kvmppc_mmu_hpte_sysinit(void);
  125 +extern void kvmppc_mmu_hpte_sysexit(void);
  126 +
118 127 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
119 128 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
120 129 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
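
Taken together, the new externs form a small allocate/fill/map API for the host-MMU backends. Below is a condensed sketch of the call sequence both backends switch to in the later hunks; the wrapper name record_shadow_mapping() and its parameters are purely illustrative, and the unchanged host-HTAB insertion is omitted:

static void record_shadow_mapping(struct kvm_vcpu *vcpu,
				  struct kvmppc_pte *orig_pte,
				  ulong slot, u64 va, u64 hpaddr)
{
	struct hpte_cache *pte;

	/* Grab a tracking entry instead of indexing a fixed per-vcpu array. */
	pte = kvmppc_mmu_hpte_cache_next(vcpu);

	/* Remember where the shadow mapping ended up in the host HTAB. */
	pte->slot = slot;
	pte->host_va = va;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	/* Hash the entry into the per-vcpu PTE/vPTE lists. */
	kvmppc_mmu_hpte_cache_map(vcpu, pte);
}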
arch/powerpc/include/asm/kvm_host.h
... ... @@ -38,7 +38,13 @@
38 38 #define KVM_NR_PAGE_SIZES 1
39 39 #define KVM_PAGES_PER_HPAGE(x) (1UL<<31)
40 40  
41   -#define HPTEG_CACHE_NUM 1024
  41 +#define HPTEG_CACHE_NUM (1 << 15)
  42 +#define HPTEG_HASH_BITS_PTE 13
  43 +#define HPTEG_HASH_BITS_VPTE 13
  44 +#define HPTEG_HASH_BITS_VPTE_LONG 5
  45 +#define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE)
  46 +#define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE)
  47 +#define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG)
42 48  
43 49 struct kvm;
44 50 struct kvm_run;
... ... @@ -151,6 +157,9 @@
151 157 };
152 158  
153 159 struct hpte_cache {
  160 + struct hlist_node list_pte;
  161 + struct hlist_node list_vpte;
  162 + struct hlist_node list_vpte_long;
154 163 u64 host_va;
155 164 u64 pfn;
156 165 ulong slot;
... ... @@ -282,8 +291,10 @@
282 291 unsigned long pending_exceptions;
283 292  
284 293 #ifdef CONFIG_PPC_BOOK3S
285   - struct hpte_cache hpte_cache[HPTEG_CACHE_NUM];
286   - int hpte_cache_offset;
  294 + struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
  295 + struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
  296 + struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
  297 + int hpte_cache_count;
287 298 #endif
288 299 };
289 300  
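
The fixed per-vcpu array and its offset counter give way to three hash tables, indexed by effective address, by virtual page, and by a coarser virtual-page range. A minimal sketch of how an entry might be hashed into the eaddr-indexed list follows; the use of hash_64() and the *_demo helper names are assumptions for illustration, since the actual hashing lives in the generic book3s_mmu_hpte.c code that this patch only hooks up:

#include <linux/hash.h>
#include <linux/list.h>

/* Hash on the page number so neighbouring pages spread across buckets. */
static inline u64 kvmppc_mmu_hash_pte_demo(u64 eaddr)
{
	return hash_64(eaddr >> PAGE_SHIFT, HPTEG_HASH_BITS_PTE);
}

static void hpte_cache_map_demo(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index = kvmppc_mmu_hash_pte_demo(pte->pte.eaddr);

	/* O(1) insert; lookups and flushes only walk one short chain. */
	hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
	vcpu->arch.hpte_cache_count++;
}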
arch/powerpc/kvm/Makefile
... ... @@ -45,6 +45,7 @@
45 45 book3s.o \
46 46 book3s_emulate.o \
47 47 book3s_interrupts.o \
  48 + book3s_mmu_hpte.o \
48 49 book3s_64_mmu_host.o \
49 50 book3s_64_mmu.o \
50 51 book3s_32_mmu.o
... ... @@ -57,6 +58,7 @@
57 58 book3s.o \
58 59 book3s_emulate.o \
59 60 book3s_interrupts.o \
  61 + book3s_mmu_hpte.o \
60 62 book3s_32_mmu_host.o \
61 63 book3s_32_mmu.o
62 64 kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
arch/powerpc/kvm/book3s.c
... ... @@ -1384,12 +1384,22 @@
1384 1384  
1385 1385 static int kvmppc_book3s_init(void)
1386 1386 {
1387   - return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
1388   - THIS_MODULE);
  1387 + int r;
  1388 +
  1389 + r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
  1390 + THIS_MODULE);
  1391 +
  1392 + if (r)
  1393 + return r;
  1394 +
  1395 + r = kvmppc_mmu_hpte_sysinit();
  1396 +
  1397 + return r;
1389 1398 }
1390 1399  
1391 1400 static void kvmppc_book3s_exit(void)
1392 1401 {
  1402 + kvmppc_mmu_hpte_sysexit();
1393 1403 kvm_exit();
1394 1404 }
1395 1405  
arch/powerpc/kvm/book3s_32_mmu_host.c
... ... @@ -58,107 +58,21 @@
58 58 static ulong htab;
59 59 static u32 htabmask;
60 60  
61   -static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
  61 +void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
62 62 {
63 63 volatile u32 *pteg;
64 64  
65   - dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%llx\n",
66   - pte->pte.eaddr, pte->pte.vpage, pte->host_va);
67   -
  65 + /* Remove from host HTAB */
68 66 pteg = (u32*)pte->slot;
69   -
70 67 pteg[0] = 0;
  68 +
  69 + /* And make sure it's gone from the TLB too */
71 70 asm volatile ("sync");
72 71 asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
73 72 asm volatile ("sync");
74 73 asm volatile ("tlbsync");
75   -
76   - pte->host_va = 0;
77   -
78   - if (pte->pte.may_write)
79   - kvm_release_pfn_dirty(pte->pfn);
80   - else
81   - kvm_release_pfn_clean(pte->pfn);
82 74 }
83 75  
84   -void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
85   -{
86   - int i;
87   -
88   - dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
89   - vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
90   - BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
91   -
92   - guest_ea &= ea_mask;
93   - for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
94   - struct hpte_cache *pte;
95   -
96   - pte = &vcpu->arch.hpte_cache[i];
97   - if (!pte->host_va)
98   - continue;
99   -
100   - if ((pte->pte.eaddr & ea_mask) == guest_ea) {
101   - invalidate_pte(vcpu, pte);
102   - }
103   - }
104   -
105   - /* Doing a complete flush -> start from scratch */
106   - if (!ea_mask)
107   - vcpu->arch.hpte_cache_offset = 0;
108   -}
109   -
110   -void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
111   -{
112   - int i;
113   -
114   - dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
115   - vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
116   - BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
117   -
118   - guest_vp &= vp_mask;
119   - for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
120   - struct hpte_cache *pte;
121   -
122   - pte = &vcpu->arch.hpte_cache[i];
123   - if (!pte->host_va)
124   - continue;
125   -
126   - if ((pte->pte.vpage & vp_mask) == guest_vp) {
127   - invalidate_pte(vcpu, pte);
128   - }
129   - }
130   -}
131   -
132   -void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
133   -{
134   - int i;
135   -
136   - dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
137   - vcpu->arch.hpte_cache_offset, pa_start, pa_end);
138   - BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
139   -
140   - for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
141   - struct hpte_cache *pte;
142   -
143   - pte = &vcpu->arch.hpte_cache[i];
144   - if (!pte->host_va)
145   - continue;
146   -
147   - if ((pte->pte.raddr >= pa_start) &&
148   - (pte->pte.raddr < pa_end)) {
149   - invalidate_pte(vcpu, pte);
150   - }
151   - }
152   -}
153   -
154   -static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
155   -{
156   - if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
157   - kvmppc_mmu_pte_flush(vcpu, 0, 0);
158   -
159   - return vcpu->arch.hpte_cache_offset++;
160   -}
161   -
162 76 /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
163 77 * a hash, so we don't waste cycles on looping */
164 78 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
... ... @@ -230,7 +144,6 @@
230 144 register int rr = 0;
231 145 bool primary = false;
232 146 bool evict = false;
233   - int hpte_id;
234 147 struct hpte_cache *pte;
235 148  
236 149 /* Get host physical address for gpa */
... ... @@ -315,8 +228,7 @@
315 228  
316 229 /* Now tell our Shadow PTE code about the new page */
317 230  
318   - hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
319   - pte = &vcpu->arch.hpte_cache[hpte_id];
  231 + pte = kvmppc_mmu_hpte_cache_next(vcpu);
320 232  
321 233 dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
322 234 orig_pte->may_write ? 'w' : '-',
... ... @@ -329,6 +241,8 @@
329 241 pte->pte = *orig_pte;
330 242 pte->pfn = hpaddr >> PAGE_SHIFT;
331 243  
  244 + kvmppc_mmu_hpte_cache_map(vcpu, pte);
  245 +
332 246 return 0;
333 247 }
334 248  
... ... @@ -413,7 +327,7 @@
413 327  
414 328 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
415 329 {
416   - kvmppc_mmu_pte_flush(vcpu, 0, 0);
  330 + kvmppc_mmu_hpte_destroy(vcpu);
417 331 preempt_disable();
418 332 __destroy_context(to_book3s(vcpu)->context_id);
419 333 preempt_enable();
... ... @@ -452,6 +366,8 @@
452 366 asm ( "mfsdr1 %0" : "=r"(sdr1) );
453 367 htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
454 368 htab = (ulong)__va(sdr1 & 0xffff0000);
  369 +
  370 + kvmppc_mmu_hpte_init(vcpu);
455 371  
456 372 return 0;
457 373 }
arch/powerpc/kvm/book3s_64_mmu_host.c
... ... @@ -47,100 +47,13 @@
47 47 #define dprintk_slb(a, ...) do { } while(0)
48 48 #endif
49 49  
50   -static void invalidate_pte(struct hpte_cache *pte)
  50 +void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
51 51 {
52   - dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
53   - pte->pte.eaddr, pte->pte.vpage, pte->host_va);
54   -
55 52 ppc_md.hpte_invalidate(pte->slot, pte->host_va,
56 53 MMU_PAGE_4K, MMU_SEGSIZE_256M,
57 54 false);
58   - pte->host_va = 0;
59   -
60   - if (pte->pte.may_write)
61   - kvm_release_pfn_dirty(pte->pfn);
62   - else
63   - kvm_release_pfn_clean(pte->pfn);
64 55 }
65 56  
66   -void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
67   -{
68   - int i;
69   -
70   - dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
71   - vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
72   - BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
73   -
74   - guest_ea &= ea_mask;
75   - for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
76   - struct hpte_cache *pte;
77   -
78   - pte = &vcpu->arch.hpte_cache[i];
79   - if (!pte->host_va)
80   - continue;
81   -
82   - if ((pte->pte.eaddr & ea_mask) == guest_ea) {
83   - invalidate_pte(pte);
84   - }
85   - }
86   -
87   - /* Doing a complete flush -> start from scratch */
88   - if (!ea_mask)
89   - vcpu->arch.hpte_cache_offset = 0;
90   -}
91   -
92   -void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
93   -{
94   - int i;
95   -
96   - dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
97   - vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
98   - BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
99   -
100   - guest_vp &= vp_mask;
101   - for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
102   - struct hpte_cache *pte;
103   -
104   - pte = &vcpu->arch.hpte_cache[i];
105   - if (!pte->host_va)
106   - continue;
107   -
108   - if ((pte->pte.vpage & vp_mask) == guest_vp) {
109   - invalidate_pte(pte);
110   - }
111   - }
112   -}
113   -
114   -void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
115   -{
116   - int i;
117   -
118   - dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx & 0x%lx\n",
119   - vcpu->arch.hpte_cache_offset, pa_start, pa_end);
120   - BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
121   -
122   - for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
123   - struct hpte_cache *pte;
124   -
125   - pte = &vcpu->arch.hpte_cache[i];
126   - if (!pte->host_va)
127   - continue;
128   -
129   - if ((pte->pte.raddr >= pa_start) &&
130   - (pte->pte.raddr < pa_end)) {
131   - invalidate_pte(pte);
132   - }
133   - }
134   -}
135   -
136   -static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
137   -{
138   - if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
139   - kvmppc_mmu_pte_flush(vcpu, 0, 0);
140   -
141   - return vcpu->arch.hpte_cache_offset++;
142   -}
143   -
144 57 /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
145 58 * a hash, so we don't waste cycles on looping */
146 59 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
... ... @@ -246,8 +159,7 @@
246 159 attempt++;
247 160 goto map_again;
248 161 } else {
249   - int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
250   - struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];
  162 + struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu);
251 163  
252 164 dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
253 165 ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
... ... @@ -265,6 +177,8 @@
265 177 pte->host_va = va;
266 178 pte->pte = *orig_pte;
267 179 pte->pfn = hpaddr >> PAGE_SHIFT;
  180 +
  181 + kvmppc_mmu_hpte_cache_map(vcpu, pte);
268 182 }
269 183  
270 184 return 0;
... ... @@ -391,7 +305,7 @@
391 305  
392 306 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
393 307 {
394   - kvmppc_mmu_pte_flush(vcpu, 0, 0);
  308 + kvmppc_mmu_hpte_destroy(vcpu);
395 309 __destroy_context(to_book3s(vcpu)->context_id);
396 310 }
397 311  
... ... @@ -408,6 +322,8 @@
408 322 vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1;
409 323 vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
410 324 vcpu3s->vsid_next = vcpu3s->vsid_first;
  325 +
  326 + kvmppc_mmu_hpte_init(vcpu);
411 327  
412 328 return 0;
413 329 }
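
With the linear scans removed from both backends, a flush only needs to walk the hash chain an address maps to. The following sketch shows what an eaddr-based flush could look like in the generic code, reusing the illustrative hash helper from the sketch above; the names, masking and omitted locking are assumptions, and the real code additionally drops the entry from all three lists, releases the pfn and frees it:

static void hpte_flush_ea_demo(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node, *tmp;
	struct hpte_cache *pte;

	/* Only entries whose eaddr hashed into this bucket can match. */
	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte_demo(guest_ea)];

	hlist_for_each_entry_safe(pte, node, tmp, list, list_pte) {
		if ((pte->pte.eaddr & PAGE_MASK) != (guest_ea & PAGE_MASK))
			continue;

		/* Backend hook added by this patch: zap the host HTAB entry. */
		kvmppc_mmu_invalidate_pte(vcpu, pte);

		/*
		 * The real code also unhashes from the vpte lists, releases
		 * the pfn and frees the entry; shown here only for list_pte.
		 */
		hlist_del(&pte->list_pte);
		vcpu->arch.hpte_cache_count--;
	}
}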