Commit 8e5b26b55a8b6aee2c789b1d20ec715f9e4bea5c

Authored by Alexander Graf
Committed by Marcelo Tosatti
1 parent 0d178975d0

KVM: PPC: Use accessor functions for GPR access

All code in PPC KVM currently accesses GPRs in the vcpu struct directly.

While there's nothing wrong with that for the way GPRs are currently stored
and loaded, it doesn't suffice for the PACA acceleration that will follow
in this patchset.

So let's create small inline wrapper functions that we call whenever a GPR
needs to be read or written. For now, the compiled code shouldn't really
change at all.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>

Showing 11 changed files with 274 additions and 225 deletions
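
As a minimal sketch of the mechanical conversion applied throughout the
diff below (illustrative only, not part of the patch itself), a typical
call site changes like this:

	/* before: direct access into the gpr array */
	vcpu->arch.gpr[rt] = vcpu->arch.gpr[rb] + 4;

	/* after: the same operation through the new accessors */
	kvmppc_set_gpr(vcpu, rt, kvmppc_get_gpr(vcpu, rb) + 4);

Since the wrappers currently just forward to vcpu->arch.gpr[], changing
where GPRs live later means touching only the two inline helpers.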

arch/powerpc/include/asm/kvm_ppc.h
... ... @@ -96,5 +96,31 @@
96 96  
97 97 extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
98 98  
  99 +#ifdef CONFIG_PPC_BOOK3S
  100 +
  101 +static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
  102 +{
  103 + vcpu->arch.gpr[num] = val;
  104 +}
  105 +
  106 +static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
  107 +{
  108 + return vcpu->arch.gpr[num];
  109 +}
  110 +
  111 +#else
  112 +
  113 +static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
  114 +{
  115 + vcpu->arch.gpr[num] = val;
  116 +}
  117 +
  118 +static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
  119 +{
  120 + return vcpu->arch.gpr[num];
  121 +}
  122 +
  123 +#endif
  124 +
99 125 #endif /* __POWERPC_KVM_PPC_H__ */
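
Note that both branches of the #ifdef above are identical for now; the
CONFIG_PPC_BOOK3S split merely reserves the place where a PACA-backed
variant can later differ. One consequence for callers, visible in the e500
hunks further down, is that a read-modify-write on a GPR goes through a
local variable instead of poking the array in place. A generic sketch
(new_bits is a placeholder, not a real field):

	ulong tmp = kvmppc_get_gpr(vcpu, rt);	/* fetch the current value */
	tmp |= new_bits;			/* modify the local copy */
	kvmppc_set_gpr(vcpu, rt, tmp);		/* write it back exactly once */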
arch/powerpc/kvm/44x_emulate.c
... ... @@ -65,13 +65,14 @@
65 65 */
66 66 switch (dcrn) {
67 67 case DCRN_CPR0_CONFIG_ADDR:
68   - vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
  68 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
69 69 break;
70 70 case DCRN_CPR0_CONFIG_DATA:
71 71 local_irq_disable();
72 72 mtdcr(DCRN_CPR0_CONFIG_ADDR,
73 73 vcpu->arch.cpr0_cfgaddr);
74   - vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
  74 + kvmppc_set_gpr(vcpu, rt,
  75 + mfdcr(DCRN_CPR0_CONFIG_DATA));
75 76 local_irq_enable();
76 77 break;
77 78 default:
... ... @@ -93,11 +94,11 @@
93 94 /* emulate some access in kernel */
94 95 switch (dcrn) {
95 96 case DCRN_CPR0_CONFIG_ADDR:
96   - vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
  97 + vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
97 98 break;
98 99 default:
99 100 run->dcr.dcrn = dcrn;
100   - run->dcr.data = vcpu->arch.gpr[rs];
  101 + run->dcr.data = kvmppc_get_gpr(vcpu, rs);
101 102 run->dcr.is_write = 1;
102 103 vcpu->arch.dcr_needed = 1;
103 104 kvmppc_account_exit(vcpu, DCR_EXITS);
... ... @@ -146,13 +147,13 @@
146 147  
147 148 switch (sprn) {
148 149 case SPRN_PID:
149   - kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
  150 + kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
150 151 case SPRN_MMUCR:
151   - vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
  152 + vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
152 153 case SPRN_CCR0:
153   - vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
  154 + vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
154 155 case SPRN_CCR1:
155   - vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
  156 + vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
156 157 default:
157 158 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
158 159 }
... ... @@ -167,13 +168,13 @@
167 168  
168 169 switch (sprn) {
169 170 case SPRN_PID:
170   - vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
  171 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
171 172 case SPRN_MMUCR:
172   - vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
  173 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
173 174 case SPRN_CCR0:
174   - vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
  175 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
175 176 case SPRN_CCR1:
176   - vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
  177 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
177 178 default:
178 179 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
179 180 }
arch/powerpc/kvm/44x_tlb.c
... ... @@ -439,7 +439,7 @@
439 439 struct kvmppc_44x_tlbe *tlbe;
440 440 unsigned int gtlb_index;
441 441  
442   - gtlb_index = vcpu->arch.gpr[ra];
  442 + gtlb_index = kvmppc_get_gpr(vcpu, ra);
443 443 if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
444 444 printk("%s: index %d\n", __func__, gtlb_index);
445 445 kvmppc_dump_vcpu(vcpu);
... ... @@ -455,15 +455,15 @@
455 455 switch (ws) {
456 456 case PPC44x_TLB_PAGEID:
457 457 tlbe->tid = get_mmucr_stid(vcpu);
458   - tlbe->word0 = vcpu->arch.gpr[rs];
  458 + tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
459 459 break;
460 460  
461 461 case PPC44x_TLB_XLAT:
462   - tlbe->word1 = vcpu->arch.gpr[rs];
  462 + tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
463 463 break;
464 464  
465 465 case PPC44x_TLB_ATTRIB:
466   - tlbe->word2 = vcpu->arch.gpr[rs];
  466 + tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
467 467 break;
468 468  
469 469 default:
... ... @@ -500,9 +500,9 @@
500 500 unsigned int as = get_mmucr_sts(vcpu);
501 501 unsigned int pid = get_mmucr_stid(vcpu);
502 502  
503   - ea = vcpu->arch.gpr[rb];
  503 + ea = kvmppc_get_gpr(vcpu, rb);
504 504 if (ra)
505   - ea += vcpu->arch.gpr[ra];
  505 + ea += kvmppc_get_gpr(vcpu, ra);
506 506  
507 507 gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
508 508 if (rc) {
... ... @@ -511,7 +511,7 @@
511 511 else
512 512 vcpu->arch.cr |= 0x20000000;
513 513 }
514   - vcpu->arch.gpr[rt] = gtlb_index;
  514 + kvmppc_set_gpr(vcpu, rt, gtlb_index);
515 515  
516 516 kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
517 517 return EMULATE_DONE;
arch/powerpc/kvm/book3s.c
... ... @@ -658,7 +658,7 @@
658 658 }
659 659 case BOOK3S_INTERRUPT_SYSCALL:
660 660 #ifdef EXIT_DEBUG
661   - printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
  661 + printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
662 662 #endif
663 663 vcpu->stat.syscall_exits++;
664 664 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
... ... @@ -734,7 +734,7 @@
734 734 regs->sprg7 = vcpu->arch.sprg6;
735 735  
736 736 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
737   - regs->gpr[i] = vcpu->arch.gpr[i];
  737 + regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
738 738  
739 739 return 0;
740 740 }
... ... @@ -759,8 +759,8 @@
759 759 vcpu->arch.sprg6 = regs->sprg5;
760 760 vcpu->arch.sprg7 = regs->sprg6;
761 761  
762   - for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
763   - vcpu->arch.gpr[i] = regs->gpr[i];
  762 + for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
  763 + kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
764 764  
765 765 return 0;
766 766 }
arch/powerpc/kvm/book3s_64_emulate.c
... ... @@ -65,11 +65,11 @@
65 65 case 31:
66 66 switch (get_xop(inst)) {
67 67 case OP_31_XOP_MFMSR:
68   - vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr;
  68 + kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
69 69 break;
70 70 case OP_31_XOP_MTMSRD:
71 71 {
72   - ulong rs = vcpu->arch.gpr[get_rs(inst)];
  72 + ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
73 73 if (inst & 0x10000) {
74 74 vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
75 75 vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
... ... @@ -78,30 +78,30 @@
78 78 break;
79 79 }
80 80 case OP_31_XOP_MTMSR:
81   - kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]);
  81 + kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
82 82 break;
83 83 case OP_31_XOP_MFSRIN:
84 84 {
85 85 int srnum;
86 86  
87   - srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf;
  87 + srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
88 88 if (vcpu->arch.mmu.mfsrin) {
89 89 u32 sr;
90 90 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
91   - vcpu->arch.gpr[get_rt(inst)] = sr;
  91 + kvmppc_set_gpr(vcpu, get_rt(inst), sr);
92 92 }
93 93 break;
94 94 }
95 95 case OP_31_XOP_MTSRIN:
96 96 vcpu->arch.mmu.mtsrin(vcpu,
97   - (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf,
98   - vcpu->arch.gpr[get_rs(inst)]);
  97 + (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
  98 + kvmppc_get_gpr(vcpu, get_rs(inst)));
99 99 break;
100 100 case OP_31_XOP_TLBIE:
101 101 case OP_31_XOP_TLBIEL:
102 102 {
103 103 bool large = (inst & 0x00200000) ? true : false;
104   - ulong addr = vcpu->arch.gpr[get_rb(inst)];
  104 + ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
105 105 vcpu->arch.mmu.tlbie(vcpu, addr, large);
106 106 break;
107 107 }
... ... @@ -111,14 +111,16 @@
111 111 if (!vcpu->arch.mmu.slbmte)
112 112 return EMULATE_FAIL;
113 113  
114   - vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)],
115   - vcpu->arch.gpr[get_rb(inst)]);
  114 + vcpu->arch.mmu.slbmte(vcpu,
  115 + kvmppc_get_gpr(vcpu, get_rs(inst)),
  116 + kvmppc_get_gpr(vcpu, get_rb(inst)));
116 117 break;
117 118 case OP_31_XOP_SLBIE:
118 119 if (!vcpu->arch.mmu.slbie)
119 120 return EMULATE_FAIL;
120 121  
121   - vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]);
  122 + vcpu->arch.mmu.slbie(vcpu,
  123 + kvmppc_get_gpr(vcpu, get_rb(inst)));
122 124 break;
123 125 case OP_31_XOP_SLBIA:
124 126 if (!vcpu->arch.mmu.slbia)
... ... @@ -132,9 +134,9 @@
132 134 } else {
133 135 ulong t, rb;
134 136  
135   - rb = vcpu->arch.gpr[get_rb(inst)];
  137 + rb = kvmppc_get_gpr(vcpu, get_rb(inst));
136 138 t = vcpu->arch.mmu.slbmfee(vcpu, rb);
137   - vcpu->arch.gpr[get_rt(inst)] = t;
  139 + kvmppc_set_gpr(vcpu, get_rt(inst), t);
138 140 }
139 141 break;
140 142 case OP_31_XOP_SLBMFEV:
... ... @@ -143,20 +145,20 @@
143 145 } else {
144 146 ulong t, rb;
145 147  
146   - rb = vcpu->arch.gpr[get_rb(inst)];
  148 + rb = kvmppc_get_gpr(vcpu, get_rb(inst));
147 149 t = vcpu->arch.mmu.slbmfev(vcpu, rb);
148   - vcpu->arch.gpr[get_rt(inst)] = t;
  150 + kvmppc_set_gpr(vcpu, get_rt(inst), t);
149 151 }
150 152 break;
151 153 case OP_31_XOP_DCBZ:
152 154 {
153   - ulong rb = vcpu->arch.gpr[get_rb(inst)];
  155 + ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
154 156 ulong ra = 0;
155 157 ulong addr;
156 158 u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
157 159  
158 160 if (get_ra(inst))
159   - ra = vcpu->arch.gpr[get_ra(inst)];
  161 + ra = kvmppc_get_gpr(vcpu, get_ra(inst));
160 162  
161 163 addr = (ra + rb) & ~31ULL;
162 164 if (!(vcpu->arch.msr & MSR_SF))
... ... @@ -233,43 +235,44 @@
233 235 int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
234 236 {
235 237 int emulated = EMULATE_DONE;
  238 + ulong spr_val = kvmppc_get_gpr(vcpu, rs);
236 239  
237 240 switch (sprn) {
238 241 case SPRN_SDR1:
239   - to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs];
  242 + to_book3s(vcpu)->sdr1 = spr_val;
240 243 break;
241 244 case SPRN_DSISR:
242   - to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs];
  245 + to_book3s(vcpu)->dsisr = spr_val;
243 246 break;
244 247 case SPRN_DAR:
245   - vcpu->arch.dear = vcpu->arch.gpr[rs];
  248 + vcpu->arch.dear = spr_val;
246 249 break;
247 250 case SPRN_HIOR:
248   - to_book3s(vcpu)->hior = vcpu->arch.gpr[rs];
  251 + to_book3s(vcpu)->hior = spr_val;
249 252 break;
250 253 case SPRN_IBAT0U ... SPRN_IBAT3L:
251 254 case SPRN_IBAT4U ... SPRN_IBAT7L:
252 255 case SPRN_DBAT0U ... SPRN_DBAT3L:
253 256 case SPRN_DBAT4U ... SPRN_DBAT7L:
254   - kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
  257 + kvmppc_write_bat(vcpu, sprn, (u32)spr_val);
255 258 /* BAT writes happen so rarely that we're ok to flush
256 259 * everything here */
257 260 kvmppc_mmu_pte_flush(vcpu, 0, 0);
258 261 break;
259 262 case SPRN_HID0:
260   - to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs];
  263 + to_book3s(vcpu)->hid[0] = spr_val;
261 264 break;
262 265 case SPRN_HID1:
263   - to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs];
  266 + to_book3s(vcpu)->hid[1] = spr_val;
264 267 break;
265 268 case SPRN_HID2:
266   - to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs];
  269 + to_book3s(vcpu)->hid[2] = spr_val;
267 270 break;
268 271 case SPRN_HID4:
269   - to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs];
  272 + to_book3s(vcpu)->hid[4] = spr_val;
270 273 break;
271 274 case SPRN_HID5:
272   - to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs];
  275 + to_book3s(vcpu)->hid[5] = spr_val;
273 276 /* guest HID5 set can change is_dcbz32 */
274 277 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
275 278 (mfmsr() & MSR_HV))
... ... @@ -299,38 +302,38 @@
299 302  
300 303 switch (sprn) {
301 304 case SPRN_SDR1:
302   - vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1;
  305 + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
303 306 break;
304 307 case SPRN_DSISR:
305   - vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr;
  308 + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr);
306 309 break;
307 310 case SPRN_DAR:
308   - vcpu->arch.gpr[rt] = vcpu->arch.dear;
  311 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
309 312 break;
310 313 case SPRN_HIOR:
311   - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior;
  314 + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
312 315 break;
313 316 case SPRN_HID0:
314   - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0];
  317 + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
315 318 break;
316 319 case SPRN_HID1:
317   - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1];
  320 + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
318 321 break;
319 322 case SPRN_HID2:
320   - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2];
  323 + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
321 324 break;
322 325 case SPRN_HID4:
323   - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4];
  326 + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
324 327 break;
325 328 case SPRN_HID5:
326   - vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5];
  329 + kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
327 330 break;
328 331 case SPRN_THRM1:
329 332 case SPRN_THRM2:
330 333 case SPRN_THRM3:
331 334 case SPRN_CTRLF:
332 335 case SPRN_CTRLT:
333   - vcpu->arch.gpr[rt] = 0;
  336 + kvmppc_set_gpr(vcpu, rt, 0);
334 337 break;
335 338 default:
336 339 printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
arch/powerpc/kvm/booke.c
... ... @@ -69,10 +69,10 @@
69 69  
70 70 for (i = 0; i < 32; i += 4) {
71 71 printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
72   - vcpu->arch.gpr[i],
73   - vcpu->arch.gpr[i+1],
74   - vcpu->arch.gpr[i+2],
75   - vcpu->arch.gpr[i+3]);
  72 + kvmppc_get_gpr(vcpu, i),
  73 + kvmppc_get_gpr(vcpu, i+1),
  74 + kvmppc_get_gpr(vcpu, i+2),
  75 + kvmppc_get_gpr(vcpu, i+3));
76 76 }
77 77 }
78 78  
... ... @@ -431,7 +431,7 @@
431 431 {
432 432 vcpu->arch.pc = 0;
433 433 vcpu->arch.msr = 0;
434   - vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
  434 + kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
435 435  
436 436 vcpu->arch.shadow_pid = 1;
437 437  
... ... @@ -466,7 +466,7 @@
466 466 regs->sprg7 = vcpu->arch.sprg6;
467 467  
468 468 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
469   - regs->gpr[i] = vcpu->arch.gpr[i];
  469 + regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
470 470  
471 471 return 0;
472 472 }
... ... @@ -491,8 +491,8 @@
491 491 vcpu->arch.sprg6 = regs->sprg5;
492 492 vcpu->arch.sprg7 = regs->sprg6;
493 493  
494   - for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
495   - vcpu->arch.gpr[i] = regs->gpr[i];
  494 + for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
  495 + kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
496 496  
497 497 return 0;
498 498 }
arch/powerpc/kvm/booke_emulate.c
... ... @@ -62,20 +62,20 @@
62 62  
63 63 case OP_31_XOP_MFMSR:
64 64 rt = get_rt(inst);
65   - vcpu->arch.gpr[rt] = vcpu->arch.msr;
  65 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr);
66 66 kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
67 67 break;
68 68  
69 69 case OP_31_XOP_MTMSR:
70 70 rs = get_rs(inst);
71 71 kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
72   - kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
  72 + kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
73 73 break;
74 74  
75 75 case OP_31_XOP_WRTEE:
76 76 rs = get_rs(inst);
77 77 vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
78   - | (vcpu->arch.gpr[rs] & MSR_EE);
  78 + | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
79 79 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
80 80 break;
81 81  
... ... @@ -101,22 +101,23 @@
101 101 int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
102 102 {
103 103 int emulated = EMULATE_DONE;
  104 + ulong spr_val = kvmppc_get_gpr(vcpu, rs);
104 105  
105 106 switch (sprn) {
106 107 case SPRN_DEAR:
107   - vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
  108 + vcpu->arch.dear = spr_val; break;
108 109 case SPRN_ESR:
109   - vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
  110 + vcpu->arch.esr = spr_val; break;
110 111 case SPRN_DBCR0:
111   - vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
  112 + vcpu->arch.dbcr0 = spr_val; break;
112 113 case SPRN_DBCR1:
113   - vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
  114 + vcpu->arch.dbcr1 = spr_val; break;
114 115 case SPRN_DBSR:
115   - vcpu->arch.dbsr &= ~vcpu->arch.gpr[rs]; break;
  116 + vcpu->arch.dbsr &= ~spr_val; break;
116 117 case SPRN_TSR:
117   - vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
  118 + vcpu->arch.tsr &= ~spr_val; break;
118 119 case SPRN_TCR:
119   - vcpu->arch.tcr = vcpu->arch.gpr[rs];
  120 + vcpu->arch.tcr = spr_val;
120 121 kvmppc_emulate_dec(vcpu);
121 122 break;
122 123  
... ... @@ -124,64 +125,64 @@
124 125 * loaded into the real SPRGs when resuming the
125 126 * guest. */
126 127 case SPRN_SPRG4:
127   - vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
  128 + vcpu->arch.sprg4 = spr_val; break;
128 129 case SPRN_SPRG5:
129   - vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
  130 + vcpu->arch.sprg5 = spr_val; break;
130 131 case SPRN_SPRG6:
131   - vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
  132 + vcpu->arch.sprg6 = spr_val; break;
132 133 case SPRN_SPRG7:
133   - vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
  134 + vcpu->arch.sprg7 = spr_val; break;
134 135  
135 136 case SPRN_IVPR:
136   - vcpu->arch.ivpr = vcpu->arch.gpr[rs];
  137 + vcpu->arch.ivpr = spr_val;
137 138 break;
138 139 case SPRN_IVOR0:
139   - vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs];
  140 + vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
140 141 break;
141 142 case SPRN_IVOR1:
142   - vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs];
  143 + vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
143 144 break;
144 145 case SPRN_IVOR2:
145   - vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs];
  146 + vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
146 147 break;
147 148 case SPRN_IVOR3:
148   - vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs];
  149 + vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
149 150 break;
150 151 case SPRN_IVOR4:
151   - vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs];
  152 + vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val;
152 153 break;
153 154 case SPRN_IVOR5:
154   - vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs];
  155 + vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val;
155 156 break;
156 157 case SPRN_IVOR6:
157   - vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs];
  158 + vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val;
158 159 break;
159 160 case SPRN_IVOR7:
160   - vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs];
  161 + vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
161 162 break;
162 163 case SPRN_IVOR8:
163   - vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs];
  164 + vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
164 165 break;
165 166 case SPRN_IVOR9:
166   - vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs];
  167 + vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
167 168 break;
168 169 case SPRN_IVOR10:
169   - vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs];
  170 + vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val;
170 171 break;
171 172 case SPRN_IVOR11:
172   - vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs];
  173 + vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val;
173 174 break;
174 175 case SPRN_IVOR12:
175   - vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs];
  176 + vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val;
176 177 break;
177 178 case SPRN_IVOR13:
178   - vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs];
  179 + vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val;
179 180 break;
180 181 case SPRN_IVOR14:
181   - vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs];
  182 + vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val;
182 183 break;
183 184 case SPRN_IVOR15:
184   - vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs];
  185 + vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
185 186 break;
186 187  
187 188 default:
... ... @@ -197,65 +198,65 @@
197 198  
198 199 switch (sprn) {
199 200 case SPRN_IVPR:
200   - vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
  201 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
201 202 case SPRN_DEAR:
202   - vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
  203 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break;
203 204 case SPRN_ESR:
204   - vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
  205 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
205 206 case SPRN_DBCR0:
206   - vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
  207 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
207 208 case SPRN_DBCR1:
208   - vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
  209 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
209 210 case SPRN_DBSR:
210   - vcpu->arch.gpr[rt] = vcpu->arch.dbsr; break;
  211 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
211 212  
212 213 case SPRN_IVOR0:
213   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
  214 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
214 215 break;
215 216 case SPRN_IVOR1:
216   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
  217 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
217 218 break;
218 219 case SPRN_IVOR2:
219   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
  220 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
220 221 break;
221 222 case SPRN_IVOR3:
222   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
  223 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
223 224 break;
224 225 case SPRN_IVOR4:
225   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
  226 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
226 227 break;
227 228 case SPRN_IVOR5:
228   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
  229 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
229 230 break;
230 231 case SPRN_IVOR6:
231   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
  232 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
232 233 break;
233 234 case SPRN_IVOR7:
234   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
  235 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
235 236 break;
236 237 case SPRN_IVOR8:
237   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
  238 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
238 239 break;
239 240 case SPRN_IVOR9:
240   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
  241 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
241 242 break;
242 243 case SPRN_IVOR10:
243   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
  244 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
244 245 break;
245 246 case SPRN_IVOR11:
246   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
  247 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
247 248 break;
248 249 case SPRN_IVOR12:
249   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
  250 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
250 251 break;
251 252 case SPRN_IVOR13:
252   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
  253 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
253 254 break;
254 255 case SPRN_IVOR14:
255   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
  256 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
256 257 break;
257 258 case SPRN_IVOR15:
258   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
  259 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
259 260 break;
260 261  
261 262 default:
arch/powerpc/kvm/e500_emulate.c
... ... @@ -74,54 +74,55 @@
74 74 {
75 75 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
76 76 int emulated = EMULATE_DONE;
  77 + ulong spr_val = kvmppc_get_gpr(vcpu, rs);
77 78  
78 79 switch (sprn) {
79 80 case SPRN_PID:
80 81 vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
81   - vcpu->arch.pid = vcpu->arch.gpr[rs];
  82 + vcpu->arch.pid = spr_val;
82 83 break;
83 84 case SPRN_PID1:
84   - vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break;
  85 + vcpu_e500->pid[1] = spr_val; break;
85 86 case SPRN_PID2:
86   - vcpu_e500->pid[2] = vcpu->arch.gpr[rs]; break;
  87 + vcpu_e500->pid[2] = spr_val; break;
87 88 case SPRN_MAS0:
88   - vcpu_e500->mas0 = vcpu->arch.gpr[rs]; break;
  89 + vcpu_e500->mas0 = spr_val; break;
89 90 case SPRN_MAS1:
90   - vcpu_e500->mas1 = vcpu->arch.gpr[rs]; break;
  91 + vcpu_e500->mas1 = spr_val; break;
91 92 case SPRN_MAS2:
92   - vcpu_e500->mas2 = vcpu->arch.gpr[rs]; break;
  93 + vcpu_e500->mas2 = spr_val; break;
93 94 case SPRN_MAS3:
94   - vcpu_e500->mas3 = vcpu->arch.gpr[rs]; break;
  95 + vcpu_e500->mas3 = spr_val; break;
95 96 case SPRN_MAS4:
96   - vcpu_e500->mas4 = vcpu->arch.gpr[rs]; break;
  97 + vcpu_e500->mas4 = spr_val; break;
97 98 case SPRN_MAS6:
98   - vcpu_e500->mas6 = vcpu->arch.gpr[rs]; break;
  99 + vcpu_e500->mas6 = spr_val; break;
99 100 case SPRN_MAS7:
100   - vcpu_e500->mas7 = vcpu->arch.gpr[rs]; break;
  101 + vcpu_e500->mas7 = spr_val; break;
101 102 case SPRN_L1CSR1:
102   - vcpu_e500->l1csr1 = vcpu->arch.gpr[rs]; break;
  103 + vcpu_e500->l1csr1 = spr_val; break;
103 104 case SPRN_HID0:
104   - vcpu_e500->hid0 = vcpu->arch.gpr[rs]; break;
  105 + vcpu_e500->hid0 = spr_val; break;
105 106 case SPRN_HID1:
106   - vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break;
  107 + vcpu_e500->hid1 = spr_val; break;
107 108  
108 109 case SPRN_MMUCSR0:
109 110 emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
110   - vcpu->arch.gpr[rs]);
  111 + spr_val);
111 112 break;
112 113  
113 114 /* extra exceptions */
114 115 case SPRN_IVOR32:
115   - vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs];
  116 + vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
116 117 break;
117 118 case SPRN_IVOR33:
118   - vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = vcpu->arch.gpr[rs];
  119 + vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
119 120 break;
120 121 case SPRN_IVOR34:
121   - vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = vcpu->arch.gpr[rs];
  122 + vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
122 123 break;
123 124 case SPRN_IVOR35:
124   - vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = vcpu->arch.gpr[rs];
  125 + vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
125 126 break;
126 127  
127 128 default:
... ... @@ -138,63 +139,71 @@
138 139  
139 140 switch (sprn) {
140 141 case SPRN_PID:
141   - vcpu->arch.gpr[rt] = vcpu_e500->pid[0]; break;
  142 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
142 143 case SPRN_PID1:
143   - vcpu->arch.gpr[rt] = vcpu_e500->pid[1]; break;
  144 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
144 145 case SPRN_PID2:
145   - vcpu->arch.gpr[rt] = vcpu_e500->pid[2]; break;
  146 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
146 147 case SPRN_MAS0:
147   - vcpu->arch.gpr[rt] = vcpu_e500->mas0; break;
  148 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
148 149 case SPRN_MAS1:
149   - vcpu->arch.gpr[rt] = vcpu_e500->mas1; break;
  150 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
150 151 case SPRN_MAS2:
151   - vcpu->arch.gpr[rt] = vcpu_e500->mas2; break;
  152 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
152 153 case SPRN_MAS3:
153   - vcpu->arch.gpr[rt] = vcpu_e500->mas3; break;
  154 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break;
154 155 case SPRN_MAS4:
155   - vcpu->arch.gpr[rt] = vcpu_e500->mas4; break;
  156 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
156 157 case SPRN_MAS6:
157   - vcpu->arch.gpr[rt] = vcpu_e500->mas6; break;
  158 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
158 159 case SPRN_MAS7:
159   - vcpu->arch.gpr[rt] = vcpu_e500->mas7; break;
  160 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break;
160 161  
161 162 case SPRN_TLB0CFG:
162   - vcpu->arch.gpr[rt] = mfspr(SPRN_TLB0CFG);
163   - vcpu->arch.gpr[rt] &= ~0xfffUL;
164   - vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[0];
  163 + {
  164 + ulong tmp = mfspr(SPRN_TLB0CFG);
  165 +
  166 + tmp &= ~0xfffUL;
  167 + tmp |= vcpu_e500->guest_tlb_size[0];
  168 + kvmppc_set_gpr(vcpu, rt, tmp);
165 169 break;
  170 + }
166 171  
167 172 case SPRN_TLB1CFG:
168   - vcpu->arch.gpr[rt] = mfspr(SPRN_TLB1CFG);
169   - vcpu->arch.gpr[rt] &= ~0xfffUL;
170   - vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[1];
  173 + {
  174 + ulong tmp = mfspr(SPRN_TLB1CFG);
  175 +
  176 + tmp &= ~0xfffUL;
  177 + tmp |= vcpu_e500->guest_tlb_size[1];
  178 + kvmppc_set_gpr(vcpu, rt, tmp);
171 179 break;
  180 + }
172 181  
173 182 case SPRN_L1CSR1:
174   - vcpu->arch.gpr[rt] = vcpu_e500->l1csr1; break;
  183 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
175 184 case SPRN_HID0:
176   - vcpu->arch.gpr[rt] = vcpu_e500->hid0; break;
  185 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
177 186 case SPRN_HID1:
178   - vcpu->arch.gpr[rt] = vcpu_e500->hid1; break;
  187 + kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
179 188  
180 189 case SPRN_MMUCSR0:
181   - vcpu->arch.gpr[rt] = 0; break;
  190 + kvmppc_set_gpr(vcpu, rt, 0); break;
182 191  
183 192 case SPRN_MMUCFG:
184   - vcpu->arch.gpr[rt] = mfspr(SPRN_MMUCFG); break;
  193 + kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;
185 194  
186 195 /* extra exceptions */
187 196 case SPRN_IVOR32:
188   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
  197 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
189 198 break;
190 199 case SPRN_IVOR33:
191   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
  200 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
192 201 break;
193 202 case SPRN_IVOR34:
194   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
  203 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
195 204 break;
196 205 case SPRN_IVOR35:
197   - vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
  206 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
198 207 break;
199 208 default:
200 209 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
arch/powerpc/kvm/e500_tlb.c
... ... @@ -417,7 +417,7 @@
417 417 int esel, tlbsel;
418 418 gva_t ea;
419 419  
420   - ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb];
  420 + ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);
421 421  
422 422 ia = (ea >> 2) & 0x1;
423 423  
... ... @@ -470,7 +470,7 @@
470 470 struct tlbe *gtlbe = NULL;
471 471 gva_t ea;
472 472  
473   - ea = vcpu->arch.gpr[rb];
  473 + ea = kvmppc_get_gpr(vcpu, rb);
474 474  
475 475 for (tlbsel = 0; tlbsel < 2; tlbsel++) {
476 476 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
arch/powerpc/kvm/emulate.c
... ... @@ -170,14 +170,14 @@
170 170 case OP_31_XOP_STWX:
171 171 rs = get_rs(inst);
172 172 emulated = kvmppc_handle_store(run, vcpu,
173   - vcpu->arch.gpr[rs],
  173 + kvmppc_get_gpr(vcpu, rs),
174 174 4, 1);
175 175 break;
176 176  
177 177 case OP_31_XOP_STBX:
178 178 rs = get_rs(inst);
179 179 emulated = kvmppc_handle_store(run, vcpu,
180   - vcpu->arch.gpr[rs],
  180 + kvmppc_get_gpr(vcpu, rs),
181 181 1, 1);
182 182 break;
183 183  
... ... @@ -186,14 +186,14 @@
186 186 ra = get_ra(inst);
187 187 rb = get_rb(inst);
188 188  
189   - ea = vcpu->arch.gpr[rb];
  189 + ea = kvmppc_get_gpr(vcpu, rb);
190 190 if (ra)
191   - ea += vcpu->arch.gpr[ra];
  191 + ea += kvmppc_get_gpr(vcpu, ra);
192 192  
193 193 emulated = kvmppc_handle_store(run, vcpu,
194   - vcpu->arch.gpr[rs],
  194 + kvmppc_get_gpr(vcpu, rs),
195 195 1, 1);
196   - vcpu->arch.gpr[rs] = ea;
  196 + kvmppc_set_gpr(vcpu, rs, ea);
197 197 break;
198 198  
199 199 case OP_31_XOP_LHZX:
... ... @@ -206,12 +206,12 @@
206 206 ra = get_ra(inst);
207 207 rb = get_rb(inst);
208 208  
209   - ea = vcpu->arch.gpr[rb];
  209 + ea = kvmppc_get_gpr(vcpu, rb);
210 210 if (ra)
211   - ea += vcpu->arch.gpr[ra];
  211 + ea += kvmppc_get_gpr(vcpu, ra);
212 212  
213 213 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
214   - vcpu->arch.gpr[ra] = ea;
  214 + kvmppc_set_gpr(vcpu, ra, ea);
215 215 break;
216 216  
217 217 case OP_31_XOP_MFSPR:
... ... @@ -220,47 +220,49 @@
220 220  
221 221 switch (sprn) {
222 222 case SPRN_SRR0:
223   - vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
  223 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
224 224 case SPRN_SRR1:
225   - vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
  225 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
226 226 case SPRN_PVR:
227   - vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
  227 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
228 228 case SPRN_PIR:
229   - vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
  229 + kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
230 230 case SPRN_MSSSR0:
231   - vcpu->arch.gpr[rt] = 0; break;
  231 + kvmppc_set_gpr(vcpu, rt, 0); break;
232 232  
233 233 /* Note: mftb and TBRL/TBWL are user-accessible, so
234 234 * the guest can always access the real TB anyways.
235 235 * In fact, we probably will never see these traps. */
236 236 case SPRN_TBWL:
237   - vcpu->arch.gpr[rt] = get_tb() >> 32; break;
  237 + kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
238 238 case SPRN_TBWU:
239   - vcpu->arch.gpr[rt] = get_tb(); break;
  239 + kvmppc_set_gpr(vcpu, rt, get_tb()); break;
240 240  
241 241 case SPRN_SPRG0:
242   - vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
  242 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
243 243 case SPRN_SPRG1:
244   - vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
  244 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
245 245 case SPRN_SPRG2:
246   - vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
  246 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
247 247 case SPRN_SPRG3:
248   - vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
  248 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
249 249 /* Note: SPRG4-7 are user-readable, so we don't get
250 250 * a trap. */
251 251  
252 252 case SPRN_DEC:
253 253 {
254 254 u64 jd = get_tb() - vcpu->arch.dec_jiffies;
255   - vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
256   - pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
  255 + kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
  256 + pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
  257 + vcpu->arch.dec, jd,
  258 + kvmppc_get_gpr(vcpu, rt));
257 259 break;
258 260 }
259 261 default:
260 262 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
261 263 if (emulated == EMULATE_FAIL) {
262 264 printk("mfspr: unknown spr %x\n", sprn);
263   - vcpu->arch.gpr[rt] = 0;
  265 + kvmppc_set_gpr(vcpu, rt, 0);
264 266 }
265 267 break;
266 268 }
... ... @@ -272,7 +274,7 @@
272 274 rb = get_rb(inst);
273 275  
274 276 emulated = kvmppc_handle_store(run, vcpu,
275   - vcpu->arch.gpr[rs],
  277 + kvmppc_get_gpr(vcpu, rs),
276 278 2, 1);
277 279 break;
278 280  
... ... @@ -281,14 +283,14 @@
281 283 ra = get_ra(inst);
282 284 rb = get_rb(inst);
283 285  
284   - ea = vcpu->arch.gpr[rb];
  286 + ea = kvmppc_get_gpr(vcpu, rb);
285 287 if (ra)
286   - ea += vcpu->arch.gpr[ra];
  288 + ea += kvmppc_get_gpr(vcpu, ra);
287 289  
288 290 emulated = kvmppc_handle_store(run, vcpu,
289   - vcpu->arch.gpr[rs],
  291 + kvmppc_get_gpr(vcpu, rs),
290 292 2, 1);
291   - vcpu->arch.gpr[ra] = ea;
  293 + kvmppc_set_gpr(vcpu, ra, ea);
292 294 break;
293 295  
294 296 case OP_31_XOP_MTSPR:
... ... @@ -296,9 +298,9 @@
296 298 rs = get_rs(inst);
297 299 switch (sprn) {
298 300 case SPRN_SRR0:
299   - vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
  301 + vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
300 302 case SPRN_SRR1:
301   - vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
  303 + vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;
302 304  
303 305 /* XXX We need to context-switch the timebase for
304 306 * watchdog and FIT. */
... ... @@ -308,18 +310,18 @@
308 310 case SPRN_MSSSR0: break;
309 311  
310 312 case SPRN_DEC:
311   - vcpu->arch.dec = vcpu->arch.gpr[rs];
  313 + vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
312 314 kvmppc_emulate_dec(vcpu);
313 315 break;
314 316  
315 317 case SPRN_SPRG0:
316   - vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
  318 + vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
317 319 case SPRN_SPRG1:
318   - vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
  320 + vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
319 321 case SPRN_SPRG2:
320   - vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
  322 + vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
321 323 case SPRN_SPRG3:
322   - vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
  324 + vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;
323 325  
324 326 default:
325 327 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
... ... @@ -351,7 +353,7 @@
351 353 rb = get_rb(inst);
352 354  
353 355 emulated = kvmppc_handle_store(run, vcpu,
354   - vcpu->arch.gpr[rs],
  356 + kvmppc_get_gpr(vcpu, rs),
355 357 4, 0);
356 358 break;
357 359  
... ... @@ -366,7 +368,7 @@
366 368 rb = get_rb(inst);
367 369  
368 370 emulated = kvmppc_handle_store(run, vcpu,
369   - vcpu->arch.gpr[rs],
  371 + kvmppc_get_gpr(vcpu, rs),
370 372 2, 0);
371 373 break;
372 374  
... ... @@ -385,7 +387,7 @@
385 387 ra = get_ra(inst);
386 388 rt = get_rt(inst);
387 389 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
388   - vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
  390 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
389 391 break;
390 392  
391 393 case OP_LBZ:
... ... @@ -397,35 +399,39 @@
397 399 ra = get_ra(inst);
398 400 rt = get_rt(inst);
399 401 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
400   - vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
  402 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
401 403 break;
402 404  
403 405 case OP_STW:
404 406 rs = get_rs(inst);
405   - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
  407 + emulated = kvmppc_handle_store(run, vcpu,
  408 + kvmppc_get_gpr(vcpu, rs),
406 409 4, 1);
407 410 break;
408 411  
409 412 case OP_STWU:
410 413 ra = get_ra(inst);
411 414 rs = get_rs(inst);
412   - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
  415 + emulated = kvmppc_handle_store(run, vcpu,
  416 + kvmppc_get_gpr(vcpu, rs),
413 417 4, 1);
414   - vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
  418 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
415 419 break;
416 420  
417 421 case OP_STB:
418 422 rs = get_rs(inst);
419   - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
  423 + emulated = kvmppc_handle_store(run, vcpu,
  424 + kvmppc_get_gpr(vcpu, rs),
420 425 1, 1);
421 426 break;
422 427  
423 428 case OP_STBU:
424 429 ra = get_ra(inst);
425 430 rs = get_rs(inst);
426   - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
  431 + emulated = kvmppc_handle_store(run, vcpu,
  432 + kvmppc_get_gpr(vcpu, rs),
427 433 1, 1);
428   - vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
  434 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
429 435 break;
430 436  
431 437 case OP_LHZ:
432 438  
433 439  
434 440  
... ... @@ -437,21 +443,23 @@
437 443 ra = get_ra(inst);
438 444 rt = get_rt(inst);
439 445 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
440   - vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
  446 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
441 447 break;
442 448  
443 449 case OP_STH:
444 450 rs = get_rs(inst);
445   - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
  451 + emulated = kvmppc_handle_store(run, vcpu,
  452 + kvmppc_get_gpr(vcpu, rs),
446 453 2, 1);
447 454 break;
448 455  
449 456 case OP_STHU:
450 457 ra = get_ra(inst);
451 458 rs = get_rs(inst);
452   - emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
  459 + emulated = kvmppc_handle_store(run, vcpu,
  460 + kvmppc_get_gpr(vcpu, rs),
453 461 2, 1);
454   - vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
  462 + kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
455 463 break;
456 464  
457 465 default:
arch/powerpc/kvm/powerpc.c
... ... @@ -270,34 +270,35 @@
270 270 static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
271 271 struct kvm_run *run)
272 272 {
273   - ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
274   - *gpr = run->dcr.data;
  273 + kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
275 274 }
276 275  
277 276 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
278 277 struct kvm_run *run)
279 278 {
280   - ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
  279 + ulong gpr;
281 280  
282   - if (run->mmio.len > sizeof(*gpr)) {
  281 + if (run->mmio.len > sizeof(gpr)) {
283 282 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
284 283 return;
285 284 }
286 285  
287 286 if (vcpu->arch.mmio_is_bigendian) {
288 287 switch (run->mmio.len) {
289   - case 4: *gpr = *(u32 *)run->mmio.data; break;
290   - case 2: *gpr = *(u16 *)run->mmio.data; break;
291   - case 1: *gpr = *(u8 *)run->mmio.data; break;
  288 + case 4: gpr = *(u32 *)run->mmio.data; break;
  289 + case 2: gpr = *(u16 *)run->mmio.data; break;
  290 + case 1: gpr = *(u8 *)run->mmio.data; break;
292 291 }
293 292 } else {
294 293 /* Convert BE data from userland back to LE. */
295 294 switch (run->mmio.len) {
296   - case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
297   - case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
298   - case 1: *gpr = *(u8 *)run->mmio.data; break;
  295 + case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
  296 + case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
  297 + case 1: gpr = *(u8 *)run->mmio.data; break;
299 298 }
300 299 }
  300 +
  301 + kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
301 302 }
302 303  
303 304 int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
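
The powerpc.c hunk above is the one place where the conversion is more
than a textual substitution: the old code handed out a raw pointer into
vcpu->arch.gpr and wrote through it, which no longer works once an
accessor decides where a GPR actually lives. A sketch of the pattern
(value stands in for the data being completed):

	/* before: the store escapes through a pointer into the array */
	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
	*gpr = value;

	/* after: build the result in a local, then store it exactly once */
	ulong gpr = value;
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);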