Commit efff19122315f1431f6b02cd2983b15f5d3957bd

Authored by Paul Mackerras
Committed by Alexander Graf
1 parent 09548fdaf3

KVM: PPC: Store FP/VSX/VMX state in thread_fp/vr_state structures

This uses struct thread_fp_state and struct thread_vr_state to store
the floating-point, VMX/Altivec and VSX state, rather than flat arrays.
This makes transferring the state to/from the thread_struct simpler
and allows us to unify the get/set_one_reg implementations for the
VSX registers.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>

Showing 9 changed files with 131 additions and 220 deletions Side-by-side Diff

arch/powerpc/include/asm/kvm_host.h
... ... @@ -410,8 +410,7 @@
410 410  
411 411 ulong gpr[32];
412 412  
413   - u64 fpr[32];
414   - u64 fpscr;
  413 + struct thread_fp_state fp;
415 414  
416 415 #ifdef CONFIG_SPE
417 416 ulong evr[32];
418 417  
... ... @@ -420,14 +419,9 @@
420 419 u64 acc;
421 420 #endif
422 421 #ifdef CONFIG_ALTIVEC
423   - vector128 vr[32];
424   - vector128 vscr;
  422 + struct thread_vr_state vr;
425 423 #endif
426 424  
427   -#ifdef CONFIG_VSX
428   - u64 vsr[64];
429   -#endif
430   -
431 425 #ifdef CONFIG_KVM_BOOKE_HV
432 426 u32 host_mas4;
433 427 u32 host_mas6;
... ... @@ -618,6 +612,8 @@
618 612 u64 busy_preempt;
619 613 #endif
620 614 };
  615 +
  616 +#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
621 617  
622 618 /* Values for vcpu->arch.state */
623 619 #define KVMPPC_VCPU_NOTREADY 0
arch/powerpc/kernel/asm-offsets.c
... ... @@ -425,14 +425,11 @@
425 425 DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
426 426 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
427 427 DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
428   - DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
429   - DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
  428 + DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
  429 + DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fp.fpscr));
430 430 #ifdef CONFIG_ALTIVEC
431   - DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
432   - DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
433   -#endif
434   -#ifdef CONFIG_VSX
435   - DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
  431 + DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
  432 + DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vr.vscr));
436 433 #endif
437 434 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
438 435 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
arch/powerpc/kvm/book3s.c
... ... @@ -577,10 +577,10 @@
577 577 break;
578 578 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
579 579 i = reg->id - KVM_REG_PPC_FPR0;
580   - val = get_reg_val(reg->id, vcpu->arch.fpr[i]);
  580 + val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
581 581 break;
582 582 case KVM_REG_PPC_FPSCR:
583   - val = get_reg_val(reg->id, vcpu->arch.fpscr);
  583 + val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
584 584 break;
585 585 #ifdef CONFIG_ALTIVEC
586 586 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
587 587  
588 588  
... ... @@ -588,19 +588,30 @@
588 588 r = -ENXIO;
589 589 break;
590 590 }
591   - val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0];
  591 + val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
592 592 break;
593 593 case KVM_REG_PPC_VSCR:
594 594 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
595 595 r = -ENXIO;
596 596 break;
597 597 }
598   - val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]);
  598 + val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
599 599 break;
600 600 case KVM_REG_PPC_VRSAVE:
601 601 val = get_reg_val(reg->id, vcpu->arch.vrsave);
602 602 break;
603 603 #endif /* CONFIG_ALTIVEC */
  604 +#ifdef CONFIG_VSX
  605 + case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
  606 + if (cpu_has_feature(CPU_FTR_VSX)) {
  607 + long int i = reg->id - KVM_REG_PPC_VSR0;
  608 + val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
  609 + val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
  610 + } else {
  611 + r = -ENXIO;
  612 + }
  613 + break;
  614 +#endif /* CONFIG_VSX */
604 615 case KVM_REG_PPC_DEBUG_INST: {
605 616 u32 opcode = INS_TW;
606 617 r = copy_to_user((u32 __user *)(long)reg->addr,
607 618  
... ... @@ -656,10 +667,10 @@
656 667 break;
657 668 case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
658 669 i = reg->id - KVM_REG_PPC_FPR0;
659   - vcpu->arch.fpr[i] = set_reg_val(reg->id, val);
  670 + VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
660 671 break;
661 672 case KVM_REG_PPC_FPSCR:
662   - vcpu->arch.fpscr = set_reg_val(reg->id, val);
  673 + vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
663 674 break;
664 675 #ifdef CONFIG_ALTIVEC
665 676 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
666 677  
... ... @@ -667,14 +678,14 @@
667 678 r = -ENXIO;
668 679 break;
669 680 }
670   - vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
  681 + vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
671 682 break;
672 683 case KVM_REG_PPC_VSCR:
673 684 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
674 685 r = -ENXIO;
675 686 break;
676 687 }
677   - vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val);
  688 + vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
678 689 break;
679 690 case KVM_REG_PPC_VRSAVE:
680 691 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
... ... @@ -684,6 +695,17 @@
684 695 vcpu->arch.vrsave = set_reg_val(reg->id, val);
685 696 break;
686 697 #endif /* CONFIG_ALTIVEC */
  698 +#ifdef CONFIG_VSX
  699 + case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
  700 + if (cpu_has_feature(CPU_FTR_VSX)) {
  701 + long int i = reg->id - KVM_REG_PPC_VSR0;
  702 + vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
  703 + vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
  704 + } else {
  705 + r = -ENXIO;
  706 + }
  707 + break;
  708 +#endif /* CONFIG_VSX */
687 709 #ifdef CONFIG_KVM_XICS
688 710 case KVM_REG_PPC_ICP_STATE:
689 711 if (!vcpu->arch.icp) {
arch/powerpc/kvm/book3s_hv.c
... ... @@ -811,27 +811,6 @@
811 811 case KVM_REG_PPC_SDAR:
812 812 *val = get_reg_val(id, vcpu->arch.sdar);
813 813 break;
814   -#ifdef CONFIG_VSX
815   - case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
816   - if (cpu_has_feature(CPU_FTR_VSX)) {
817   - /* VSX => FP reg i is stored in arch.vsr[2*i] */
818   - long int i = id - KVM_REG_PPC_FPR0;
819   - *val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
820   - } else {
821   - /* let generic code handle it */
822   - r = -EINVAL;
823   - }
824   - break;
825   - case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
826   - if (cpu_has_feature(CPU_FTR_VSX)) {
827   - long int i = id - KVM_REG_PPC_VSR0;
828   - val->vsxval[0] = vcpu->arch.vsr[2 * i];
829   - val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
830   - } else {
831   - r = -ENXIO;
832   - }
833   - break;
834   -#endif /* CONFIG_VSX */
835 814 case KVM_REG_PPC_VPA_ADDR:
836 815 spin_lock(&vcpu->arch.vpa_update_lock);
837 816 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
... ... @@ -914,27 +893,6 @@
914 893 case KVM_REG_PPC_SDAR:
915 894 vcpu->arch.sdar = set_reg_val(id, *val);
916 895 break;
917   -#ifdef CONFIG_VSX
918   - case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
919   - if (cpu_has_feature(CPU_FTR_VSX)) {
920   - /* VSX => FP reg i is stored in arch.vsr[2*i] */
921   - long int i = id - KVM_REG_PPC_FPR0;
922   - vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
923   - } else {
924   - /* let generic code handle it */
925   - r = -EINVAL;
926   - }
927   - break;
928   - case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
929   - if (cpu_has_feature(CPU_FTR_VSX)) {
930   - long int i = id - KVM_REG_PPC_VSR0;
931   - vcpu->arch.vsr[2 * i] = val->vsxval[0];
932   - vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
933   - } else {
934   - r = -ENXIO;
935   - }
936   - break;
937   -#endif /* CONFIG_VSX */
938 896 case KVM_REG_PPC_VPA_ADDR:
939 897 addr = set_reg_val(id, *val);
940 898 r = -EINVAL;
arch/powerpc/kvm/book3s_hv_rmhandlers.S
... ... @@ -1889,7 +1889,7 @@
1889 1889 BEGIN_FTR_SECTION
1890 1890 reg = 0
1891 1891 .rept 32
1892   - li r6,reg*16+VCPU_VSRS
  1892 + li r6,reg*16+VCPU_FPRS
1893 1893 STXVD2X(reg,R6,R3)
1894 1894 reg = reg + 1
1895 1895 .endr
... ... @@ -1951,7 +1951,7 @@
1951 1951 BEGIN_FTR_SECTION
1952 1952 reg = 0
1953 1953 .rept 32
1954   - li r7,reg*16+VCPU_VSRS
  1954 + li r7,reg*16+VCPU_FPRS
1955 1955 LXVD2X(reg,R7,R4)
1956 1956 reg = reg + 1
1957 1957 .endr
arch/powerpc/kvm/book3s_paired_singles.c
... ... @@ -160,7 +160,7 @@
160 160  
161 161 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
162 162 {
163   - kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]);
  163 + kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]);
164 164 }
165 165  
166 166 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
167 167  
... ... @@ -207,11 +207,11 @@
207 207 /* put in registers */
208 208 switch (ls_type) {
209 209 case FPU_LS_SINGLE:
210   - kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]);
  210 + kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs));
211 211 vcpu->arch.qpr[rs] = *((u32*)tmp);
212 212 break;
213 213 case FPU_LS_DOUBLE:
214   - vcpu->arch.fpr[rs] = *((u64*)tmp);
  214 + VCPU_FPR(vcpu, rs) = *((u64*)tmp);
215 215 break;
216 216 }
217 217  
218 218  
219 219  
... ... @@ -233,18 +233,18 @@
233 233  
234 234 switch (ls_type) {
235 235 case FPU_LS_SINGLE:
236   - kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp);
  236 + kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp);
237 237 val = *((u32*)tmp);
238 238 len = sizeof(u32);
239 239 break;
240 240 case FPU_LS_SINGLE_LOW:
241   - *((u32*)tmp) = vcpu->arch.fpr[rs];
242   - val = vcpu->arch.fpr[rs] & 0xffffffff;
  241 + *((u32*)tmp) = VCPU_FPR(vcpu, rs);
  242 + val = VCPU_FPR(vcpu, rs) & 0xffffffff;
243 243 len = sizeof(u32);
244 244 break;
245 245 case FPU_LS_DOUBLE:
246   - *((u64*)tmp) = vcpu->arch.fpr[rs];
247   - val = vcpu->arch.fpr[rs];
  246 + *((u64*)tmp) = VCPU_FPR(vcpu, rs);
  247 + val = VCPU_FPR(vcpu, rs);
248 248 len = sizeof(u64);
249 249 break;
250 250 default:
... ... @@ -301,7 +301,7 @@
301 301 emulated = EMULATE_DONE;
302 302  
303 303 /* put in registers */
304   - kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]);
  304 + kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs));
305 305 vcpu->arch.qpr[rs] = tmp[1];
306 306  
307 307 dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0],
... ... @@ -319,7 +319,7 @@
319 319 u32 tmp[2];
320 320 int len = w ? sizeof(u32) : sizeof(u64);
321 321  
322   - kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]);
  322 + kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]);
323 323 tmp[1] = vcpu->arch.qpr[rs];
324 324  
325 325 r = kvmppc_st(vcpu, &addr, len, tmp, true);
... ... @@ -512,7 +512,6 @@
512 512 u32 *src2, u32 *src3))
513 513 {
514 514 u32 *qpr = vcpu->arch.qpr;
515   - u64 *fpr = vcpu->arch.fpr;
516 515 u32 ps0_out;
517 516 u32 ps0_in1, ps0_in2, ps0_in3;
518 517 u32 ps1_in1, ps1_in2, ps1_in3;
519 518  
520 519  
... ... @@ -521,20 +520,20 @@
521 520 WARN_ON(rc);
522 521  
523 522 /* PS0 */
524   - kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
525   - kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
526   - kvm_cvt_df(&fpr[reg_in3], &ps0_in3);
  523 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
  524 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
  525 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3);
527 526  
528 527 if (scalar & SCALAR_LOW)
529 528 ps0_in2 = qpr[reg_in2];
530 529  
531   - func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
  530 + func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3);
532 531  
533 532 dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
534 533 ps0_in1, ps0_in2, ps0_in3, ps0_out);
535 534  
536 535 if (!(scalar & SCALAR_NO_PS0))
537   - kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
  536 + kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
538 537  
539 538 /* PS1 */
540 539 ps1_in1 = qpr[reg_in1];
... ... @@ -545,7 +544,7 @@
545 544 ps1_in2 = ps0_in2;
546 545  
547 546 if (!(scalar & SCALAR_NO_PS1))
548   - func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
  547 + func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3);
549 548  
550 549 dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n",
551 550 ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]);
... ... @@ -561,7 +560,6 @@
561 560 u32 *src2))
562 561 {
563 562 u32 *qpr = vcpu->arch.qpr;
564   - u64 *fpr = vcpu->arch.fpr;
565 563 u32 ps0_out;
566 564 u32 ps0_in1, ps0_in2;
567 565 u32 ps1_out;
568 566  
569 567  
570 568  
... ... @@ -571,20 +569,20 @@
571 569 WARN_ON(rc);
572 570  
573 571 /* PS0 */
574   - kvm_cvt_df(&fpr[reg_in1], &ps0_in1);
  572 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1);
575 573  
576 574 if (scalar & SCALAR_LOW)
577 575 ps0_in2 = qpr[reg_in2];
578 576 else
579   - kvm_cvt_df(&fpr[reg_in2], &ps0_in2);
  577 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2);
580 578  
581   - func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
  579 + func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2);
582 580  
583 581 if (!(scalar & SCALAR_NO_PS0)) {
584 582 dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n",
585 583 ps0_in1, ps0_in2, ps0_out);
586 584  
587   - kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
  585 + kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
588 586 }
589 587  
590 588 /* PS1 */
... ... @@ -594,7 +592,7 @@
594 592 if (scalar & SCALAR_HIGH)
595 593 ps1_in2 = ps0_in2;
596 594  
597   - func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
  595 + func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2);
598 596  
599 597 if (!(scalar & SCALAR_NO_PS1)) {
600 598 qpr[reg_out] = ps1_out;
... ... @@ -612,7 +610,6 @@
612 610 u32 *dst, u32 *src1))
613 611 {
614 612 u32 *qpr = vcpu->arch.qpr;
615   - u64 *fpr = vcpu->arch.fpr;
616 613 u32 ps0_out, ps0_in;
617 614 u32 ps1_in;
618 615  
619 616  
620 617  
... ... @@ -620,17 +617,17 @@
620 617 WARN_ON(rc);
621 618  
622 619 /* PS0 */
623   - kvm_cvt_df(&fpr[reg_in], &ps0_in);
624   - func(&vcpu->arch.fpscr, &ps0_out, &ps0_in);
  620 + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in);
  621 + func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in);
625 622  
626 623 dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n",
627 624 ps0_in, ps0_out);
628 625  
629   - kvm_cvt_fd(&ps0_out, &fpr[reg_out]);
  626 + kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out));
630 627  
631 628 /* PS1 */
632 629 ps1_in = qpr[reg_in];
633   - func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in);
  630 + func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in);
634 631  
635 632 dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n",
636 633 ps1_in, qpr[reg_out]);
... ... @@ -649,10 +646,10 @@
649 646 int ax_rc = inst_get_field(inst, 21, 25);
650 647 short full_d = inst_get_field(inst, 16, 31);
651 648  
652   - u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
653   - u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
654   - u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
655   - u64 *fpr_c = &vcpu->arch.fpr[ax_rc];
  649 + u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd);
  650 + u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra);
  651 + u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb);
  652 + u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc);
656 653  
657 654 bool rcomp = (inst & 1) ? true : false;
658 655 u32 cr = kvmppc_get_cr(vcpu);
659 656  
660 657  
... ... @@ -674,11 +671,11 @@
674 671 /* Do we need to clear FE0 / FE1 here? Don't think so. */
675 672  
676 673 #ifdef DEBUG
677   - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
  674 + for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
678 675 u32 f;
679   - kvm_cvt_df(&vcpu->arch.fpr[i], &f);
  676 + kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
680 677 dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
681   - i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
  678 + i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]);
682 679 }
683 680 #endif
684 681  
... ... @@ -764,8 +761,8 @@
764 761 break;
765 762 }
766 763 case OP_4X_PS_NEG:
767   - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
768   - vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
  764 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
  765 + VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL;
769 766 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
770 767 vcpu->arch.qpr[ax_rd] ^= 0x80000000;
771 768 break;
... ... @@ -775,7 +772,7 @@
775 772 break;
776 773 case OP_4X_PS_MR:
777 774 WARN_ON(rcomp);
778   - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
  775 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
779 776 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
780 777 break;
781 778 case OP_4X_PS_CMPO1:
782 779  
783 780  
784 781  
785 782  
786 783  
787 784  
788 785  
... ... @@ -784,44 +781,44 @@
784 781 break;
785 782 case OP_4X_PS_NABS:
786 783 WARN_ON(rcomp);
787   - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
788   - vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
  784 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
  785 + VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL;
789 786 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
790 787 vcpu->arch.qpr[ax_rd] |= 0x80000000;
791 788 break;
792 789 case OP_4X_PS_ABS:
793 790 WARN_ON(rcomp);
794   - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
795   - vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
  791 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb);
  792 + VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL;
796 793 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
797 794 vcpu->arch.qpr[ax_rd] &= ~0x80000000;
798 795 break;
799 796 case OP_4X_PS_MERGE00:
800 797 WARN_ON(rcomp);
801   - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
802   - /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
803   - kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
  798 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
  799 + /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
  800 + kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
804 801 &vcpu->arch.qpr[ax_rd]);
805 802 break;
806 803 case OP_4X_PS_MERGE01:
807 804 WARN_ON(rcomp);
808   - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
  805 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra);
809 806 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
810 807 break;
811 808 case OP_4X_PS_MERGE10:
812 809 WARN_ON(rcomp);
813   - /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
  810 + /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
814 811 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
815   - &vcpu->arch.fpr[ax_rd]);
816   - /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
817   - kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
  812 + &VCPU_FPR(vcpu, ax_rd));
  813 + /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */
  814 + kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb),
818 815 &vcpu->arch.qpr[ax_rd]);
819 816 break;
820 817 case OP_4X_PS_MERGE11:
821 818 WARN_ON(rcomp);
822   - /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
  819 + /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */
823 820 kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
824   - &vcpu->arch.fpr[ax_rd]);
  821 + &VCPU_FPR(vcpu, ax_rd));
825 822 vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
826 823 break;
827 824 }
... ... @@ -856,7 +853,7 @@
856 853 case OP_4A_PS_SUM1:
857 854 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
858 855 ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
859   - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
  856 + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc);
860 857 break;
861 858 case OP_4A_PS_SUM0:
862 859 emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
863 860  
864 861  
865 862  
866 863  
867 864  
868 865  
869 866  
870 867  
871 868  
... ... @@ -1106,45 +1103,45 @@
1106 1103 case 59:
1107 1104 switch (inst_get_field(inst, 21, 30)) {
1108 1105 case OP_59_FADDS:
1109   - fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
  1106 + fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1110 1107 kvmppc_sync_qpr(vcpu, ax_rd);
1111 1108 break;
1112 1109 case OP_59_FSUBS:
1113   - fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
  1110 + fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1114 1111 kvmppc_sync_qpr(vcpu, ax_rd);
1115 1112 break;
1116 1113 case OP_59_FDIVS:
1117   - fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
  1114 + fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1118 1115 kvmppc_sync_qpr(vcpu, ax_rd);
1119 1116 break;
1120 1117 case OP_59_FRES:
1121   - fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
  1118 + fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1122 1119 kvmppc_sync_qpr(vcpu, ax_rd);
1123 1120 break;
1124 1121 case OP_59_FRSQRTES:
1125   - fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
  1122 + fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1126 1123 kvmppc_sync_qpr(vcpu, ax_rd);
1127 1124 break;
1128 1125 }
1129 1126 switch (inst_get_field(inst, 26, 30)) {
1130 1127 case OP_59_FMULS:
1131   - fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
  1128 + fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
1132 1129 kvmppc_sync_qpr(vcpu, ax_rd);
1133 1130 break;
1134 1131 case OP_59_FMSUBS:
1135   - fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
  1132 + fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1136 1133 kvmppc_sync_qpr(vcpu, ax_rd);
1137 1134 break;
1138 1135 case OP_59_FMADDS:
1139   - fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
  1136 + fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1140 1137 kvmppc_sync_qpr(vcpu, ax_rd);
1141 1138 break;
1142 1139 case OP_59_FNMSUBS:
1143   - fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
  1140 + fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1144 1141 kvmppc_sync_qpr(vcpu, ax_rd);
1145 1142 break;
1146 1143 case OP_59_FNMADDS:
1147   - fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
  1144 + fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1148 1145 kvmppc_sync_qpr(vcpu, ax_rd);
1149 1146 break;
1150 1147 }
1151 1148  
... ... @@ -1159,12 +1156,12 @@
1159 1156 break;
1160 1157 case OP_63_MFFS:
1161 1158 /* XXX missing CR */
1162   - *fpr_d = vcpu->arch.fpscr;
  1159 + *fpr_d = vcpu->arch.fp.fpscr;
1163 1160 break;
1164 1161 case OP_63_MTFSF:
1165 1162 /* XXX missing fm bits */
1166 1163 /* XXX missing CR */
1167   - vcpu->arch.fpscr = *fpr_b;
  1164 + vcpu->arch.fp.fpscr = *fpr_b;
1168 1165 break;
1169 1166 case OP_63_FCMPU:
1170 1167 {
... ... @@ -1172,7 +1169,7 @@
1172 1169 u32 cr0_mask = 0xf0000000;
1173 1170 u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
1174 1171  
1175   - fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
  1172 + fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
1176 1173 cr &= ~(cr0_mask >> cr_shift);
1177 1174 cr |= (tmp_cr & cr0_mask) >> cr_shift;
1178 1175 break;
1179 1176  
1180 1177  
1181 1178  
1182 1179  
1183 1180  
1184 1181  
1185 1182  
1186 1183  
1187 1184  
... ... @@ -1183,40 +1180,40 @@
1183 1180 u32 cr0_mask = 0xf0000000;
1184 1181 u32 cr_shift = inst_get_field(inst, 6, 8) * 4;
1185 1182  
1186   - fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
  1183 + fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b);
1187 1184 cr &= ~(cr0_mask >> cr_shift);
1188 1185 cr |= (tmp_cr & cr0_mask) >> cr_shift;
1189 1186 break;
1190 1187 }
1191 1188 case OP_63_FNEG:
1192   - fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
  1189 + fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1193 1190 break;
1194 1191 case OP_63_FMR:
1195 1192 *fpr_d = *fpr_b;
1196 1193 break;
1197 1194 case OP_63_FABS:
1198   - fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
  1195 + fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1199 1196 break;
1200 1197 case OP_63_FCPSGN:
1201   - fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
  1198 + fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1202 1199 break;
1203 1200 case OP_63_FDIV:
1204   - fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
  1201 + fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1205 1202 break;
1206 1203 case OP_63_FADD:
1207   - fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
  1204 + fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1208 1205 break;
1209 1206 case OP_63_FSUB:
1210   - fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
  1207 + fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b);
1211 1208 break;
1212 1209 case OP_63_FCTIW:
1213   - fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
  1210 + fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1214 1211 break;
1215 1212 case OP_63_FCTIWZ:
1216   - fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
  1213 + fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1217 1214 break;
1218 1215 case OP_63_FRSP:
1219   - fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
  1216 + fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1220 1217 kvmppc_sync_qpr(vcpu, ax_rd);
1221 1218 break;
1222 1219 case OP_63_FRSQRTE:
1223 1220  
1224 1221  
1225 1222  
1226 1223  
1227 1224  
1228 1225  
1229 1226  
1230 1227  
1231 1228  
... ... @@ -1224,39 +1221,39 @@
1224 1221 double one = 1.0f;
1225 1222  
1226 1223 /* fD = sqrt(fB) */
1227   - fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
  1224 + fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b);
1228 1225 /* fD = 1.0f / fD */
1229   - fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
  1226 + fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
1230 1227 break;
1231 1228 }
1232 1229 }
1233 1230 switch (inst_get_field(inst, 26, 30)) {
1234 1231 case OP_63_FMUL:
1235   - fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
  1232 + fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c);
1236 1233 break;
1237 1234 case OP_63_FSEL:
1238   - fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
  1235 + fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1239 1236 break;
1240 1237 case OP_63_FMSUB:
1241   - fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
  1238 + fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1242 1239 break;
1243 1240 case OP_63_FMADD:
1244   - fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
  1241 + fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1245 1242 break;
1246 1243 case OP_63_FNMSUB:
1247   - fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
  1244 + fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1248 1245 break;
1249 1246 case OP_63_FNMADD:
1250   - fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
  1247 + fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
1251 1248 break;
1252 1249 }
1253 1250 break;
1254 1251 }
1255 1252  
1256 1253 #ifdef DEBUG
1257   - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
  1254 + for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) {
1258 1255 u32 f;
1259   - kvm_cvt_df(&vcpu->arch.fpr[i], &f);
  1256 + kvm_cvt_df(&VCPU_FPR(vcpu, i), &f);
1260 1257 dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
1261 1258 }
1262 1259 #endif
arch/powerpc/kvm/book3s_pr.c
... ... @@ -545,12 +545,6 @@
545 545 void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
546 546 {
547 547 struct thread_struct *t = &current->thread;
548   - u64 *vcpu_fpr = vcpu->arch.fpr;
549   -#ifdef CONFIG_VSX
550   - u64 *vcpu_vsx = vcpu->arch.vsr;
551   -#endif
552   - u64 *thread_fpr = &t->fp_state.fpr[0][0];
553   - int i;
554 548  
555 549 /*
556 550 * VSX instructions can access FP and vector registers, so if
557 551  
... ... @@ -575,24 +569,14 @@
575 569 */
576 570 if (current->thread.regs->msr & MSR_FP)
577 571 giveup_fpu(current);
578   - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
579   - vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
580   -
581   - vcpu->arch.fpscr = t->fp_state.fpscr;
582   -
583   -#ifdef CONFIG_VSX
584   - if (cpu_has_feature(CPU_FTR_VSX))
585   - for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
586   - vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
587   -#endif
  572 + vcpu->arch.fp = t->fp_state;
588 573 }
589 574  
590 575 #ifdef CONFIG_ALTIVEC
591 576 if (msr & MSR_VEC) {
592 577 if (current->thread.regs->msr & MSR_VEC)
593 578 giveup_altivec(current);
594   - memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
595   - vcpu->arch.vscr = t->vr_state.vscr;
  579 + vcpu->arch.vr = t->vr_state;
596 580 }
597 581 #endif
598 582  
... ... @@ -640,12 +624,6 @@
640 624 ulong msr)
641 625 {
642 626 struct thread_struct *t = &current->thread;
643   - u64 *vcpu_fpr = vcpu->arch.fpr;
644   -#ifdef CONFIG_VSX
645   - u64 *vcpu_vsx = vcpu->arch.vsr;
646   -#endif
647   - u64 *thread_fpr = &t->fp_state.fpr[0][0];
648   - int i;
649 627  
650 628 /* When we have paired singles, we emulate in software */
651 629 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
... ... @@ -683,13 +661,7 @@
683 661 #endif
684 662  
685 663 if (msr & MSR_FP) {
686   - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
687   - thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
688   -#ifdef CONFIG_VSX
689   - for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
690   - thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
691   -#endif
692   - t->fp_state.fpscr = vcpu->arch.fpscr;
  664 + t->fp_state = vcpu->arch.fp;
693 665 t->fpexc_mode = 0;
694 666 enable_kernel_fp();
695 667 load_fp_state(&t->fp_state);
... ... @@ -697,8 +669,7 @@
697 669  
698 670 if (msr & MSR_VEC) {
699 671 #ifdef CONFIG_ALTIVEC
700   - memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
701   - t->vr_state.vscr = vcpu->arch.vscr;
  672 + t->vr_state = vcpu->arch.vr;
702 673 t->vrsave = -1;
703 674 enable_kernel_altivec();
704 675 load_vr_state(&t->vr_state);
... ... @@ -1118,19 +1089,6 @@
1118 1089 case KVM_REG_PPC_HIOR:
1119 1090 *val = get_reg_val(id, to_book3s(vcpu)->hior);
1120 1091 break;
1121   -#ifdef CONFIG_VSX
1122   - case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
1123   - long int i = id - KVM_REG_PPC_VSR0;
1124   -
1125   - if (!cpu_has_feature(CPU_FTR_VSX)) {
1126   - r = -ENXIO;
1127   - break;
1128   - }
1129   - val->vsxval[0] = vcpu->arch.fpr[i];
1130   - val->vsxval[1] = vcpu->arch.vsr[i];
1131   - break;
1132   - }
1133   -#endif /* CONFIG_VSX */
1134 1092 default:
1135 1093 r = -EINVAL;
1136 1094 break;
... ... @@ -1149,19 +1107,6 @@
1149 1107 to_book3s(vcpu)->hior = set_reg_val(id, *val);
1150 1108 to_book3s(vcpu)->hior_explicit = true;
1151 1109 break;
1152   -#ifdef CONFIG_VSX
1153   - case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
1154   - long int i = id - KVM_REG_PPC_VSR0;
1155   -
1156   - if (!cpu_has_feature(CPU_FTR_VSX)) {
1157   - r = -ENXIO;
1158   - break;
1159   - }
1160   - vcpu->arch.fpr[i] = val->vsxval[0];
1161   - vcpu->arch.vsr[i] = val->vsxval[1];
1162   - break;
1163   - }
1164   -#endif /* CONFIG_VSX */
1165 1110 default:
1166 1111 r = -EINVAL;
1167 1112 break;
arch/powerpc/kvm/booke.c
... ... @@ -707,9 +707,7 @@
707 707 fpexc_mode = current->thread.fpexc_mode;
708 708  
709 709 /* Restore guest FPU state to thread */
710   - memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
711   - sizeof(vcpu->arch.fpr));
712   - current->thread.fp_state.fpscr = vcpu->arch.fpscr;
  710 + current->thread.fp_state = vcpu->arch.fp;
713 711  
714 712 /*
715 713 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
... ... @@ -745,9 +743,7 @@
745 743 vcpu->fpu_active = 0;
746 744  
747 745 /* Save guest FPU state from thread */
748   - memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
749   - sizeof(vcpu->arch.fpr));
750   - vcpu->arch.fpscr = current->thread.fp_state.fpscr;
  746 + vcpu->arch.fp = current->thread.fp_state;
751 747  
752 748 /* Restore userspace FPU state from stack */
753 749 current->thread.fp_state = fp;
arch/powerpc/kvm/powerpc.c
... ... @@ -656,14 +656,14 @@
656 656 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
657 657 break;
658 658 case KVM_MMIO_REG_FPR:
659   - vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
  659 + VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
660 660 break;
661 661 #ifdef CONFIG_PPC_BOOK3S
662 662 case KVM_MMIO_REG_QPR:
663 663 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
664 664 break;
665 665 case KVM_MMIO_REG_FQPR:
666   - vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
  666 + VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
667 667 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
668 668 break;
669 669 #endif