Commit b005255e12a311d2c87ea70a7c7b192b2187c22c

Authored by Michael Neuling
Committed by Alexander Graf
1 parent e0b7ec058c

KVM: PPC: Book3S HV: Context-switch new POWER8 SPRs

This adds fields to the struct kvm_vcpu_arch to store the new
guest-accessible SPRs on POWER8, adds code to the get/set_one_reg
functions to allow userspace to access this state, and adds code to
the guest entry and exit to context-switch these SPRs between host
and guest.

Note that DPDES (Directed Privileged Doorbell Exception State) is
shared between threads on a core; hence we store it in struct
kvmppc_vcore and have the master thread save and restore it.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>

Showing 6 changed files with 361 additions and 3 deletions (side-by-side diff)

arch/powerpc/include/asm/kvm_host.h
... ... @@ -304,6 +304,7 @@
304 304 ulong lpcr;
305 305 u32 arch_compat;
306 306 ulong pcr;
  307 + ulong dpdes; /* doorbell state (POWER8) */
307 308 };
308 309  
309 310 #define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
... ... @@ -448,6 +449,7 @@
448 449 ulong pc;
449 450 ulong ctr;
450 451 ulong lr;
  452 + ulong tar;
451 453  
452 454 ulong xer;
453 455 u32 cr;
454 456  
455 457  
456 458  
... ... @@ -457,13 +459,32 @@
457 459 ulong guest_owned_ext;
458 460 ulong purr;
459 461 ulong spurr;
  462 + ulong ic;
  463 + ulong vtb;
460 464 ulong dscr;
461 465 ulong amr;
462 466 ulong uamor;
  467 + ulong iamr;
463 468 u32 ctrl;
464 469 ulong dabr;
  470 + ulong dawr;
  471 + ulong dawrx;
  472 + ulong ciabr;
465 473 ulong cfar;
466 474 ulong ppr;
  475 + ulong pspb;
  476 + ulong fscr;
  477 + ulong tfhar;
  478 + ulong tfiar;
  479 + ulong texasr;
  480 + ulong ebbhr;
  481 + ulong ebbrr;
  482 + ulong bescr;
  483 + ulong csigr;
  484 + ulong tacr;
  485 + ulong tcscr;
  486 + ulong acop;
  487 + ulong wort;
467 488 ulong shadow_srr1;
468 489 #endif
469 490 u32 vrsave; /* also USPRG0 */
470 491  
471 492  
... ... @@ -498,10 +519,12 @@
498 519 u32 ccr1;
499 520 u32 dbsr;
500 521  
501   - u64 mmcr[3];
  522 + u64 mmcr[5];
502 523 u32 pmc[8];
  524 + u32 spmc[2];
503 525 u64 siar;
504 526 u64 sdar;
  527 + u64 sier;
505 528  
506 529 #ifdef CONFIG_KVM_EXIT_TIMING
507 530 struct mutex exit_timing_lock;
arch/powerpc/include/asm/reg.h
... ... @@ -223,6 +223,11 @@
223 223 #define CTRL_TE 0x00c00000 /* thread enable */
224 224 #define CTRL_RUNLATCH 0x1
225 225 #define SPRN_DAWR 0xB4
  226 +#define SPRN_CIABR 0xBB
  227 +#define CIABR_PRIV 0x3
  228 +#define CIABR_PRIV_USER 1
  229 +#define CIABR_PRIV_SUPER 2
  230 +#define CIABR_PRIV_HYPER 3
226 231 #define SPRN_DAWRX 0xBC
227 232 #define DAWRX_USER (1UL << 0)
228 233 #define DAWRX_KERNEL (1UL << 1)
... ... @@ -260,6 +265,8 @@
260 265 #define SPRN_HRMOR 0x139 /* Real mode offset register */
261 266 #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
262 267 #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
  268 +#define SPRN_IC 0x350 /* Virtual Instruction Count */
  269 +#define SPRN_VTB 0x351 /* Virtual Time Base */
263 270 /* HFSCR and FSCR bit numbers are the same */
264 271 #define FSCR_TAR_LG 8 /* Enable Target Address Register */
265 272 #define FSCR_EBB_LG 7 /* Enable Event Based Branching */
... ... @@ -368,6 +375,8 @@
368 375 #define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */
369 376 #define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */
370 377 #define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */
  378 +#define SPRN_DHDES 0x0B1 /* Directed Hyp. Doorbell Exc. State */
  379 +#define SPRN_DPDES 0x0B0 /* Directed Priv. Doorbell Exc. State */
371 380 #define SPRN_EAR 0x11A /* External Address Register */
372 381 #define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */
373 382 #define SPRN_HASH2 0x3D3 /* Secondary Hash Address Register */
... ... @@ -427,6 +436,7 @@
427 436 #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
428 437 #define SPRN_IABR2 0x3FA /* 83xx */
429 438 #define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */
  439 +#define SPRN_IAMR 0x03D /* Instr. Authority Mask Reg */
430 440 #define SPRN_HID4 0x3F4 /* 970 HID4 */
431 441 #define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */
432 442 #define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */
... ... @@ -541,6 +551,7 @@
541 551 #define SPRN_PIR 0x3FF /* Processor Identification Register */
542 552 #endif
543 553 #define SPRN_TIR 0x1BE /* Thread Identification Register */
  554 +#define SPRN_PSPB 0x09F /* Problem State Priority Boost reg */
544 555 #define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */
545 556 #define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */
546 557 #define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */
... ... @@ -682,6 +693,7 @@
682 693 #define SPRN_EBBHR 804 /* Event based branch handler register */
683 694 #define SPRN_EBBRR 805 /* Event based branch return register */
684 695 #define SPRN_BESCR 806 /* Branch event status and control register */
  696 +#define SPRN_WORT 895 /* Workload optimization register - thread */
685 697  
686 698 #define SPRN_PMC1 787
687 699 #define SPRN_PMC2 788
... ... @@ -698,6 +710,11 @@
698 710 #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
699 711 #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
700 712 #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
  713 +#define SPRN_TACR 888
  714 +#define SPRN_TCSCR 889
  715 +#define SPRN_CSIGR 890
  716 +#define SPRN_SPMC1 892
  717 +#define SPRN_SPMC2 893
701 718  
702 719 /* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
703 720 #define MMCR0_USER_MASK (MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
arch/powerpc/include/uapi/asm/kvm.h
... ... @@ -545,6 +545,7 @@
545 545 #define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
546 546 #define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
547 547 #define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
  548 +#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4)
548 549  
549 550 #define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
550 551 #define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
arch/powerpc/kernel/asm-offsets.c
... ... @@ -432,6 +432,7 @@
432 432 DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
433 433 DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
434 434 DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
  435 + DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
435 436 DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
436 437 DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
437 438 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
438 439  
439 440  
... ... @@ -484,11 +485,17 @@
484 485 DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
485 486 DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
486 487 DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
  488 + DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
  489 + DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb));
487 490 DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
488 491 DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
489 492 DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
  493 + DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr));
490 494 DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
491 495 DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
  496 + DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr));
  497 + DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx));
  498 + DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr));
492 499 DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
493 500 DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
494 501 DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
495 502  
... ... @@ -497,8 +504,10 @@
497 504 DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
498 505 DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
499 506 DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
  507 + DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc));
500 508 DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
501 509 DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
  510 + DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier));
502 511 DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
503 512 DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
504 513 DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
... ... @@ -508,6 +517,19 @@
508 517 DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
509 518 DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
510 519 DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
  520 + DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
  521 + DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
  522 + DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
  523 + DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
  524 + DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
  525 + DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
  526 + DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
  527 + DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
  528 + DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr));
  529 + DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr));
  530 + DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
  531 + DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
  532 + DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
511 533 DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
512 534 DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
513 535 DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count));
... ... @@ -517,6 +539,7 @@
517 539 DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
518 540 DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
519 541 DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
  542 + DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
520 543 DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
521 544 DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
522 545 DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
arch/powerpc/kvm/book3s_hv.c
... ... @@ -800,7 +800,7 @@
800 800 case KVM_REG_PPC_UAMOR:
801 801 *val = get_reg_val(id, vcpu->arch.uamor);
802 802 break;
803   - case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
  803 + case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
804 804 i = id - KVM_REG_PPC_MMCR0;
805 805 *val = get_reg_val(id, vcpu->arch.mmcr[i]);
806 806 break;
807 807  
... ... @@ -808,12 +808,85 @@
808 808 i = id - KVM_REG_PPC_PMC1;
809 809 *val = get_reg_val(id, vcpu->arch.pmc[i]);
810 810 break;
  811 + case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
  812 + i = id - KVM_REG_PPC_SPMC1;
  813 + *val = get_reg_val(id, vcpu->arch.spmc[i]);
  814 + break;
811 815 case KVM_REG_PPC_SIAR:
812 816 *val = get_reg_val(id, vcpu->arch.siar);
813 817 break;
814 818 case KVM_REG_PPC_SDAR:
815 819 *val = get_reg_val(id, vcpu->arch.sdar);
816 820 break;
  821 + case KVM_REG_PPC_SIER:
  822 + *val = get_reg_val(id, vcpu->arch.sier);
  823 + break;
  824 + case KVM_REG_PPC_IAMR:
  825 + *val = get_reg_val(id, vcpu->arch.iamr);
  826 + break;
  827 + case KVM_REG_PPC_TFHAR:
  828 + *val = get_reg_val(id, vcpu->arch.tfhar);
  829 + break;
  830 + case KVM_REG_PPC_TFIAR:
  831 + *val = get_reg_val(id, vcpu->arch.tfiar);
  832 + break;
  833 + case KVM_REG_PPC_TEXASR:
  834 + *val = get_reg_val(id, vcpu->arch.texasr);
  835 + break;
  836 + case KVM_REG_PPC_FSCR:
  837 + *val = get_reg_val(id, vcpu->arch.fscr);
  838 + break;
  839 + case KVM_REG_PPC_PSPB:
  840 + *val = get_reg_val(id, vcpu->arch.pspb);
  841 + break;
  842 + case KVM_REG_PPC_EBBHR:
  843 + *val = get_reg_val(id, vcpu->arch.ebbhr);
  844 + break;
  845 + case KVM_REG_PPC_EBBRR:
  846 + *val = get_reg_val(id, vcpu->arch.ebbrr);
  847 + break;
  848 + case KVM_REG_PPC_BESCR:
  849 + *val = get_reg_val(id, vcpu->arch.bescr);
  850 + break;
  851 + case KVM_REG_PPC_TAR:
  852 + *val = get_reg_val(id, vcpu->arch.tar);
  853 + break;
  854 + case KVM_REG_PPC_DPDES:
  855 + *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
  856 + break;
  857 + case KVM_REG_PPC_DAWR:
  858 + *val = get_reg_val(id, vcpu->arch.dawr);
  859 + break;
  860 + case KVM_REG_PPC_DAWRX:
  861 + *val = get_reg_val(id, vcpu->arch.dawrx);
  862 + break;
  863 + case KVM_REG_PPC_CIABR:
  864 + *val = get_reg_val(id, vcpu->arch.ciabr);
  865 + break;
  866 + case KVM_REG_PPC_IC:
  867 + *val = get_reg_val(id, vcpu->arch.ic);
  868 + break;
  869 + case KVM_REG_PPC_VTB:
  870 + *val = get_reg_val(id, vcpu->arch.vtb);
  871 + break;
  872 + case KVM_REG_PPC_CSIGR:
  873 + *val = get_reg_val(id, vcpu->arch.csigr);
  874 + break;
  875 + case KVM_REG_PPC_TACR:
  876 + *val = get_reg_val(id, vcpu->arch.tacr);
  877 + break;
  878 + case KVM_REG_PPC_TCSCR:
  879 + *val = get_reg_val(id, vcpu->arch.tcscr);
  880 + break;
  881 + case KVM_REG_PPC_PID:
  882 + *val = get_reg_val(id, vcpu->arch.pid);
  883 + break;
  884 + case KVM_REG_PPC_ACOP:
  885 + *val = get_reg_val(id, vcpu->arch.acop);
  886 + break;
  887 + case KVM_REG_PPC_WORT:
  888 + *val = get_reg_val(id, vcpu->arch.wort);
  889 + break;
817 890 case KVM_REG_PPC_VPA_ADDR:
818 891 spin_lock(&vcpu->arch.vpa_update_lock);
819 892 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
... ... @@ -882,7 +955,7 @@
882 955 case KVM_REG_PPC_UAMOR:
883 956 vcpu->arch.uamor = set_reg_val(id, *val);
884 957 break;
885   - case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
  958 + case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
886 959 i = id - KVM_REG_PPC_MMCR0;
887 960 vcpu->arch.mmcr[i] = set_reg_val(id, *val);
888 961 break;
889 962  
... ... @@ -890,11 +963,87 @@
890 963 i = id - KVM_REG_PPC_PMC1;
891 964 vcpu->arch.pmc[i] = set_reg_val(id, *val);
892 965 break;
  966 + case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
  967 + i = id - KVM_REG_PPC_SPMC1;
  968 + vcpu->arch.spmc[i] = set_reg_val(id, *val);
  969 + break;
893 970 case KVM_REG_PPC_SIAR:
894 971 vcpu->arch.siar = set_reg_val(id, *val);
895 972 break;
896 973 case KVM_REG_PPC_SDAR:
897 974 vcpu->arch.sdar = set_reg_val(id, *val);
  975 + break;
  976 + case KVM_REG_PPC_SIER:
  977 + vcpu->arch.sier = set_reg_val(id, *val);
  978 + break;
  979 + case KVM_REG_PPC_IAMR:
  980 + vcpu->arch.iamr = set_reg_val(id, *val);
  981 + break;
  982 + case KVM_REG_PPC_TFHAR:
  983 + vcpu->arch.tfhar = set_reg_val(id, *val);
  984 + break;
  985 + case KVM_REG_PPC_TFIAR:
  986 + vcpu->arch.tfiar = set_reg_val(id, *val);
  987 + break;
  988 + case KVM_REG_PPC_TEXASR:
  989 + vcpu->arch.texasr = set_reg_val(id, *val);
  990 + break;
  991 + case KVM_REG_PPC_FSCR:
  992 + vcpu->arch.fscr = set_reg_val(id, *val);
  993 + break;
  994 + case KVM_REG_PPC_PSPB:
  995 + vcpu->arch.pspb = set_reg_val(id, *val);
  996 + break;
  997 + case KVM_REG_PPC_EBBHR:
  998 + vcpu->arch.ebbhr = set_reg_val(id, *val);
  999 + break;
  1000 + case KVM_REG_PPC_EBBRR:
  1001 + vcpu->arch.ebbrr = set_reg_val(id, *val);
  1002 + break;
  1003 + case KVM_REG_PPC_BESCR:
  1004 + vcpu->arch.bescr = set_reg_val(id, *val);
  1005 + break;
  1006 + case KVM_REG_PPC_TAR:
  1007 + vcpu->arch.tar = set_reg_val(id, *val);
  1008 + break;
  1009 + case KVM_REG_PPC_DPDES:
  1010 + vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
  1011 + break;
  1012 + case KVM_REG_PPC_DAWR:
  1013 + vcpu->arch.dawr = set_reg_val(id, *val);
  1014 + break;
  1015 + case KVM_REG_PPC_DAWRX:
  1016 + vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
  1017 + break;
  1018 + case KVM_REG_PPC_CIABR:
  1019 + vcpu->arch.ciabr = set_reg_val(id, *val);
  1020 + /* Don't allow setting breakpoints in hypervisor code */
  1021 + if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
  1022 + vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
  1023 + break;
  1024 + case KVM_REG_PPC_IC:
  1025 + vcpu->arch.ic = set_reg_val(id, *val);
  1026 + break;
  1027 + case KVM_REG_PPC_VTB:
  1028 + vcpu->arch.vtb = set_reg_val(id, *val);
  1029 + break;
  1030 + case KVM_REG_PPC_CSIGR:
  1031 + vcpu->arch.csigr = set_reg_val(id, *val);
  1032 + break;
  1033 + case KVM_REG_PPC_TACR:
  1034 + vcpu->arch.tacr = set_reg_val(id, *val);
  1035 + break;
  1036 + case KVM_REG_PPC_TCSCR:
  1037 + vcpu->arch.tcscr = set_reg_val(id, *val);
  1038 + break;
  1039 + case KVM_REG_PPC_PID:
  1040 + vcpu->arch.pid = set_reg_val(id, *val);
  1041 + break;
  1042 + case KVM_REG_PPC_ACOP:
  1043 + vcpu->arch.acop = set_reg_val(id, *val);
  1044 + break;
  1045 + case KVM_REG_PPC_WORT:
  1046 + vcpu->arch.wort = set_reg_val(id, *val);
898 1047 break;
899 1048 case KVM_REG_PPC_VPA_ADDR:
900 1049 addr = set_reg_val(id, *val);
arch/powerpc/kvm/book3s_hv_rmhandlers.S
... ... @@ -460,6 +460,13 @@
460 460 beq 38f
461 461 mtspr SPRN_PCR, r7
462 462 38:
  463 +
  464 +BEGIN_FTR_SECTION
  465 + /* DPDES is shared between threads */
  466 + ld r8, VCORE_DPDES(r5)
  467 + mtspr SPRN_DPDES, r8
  468 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
  469 +
463 470 li r0,1
464 471 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
465 472 b 10f
... ... @@ -659,6 +666,18 @@
659 666 mtspr SPRN_MMCRA, r6
660 667 mtspr SPRN_SIAR, r7
661 668 mtspr SPRN_SDAR, r8
  669 +BEGIN_FTR_SECTION
  670 + ld r5, VCPU_MMCR + 24(r4)
  671 + ld r6, VCPU_SIER(r4)
  672 + lwz r7, VCPU_PMC + 24(r4)
  673 + lwz r8, VCPU_PMC + 28(r4)
  674 + ld r9, VCPU_MMCR + 32(r4)
  675 + mtspr SPRN_MMCR2, r5
  676 + mtspr SPRN_SIER, r6
  677 + mtspr SPRN_SPMC1, r7
  678 + mtspr SPRN_SPMC2, r8
  679 + mtspr SPRN_MMCRS, r9
  680 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
662 681 mtspr SPRN_MMCR0, r3
663 682 isync
664 683  
... ... @@ -690,6 +709,61 @@
690 709 mtspr SPRN_DSCR, r5
691 710 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
692 711  
  712 +BEGIN_FTR_SECTION
  713 + /* Skip next section on POWER7 or PPC970 */
  714 + b 8f
  715 +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  716 + /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
  717 + mfmsr r8
  718 + li r0, 1
  719 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
  720 + mtmsrd r8
  721 +
  722 + /* Load up POWER8-specific registers */
  723 + ld r5, VCPU_IAMR(r4)
  724 + lwz r6, VCPU_PSPB(r4)
  725 + ld r7, VCPU_FSCR(r4)
  726 + mtspr SPRN_IAMR, r5
  727 + mtspr SPRN_PSPB, r6
  728 + mtspr SPRN_FSCR, r7
  729 + ld r5, VCPU_DAWR(r4)
  730 + ld r6, VCPU_DAWRX(r4)
  731 + ld r7, VCPU_CIABR(r4)
  732 + ld r8, VCPU_TAR(r4)
  733 + mtspr SPRN_DAWR, r5
  734 + mtspr SPRN_DAWRX, r6
  735 + mtspr SPRN_CIABR, r7
  736 + mtspr SPRN_TAR, r8
  737 + ld r5, VCPU_IC(r4)
  738 + ld r6, VCPU_VTB(r4)
  739 + mtspr SPRN_IC, r5
  740 + mtspr SPRN_VTB, r6
  741 + ld r5, VCPU_TFHAR(r4)
  742 + ld r6, VCPU_TFIAR(r4)
  743 + ld r7, VCPU_TEXASR(r4)
  744 + ld r8, VCPU_EBBHR(r4)
  745 + mtspr SPRN_TFHAR, r5
  746 + mtspr SPRN_TFIAR, r6
  747 + mtspr SPRN_TEXASR, r7
  748 + mtspr SPRN_EBBHR, r8
  749 + ld r5, VCPU_EBBRR(r4)
  750 + ld r6, VCPU_BESCR(r4)
  751 + ld r7, VCPU_CSIGR(r4)
  752 + ld r8, VCPU_TACR(r4)
  753 + mtspr SPRN_EBBRR, r5
  754 + mtspr SPRN_BESCR, r6
  755 + mtspr SPRN_CSIGR, r7
  756 + mtspr SPRN_TACR, r8
  757 + ld r5, VCPU_TCSCR(r4)
  758 + ld r6, VCPU_ACOP(r4)
  759 + lwz r7, VCPU_GUEST_PID(r4)
  760 + ld r8, VCPU_WORT(r4)
  761 + mtspr SPRN_TCSCR, r5
  762 + mtspr SPRN_ACOP, r6
  763 + mtspr SPRN_PID, r7
  764 + mtspr SPRN_WORT, r8
  765 +8:
  766 +
693 767 /*
694 768 * Set the decrementer to the guest decrementer.
695 769 */
... ... @@ -1081,6 +1155,54 @@
1081 1155 add r5,r5,r6
1082 1156 std r5,VCPU_DEC_EXPIRES(r9)
1083 1157  
  1158 +BEGIN_FTR_SECTION
  1159 + b 8f
  1160 +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  1161 + /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
  1162 + mfmsr r8
  1163 + li r0, 1
  1164 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
  1165 + mtmsrd r8
  1166 +
  1167 + /* Save POWER8-specific registers */
  1168 + mfspr r5, SPRN_IAMR
  1169 + mfspr r6, SPRN_PSPB
  1170 + mfspr r7, SPRN_FSCR
  1171 + std r5, VCPU_IAMR(r9)
  1172 + stw r6, VCPU_PSPB(r9)
  1173 + std r7, VCPU_FSCR(r9)
  1174 + mfspr r5, SPRN_IC
  1175 + mfspr r6, SPRN_VTB
  1176 + mfspr r7, SPRN_TAR
  1177 + std r5, VCPU_IC(r9)
  1178 + std r6, VCPU_VTB(r9)
  1179 + std r7, VCPU_TAR(r9)
  1180 + mfspr r5, SPRN_TFHAR
  1181 + mfspr r6, SPRN_TFIAR
  1182 + mfspr r7, SPRN_TEXASR
  1183 + mfspr r8, SPRN_EBBHR
  1184 + std r5, VCPU_TFHAR(r9)
  1185 + std r6, VCPU_TFIAR(r9)
  1186 + std r7, VCPU_TEXASR(r9)
  1187 + std r8, VCPU_EBBHR(r9)
  1188 + mfspr r5, SPRN_EBBRR
  1189 + mfspr r6, SPRN_BESCR
  1190 + mfspr r7, SPRN_CSIGR
  1191 + mfspr r8, SPRN_TACR
  1192 + std r5, VCPU_EBBRR(r9)
  1193 + std r6, VCPU_BESCR(r9)
  1194 + std r7, VCPU_CSIGR(r9)
  1195 + std r8, VCPU_TACR(r9)
  1196 + mfspr r5, SPRN_TCSCR
  1197 + mfspr r6, SPRN_ACOP
  1198 + mfspr r7, SPRN_PID
  1199 + mfspr r8, SPRN_WORT
  1200 + std r5, VCPU_TCSCR(r9)
  1201 + std r6, VCPU_ACOP(r9)
  1202 + stw r7, VCPU_GUEST_PID(r9)
  1203 + std r8, VCPU_WORT(r9)
  1204 +8:
  1205 +
1084 1206 /* Save and reset AMR and UAMOR before turning on the MMU */
1085 1207 BEGIN_FTR_SECTION
1086 1208 mfspr r5,SPRN_AMR
... ... @@ -1190,6 +1312,20 @@
1190 1312 stw r10, VCPU_PMC + 24(r9)
1191 1313 stw r11, VCPU_PMC + 28(r9)
1192 1314 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  1315 +BEGIN_FTR_SECTION
  1316 + mfspr r4, SPRN_MMCR2
  1317 + mfspr r5, SPRN_SIER
  1318 + mfspr r6, SPRN_SPMC1
  1319 + mfspr r7, SPRN_SPMC2
  1320 + mfspr r8, SPRN_MMCRS
  1321 + std r4, VCPU_MMCR + 24(r9)
  1322 + std r5, VCPU_SIER(r9)
  1323 + stw r6, VCPU_PMC + 24(r9)
  1324 + stw r7, VCPU_PMC + 28(r9)
  1325 + std r8, VCPU_MMCR + 32(r9)
  1326 + lis r4, 0x8000
  1327 + mtspr SPRN_MMCRS, r4
  1328 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1193 1329 22:
1194 1330 /* Clear out SLB */
1195 1331 li r5,0
... ... @@ -1289,6 +1425,15 @@
1289 1425 mtspr SPRN_SDR1,r6 /* switch to partition page table */
1290 1426 mtspr SPRN_LPID,r7
1291 1427 isync
  1428 +
  1429 +BEGIN_FTR_SECTION
  1430 + /* DPDES is shared between threads */
  1431 + mfspr r7, SPRN_DPDES
  1432 + std r7, VCORE_DPDES(r5)
  1433 + /* clear DPDES so we don't get guest doorbells in the host */
  1434 + li r8, 0
  1435 + mtspr SPRN_DPDES, r8
  1436 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1292 1437  
1293 1438 /* Subtract timebase offset from timebase */
1294 1439 ld r8,VCORE_TB_OFFSET(r5)