Commit e4e38121507a27d2ccc4b28d9e7fc4818a12c44c

Authored by Michael Neuling
Committed by Paul Mackerras
1 parent 7505258c5f

KVM: PPC: Book3S HV: Add transactional memory support

This adds saving of the transactional memory (TM) checkpointed state
on guest entry and exit.  We only do this if we see that the guest has
an active transaction.

It also adds emulation of the TM state changes when delivering IRQs
into the guest.  According to the architecture, if we are
transactional when an IRQ occurs, the TM state is changed to
suspended; otherwise it is left unchanged.
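
For illustration, the TS-field transition can be written out in plain C (a
minimal stand-alone sketch, not the kernel code itself; the MSR_TS_* bit
positions follow arch/powerpc/include/asm/reg.h and irq_delivery_msr() is a
made-up name for the example):

    #include <stdio.h>

    #define MSR_TS_S    (1ULL << 33)            /* transaction suspended */
    #define MSR_TS_T    (1ULL << 34)            /* transactional         */
    #define MSR_TS_MASK (MSR_TS_S | MSR_TS_T)

    /* New MSR on interrupt delivery: start from the vcpu's intr_msr and
     * fold in the TS state derived from the guest MSR at interrupt time. */
    static unsigned long long irq_delivery_msr(unsigned long long intr_msr,
                                               unsigned long long guest_msr)
    {
        if ((guest_msr & MSR_TS_MASK) == MSR_TS_T)   /* transactional...    */
            return intr_msr | MSR_TS_S;              /* ...goes to suspend  */
        return intr_msr | (guest_msr & MSR_TS_MASK); /* else TS unchanged   */
    }

    int main(void)
    {
        /* Transactional guest: delivered MSR ends up suspended. */
        printf("%d\n", (irq_delivery_msr(0, MSR_TS_T) & MSR_TS_MASK) == MSR_TS_S);
        /* Non-transactional guest: TS stays clear. */
        printf("%d\n", (irq_delivery_msr(0, 0) & MSR_TS_MASK) == 0);
        return 0;
    }

The patch performs this same transition twice: in C in
kvmppc_mmu_book3s_64_hv_reset_msr() and in assembly in the new
kvmppc_msr_interrupt helper.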

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Scott Wood <scottwood@freescale.com>

Showing 4 changed files with 149 additions and 30 deletions

arch/powerpc/include/asm/reg.h
... ... @@ -213,6 +213,7 @@
213 213 #define SPRN_ACOP 0x1F /* Available Coprocessor Register */
214 214 #define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
215 215 #define SPRN_TEXASR 0x82 /* Transaction EXception & Summary */
  216 +#define TEXASR_FS __MASK(63-36) /* Transaction Failure Summary */
216 217 #define SPRN_TEXASRU 0x83 /* '' '' '' Upper 32 */
217 218 #define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
218 219 #define SPRN_CTRLF 0x088
arch/powerpc/include/asm/tm.h
... ... @@ -7,6 +7,8 @@
7 7  
8 8 #include <uapi/asm/tm.h>
9 9  
  10 +#ifndef __ASSEMBLY__
  11 +
10 12 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
11 13 extern void do_load_up_transact_fpu(struct thread_struct *thread);
12 14 extern void do_load_up_transact_altivec(struct thread_struct *thread);
... ... @@ -20,4 +22,6 @@
20 22 extern void tm_abort(uint8_t cause);
21 23 extern void tm_save_sprs(struct thread_struct *thread);
22 24 extern void tm_restore_sprs(struct thread_struct *thread);
  25 +
  26 +#endif /* __ASSEMBLY__ */
arch/powerpc/kvm/book3s_64_mmu_hv.c
... ... @@ -262,7 +262,14 @@
262 262  
263 263 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
264 264 {
265   - kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
  265 + unsigned long msr = vcpu->arch.intr_msr;
  266 +
  267 + /* If transactional, change to suspend mode on IRQ delivery */
  268 + if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
  269 + msr |= MSR_TS_S;
  270 + else
  271 + msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
  272 + kvmppc_set_msr(vcpu, msr);
266 273 }
267 274  
268 275 /*
arch/powerpc/kvm/book3s_hv_rmhandlers.S
... ... @@ -28,7 +28,10 @@
28 28 #include <asm/exception-64s.h>
29 29 #include <asm/kvm_book3s_asm.h>
30 30 #include <asm/mmu-hash64.h>
  31 +#include <asm/tm.h>
31 32  
  33 +#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
  34 +
32 35 #ifdef __LITTLE_ENDIAN__
33 36 #error Need to fix lppaca and SLB shadow accesses in little endian mode
34 37 #endif
... ... @@ -597,6 +600,116 @@
597 600 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
598 601 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
599 602  
  603 +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  604 +BEGIN_FTR_SECTION
  605 + b skip_tm
  606 +END_FTR_SECTION_IFCLR(CPU_FTR_TM)
  607 +
  608 + /* Turn on TM/FP/VSX/VMX so we can restore them. */
  609 + mfmsr r5
  610 + li r6, MSR_TM >> 32
  611 + sldi r6, r6, 32
  612 + or r5, r5, r6
  613 + ori r5, r5, MSR_FP
  614 + oris r5, r5, (MSR_VEC | MSR_VSX)@h
  615 + mtmsrd r5
  616 +
  617 + /*
  618 + * The user may change these outside of a transaction, so they must
  619 + * always be context switched.
  620 + */
  621 + ld r5, VCPU_TFHAR(r4)
  622 + ld r6, VCPU_TFIAR(r4)
  623 + ld r7, VCPU_TEXASR(r4)
  624 + mtspr SPRN_TFHAR, r5
  625 + mtspr SPRN_TFIAR, r6
  626 + mtspr SPRN_TEXASR, r7
  627 +
  628 + ld r5, VCPU_MSR(r4)
  629 + rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
  630 + beq skip_tm /* TM not active in guest */
  631 +
  632 + /* Make sure the failure summary is set, otherwise we'll program check
  633 + * when we trechkpt. It's possible that this might not have been set
  634 + * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
  635 + * host.
  636 + */
  637 + oris r7, r7, (TEXASR_FS)@h
  638 + mtspr SPRN_TEXASR, r7
  639 +
  640 + /*
  641 + * We need to load up the checkpointed state for the guest.
  642 + * We need to do this early as it will blow away any GPRs, VSRs and
  643 + * some SPRs.
  644 + */
  645 +
  646 + mr r31, r4
  647 + addi r3, r31, VCPU_FPRS_TM
  648 + bl .load_fp_state
  649 + addi r3, r31, VCPU_VRS_TM
  650 + bl .load_vr_state
  651 + mr r4, r31
  652 + lwz r7, VCPU_VRSAVE_TM(r4)
  653 + mtspr SPRN_VRSAVE, r7
  654 +
  655 + ld r5, VCPU_LR_TM(r4)
  656 + lwz r6, VCPU_CR_TM(r4)
  657 + ld r7, VCPU_CTR_TM(r4)
  658 + ld r8, VCPU_AMR_TM(r4)
  659 + ld r9, VCPU_TAR_TM(r4)
  660 + mtlr r5
  661 + mtcr r6
  662 + mtctr r7
  663 + mtspr SPRN_AMR, r8
  664 + mtspr SPRN_TAR, r9
  665 +
  666 + /*
  667 + * Load up PPR and DSCR values but don't put them in the actual SPRs
  668 + * till the last moment to avoid running with userspace PPR and DSCR for
  669 + * too long.
  670 + */
  671 + ld r29, VCPU_DSCR_TM(r4)
  672 + ld r30, VCPU_PPR_TM(r4)
  673 +
  674 + std r2, PACATMSCRATCH(r13) /* Save TOC */
  675 +
  676 + /* Clear the MSR RI since r1, r13 are all going to be foobar. */
  677 + li r5, 0
  678 + mtmsrd r5, 1
  679 +
  680 + /* Load GPRs r0-r28 */
  681 + reg = 0
  682 + .rept 29
  683 + ld reg, VCPU_GPRS_TM(reg)(r31)
  684 + reg = reg + 1
  685 + .endr
  686 +
  687 + mtspr SPRN_DSCR, r29
  688 + mtspr SPRN_PPR, r30
  689 +
  690 + /* Load final GPRs */
  691 + ld 29, VCPU_GPRS_TM(29)(r31)
  692 + ld 30, VCPU_GPRS_TM(30)(r31)
  693 + ld 31, VCPU_GPRS_TM(31)(r31)
  694 +
  695 + /* TM checkpointed state is now set up. All GPRs are now volatile. */
  696 + TRECHKPT
  697 +
  698 + /* Now let's get back the state we need. */
  699 + HMT_MEDIUM
  700 + GET_PACA(r13)
  701 + ld r29, HSTATE_DSCR(r13)
  702 + mtspr SPRN_DSCR, r29
  703 + ld r4, HSTATE_KVM_VCPU(r13)
  704 + ld r1, HSTATE_HOST_R1(r13)
  705 + ld r2, PACATMSCRATCH(r13)
  706 +
  707 + /* Set the MSR RI since we have our registers back. */
  708 + li r5, MSR_RI
  709 + mtmsrd r5, 1
  710 +skip_tm:
  711 +#endif
  712 +
600 713 /* Load guest PMU registers */
601 714 /* R4 is live here (vcpu pointer) */
602 715 li r3, 1
... ... @@ -704,14 +817,6 @@
704 817 ld r6, VCPU_VTB(r4)
705 818 mtspr SPRN_IC, r5
706 819 mtspr SPRN_VTB, r6
707   -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
708   - ld r5, VCPU_TFHAR(r4)
709   - ld r6, VCPU_TFIAR(r4)
710   - ld r7, VCPU_TEXASR(r4)
711   - mtspr SPRN_TFHAR, r5
712   - mtspr SPRN_TFIAR, r6
713   - mtspr SPRN_TEXASR, r7
714   -#endif
715 820 ld r8, VCPU_EBBHR(r4)
716 821 mtspr SPRN_EBBHR, r8
717 822 ld r5, VCPU_EBBRR(r4)
... ... @@ -817,7 +922,8 @@
817 922 12: mtspr SPRN_SRR0, r10
818 923 mr r10,r0
819 924 mtspr SPRN_SRR1, r11
820   - ld r11, VCPU_INTR_MSR(r4)
  925 + mr r9, r4
  926 + bl kvmppc_msr_interrupt
821 927 5:
822 928  
823 929 /*
... ... @@ -1103,12 +1209,6 @@
1103 1209 BEGIN_FTR_SECTION
1104 1210 b 8f
1105 1211 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1106   - /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
1107   - mfmsr r8
1108   - li r0, 1
1109   - rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1110   - mtmsrd r8
1111   -
1112 1212 /* Save POWER8-specific registers */
1113 1213 mfspr r5, SPRN_IAMR
1114 1214 mfspr r6, SPRN_PSPB
... ... @@ -1122,14 +1222,6 @@
1122 1222 std r5, VCPU_IC(r9)
1123 1223 std r6, VCPU_VTB(r9)
1124 1224 std r7, VCPU_TAR(r9)
1125   -#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1126   - mfspr r5, SPRN_TFHAR
1127   - mfspr r6, SPRN_TFIAR
1128   - mfspr r7, SPRN_TEXASR
1129   - std r5, VCPU_TFHAR(r9)
1130   - std r6, VCPU_TFIAR(r9)
1131   - std r7, VCPU_TEXASR(r9)
1132   -#endif
1133 1225 mfspr r8, SPRN_EBBHR
1134 1226 std r8, VCPU_EBBHR(r9)
1135 1227 mfspr r5, SPRN_EBBRR
... ... @@ -1557,7 +1649,7 @@
1557 1649 mtspr SPRN_SRR0, r10
1558 1650 mtspr SPRN_SRR1, r11
1559 1651 li r10, BOOK3S_INTERRUPT_DATA_STORAGE
1560   - ld r11, VCPU_INTR_MSR(r9)
  1652 + bl kvmppc_msr_interrupt
1561 1653 fast_interrupt_c_return:
1562 1654 6: ld r7, VCPU_CTR(r9)
1563 1655 lwz r8, VCPU_XER(r9)
... ... @@ -1626,7 +1718,7 @@
1626 1718 1: mtspr SPRN_SRR0, r10
1627 1719 mtspr SPRN_SRR1, r11
1628 1720 li r10, BOOK3S_INTERRUPT_INST_STORAGE
1629   - ld r11, VCPU_INTR_MSR(r9)
  1721 + bl kvmppc_msr_interrupt
1630 1722 b fast_interrupt_c_return
1631 1723  
1632 1724 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
... ... @@ -1669,7 +1761,7 @@
1669 1761 mtspr SPRN_SRR0,r10
1670 1762 mtspr SPRN_SRR1,r11
1671 1763 li r10, BOOK3S_INTERRUPT_SYSCALL
1672   - ld r11, VCPU_INTR_MSR(r9)
  1764 + bl kvmppc_msr_interrupt
1673 1765 mr r4,r9
1674 1766 b fast_guest_return
1675 1767  
... ... @@ -1997,7 +2089,7 @@
1997 2089 beq mc_cont
1998 2090 /* If not, deliver a machine check. SRR0/1 are already set */
1999 2091 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
2000   - ld r11, VCPU_INTR_MSR(r9)
  2092 + bl kvmppc_msr_interrupt
2001 2093 b fast_interrupt_c_return
2002 2094  
2003 2095 /*
... ... @@ -2138,8 +2230,6 @@
2138 2230 mfspr r6,SPRN_VRSAVE
2139 2231 stw r6,VCPU_VRSAVE(r31)
2140 2232 mtlr r30
2141   - mtmsrd r5
2142   - isync
2143 2233 blr
2144 2234  
2145 2235 /*
... ... @@ -2186,4 +2276,21 @@
2186 2276 */
2187 2277 kvmppc_bad_host_intr:
2188 2278 b .
  2279 +
  2280 +/*
  2281 + * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
  2282 + * from VCPU_INTR_MSR and is modified based on the required TM state changes.
  2283 + * r11 has the guest MSR value (in/out)
  2284 + * r9 has a vcpu pointer (in)
  2285 + * r0 is used as a scratch register
  2286 + */
  2287 +kvmppc_msr_interrupt:
  2288 + rldicl r0, r11, 64 - MSR_TS_S_LG, 62
  2289 + cmpwi r0, 2 /* Check if we are in transactional state.. */
  2290 + ld r11, VCPU_INTR_MSR(r9)
  2291 + bne 1f
  2292 + /* ... if transactional, change to suspended */
  2293 + li r0, 1
  2294 +1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
  2295 + blr