Commit 218309b75b5e800af952362b5cbb086fa64f59ba

Authored by Paul Mackerras
Committed by Alexander Graf
1 parent 42d7604d0c

KVM: PPC: Book3S HV: Restructure kvmppc_hv_entry to be a subroutine

We have two paths into and out of the low-level guest entry and exit
code: from a vcpu task via kvmppc_hv_entry_trampoline, and from the
system reset vector for an offline secondary thread on POWER7 via
kvm_start_guest.  Currently both just branch to kvmppc_hv_entry to
enter the guest, and on guest exit, we test the vcpu physical thread
ID to detect which way we came in and thus whether we should return
to the vcpu task or go back to nap mode.

In order to make the code flow clearer, and to keep the code relating
to each flow together, this turns kvmppc_hv_entry into a subroutine
that follows the normal conventions for call and return.  This means
that kvmppc_hv_entry_trampoline() and kvmppc_hv_entry() now establish
normal stack frames, and we use the normal stack slots for saving
return addresses rather than local_paca->kvm_hstate.vmhandler.  Apart
from that this is mostly moving code around unchanged.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>

Showing 1 changed file with 178 additions and 166 deletions (side-by-side diff)

arch/powerpc/kvm/book3s_hv_rmhandlers.S
... ... @@ -66,8 +66,11 @@
66 66 * LR = return address to continue at after eventually re-enabling MMU
67 67 */
68 68 _GLOBAL(kvmppc_hv_entry_trampoline)
  69 + mflr r0
  70 + std r0, PPC_LR_STKOFF(r1)
  71 + stdu r1, -112(r1)
69 72 mfmsr r10
70   - LOAD_REG_ADDR(r5, kvmppc_hv_entry)
  73 + LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
71 74 li r0,MSR_RI
72 75 andc r0,r10,r0
73 76 li r6,MSR_IR | MSR_DR
74 77  
... ... @@ -77,12 +80,104 @@
77 80 mtsrr1 r6
78 81 RFI
79 82  
80   -/******************************************************************************
81   - * *
82   - * Entry code *
83   - * *
84   - *****************************************************************************/
  83 +kvmppc_call_hv_entry:
  84 + bl kvmppc_hv_entry
85 85  
  86 + /* Back from guest - restore host state and return to caller */
  87 +
  88 + /* Restore host DABR and DABRX */
  89 + ld r5,HSTATE_DABR(r13)
  90 + li r6,7
  91 + mtspr SPRN_DABR,r5
  92 + mtspr SPRN_DABRX,r6
  93 +
  94 + /* Restore SPRG3 */
  95 + ld r3,PACA_SPRG3(r13)
  96 + mtspr SPRN_SPRG3,r3
  97 +
  98 + /*
  99 + * Reload DEC. HDEC interrupts were disabled when
  100 + * we reloaded the host's LPCR value.
  101 + */
  102 + ld r3, HSTATE_DECEXP(r13)
  103 + mftb r4
  104 + subf r4, r4, r3
  105 + mtspr SPRN_DEC, r4
  106 +
  107 + /* Reload the host's PMU registers */
  108 + ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
  109 + lbz r4, LPPACA_PMCINUSE(r3)
  110 + cmpwi r4, 0
  111 + beq 23f /* skip if not */
  112 + lwz r3, HSTATE_PMC(r13)
  113 + lwz r4, HSTATE_PMC + 4(r13)
  114 + lwz r5, HSTATE_PMC + 8(r13)
  115 + lwz r6, HSTATE_PMC + 12(r13)
  116 + lwz r8, HSTATE_PMC + 16(r13)
  117 + lwz r9, HSTATE_PMC + 20(r13)
  118 +BEGIN_FTR_SECTION
  119 + lwz r10, HSTATE_PMC + 24(r13)
  120 + lwz r11, HSTATE_PMC + 28(r13)
  121 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  122 + mtspr SPRN_PMC1, r3
  123 + mtspr SPRN_PMC2, r4
  124 + mtspr SPRN_PMC3, r5
  125 + mtspr SPRN_PMC4, r6
  126 + mtspr SPRN_PMC5, r8
  127 + mtspr SPRN_PMC6, r9
  128 +BEGIN_FTR_SECTION
  129 + mtspr SPRN_PMC7, r10
  130 + mtspr SPRN_PMC8, r11
  131 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
  132 + ld r3, HSTATE_MMCR(r13)
  133 + ld r4, HSTATE_MMCR + 8(r13)
  134 + ld r5, HSTATE_MMCR + 16(r13)
  135 + mtspr SPRN_MMCR1, r4
  136 + mtspr SPRN_MMCRA, r5
  137 + mtspr SPRN_MMCR0, r3
  138 + isync
  139 +23:
  140 +
  141 + /*
  142 + * For external and machine check interrupts, we need
  143 + * to call the Linux handler to process the interrupt.
  144 + * We do that by jumping to absolute address 0x500 for
  145 + * external interrupts, or the machine_check_fwnmi label
  146 + * for machine checks (since firmware might have patched
  147 + * the vector area at 0x200). The [h]rfid at the end of the
  148 + * handler will return to the book3s_hv_interrupts.S code.
  149 + * For other interrupts we do the rfid to get back
  150 + * to the book3s_hv_interrupts.S code here.
  151 + */
  152 + ld r8, 112+PPC_LR_STKOFF(r1)
  153 + addi r1, r1, 112
  154 + ld r7, HSTATE_HOST_MSR(r13)
  155 +
  156 + cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
  157 + cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
  158 +BEGIN_FTR_SECTION
  159 + beq 11f
  160 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  161 +
  162 + /* RFI into the highmem handler, or branch to interrupt handler */
  163 + mfmsr r6
  164 + li r0, MSR_RI
  165 + andc r6, r6, r0
  166 + mtmsrd r6, 1 /* Clear RI in MSR */
  167 + mtsrr0 r8
  168 + mtsrr1 r7
  169 + beqa 0x500 /* external interrupt (PPC970) */
  170 + beq cr1, 13f /* machine check */
  171 + RFI
  172 +
  173 + /* On POWER7, we have external interrupts set to use HSRR0/1 */
  174 +11: mtspr SPRN_HSRR0, r8
  175 + mtspr SPRN_HSRR1, r7
  176 + ba 0x500
  177 +
  178 +13: b machine_check_fwnmi
  179 +
  180 +
86 181 /*
87 182 * We come in here when wakened from nap mode on a secondary hw thread.
88 183 * Relocation is off and most register values are lost.
... ... @@ -137,7 +232,7 @@
137 232 cmpdi r4,0
138 233 /* if we have no vcpu to run, go back to sleep */
139 234 beq kvm_no_guest
140   - b kvmppc_hv_entry
  235 + b 30f
141 236  
142 237 27: /* XXX should handle hypervisor maintenance interrupts etc. here */
143 238 b kvm_no_guest
... ... @@ -147,6 +242,57 @@
147 242 stw r8,HSTATE_SAVED_XIRR(r13)
148 243 b kvm_no_guest
149 244  
  245 +30: bl kvmppc_hv_entry
  246 +
  247 + /* Back from the guest, go back to nap */
  248 + /* Clear our vcpu pointer so we don't come back in early */
  249 + li r0, 0
  250 + std r0, HSTATE_KVM_VCPU(r13)
  251 + lwsync
  252 + /* Clear any pending IPI - we're an offline thread */
  253 + ld r5, HSTATE_XICS_PHYS(r13)
  254 + li r7, XICS_XIRR
  255 + lwzcix r3, r5, r7 /* ack any pending interrupt */
  256 + rlwinm. r0, r3, 0, 0xffffff /* any pending? */
  257 + beq 37f
  258 + sync
  259 + li r0, 0xff
  260 + li r6, XICS_MFRR
  261 + stbcix r0, r5, r6 /* clear the IPI */
  262 + stwcix r3, r5, r7 /* EOI it */
  263 +37: sync
  264 +
  265 + /* increment the nap count and then go to nap mode */
  266 + ld r4, HSTATE_KVM_VCORE(r13)
  267 + addi r4, r4, VCORE_NAP_COUNT
  268 + lwsync /* make previous updates visible */
  269 +51: lwarx r3, 0, r4
  270 + addi r3, r3, 1
  271 + stwcx. r3, 0, r4
  272 + bne 51b
  273 +
  274 +kvm_no_guest:
  275 + li r0, KVM_HWTHREAD_IN_NAP
  276 + stb r0, HSTATE_HWTHREAD_STATE(r13)
  277 + li r3, LPCR_PECE0
  278 + mfspr r4, SPRN_LPCR
  279 + rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
  280 + mtspr SPRN_LPCR, r4
  281 + isync
  282 + std r0, HSTATE_SCRATCH0(r13)
  283 + ptesync
  284 + ld r0, HSTATE_SCRATCH0(r13)
  285 +1: cmpd r0, r0
  286 + bne 1b
  287 + nap
  288 + b .
  289 +
  290 +/******************************************************************************
  291 + * *
  292 + * Entry code *
  293 + * *
  294 + *****************************************************************************/
  295 +
150 296 .global kvmppc_hv_entry
151 297 kvmppc_hv_entry:
152 298  
... ... @@ -159,7 +305,8 @@
159 305 * all other volatile GPRS = free
160 306 */
161 307 mflr r0
162   - std r0, HSTATE_VMHANDLER(r13)
  308 + std r0, PPC_LR_STKOFF(r1)
  309 + stdu r1, -112(r1)
163 310  
164 311 /* Set partition DABR */
165 312 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
166 313  
167 314  
... ... @@ -1198,104 +1345,31 @@
1198 1345 stw r11, VCPU_PMC + 28(r9)
1199 1346 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1200 1347 22:
  1348 + ld r0, 112+PPC_LR_STKOFF(r1)
  1349 + addi r1, r1, 112
  1350 + mtlr r0
  1351 + blr
  1352 +secondary_too_late:
  1353 + ld r5,HSTATE_KVM_VCORE(r13)
  1354 + HMT_LOW
  1355 +13: lbz r3,VCORE_IN_GUEST(r5)
  1356 + cmpwi r3,0
  1357 + bne 13b
  1358 + HMT_MEDIUM
  1359 + li r0, KVM_GUEST_MODE_NONE
  1360 + stb r0, HSTATE_IN_GUEST(r13)
  1361 + ld r11,PACA_SLBSHADOWPTR(r13)
1201 1362  
1202   - /* Secondary threads go off to take a nap on POWER7 */
1203   -BEGIN_FTR_SECTION
1204   - lwz r0,VCPU_PTID(r9)
1205   - cmpwi r0,0
1206   - bne secondary_nap
1207   -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
  1363 + .rept SLB_NUM_BOLTED
  1364 + ld r5,SLBSHADOW_SAVEAREA(r11)
  1365 + ld r6,SLBSHADOW_SAVEAREA+8(r11)
  1366 + andis. r7,r5,SLB_ESID_V@h
  1367 + beq 1f
  1368 + slbmte r6,r5
  1369 +1: addi r11,r11,16
  1370 + .endr
  1371 + b 22b
1208 1372  
1209   - /* Restore host DABR and DABRX */
1210   - ld r5,HSTATE_DABR(r13)
1211   - li r6,7
1212   - mtspr SPRN_DABR,r5
1213   - mtspr SPRN_DABRX,r6
1214   -
1215   - /* Restore SPRG3 */
1216   - ld r3,PACA_SPRG3(r13)
1217   - mtspr SPRN_SPRG3,r3
1218   -
1219   - /*
1220   - * Reload DEC. HDEC interrupts were disabled when
1221   - * we reloaded the host's LPCR value.
1222   - */
1223   - ld r3, HSTATE_DECEXP(r13)
1224   - mftb r4
1225   - subf r4, r4, r3
1226   - mtspr SPRN_DEC, r4
1227   -
1228   - /* Reload the host's PMU registers */
1229   - ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
1230   - lbz r4, LPPACA_PMCINUSE(r3)
1231   - cmpwi r4, 0
1232   - beq 23f /* skip if not */
1233   - lwz r3, HSTATE_PMC(r13)
1234   - lwz r4, HSTATE_PMC + 4(r13)
1235   - lwz r5, HSTATE_PMC + 8(r13)
1236   - lwz r6, HSTATE_PMC + 12(r13)
1237   - lwz r8, HSTATE_PMC + 16(r13)
1238   - lwz r9, HSTATE_PMC + 20(r13)
1239   -BEGIN_FTR_SECTION
1240   - lwz r10, HSTATE_PMC + 24(r13)
1241   - lwz r11, HSTATE_PMC + 28(r13)
1242   -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1243   - mtspr SPRN_PMC1, r3
1244   - mtspr SPRN_PMC2, r4
1245   - mtspr SPRN_PMC3, r5
1246   - mtspr SPRN_PMC4, r6
1247   - mtspr SPRN_PMC5, r8
1248   - mtspr SPRN_PMC6, r9
1249   -BEGIN_FTR_SECTION
1250   - mtspr SPRN_PMC7, r10
1251   - mtspr SPRN_PMC8, r11
1252   -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1253   - ld r3, HSTATE_MMCR(r13)
1254   - ld r4, HSTATE_MMCR + 8(r13)
1255   - ld r5, HSTATE_MMCR + 16(r13)
1256   - mtspr SPRN_MMCR1, r4
1257   - mtspr SPRN_MMCRA, r5
1258   - mtspr SPRN_MMCR0, r3
1259   - isync
1260   -23:
1261   - /*
1262   - * For external and machine check interrupts, we need
1263   - * to call the Linux handler to process the interrupt.
1264   - * We do that by jumping to absolute address 0x500 for
1265   - * external interrupts, or the machine_check_fwnmi label
1266   - * for machine checks (since firmware might have patched
1267   - * the vector area at 0x200). The [h]rfid at the end of the
1268   - * handler will return to the book3s_hv_interrupts.S code.
1269   - * For other interrupts we do the rfid to get back
1270   - * to the book3s_hv_interrupts.S code here.
1271   - */
1272   - ld r8, HSTATE_VMHANDLER(r13)
1273   - ld r7, HSTATE_HOST_MSR(r13)
1274   -
1275   - cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1276   - cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1277   -BEGIN_FTR_SECTION
1278   - beq 11f
1279   -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1280   -
1281   - /* RFI into the highmem handler, or branch to interrupt handler */
1282   - mfmsr r6
1283   - li r0, MSR_RI
1284   - andc r6, r6, r0
1285   - mtmsrd r6, 1 /* Clear RI in MSR */
1286   - mtsrr0 r8
1287   - mtsrr1 r7
1288   - beqa 0x500 /* external interrupt (PPC970) */
1289   - beq cr1, 13f /* machine check */
1290   - RFI
1291   -
1292   - /* On POWER7, we have external interrupts set to use HSRR0/1 */
1293   -11: mtspr SPRN_HSRR0, r8
1294   - mtspr SPRN_HSRR1, r7
1295   - ba 0x500
1296   -
1297   -13: b machine_check_fwnmi
1298   -
1299 1373 /*
1300 1374 * Check whether an HDSI is an HPTE not found fault or something else.
1301 1375 * If it is an HPTE not found fault that is due to the guest accessing
... ... @@ -1740,68 +1814,6 @@
1740 1814 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
1741 1815 rotldi r11, r11, 63
1742 1816 b fast_interrupt_c_return
1743   -
1744   -secondary_too_late:
1745   - ld r5,HSTATE_KVM_VCORE(r13)
1746   - HMT_LOW
1747   -13: lbz r3,VCORE_IN_GUEST(r5)
1748   - cmpwi r3,0
1749   - bne 13b
1750   - HMT_MEDIUM
1751   - ld r11,PACA_SLBSHADOWPTR(r13)
1752   -
1753   - .rept SLB_NUM_BOLTED
1754   - ld r5,SLBSHADOW_SAVEAREA(r11)
1755   - ld r6,SLBSHADOW_SAVEAREA+8(r11)
1756   - andis. r7,r5,SLB_ESID_V@h
1757   - beq 1f
1758   - slbmte r6,r5
1759   -1: addi r11,r11,16
1760   - .endr
1761   -
1762   -secondary_nap:
1763   - /* Clear our vcpu pointer so we don't come back in early */
1764   - li r0, 0
1765   - std r0, HSTATE_KVM_VCPU(r13)
1766   - lwsync
1767   - /* Clear any pending IPI - assume we're a secondary thread */
1768   - ld r5, HSTATE_XICS_PHYS(r13)
1769   - li r7, XICS_XIRR
1770   - lwzcix r3, r5, r7 /* ack any pending interrupt */
1771   - rlwinm. r0, r3, 0, 0xffffff /* any pending? */
1772   - beq 37f
1773   - sync
1774   - li r0, 0xff
1775   - li r6, XICS_MFRR
1776   - stbcix r0, r5, r6 /* clear the IPI */
1777   - stwcix r3, r5, r7 /* EOI it */
1778   -37: sync
1779   -
1780   - /* increment the nap count and then go to nap mode */
1781   - ld r4, HSTATE_KVM_VCORE(r13)
1782   - addi r4, r4, VCORE_NAP_COUNT
1783   - lwsync /* make previous updates visible */
1784   -51: lwarx r3, 0, r4
1785   - addi r3, r3, 1
1786   - stwcx. r3, 0, r4
1787   - bne 51b
1788   -
1789   -kvm_no_guest:
1790   - li r0, KVM_HWTHREAD_IN_NAP
1791   - stb r0, HSTATE_HWTHREAD_STATE(r13)
1792   -
1793   - li r3, LPCR_PECE0
1794   - mfspr r4, SPRN_LPCR
1795   - rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
1796   - mtspr SPRN_LPCR, r4
1797   - isync
1798   - std r0, HSTATE_SCRATCH0(r13)
1799   - ptesync
1800   - ld r0, HSTATE_SCRATCH0(r13)
1801   -1: cmpd r0, r0
1802   - bne 1b
1803   - nap
1804   - b .
1805 1817  
1806 1818 /*
1807 1819 * Save away FP, VMX and VSX registers.