Commit 582b910edafd283dfab78f41f437a92a65ee5103

Authored by Preeti U Murthy
Committed by Benjamin Herrenschmidt
1 parent fd17dc7b9a

ppc/kvm: Clear the runlatch bit of a vcpu before napping

When the guest cedes the vcpu, or the vcpu has no guest to
run, it naps. Clear the runlatch bit of the vcpu before
napping to indicate an idle cpu.

Signed-off-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

Showing 1 changed file with 11 additions and 1 deletion
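
For context, the nap-entry change boils down to clearing the runlatch bit in the CTRL special-purpose register so the hardware thread reports itself as idle while napping; kvm_start_guest already sets the bit again on wakeup. Below is an illustrative C sketch of the same operation. It is not part of the patch and simply assumes the usual powerpc mfspr()/mtspr() accessors and the SPRN_CTRLF/SPRN_CTRLT register numbers from asm/reg.h.

    /* Illustrative sketch only: clear the runlatch (low bit of CTRL) before nap,
     * mirroring the mfspr/clrrdi/mtspr sequence added at kvm_do_nap below.
     */
    static inline void clear_runlatch_before_nap(void)
    {
            unsigned long ctrl = mfspr(SPRN_CTRLF);  /* read current CTRL value */

            ctrl &= ~1UL;                            /* clear the runlatch bit */
            mtspr(SPRN_CTRLT, ctrl);                 /* write back; thread now reports idle */
    }

The assembly hunk in the diff performs the equivalent step with clrrdi r2, r2, 1, which clears the least-significant bit of the value read from SPRN_CTRLF before writing it to SPRN_CTRLT.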

arch/powerpc/kvm/book3s_hv_rmhandlers.S
1 /* 1 /*
2 * This program is free software; you can redistribute it and/or modify 2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as 3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation. 4 * published by the Free Software Foundation.
5 * 5 *
6 * This program is distributed in the hope that it will be useful, 6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of 7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details. 9 * GNU General Public License for more details.
10 * 10 *
11 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> 11 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
12 * 12 *
13 * Derived from book3s_rmhandlers.S and other files, which are: 13 * Derived from book3s_rmhandlers.S and other files, which are:
14 * 14 *
15 * Copyright SUSE Linux Products GmbH 2009 15 * Copyright SUSE Linux Products GmbH 2009
16 * 16 *
17 * Authors: Alexander Graf <agraf@suse.de> 17 * Authors: Alexander Graf <agraf@suse.de>
18 */ 18 */
19 19
20 #include <asm/ppc_asm.h> 20 #include <asm/ppc_asm.h>
21 #include <asm/kvm_asm.h> 21 #include <asm/kvm_asm.h>
22 #include <asm/reg.h> 22 #include <asm/reg.h>
23 #include <asm/mmu.h> 23 #include <asm/mmu.h>
24 #include <asm/page.h> 24 #include <asm/page.h>
25 #include <asm/ptrace.h> 25 #include <asm/ptrace.h>
26 #include <asm/hvcall.h> 26 #include <asm/hvcall.h>
27 #include <asm/asm-offsets.h> 27 #include <asm/asm-offsets.h>
28 #include <asm/exception-64s.h> 28 #include <asm/exception-64s.h>
29 #include <asm/kvm_book3s_asm.h> 29 #include <asm/kvm_book3s_asm.h>
30 #include <asm/mmu-hash64.h> 30 #include <asm/mmu-hash64.h>
31 #include <asm/tm.h> 31 #include <asm/tm.h>
32 32
33 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM) 33 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
34 34
35 #ifdef __LITTLE_ENDIAN__ 35 #ifdef __LITTLE_ENDIAN__
36 #error Need to fix lppaca and SLB shadow accesses in little endian mode 36 #error Need to fix lppaca and SLB shadow accesses in little endian mode
37 #endif 37 #endif
38 38
39 /* Values in HSTATE_NAPPING(r13) */ 39 /* Values in HSTATE_NAPPING(r13) */
40 #define NAPPING_CEDE 1 40 #define NAPPING_CEDE 1
41 #define NAPPING_NOVCPU 2 41 #define NAPPING_NOVCPU 2
42 42
43 /* 43 /*
44 * Call kvmppc_hv_entry in real mode. 44 * Call kvmppc_hv_entry in real mode.
45 * Must be called with interrupts hard-disabled. 45 * Must be called with interrupts hard-disabled.
46 * 46 *
47 * Input Registers: 47 * Input Registers:
48 * 48 *
49 * LR = return address to continue at after eventually re-enabling MMU 49 * LR = return address to continue at after eventually re-enabling MMU
50 */ 50 */
51 _GLOBAL(kvmppc_hv_entry_trampoline) 51 _GLOBAL(kvmppc_hv_entry_trampoline)
52 mflr r0 52 mflr r0
53 std r0, PPC_LR_STKOFF(r1) 53 std r0, PPC_LR_STKOFF(r1)
54 stdu r1, -112(r1) 54 stdu r1, -112(r1)
55 mfmsr r10 55 mfmsr r10
56 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry) 56 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
57 li r0,MSR_RI 57 li r0,MSR_RI
58 andc r0,r10,r0 58 andc r0,r10,r0
59 li r6,MSR_IR | MSR_DR 59 li r6,MSR_IR | MSR_DR
60 andc r6,r10,r6 60 andc r6,r10,r6
61 mtmsrd r0,1 /* clear RI in MSR */ 61 mtmsrd r0,1 /* clear RI in MSR */
62 mtsrr0 r5 62 mtsrr0 r5
63 mtsrr1 r6 63 mtsrr1 r6
64 RFI 64 RFI
65 65
66 kvmppc_call_hv_entry: 66 kvmppc_call_hv_entry:
67 ld r4, HSTATE_KVM_VCPU(r13) 67 ld r4, HSTATE_KVM_VCPU(r13)
68 bl kvmppc_hv_entry 68 bl kvmppc_hv_entry
69 69
70 /* Back from guest - restore host state and return to caller */ 70 /* Back from guest - restore host state and return to caller */
71 71
72 BEGIN_FTR_SECTION 72 BEGIN_FTR_SECTION
73 /* Restore host DABR and DABRX */ 73 /* Restore host DABR and DABRX */
74 ld r5,HSTATE_DABR(r13) 74 ld r5,HSTATE_DABR(r13)
75 li r6,7 75 li r6,7
76 mtspr SPRN_DABR,r5 76 mtspr SPRN_DABR,r5
77 mtspr SPRN_DABRX,r6 77 mtspr SPRN_DABRX,r6
78 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 78 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
79 79
80 /* Restore SPRG3 */ 80 /* Restore SPRG3 */
81 ld r3,PACA_SPRG_VDSO(r13) 81 ld r3,PACA_SPRG_VDSO(r13)
82 mtspr SPRN_SPRG_VDSO_WRITE,r3 82 mtspr SPRN_SPRG_VDSO_WRITE,r3
83 83
84 /* Reload the host's PMU registers */ 84 /* Reload the host's PMU registers */
85 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ 85 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
86 lbz r4, LPPACA_PMCINUSE(r3) 86 lbz r4, LPPACA_PMCINUSE(r3)
87 cmpwi r4, 0 87 cmpwi r4, 0
88 beq 23f /* skip if not */ 88 beq 23f /* skip if not */
89 lwz r3, HSTATE_PMC(r13) 89 lwz r3, HSTATE_PMC(r13)
90 lwz r4, HSTATE_PMC + 4(r13) 90 lwz r4, HSTATE_PMC + 4(r13)
91 lwz r5, HSTATE_PMC + 8(r13) 91 lwz r5, HSTATE_PMC + 8(r13)
92 lwz r6, HSTATE_PMC + 12(r13) 92 lwz r6, HSTATE_PMC + 12(r13)
93 lwz r8, HSTATE_PMC + 16(r13) 93 lwz r8, HSTATE_PMC + 16(r13)
94 lwz r9, HSTATE_PMC + 20(r13) 94 lwz r9, HSTATE_PMC + 20(r13)
95 BEGIN_FTR_SECTION 95 BEGIN_FTR_SECTION
96 lwz r10, HSTATE_PMC + 24(r13) 96 lwz r10, HSTATE_PMC + 24(r13)
97 lwz r11, HSTATE_PMC + 28(r13) 97 lwz r11, HSTATE_PMC + 28(r13)
98 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 98 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
99 mtspr SPRN_PMC1, r3 99 mtspr SPRN_PMC1, r3
100 mtspr SPRN_PMC2, r4 100 mtspr SPRN_PMC2, r4
101 mtspr SPRN_PMC3, r5 101 mtspr SPRN_PMC3, r5
102 mtspr SPRN_PMC4, r6 102 mtspr SPRN_PMC4, r6
103 mtspr SPRN_PMC5, r8 103 mtspr SPRN_PMC5, r8
104 mtspr SPRN_PMC6, r9 104 mtspr SPRN_PMC6, r9
105 BEGIN_FTR_SECTION 105 BEGIN_FTR_SECTION
106 mtspr SPRN_PMC7, r10 106 mtspr SPRN_PMC7, r10
107 mtspr SPRN_PMC8, r11 107 mtspr SPRN_PMC8, r11
108 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 108 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
109 ld r3, HSTATE_MMCR(r13) 109 ld r3, HSTATE_MMCR(r13)
110 ld r4, HSTATE_MMCR + 8(r13) 110 ld r4, HSTATE_MMCR + 8(r13)
111 ld r5, HSTATE_MMCR + 16(r13) 111 ld r5, HSTATE_MMCR + 16(r13)
112 ld r6, HSTATE_MMCR + 24(r13) 112 ld r6, HSTATE_MMCR + 24(r13)
113 ld r7, HSTATE_MMCR + 32(r13) 113 ld r7, HSTATE_MMCR + 32(r13)
114 mtspr SPRN_MMCR1, r4 114 mtspr SPRN_MMCR1, r4
115 mtspr SPRN_MMCRA, r5 115 mtspr SPRN_MMCRA, r5
116 mtspr SPRN_SIAR, r6 116 mtspr SPRN_SIAR, r6
117 mtspr SPRN_SDAR, r7 117 mtspr SPRN_SDAR, r7
118 BEGIN_FTR_SECTION 118 BEGIN_FTR_SECTION
119 ld r8, HSTATE_MMCR + 40(r13) 119 ld r8, HSTATE_MMCR + 40(r13)
120 ld r9, HSTATE_MMCR + 48(r13) 120 ld r9, HSTATE_MMCR + 48(r13)
121 mtspr SPRN_MMCR2, r8 121 mtspr SPRN_MMCR2, r8
122 mtspr SPRN_SIER, r9 122 mtspr SPRN_SIER, r9
123 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 123 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
124 mtspr SPRN_MMCR0, r3 124 mtspr SPRN_MMCR0, r3
125 isync 125 isync
126 23: 126 23:
127 127
128 /* 128 /*
129 * Reload DEC. HDEC interrupts were disabled when 129 * Reload DEC. HDEC interrupts were disabled when
130 * we reloaded the host's LPCR value. 130 * we reloaded the host's LPCR value.
131 */ 131 */
132 ld r3, HSTATE_DECEXP(r13) 132 ld r3, HSTATE_DECEXP(r13)
133 mftb r4 133 mftb r4
134 subf r4, r4, r3 134 subf r4, r4, r3
135 mtspr SPRN_DEC, r4 135 mtspr SPRN_DEC, r4
136 136
137 /* 137 /*
138 * For external and machine check interrupts, we need 138 * For external and machine check interrupts, we need
139 * to call the Linux handler to process the interrupt. 139 * to call the Linux handler to process the interrupt.
140 * We do that by jumping to absolute address 0x500 for 140 * We do that by jumping to absolute address 0x500 for
141 * external interrupts, or the machine_check_fwnmi label 141 * external interrupts, or the machine_check_fwnmi label
142 * for machine checks (since firmware might have patched 142 * for machine checks (since firmware might have patched
143 * the vector area at 0x200). The [h]rfid at the end of the 143 * the vector area at 0x200). The [h]rfid at the end of the
144 * handler will return to the book3s_hv_interrupts.S code. 144 * handler will return to the book3s_hv_interrupts.S code.
145 * For other interrupts we do the rfid to get back 145 * For other interrupts we do the rfid to get back
146 * to the book3s_hv_interrupts.S code here. 146 * to the book3s_hv_interrupts.S code here.
147 */ 147 */
148 ld r8, 112+PPC_LR_STKOFF(r1) 148 ld r8, 112+PPC_LR_STKOFF(r1)
149 addi r1, r1, 112 149 addi r1, r1, 112
150 ld r7, HSTATE_HOST_MSR(r13) 150 ld r7, HSTATE_HOST_MSR(r13)
151 151
152 cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK 152 cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
153 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL 153 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
154 BEGIN_FTR_SECTION 154 BEGIN_FTR_SECTION
155 beq 11f 155 beq 11f
156 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 156 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
157 157
158 /* RFI into the highmem handler, or branch to interrupt handler */ 158 /* RFI into the highmem handler, or branch to interrupt handler */
159 mfmsr r6 159 mfmsr r6
160 li r0, MSR_RI 160 li r0, MSR_RI
161 andc r6, r6, r0 161 andc r6, r6, r0
162 mtmsrd r6, 1 /* Clear RI in MSR */ 162 mtmsrd r6, 1 /* Clear RI in MSR */
163 mtsrr0 r8 163 mtsrr0 r8
164 mtsrr1 r7 164 mtsrr1 r7
165 beqa 0x500 /* external interrupt (PPC970) */ 165 beqa 0x500 /* external interrupt (PPC970) */
166 beq cr1, 13f /* machine check */ 166 beq cr1, 13f /* machine check */
167 RFI 167 RFI
168 168
169 /* On POWER7, we have external interrupts set to use HSRR0/1 */ 169 /* On POWER7, we have external interrupts set to use HSRR0/1 */
170 11: mtspr SPRN_HSRR0, r8 170 11: mtspr SPRN_HSRR0, r8
171 mtspr SPRN_HSRR1, r7 171 mtspr SPRN_HSRR1, r7
172 ba 0x500 172 ba 0x500
173 173
174 13: b machine_check_fwnmi 174 13: b machine_check_fwnmi
175 175
176 kvmppc_primary_no_guest: 176 kvmppc_primary_no_guest:
177 /* We handle this much like a ceded vcpu */ 177 /* We handle this much like a ceded vcpu */
178 /* set our bit in napping_threads */ 178 /* set our bit in napping_threads */
179 ld r5, HSTATE_KVM_VCORE(r13) 179 ld r5, HSTATE_KVM_VCORE(r13)
180 lbz r7, HSTATE_PTID(r13) 180 lbz r7, HSTATE_PTID(r13)
181 li r0, 1 181 li r0, 1
182 sld r0, r0, r7 182 sld r0, r0, r7
183 addi r6, r5, VCORE_NAPPING_THREADS 183 addi r6, r5, VCORE_NAPPING_THREADS
184 1: lwarx r3, 0, r6 184 1: lwarx r3, 0, r6
185 or r3, r3, r0 185 or r3, r3, r0
186 stwcx. r3, 0, r6 186 stwcx. r3, 0, r6
187 bne 1b 187 bne 1b
188 /* order napping_threads update vs testing entry_exit_count */ 188 /* order napping_threads update vs testing entry_exit_count */
189 isync 189 isync
190 li r12, 0 190 li r12, 0
191 lwz r7, VCORE_ENTRY_EXIT(r5) 191 lwz r7, VCORE_ENTRY_EXIT(r5)
192 cmpwi r7, 0x100 192 cmpwi r7, 0x100
193 bge kvm_novcpu_exit /* another thread already exiting */ 193 bge kvm_novcpu_exit /* another thread already exiting */
194 li r3, NAPPING_NOVCPU 194 li r3, NAPPING_NOVCPU
195 stb r3, HSTATE_NAPPING(r13) 195 stb r3, HSTATE_NAPPING(r13)
196 li r3, 1 196 li r3, 1
197 stb r3, HSTATE_HWTHREAD_REQ(r13) 197 stb r3, HSTATE_HWTHREAD_REQ(r13)
198 198
199 b kvm_do_nap 199 b kvm_do_nap
200 200
201 kvm_novcpu_wakeup: 201 kvm_novcpu_wakeup:
202 ld r1, HSTATE_HOST_R1(r13) 202 ld r1, HSTATE_HOST_R1(r13)
203 ld r5, HSTATE_KVM_VCORE(r13) 203 ld r5, HSTATE_KVM_VCORE(r13)
204 li r0, 0 204 li r0, 0
205 stb r0, HSTATE_NAPPING(r13) 205 stb r0, HSTATE_NAPPING(r13)
206 stb r0, HSTATE_HWTHREAD_REQ(r13) 206 stb r0, HSTATE_HWTHREAD_REQ(r13)
207 207
208 /* check the wake reason */ 208 /* check the wake reason */
209 bl kvmppc_check_wake_reason 209 bl kvmppc_check_wake_reason
210 210
211 /* see if any other thread is already exiting */ 211 /* see if any other thread is already exiting */
212 lwz r0, VCORE_ENTRY_EXIT(r5) 212 lwz r0, VCORE_ENTRY_EXIT(r5)
213 cmpwi r0, 0x100 213 cmpwi r0, 0x100
214 bge kvm_novcpu_exit 214 bge kvm_novcpu_exit
215 215
216 /* clear our bit in napping_threads */ 216 /* clear our bit in napping_threads */
217 lbz r7, HSTATE_PTID(r13) 217 lbz r7, HSTATE_PTID(r13)
218 li r0, 1 218 li r0, 1
219 sld r0, r0, r7 219 sld r0, r0, r7
220 addi r6, r5, VCORE_NAPPING_THREADS 220 addi r6, r5, VCORE_NAPPING_THREADS
221 4: lwarx r7, 0, r6 221 4: lwarx r7, 0, r6
222 andc r7, r7, r0 222 andc r7, r7, r0
223 stwcx. r7, 0, r6 223 stwcx. r7, 0, r6
224 bne 4b 224 bne 4b
225 225
226 /* See if the wake reason means we need to exit */ 226 /* See if the wake reason means we need to exit */
227 cmpdi r3, 0 227 cmpdi r3, 0
228 bge kvm_novcpu_exit 228 bge kvm_novcpu_exit
229 229
230 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ 230 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
231 ld r4, HSTATE_KVM_VCPU(r13) 231 ld r4, HSTATE_KVM_VCPU(r13)
232 cmpdi r4, 0 232 cmpdi r4, 0
233 bne kvmppc_got_guest 233 bne kvmppc_got_guest
234 234
235 kvm_novcpu_exit: 235 kvm_novcpu_exit:
236 b hdec_soon 236 b hdec_soon
237 237
238 /* 238 /*
239 * We come in here when wakened from nap mode. 239 * We come in here when wakened from nap mode.
240 * Relocation is off and most register values are lost. 240 * Relocation is off and most register values are lost.
241 * r13 points to the PACA. 241 * r13 points to the PACA.
242 */ 242 */
243 .globl kvm_start_guest 243 .globl kvm_start_guest
244 kvm_start_guest: 244 kvm_start_guest:
245 245
246 /* Set runlatch bit the minute you wake up from nap */ 246 /* Set runlatch bit the minute you wake up from nap */
247 mfspr r1, SPRN_CTRLF 247 mfspr r1, SPRN_CTRLF
248 ori r1, r1, 1 248 ori r1, r1, 1
249 mtspr SPRN_CTRLT, r1 249 mtspr SPRN_CTRLT, r1
250 250
251 ld r2,PACATOC(r13) 251 ld r2,PACATOC(r13)
252 252
253 li r0,KVM_HWTHREAD_IN_KVM 253 li r0,KVM_HWTHREAD_IN_KVM
254 stb r0,HSTATE_HWTHREAD_STATE(r13) 254 stb r0,HSTATE_HWTHREAD_STATE(r13)
255 255
256 /* NV GPR values from power7_idle() will no longer be valid */ 256 /* NV GPR values from power7_idle() will no longer be valid */
257 li r0,1 257 li r0,1
258 stb r0,PACA_NAPSTATELOST(r13) 258 stb r0,PACA_NAPSTATELOST(r13)
259 259
260 /* were we napping due to cede? */ 260 /* were we napping due to cede? */
261 lbz r0,HSTATE_NAPPING(r13) 261 lbz r0,HSTATE_NAPPING(r13)
262 cmpwi r0,NAPPING_CEDE 262 cmpwi r0,NAPPING_CEDE
263 beq kvm_end_cede 263 beq kvm_end_cede
264 cmpwi r0,NAPPING_NOVCPU 264 cmpwi r0,NAPPING_NOVCPU
265 beq kvm_novcpu_wakeup 265 beq kvm_novcpu_wakeup
266 266
267 ld r1,PACAEMERGSP(r13) 267 ld r1,PACAEMERGSP(r13)
268 subi r1,r1,STACK_FRAME_OVERHEAD 268 subi r1,r1,STACK_FRAME_OVERHEAD
269 269
270 /* 270 /*
271 * We weren't napping due to cede, so this must be a secondary 271 * We weren't napping due to cede, so this must be a secondary
272 * thread being woken up to run a guest, or being woken up due 272 * thread being woken up to run a guest, or being woken up due
273 * to a stray IPI. (Or due to some machine check or hypervisor 273 * to a stray IPI. (Or due to some machine check or hypervisor
274 * maintenance interrupt while the core is in KVM.) 274 * maintenance interrupt while the core is in KVM.)
275 */ 275 */
276 276
277 /* Check the wake reason in SRR1 to see why we got here */ 277 /* Check the wake reason in SRR1 to see why we got here */
278 bl kvmppc_check_wake_reason 278 bl kvmppc_check_wake_reason
279 cmpdi r3, 0 279 cmpdi r3, 0
280 bge kvm_no_guest 280 bge kvm_no_guest
281 281
282 /* get vcpu pointer, NULL if we have no vcpu to run */ 282 /* get vcpu pointer, NULL if we have no vcpu to run */
283 ld r4,HSTATE_KVM_VCPU(r13) 283 ld r4,HSTATE_KVM_VCPU(r13)
284 cmpdi r4,0 284 cmpdi r4,0
285 /* if we have no vcpu to run, go back to sleep */ 285 /* if we have no vcpu to run, go back to sleep */
286 beq kvm_no_guest 286 beq kvm_no_guest
287 287
288 /* Set HSTATE_DSCR(r13) to something sensible */ 288 /* Set HSTATE_DSCR(r13) to something sensible */
289 LOAD_REG_ADDR(r6, dscr_default) 289 LOAD_REG_ADDR(r6, dscr_default)
290 ld r6, 0(r6) 290 ld r6, 0(r6)
291 std r6, HSTATE_DSCR(r13) 291 std r6, HSTATE_DSCR(r13)
292 292
293 bl kvmppc_hv_entry 293 bl kvmppc_hv_entry
294 294
295 /* Back from the guest, go back to nap */ 295 /* Back from the guest, go back to nap */
296 /* Clear our vcpu pointer so we don't come back in early */ 296 /* Clear our vcpu pointer so we don't come back in early */
297 li r0, 0 297 li r0, 0
298 std r0, HSTATE_KVM_VCPU(r13) 298 std r0, HSTATE_KVM_VCPU(r13)
299 /* 299 /*
300 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing 300 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
301 * the nap_count, because once the increment to nap_count is 301 * the nap_count, because once the increment to nap_count is
302 * visible we could be given another vcpu. 302 * visible we could be given another vcpu.
303 */ 303 */
304 lwsync 304 lwsync
305 305
306 /* increment the nap count and then go to nap mode */ 306 /* increment the nap count and then go to nap mode */
307 ld r4, HSTATE_KVM_VCORE(r13) 307 ld r4, HSTATE_KVM_VCORE(r13)
308 addi r4, r4, VCORE_NAP_COUNT 308 addi r4, r4, VCORE_NAP_COUNT
309 51: lwarx r3, 0, r4 309 51: lwarx r3, 0, r4
310 addi r3, r3, 1 310 addi r3, r3, 1
311 stwcx. r3, 0, r4 311 stwcx. r3, 0, r4
312 bne 51b 312 bne 51b
313 313
314 kvm_no_guest: 314 kvm_no_guest:
315 li r0, KVM_HWTHREAD_IN_NAP 315 li r0, KVM_HWTHREAD_IN_NAP
316 stb r0, HSTATE_HWTHREAD_STATE(r13) 316 stb r0, HSTATE_HWTHREAD_STATE(r13)
317 kvm_do_nap: 317 kvm_do_nap:
318 /* Clear the runlatch bit before napping */
319 mfspr r2, SPRN_CTRLF
320 clrrdi r2, r2, 1
321 mtspr SPRN_CTRLT, r2
322
318 li r3, LPCR_PECE0 323 li r3, LPCR_PECE0
319 mfspr r4, SPRN_LPCR 324 mfspr r4, SPRN_LPCR
320 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 325 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
321 mtspr SPRN_LPCR, r4 326 mtspr SPRN_LPCR, r4
322 isync 327 isync
323 std r0, HSTATE_SCRATCH0(r13) 328 std r0, HSTATE_SCRATCH0(r13)
324 ptesync 329 ptesync
325 ld r0, HSTATE_SCRATCH0(r13) 330 ld r0, HSTATE_SCRATCH0(r13)
326 1: cmpd r0, r0 331 1: cmpd r0, r0
327 bne 1b 332 bne 1b
328 nap 333 nap
329 b . 334 b .
330 335
331 /****************************************************************************** 336 /******************************************************************************
332 * * 337 * *
333 * Entry code * 338 * Entry code *
334 * * 339 * *
335 *****************************************************************************/ 340 *****************************************************************************/
336 341
337 .global kvmppc_hv_entry 342 .global kvmppc_hv_entry
338 kvmppc_hv_entry: 343 kvmppc_hv_entry:
339 344
340 /* Required state: 345 /* Required state:
341 * 346 *
342 * R4 = vcpu pointer (or NULL) 347 * R4 = vcpu pointer (or NULL)
343 * MSR = ~IR|DR 348 * MSR = ~IR|DR
344 * R13 = PACA 349 * R13 = PACA
345 * R1 = host R1 350 * R1 = host R1
346 * all other volatile GPRS = free 351 * all other volatile GPRS = free
347 */ 352 */
348 mflr r0 353 mflr r0
349 std r0, PPC_LR_STKOFF(r1) 354 std r0, PPC_LR_STKOFF(r1)
350 stdu r1, -112(r1) 355 stdu r1, -112(r1)
351 356
352 /* Save R1 in the PACA */ 357 /* Save R1 in the PACA */
353 std r1, HSTATE_HOST_R1(r13) 358 std r1, HSTATE_HOST_R1(r13)
354 359
355 li r6, KVM_GUEST_MODE_HOST_HV 360 li r6, KVM_GUEST_MODE_HOST_HV
356 stb r6, HSTATE_IN_GUEST(r13) 361 stb r6, HSTATE_IN_GUEST(r13)
357 362
358 /* Clear out SLB */ 363 /* Clear out SLB */
359 li r6,0 364 li r6,0
360 slbmte r6,r6 365 slbmte r6,r6
361 slbia 366 slbia
362 ptesync 367 ptesync
363 368
364 BEGIN_FTR_SECTION 369 BEGIN_FTR_SECTION
365 b 30f 370 b 30f
366 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 371 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
367 /* 372 /*
368 * POWER7 host -> guest partition switch code. 373 * POWER7 host -> guest partition switch code.
369 * We don't have to lock against concurrent tlbies, 374 * We don't have to lock against concurrent tlbies,
370 * but we do have to coordinate across hardware threads. 375 * but we do have to coordinate across hardware threads.
371 */ 376 */
372 /* Increment entry count iff exit count is zero. */ 377 /* Increment entry count iff exit count is zero. */
373 ld r5,HSTATE_KVM_VCORE(r13) 378 ld r5,HSTATE_KVM_VCORE(r13)
374 addi r9,r5,VCORE_ENTRY_EXIT 379 addi r9,r5,VCORE_ENTRY_EXIT
375 21: lwarx r3,0,r9 380 21: lwarx r3,0,r9
376 cmpwi r3,0x100 /* any threads starting to exit? */ 381 cmpwi r3,0x100 /* any threads starting to exit? */
377 bge secondary_too_late /* if so we're too late to the party */ 382 bge secondary_too_late /* if so we're too late to the party */
378 addi r3,r3,1 383 addi r3,r3,1
379 stwcx. r3,0,r9 384 stwcx. r3,0,r9
380 bne 21b 385 bne 21b
381 386
382 /* Primary thread switches to guest partition. */ 387 /* Primary thread switches to guest partition. */
383 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */ 388 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
384 lbz r6,HSTATE_PTID(r13) 389 lbz r6,HSTATE_PTID(r13)
385 cmpwi r6,0 390 cmpwi r6,0
386 bne 20f 391 bne 20f
387 ld r6,KVM_SDR1(r9) 392 ld r6,KVM_SDR1(r9)
388 lwz r7,KVM_LPID(r9) 393 lwz r7,KVM_LPID(r9)
389 li r0,LPID_RSVD /* switch to reserved LPID */ 394 li r0,LPID_RSVD /* switch to reserved LPID */
390 mtspr SPRN_LPID,r0 395 mtspr SPRN_LPID,r0
391 ptesync 396 ptesync
392 mtspr SPRN_SDR1,r6 /* switch to partition page table */ 397 mtspr SPRN_SDR1,r6 /* switch to partition page table */
393 mtspr SPRN_LPID,r7 398 mtspr SPRN_LPID,r7
394 isync 399 isync
395 400
396 /* See if we need to flush the TLB */ 401 /* See if we need to flush the TLB */
397 lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ 402 lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
398 clrldi r7,r6,64-6 /* extract bit number (6 bits) */ 403 clrldi r7,r6,64-6 /* extract bit number (6 bits) */
399 srdi r6,r6,6 /* doubleword number */ 404 srdi r6,r6,6 /* doubleword number */
400 sldi r6,r6,3 /* address offset */ 405 sldi r6,r6,3 /* address offset */
401 add r6,r6,r9 406 add r6,r6,r9
402 addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */ 407 addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
403 li r0,1 408 li r0,1
404 sld r0,r0,r7 409 sld r0,r0,r7
405 ld r7,0(r6) 410 ld r7,0(r6)
406 and. r7,r7,r0 411 and. r7,r7,r0
407 beq 22f 412 beq 22f
408 23: ldarx r7,0,r6 /* if set, clear the bit */ 413 23: ldarx r7,0,r6 /* if set, clear the bit */
409 andc r7,r7,r0 414 andc r7,r7,r0
410 stdcx. r7,0,r6 415 stdcx. r7,0,r6
411 bne 23b 416 bne 23b
412 /* Flush the TLB of any entries for this LPID */ 417 /* Flush the TLB of any entries for this LPID */
413 /* use arch 2.07S as a proxy for POWER8 */ 418 /* use arch 2.07S as a proxy for POWER8 */
414 BEGIN_FTR_SECTION 419 BEGIN_FTR_SECTION
415 li r6,512 /* POWER8 has 512 sets */ 420 li r6,512 /* POWER8 has 512 sets */
416 FTR_SECTION_ELSE 421 FTR_SECTION_ELSE
417 li r6,128 /* POWER7 has 128 sets */ 422 li r6,128 /* POWER7 has 128 sets */
418 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) 423 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
419 mtctr r6 424 mtctr r6
420 li r7,0x800 /* IS field = 0b10 */ 425 li r7,0x800 /* IS field = 0b10 */
421 ptesync 426 ptesync
422 28: tlbiel r7 427 28: tlbiel r7
423 addi r7,r7,0x1000 428 addi r7,r7,0x1000
424 bdnz 28b 429 bdnz 28b
425 ptesync 430 ptesync
426 431
427 /* Add timebase offset onto timebase */ 432 /* Add timebase offset onto timebase */
428 22: ld r8,VCORE_TB_OFFSET(r5) 433 22: ld r8,VCORE_TB_OFFSET(r5)
429 cmpdi r8,0 434 cmpdi r8,0
430 beq 37f 435 beq 37f
431 mftb r6 /* current host timebase */ 436 mftb r6 /* current host timebase */
432 add r8,r8,r6 437 add r8,r8,r6
433 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 438 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
434 mftb r7 /* check if lower 24 bits overflowed */ 439 mftb r7 /* check if lower 24 bits overflowed */
435 clrldi r6,r6,40 440 clrldi r6,r6,40
436 clrldi r7,r7,40 441 clrldi r7,r7,40
437 cmpld r7,r6 442 cmpld r7,r6
438 bge 37f 443 bge 37f
439 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 444 addis r8,r8,0x100 /* if so, increment upper 40 bits */
440 mtspr SPRN_TBU40,r8 445 mtspr SPRN_TBU40,r8
441 446
442 /* Load guest PCR value to select appropriate compat mode */ 447 /* Load guest PCR value to select appropriate compat mode */
443 37: ld r7, VCORE_PCR(r5) 448 37: ld r7, VCORE_PCR(r5)
444 cmpdi r7, 0 449 cmpdi r7, 0
445 beq 38f 450 beq 38f
446 mtspr SPRN_PCR, r7 451 mtspr SPRN_PCR, r7
447 38: 452 38:
448 453
449 BEGIN_FTR_SECTION 454 BEGIN_FTR_SECTION
450 /* DPDES is shared between threads */ 455 /* DPDES is shared between threads */
451 ld r8, VCORE_DPDES(r5) 456 ld r8, VCORE_DPDES(r5)
452 mtspr SPRN_DPDES, r8 457 mtspr SPRN_DPDES, r8
453 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 458 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
454 459
455 li r0,1 460 li r0,1
456 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ 461 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
457 b 10f 462 b 10f
458 463
459 /* Secondary threads wait for primary to have done partition switch */ 464 /* Secondary threads wait for primary to have done partition switch */
460 20: lbz r0,VCORE_IN_GUEST(r5) 465 20: lbz r0,VCORE_IN_GUEST(r5)
461 cmpwi r0,0 466 cmpwi r0,0
462 beq 20b 467 beq 20b
463 468
464 /* Set LPCR and RMOR. */ 469 /* Set LPCR and RMOR. */
465 10: ld r8,VCORE_LPCR(r5) 470 10: ld r8,VCORE_LPCR(r5)
466 mtspr SPRN_LPCR,r8 471 mtspr SPRN_LPCR,r8
467 ld r8,KVM_RMOR(r9) 472 ld r8,KVM_RMOR(r9)
468 mtspr SPRN_RMOR,r8 473 mtspr SPRN_RMOR,r8
469 isync 474 isync
470 475
471 /* Check if HDEC expires soon */ 476 /* Check if HDEC expires soon */
472 mfspr r3,SPRN_HDEC 477 mfspr r3,SPRN_HDEC
473 cmpwi r3,512 /* 1 microsecond */ 478 cmpwi r3,512 /* 1 microsecond */
474 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER 479 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
475 blt hdec_soon 480 blt hdec_soon
476 b 31f 481 b 31f
477 482
478 /* 483 /*
479 * PPC970 host -> guest partition switch code. 484 * PPC970 host -> guest partition switch code.
480 * We have to lock against concurrent tlbies, 485 * We have to lock against concurrent tlbies,
481 * using native_tlbie_lock to lock against host tlbies 486 * using native_tlbie_lock to lock against host tlbies
482 * and kvm->arch.tlbie_lock to lock against guest tlbies. 487 * and kvm->arch.tlbie_lock to lock against guest tlbies.
483 * We also have to invalidate the TLB since its 488 * We also have to invalidate the TLB since its
484 * entries aren't tagged with the LPID. 489 * entries aren't tagged with the LPID.
485 */ 490 */
486 30: ld r5,HSTATE_KVM_VCORE(r13) 491 30: ld r5,HSTATE_KVM_VCORE(r13)
487 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */ 492 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
488 493
489 /* first take native_tlbie_lock */ 494 /* first take native_tlbie_lock */
490 .section ".toc","aw" 495 .section ".toc","aw"
491 toc_tlbie_lock: 496 toc_tlbie_lock:
492 .tc native_tlbie_lock[TC],native_tlbie_lock 497 .tc native_tlbie_lock[TC],native_tlbie_lock
493 .previous 498 .previous
494 ld r3,toc_tlbie_lock@toc(2) 499 ld r3,toc_tlbie_lock@toc(2)
495 #ifdef __BIG_ENDIAN__ 500 #ifdef __BIG_ENDIAN__
496 lwz r8,PACA_LOCK_TOKEN(r13) 501 lwz r8,PACA_LOCK_TOKEN(r13)
497 #else 502 #else
498 lwz r8,PACAPACAINDEX(r13) 503 lwz r8,PACAPACAINDEX(r13)
499 #endif 504 #endif
500 24: lwarx r0,0,r3 505 24: lwarx r0,0,r3
501 cmpwi r0,0 506 cmpwi r0,0
502 bne 24b 507 bne 24b
503 stwcx. r8,0,r3 508 stwcx. r8,0,r3
504 bne 24b 509 bne 24b
505 isync 510 isync
506 511
507 ld r5,HSTATE_KVM_VCORE(r13) 512 ld r5,HSTATE_KVM_VCORE(r13)
508 ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */ 513 ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */
509 li r0,0x18f 514 li r0,0x18f
510 rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ 515 rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
511 or r0,r7,r0 516 or r0,r7,r0
512 ptesync 517 ptesync
513 sync 518 sync
514 mtspr SPRN_HID4,r0 /* switch to reserved LPID */ 519 mtspr SPRN_HID4,r0 /* switch to reserved LPID */
515 isync 520 isync
516 li r0,0 521 li r0,0
517 stw r0,0(r3) /* drop native_tlbie_lock */ 522 stw r0,0(r3) /* drop native_tlbie_lock */
518 523
519 /* invalidate the whole TLB */ 524 /* invalidate the whole TLB */
520 li r0,256 525 li r0,256
521 mtctr r0 526 mtctr r0
522 li r6,0 527 li r6,0
523 25: tlbiel r6 528 25: tlbiel r6
524 addi r6,r6,0x1000 529 addi r6,r6,0x1000
525 bdnz 25b 530 bdnz 25b
526 ptesync 531 ptesync
527 532
528 /* Take the guest's tlbie_lock */ 533 /* Take the guest's tlbie_lock */
529 addi r3,r9,KVM_TLBIE_LOCK 534 addi r3,r9,KVM_TLBIE_LOCK
530 24: lwarx r0,0,r3 535 24: lwarx r0,0,r3
531 cmpwi r0,0 536 cmpwi r0,0
532 bne 24b 537 bne 24b
533 stwcx. r8,0,r3 538 stwcx. r8,0,r3
534 bne 24b 539 bne 24b
535 isync 540 isync
536 ld r6,KVM_SDR1(r9) 541 ld r6,KVM_SDR1(r9)
537 mtspr SPRN_SDR1,r6 /* switch to partition page table */ 542 mtspr SPRN_SDR1,r6 /* switch to partition page table */
538 543
539 /* Set up HID4 with the guest's LPID etc. */ 544 /* Set up HID4 with the guest's LPID etc. */
540 sync 545 sync
541 mtspr SPRN_HID4,r7 546 mtspr SPRN_HID4,r7
542 isync 547 isync
543 548
544 /* drop the guest's tlbie_lock */ 549 /* drop the guest's tlbie_lock */
545 li r0,0 550 li r0,0
546 stw r0,0(r3) 551 stw r0,0(r3)
547 552
548 /* Check if HDEC expires soon */ 553 /* Check if HDEC expires soon */
549 mfspr r3,SPRN_HDEC 554 mfspr r3,SPRN_HDEC
550 cmpwi r3,10 555 cmpwi r3,10
551 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER 556 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
552 blt hdec_soon 557 blt hdec_soon
553 558
554 /* Enable HDEC interrupts */ 559 /* Enable HDEC interrupts */
555 mfspr r0,SPRN_HID0 560 mfspr r0,SPRN_HID0
556 li r3,1 561 li r3,1
557 rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1 562 rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
558 sync 563 sync
559 mtspr SPRN_HID0,r0 564 mtspr SPRN_HID0,r0
560 mfspr r0,SPRN_HID0 565 mfspr r0,SPRN_HID0
561 mfspr r0,SPRN_HID0 566 mfspr r0,SPRN_HID0
562 mfspr r0,SPRN_HID0 567 mfspr r0,SPRN_HID0
563 mfspr r0,SPRN_HID0 568 mfspr r0,SPRN_HID0
564 mfspr r0,SPRN_HID0 569 mfspr r0,SPRN_HID0
565 mfspr r0,SPRN_HID0 570 mfspr r0,SPRN_HID0
566 31: 571 31:
567 /* Do we have a guest vcpu to run? */ 572 /* Do we have a guest vcpu to run? */
568 cmpdi r4, 0 573 cmpdi r4, 0
569 beq kvmppc_primary_no_guest 574 beq kvmppc_primary_no_guest
570 kvmppc_got_guest: 575 kvmppc_got_guest:
571 576
572 /* Load up guest SLB entries */ 577 /* Load up guest SLB entries */
573 lwz r5,VCPU_SLB_MAX(r4) 578 lwz r5,VCPU_SLB_MAX(r4)
574 cmpwi r5,0 579 cmpwi r5,0
575 beq 9f 580 beq 9f
576 mtctr r5 581 mtctr r5
577 addi r6,r4,VCPU_SLB 582 addi r6,r4,VCPU_SLB
578 1: ld r8,VCPU_SLB_E(r6) 583 1: ld r8,VCPU_SLB_E(r6)
579 ld r9,VCPU_SLB_V(r6) 584 ld r9,VCPU_SLB_V(r6)
580 slbmte r9,r8 585 slbmte r9,r8
581 addi r6,r6,VCPU_SLB_SIZE 586 addi r6,r6,VCPU_SLB_SIZE
582 bdnz 1b 587 bdnz 1b
583 9: 588 9:
584 /* Increment yield count if they have a VPA */ 589 /* Increment yield count if they have a VPA */
585 ld r3, VCPU_VPA(r4) 590 ld r3, VCPU_VPA(r4)
586 cmpdi r3, 0 591 cmpdi r3, 0
587 beq 25f 592 beq 25f
588 lwz r5, LPPACA_YIELDCOUNT(r3) 593 lwz r5, LPPACA_YIELDCOUNT(r3)
589 addi r5, r5, 1 594 addi r5, r5, 1
590 stw r5, LPPACA_YIELDCOUNT(r3) 595 stw r5, LPPACA_YIELDCOUNT(r3)
591 li r6, 1 596 li r6, 1
592 stb r6, VCPU_VPA_DIRTY(r4) 597 stb r6, VCPU_VPA_DIRTY(r4)
593 25: 598 25:
594 599
595 BEGIN_FTR_SECTION 600 BEGIN_FTR_SECTION
596 /* Save purr/spurr */ 601 /* Save purr/spurr */
597 mfspr r5,SPRN_PURR 602 mfspr r5,SPRN_PURR
598 mfspr r6,SPRN_SPURR 603 mfspr r6,SPRN_SPURR
599 std r5,HSTATE_PURR(r13) 604 std r5,HSTATE_PURR(r13)
600 std r6,HSTATE_SPURR(r13) 605 std r6,HSTATE_SPURR(r13)
601 ld r7,VCPU_PURR(r4) 606 ld r7,VCPU_PURR(r4)
602 ld r8,VCPU_SPURR(r4) 607 ld r8,VCPU_SPURR(r4)
603 mtspr SPRN_PURR,r7 608 mtspr SPRN_PURR,r7
604 mtspr SPRN_SPURR,r8 609 mtspr SPRN_SPURR,r8
605 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 610 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
606 611
607 BEGIN_FTR_SECTION 612 BEGIN_FTR_SECTION
608 /* Set partition DABR */ 613 /* Set partition DABR */
609 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ 614 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
610 lwz r5,VCPU_DABRX(r4) 615 lwz r5,VCPU_DABRX(r4)
611 ld r6,VCPU_DABR(r4) 616 ld r6,VCPU_DABR(r4)
612 mtspr SPRN_DABRX,r5 617 mtspr SPRN_DABRX,r5
613 mtspr SPRN_DABR,r6 618 mtspr SPRN_DABR,r6
614 BEGIN_FTR_SECTION_NESTED(89) 619 BEGIN_FTR_SECTION_NESTED(89)
615 isync 620 isync
616 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89) 621 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
617 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 622 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
618 623
619 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 624 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
620 BEGIN_FTR_SECTION 625 BEGIN_FTR_SECTION
621 b skip_tm 626 b skip_tm
622 END_FTR_SECTION_IFCLR(CPU_FTR_TM) 627 END_FTR_SECTION_IFCLR(CPU_FTR_TM)
623 628
624 /* Turn on TM/FP/VSX/VMX so we can restore them. */ 629 /* Turn on TM/FP/VSX/VMX so we can restore them. */
625 mfmsr r5 630 mfmsr r5
626 li r6, MSR_TM >> 32 631 li r6, MSR_TM >> 32
627 sldi r6, r6, 32 632 sldi r6, r6, 32
628 or r5, r5, r6 633 or r5, r5, r6
629 ori r5, r5, MSR_FP 634 ori r5, r5, MSR_FP
630 oris r5, r5, (MSR_VEC | MSR_VSX)@h 635 oris r5, r5, (MSR_VEC | MSR_VSX)@h
631 mtmsrd r5 636 mtmsrd r5
632 637
633 /* 638 /*
634 * The user may change these outside of a transaction, so they must 639 * The user may change these outside of a transaction, so they must
635 * always be context switched. 640 * always be context switched.
636 */ 641 */
637 ld r5, VCPU_TFHAR(r4) 642 ld r5, VCPU_TFHAR(r4)
638 ld r6, VCPU_TFIAR(r4) 643 ld r6, VCPU_TFIAR(r4)
639 ld r7, VCPU_TEXASR(r4) 644 ld r7, VCPU_TEXASR(r4)
640 mtspr SPRN_TFHAR, r5 645 mtspr SPRN_TFHAR, r5
641 mtspr SPRN_TFIAR, r6 646 mtspr SPRN_TFIAR, r6
642 mtspr SPRN_TEXASR, r7 647 mtspr SPRN_TEXASR, r7
643 648
644 ld r5, VCPU_MSR(r4) 649 ld r5, VCPU_MSR(r4)
645 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 650 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
646 beq skip_tm /* TM not active in guest */ 651 beq skip_tm /* TM not active in guest */
647 652
648 /* Make sure the failure summary is set, otherwise we'll program check 653 /* Make sure the failure summary is set, otherwise we'll program check
649 * when we trechkpt. It's possible that this might have been not set 654 * when we trechkpt. It's possible that this might have been not set
650 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the 655 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
651 * host. 656 * host.
652 */ 657 */
653 oris r7, r7, (TEXASR_FS)@h 658 oris r7, r7, (TEXASR_FS)@h
654 mtspr SPRN_TEXASR, r7 659 mtspr SPRN_TEXASR, r7
655 660
656 /* 661 /*
657 * We need to load up the checkpointed state for the guest. 662 * We need to load up the checkpointed state for the guest.
658 * We need to do this early as it will blow away any GPRs, VSRs and 663 * We need to do this early as it will blow away any GPRs, VSRs and
659 * some SPRs. 664 * some SPRs.
660 */ 665 */
661 666
662 mr r31, r4 667 mr r31, r4
663 addi r3, r31, VCPU_FPRS_TM 668 addi r3, r31, VCPU_FPRS_TM
664 bl .load_fp_state 669 bl .load_fp_state
665 addi r3, r31, VCPU_VRS_TM 670 addi r3, r31, VCPU_VRS_TM
666 bl .load_vr_state 671 bl .load_vr_state
667 mr r4, r31 672 mr r4, r31
668 lwz r7, VCPU_VRSAVE_TM(r4) 673 lwz r7, VCPU_VRSAVE_TM(r4)
669 mtspr SPRN_VRSAVE, r7 674 mtspr SPRN_VRSAVE, r7
670 675
671 ld r5, VCPU_LR_TM(r4) 676 ld r5, VCPU_LR_TM(r4)
672 lwz r6, VCPU_CR_TM(r4) 677 lwz r6, VCPU_CR_TM(r4)
673 ld r7, VCPU_CTR_TM(r4) 678 ld r7, VCPU_CTR_TM(r4)
674 ld r8, VCPU_AMR_TM(r4) 679 ld r8, VCPU_AMR_TM(r4)
675 ld r9, VCPU_TAR_TM(r4) 680 ld r9, VCPU_TAR_TM(r4)
676 mtlr r5 681 mtlr r5
677 mtcr r6 682 mtcr r6
678 mtctr r7 683 mtctr r7
679 mtspr SPRN_AMR, r8 684 mtspr SPRN_AMR, r8
680 mtspr SPRN_TAR, r9 685 mtspr SPRN_TAR, r9
681 686
682 /* 687 /*
683 * Load up PPR and DSCR values but don't put them in the actual SPRs 688 * Load up PPR and DSCR values but don't put them in the actual SPRs
684 * till the last moment to avoid running with userspace PPR and DSCR for 689 * till the last moment to avoid running with userspace PPR and DSCR for
685 * too long. 690 * too long.
686 */ 691 */
687 ld r29, VCPU_DSCR_TM(r4) 692 ld r29, VCPU_DSCR_TM(r4)
688 ld r30, VCPU_PPR_TM(r4) 693 ld r30, VCPU_PPR_TM(r4)
689 694
690 std r2, PACATMSCRATCH(r13) /* Save TOC */ 695 std r2, PACATMSCRATCH(r13) /* Save TOC */
691 696
692 /* Clear the MSR RI since r1, r13 are all going to be foobar. */ 697 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
693 li r5, 0 698 li r5, 0
694 mtmsrd r5, 1 699 mtmsrd r5, 1
695 700
696 /* Load GPRs r0-r28 */ 701 /* Load GPRs r0-r28 */
697 reg = 0 702 reg = 0
698 .rept 29 703 .rept 29
699 ld reg, VCPU_GPRS_TM(reg)(r31) 704 ld reg, VCPU_GPRS_TM(reg)(r31)
700 reg = reg + 1 705 reg = reg + 1
701 .endr 706 .endr
702 707
703 mtspr SPRN_DSCR, r29 708 mtspr SPRN_DSCR, r29
704 mtspr SPRN_PPR, r30 709 mtspr SPRN_PPR, r30
705 710
706 /* Load final GPRs */ 711 /* Load final GPRs */
707 ld 29, VCPU_GPRS_TM(29)(r31) 712 ld 29, VCPU_GPRS_TM(29)(r31)
708 ld 30, VCPU_GPRS_TM(30)(r31) 713 ld 30, VCPU_GPRS_TM(30)(r31)
709 ld 31, VCPU_GPRS_TM(31)(r31) 714 ld 31, VCPU_GPRS_TM(31)(r31)
710 715
711 /* TM checkpointed state is now setup. All GPRs are now volatile. */ 716 /* TM checkpointed state is now setup. All GPRs are now volatile. */
712 TRECHKPT 717 TRECHKPT
713 718
714 /* Now let's get back the state we need. */ 719 /* Now let's get back the state we need. */
715 HMT_MEDIUM 720 HMT_MEDIUM
716 GET_PACA(r13) 721 GET_PACA(r13)
717 ld r29, HSTATE_DSCR(r13) 722 ld r29, HSTATE_DSCR(r13)
718 mtspr SPRN_DSCR, r29 723 mtspr SPRN_DSCR, r29
719 ld r4, HSTATE_KVM_VCPU(r13) 724 ld r4, HSTATE_KVM_VCPU(r13)
720 ld r1, HSTATE_HOST_R1(r13) 725 ld r1, HSTATE_HOST_R1(r13)
721 ld r2, PACATMSCRATCH(r13) 726 ld r2, PACATMSCRATCH(r13)
722 727
723 /* Set the MSR RI since we have our registers back. */ 728 /* Set the MSR RI since we have our registers back. */
724 li r5, MSR_RI 729 li r5, MSR_RI
725 mtmsrd r5, 1 730 mtmsrd r5, 1
726 skip_tm: 731 skip_tm:
727 #endif 732 #endif
728 733
729 /* Load guest PMU registers */ 734 /* Load guest PMU registers */
730 /* R4 is live here (vcpu pointer) */ 735 /* R4 is live here (vcpu pointer) */
731 li r3, 1 736 li r3, 1
732 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 737 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
733 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 738 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
734 isync 739 isync
735 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */ 740 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
736 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */ 741 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
737 lwz r6, VCPU_PMC + 8(r4) 742 lwz r6, VCPU_PMC + 8(r4)
738 lwz r7, VCPU_PMC + 12(r4) 743 lwz r7, VCPU_PMC + 12(r4)
739 lwz r8, VCPU_PMC + 16(r4) 744 lwz r8, VCPU_PMC + 16(r4)
740 lwz r9, VCPU_PMC + 20(r4) 745 lwz r9, VCPU_PMC + 20(r4)
741 BEGIN_FTR_SECTION 746 BEGIN_FTR_SECTION
742 lwz r10, VCPU_PMC + 24(r4) 747 lwz r10, VCPU_PMC + 24(r4)
743 lwz r11, VCPU_PMC + 28(r4) 748 lwz r11, VCPU_PMC + 28(r4)
744 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 749 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
745 mtspr SPRN_PMC1, r3 750 mtspr SPRN_PMC1, r3
746 mtspr SPRN_PMC2, r5 751 mtspr SPRN_PMC2, r5
747 mtspr SPRN_PMC3, r6 752 mtspr SPRN_PMC3, r6
748 mtspr SPRN_PMC4, r7 753 mtspr SPRN_PMC4, r7
749 mtspr SPRN_PMC5, r8 754 mtspr SPRN_PMC5, r8
750 mtspr SPRN_PMC6, r9 755 mtspr SPRN_PMC6, r9
751 BEGIN_FTR_SECTION 756 BEGIN_FTR_SECTION
752 mtspr SPRN_PMC7, r10 757 mtspr SPRN_PMC7, r10
753 mtspr SPRN_PMC8, r11 758 mtspr SPRN_PMC8, r11
754 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 759 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
755 ld r3, VCPU_MMCR(r4) 760 ld r3, VCPU_MMCR(r4)
756 ld r5, VCPU_MMCR + 8(r4) 761 ld r5, VCPU_MMCR + 8(r4)
757 ld r6, VCPU_MMCR + 16(r4) 762 ld r6, VCPU_MMCR + 16(r4)
758 ld r7, VCPU_SIAR(r4) 763 ld r7, VCPU_SIAR(r4)
759 ld r8, VCPU_SDAR(r4) 764 ld r8, VCPU_SDAR(r4)
760 mtspr SPRN_MMCR1, r5 765 mtspr SPRN_MMCR1, r5
761 mtspr SPRN_MMCRA, r6 766 mtspr SPRN_MMCRA, r6
762 mtspr SPRN_SIAR, r7 767 mtspr SPRN_SIAR, r7
763 mtspr SPRN_SDAR, r8 768 mtspr SPRN_SDAR, r8
764 BEGIN_FTR_SECTION 769 BEGIN_FTR_SECTION
765 ld r5, VCPU_MMCR + 24(r4) 770 ld r5, VCPU_MMCR + 24(r4)
766 ld r6, VCPU_SIER(r4) 771 ld r6, VCPU_SIER(r4)
767 lwz r7, VCPU_PMC + 24(r4) 772 lwz r7, VCPU_PMC + 24(r4)
768 lwz r8, VCPU_PMC + 28(r4) 773 lwz r8, VCPU_PMC + 28(r4)
769 ld r9, VCPU_MMCR + 32(r4) 774 ld r9, VCPU_MMCR + 32(r4)
770 mtspr SPRN_MMCR2, r5 775 mtspr SPRN_MMCR2, r5
771 mtspr SPRN_SIER, r6 776 mtspr SPRN_SIER, r6
772 mtspr SPRN_SPMC1, r7 777 mtspr SPRN_SPMC1, r7
773 mtspr SPRN_SPMC2, r8 778 mtspr SPRN_SPMC2, r8
774 mtspr SPRN_MMCRS, r9 779 mtspr SPRN_MMCRS, r9
775 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 780 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
776 mtspr SPRN_MMCR0, r3 781 mtspr SPRN_MMCR0, r3
777 isync 782 isync
778 783
779 /* Load up FP, VMX and VSX registers */ 784 /* Load up FP, VMX and VSX registers */
780 bl kvmppc_load_fp 785 bl kvmppc_load_fp
781 786
782 ld r14, VCPU_GPR(R14)(r4) 787 ld r14, VCPU_GPR(R14)(r4)
783 ld r15, VCPU_GPR(R15)(r4) 788 ld r15, VCPU_GPR(R15)(r4)
784 ld r16, VCPU_GPR(R16)(r4) 789 ld r16, VCPU_GPR(R16)(r4)
785 ld r17, VCPU_GPR(R17)(r4) 790 ld r17, VCPU_GPR(R17)(r4)
786 ld r18, VCPU_GPR(R18)(r4) 791 ld r18, VCPU_GPR(R18)(r4)
787 ld r19, VCPU_GPR(R19)(r4) 792 ld r19, VCPU_GPR(R19)(r4)
788 ld r20, VCPU_GPR(R20)(r4) 793 ld r20, VCPU_GPR(R20)(r4)
789 ld r21, VCPU_GPR(R21)(r4) 794 ld r21, VCPU_GPR(R21)(r4)
790 ld r22, VCPU_GPR(R22)(r4) 795 ld r22, VCPU_GPR(R22)(r4)
791 ld r23, VCPU_GPR(R23)(r4) 796 ld r23, VCPU_GPR(R23)(r4)
792 ld r24, VCPU_GPR(R24)(r4) 797 ld r24, VCPU_GPR(R24)(r4)
793 ld r25, VCPU_GPR(R25)(r4) 798 ld r25, VCPU_GPR(R25)(r4)
794 ld r26, VCPU_GPR(R26)(r4) 799 ld r26, VCPU_GPR(R26)(r4)
795 ld r27, VCPU_GPR(R27)(r4) 800 ld r27, VCPU_GPR(R27)(r4)
796 ld r28, VCPU_GPR(R28)(r4) 801 ld r28, VCPU_GPR(R28)(r4)
797 ld r29, VCPU_GPR(R29)(r4) 802 ld r29, VCPU_GPR(R29)(r4)
798 ld r30, VCPU_GPR(R30)(r4) 803 ld r30, VCPU_GPR(R30)(r4)
799 ld r31, VCPU_GPR(R31)(r4) 804 ld r31, VCPU_GPR(R31)(r4)
800 805
801 BEGIN_FTR_SECTION 806 BEGIN_FTR_SECTION
802 /* Switch DSCR to guest value */ 807 /* Switch DSCR to guest value */
803 ld r5, VCPU_DSCR(r4) 808 ld r5, VCPU_DSCR(r4)
804 mtspr SPRN_DSCR, r5 809 mtspr SPRN_DSCR, r5
805 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 810 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
806 811
807 BEGIN_FTR_SECTION 812 BEGIN_FTR_SECTION
808 /* Skip next section on POWER7 or PPC970 */ 813 /* Skip next section on POWER7 or PPC970 */
809 b 8f 814 b 8f
810 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 815 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
811 /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ 816 /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
812 mfmsr r8 817 mfmsr r8
813 li r0, 1 818 li r0, 1
814 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG 819 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
815 mtmsrd r8 820 mtmsrd r8
816 821
817 /* Load up POWER8-specific registers */ 822 /* Load up POWER8-specific registers */
818 ld r5, VCPU_IAMR(r4) 823 ld r5, VCPU_IAMR(r4)
819 lwz r6, VCPU_PSPB(r4) 824 lwz r6, VCPU_PSPB(r4)
820 ld r7, VCPU_FSCR(r4) 825 ld r7, VCPU_FSCR(r4)
821 mtspr SPRN_IAMR, r5 826 mtspr SPRN_IAMR, r5
822 mtspr SPRN_PSPB, r6 827 mtspr SPRN_PSPB, r6
823 mtspr SPRN_FSCR, r7 828 mtspr SPRN_FSCR, r7
824 ld r5, VCPU_DAWR(r4) 829 ld r5, VCPU_DAWR(r4)
825 ld r6, VCPU_DAWRX(r4) 830 ld r6, VCPU_DAWRX(r4)
826 ld r7, VCPU_CIABR(r4) 831 ld r7, VCPU_CIABR(r4)
827 ld r8, VCPU_TAR(r4) 832 ld r8, VCPU_TAR(r4)
828 mtspr SPRN_DAWR, r5 833 mtspr SPRN_DAWR, r5
829 mtspr SPRN_DAWRX, r6 834 mtspr SPRN_DAWRX, r6
830 mtspr SPRN_CIABR, r7 835 mtspr SPRN_CIABR, r7
831 mtspr SPRN_TAR, r8 836 mtspr SPRN_TAR, r8
832 ld r5, VCPU_IC(r4) 837 ld r5, VCPU_IC(r4)
833 ld r6, VCPU_VTB(r4) 838 ld r6, VCPU_VTB(r4)
834 mtspr SPRN_IC, r5 839 mtspr SPRN_IC, r5
835 mtspr SPRN_VTB, r6 840 mtspr SPRN_VTB, r6
836 ld r8, VCPU_EBBHR(r4) 841 ld r8, VCPU_EBBHR(r4)
837 mtspr SPRN_EBBHR, r8 842 mtspr SPRN_EBBHR, r8
838 ld r5, VCPU_EBBRR(r4) 843 ld r5, VCPU_EBBRR(r4)
839 ld r6, VCPU_BESCR(r4) 844 ld r6, VCPU_BESCR(r4)
840 ld r7, VCPU_CSIGR(r4) 845 ld r7, VCPU_CSIGR(r4)
841 ld r8, VCPU_TACR(r4) 846 ld r8, VCPU_TACR(r4)
842 mtspr SPRN_EBBRR, r5 847 mtspr SPRN_EBBRR, r5
843 mtspr SPRN_BESCR, r6 848 mtspr SPRN_BESCR, r6
844 mtspr SPRN_CSIGR, r7 849 mtspr SPRN_CSIGR, r7
845 mtspr SPRN_TACR, r8 850 mtspr SPRN_TACR, r8
846 ld r5, VCPU_TCSCR(r4) 851 ld r5, VCPU_TCSCR(r4)
847 ld r6, VCPU_ACOP(r4) 852 ld r6, VCPU_ACOP(r4)
848 lwz r7, VCPU_GUEST_PID(r4) 853 lwz r7, VCPU_GUEST_PID(r4)
849 ld r8, VCPU_WORT(r4) 854 ld r8, VCPU_WORT(r4)
850 mtspr SPRN_TCSCR, r5 855 mtspr SPRN_TCSCR, r5
851 mtspr SPRN_ACOP, r6 856 mtspr SPRN_ACOP, r6
852 mtspr SPRN_PID, r7 857 mtspr SPRN_PID, r7
853 mtspr SPRN_WORT, r8 858 mtspr SPRN_WORT, r8
854 8: 859 8:
855 860
856 /* 861 /*
857 * Set the decrementer to the guest decrementer. 862 * Set the decrementer to the guest decrementer.
858 */ 863 */
859 ld r8,VCPU_DEC_EXPIRES(r4) 864 ld r8,VCPU_DEC_EXPIRES(r4)
860 /* r8 is a host timebase value here, convert to guest TB */ 865 /* r8 is a host timebase value here, convert to guest TB */
861 ld r5,HSTATE_KVM_VCORE(r13) 866 ld r5,HSTATE_KVM_VCORE(r13)
862 ld r6,VCORE_TB_OFFSET(r5) 867 ld r6,VCORE_TB_OFFSET(r5)
863 add r8,r8,r6 868 add r8,r8,r6
864 mftb r7 869 mftb r7
865 subf r3,r7,r8 870 subf r3,r7,r8
866 mtspr SPRN_DEC,r3 871 mtspr SPRN_DEC,r3
867 stw r3,VCPU_DEC(r4) 872 stw r3,VCPU_DEC(r4)
868 873
869 ld r5, VCPU_SPRG0(r4) 874 ld r5, VCPU_SPRG0(r4)
870 ld r6, VCPU_SPRG1(r4) 875 ld r6, VCPU_SPRG1(r4)
871 ld r7, VCPU_SPRG2(r4) 876 ld r7, VCPU_SPRG2(r4)
872 ld r8, VCPU_SPRG3(r4) 877 ld r8, VCPU_SPRG3(r4)
873 mtspr SPRN_SPRG0, r5 878 mtspr SPRN_SPRG0, r5
874 mtspr SPRN_SPRG1, r6 879 mtspr SPRN_SPRG1, r6
875 mtspr SPRN_SPRG2, r7 880 mtspr SPRN_SPRG2, r7
876 mtspr SPRN_SPRG3, r8 881 mtspr SPRN_SPRG3, r8
877 882
878 /* Load up DAR and DSISR */ 883 /* Load up DAR and DSISR */
879 ld r5, VCPU_DAR(r4) 884 ld r5, VCPU_DAR(r4)
880 lwz r6, VCPU_DSISR(r4) 885 lwz r6, VCPU_DSISR(r4)
881 mtspr SPRN_DAR, r5 886 mtspr SPRN_DAR, r5
882 mtspr SPRN_DSISR, r6 887 mtspr SPRN_DSISR, r6
883 888
884 BEGIN_FTR_SECTION 889 BEGIN_FTR_SECTION
885 /* Restore AMR and UAMOR, set AMOR to all 1s */ 890 /* Restore AMR and UAMOR, set AMOR to all 1s */
886 ld r5,VCPU_AMR(r4) 891 ld r5,VCPU_AMR(r4)
887 ld r6,VCPU_UAMOR(r4) 892 ld r6,VCPU_UAMOR(r4)
888 li r7,-1 893 li r7,-1
889 mtspr SPRN_AMR,r5 894 mtspr SPRN_AMR,r5
890 mtspr SPRN_UAMOR,r6 895 mtspr SPRN_UAMOR,r6
891 mtspr SPRN_AMOR,r7 896 mtspr SPRN_AMOR,r7
892 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 897 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
893 898
894 /* Restore state of CTRL run bit; assume 1 on entry */ 899 /* Restore state of CTRL run bit; assume 1 on entry */
895 lwz r5,VCPU_CTRL(r4) 900 lwz r5,VCPU_CTRL(r4)
896 andi. r5,r5,1 901 andi. r5,r5,1
897 bne 4f 902 bne 4f
898 mfspr r6,SPRN_CTRLF 903 mfspr r6,SPRN_CTRLF
899 clrrdi r6,r6,1 904 clrrdi r6,r6,1
900 mtspr SPRN_CTRLT,r6 905 mtspr SPRN_CTRLT,r6
901 4: 906 4:
902 ld r6, VCPU_CTR(r4) 907 ld r6, VCPU_CTR(r4)
903 lwz r7, VCPU_XER(r4) 908 lwz r7, VCPU_XER(r4)
904 909
905 mtctr r6 910 mtctr r6
906 mtxer r7 911 mtxer r7
907 912
908 kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */ 913 kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
909 ld r10, VCPU_PC(r4) 914 ld r10, VCPU_PC(r4)
910 ld r11, VCPU_MSR(r4) 915 ld r11, VCPU_MSR(r4)
911 ld r6, VCPU_SRR0(r4) 916 ld r6, VCPU_SRR0(r4)
912 ld r7, VCPU_SRR1(r4) 917 ld r7, VCPU_SRR1(r4)
913 mtspr SPRN_SRR0, r6 918 mtspr SPRN_SRR0, r6
914 mtspr SPRN_SRR1, r7 919 mtspr SPRN_SRR1, r7
915 920
916 deliver_guest_interrupt: 921 deliver_guest_interrupt:
917 /* r11 = vcpu->arch.msr & ~MSR_HV */ 922 /* r11 = vcpu->arch.msr & ~MSR_HV */
918 rldicl r11, r11, 63 - MSR_HV_LG, 1 923 rldicl r11, r11, 63 - MSR_HV_LG, 1
919 rotldi r11, r11, 1 + MSR_HV_LG 924 rotldi r11, r11, 1 + MSR_HV_LG
920 ori r11, r11, MSR_ME 925 ori r11, r11, MSR_ME
921 926
922 /* Check if we can deliver an external or decrementer interrupt now */ 927 /* Check if we can deliver an external or decrementer interrupt now */
923 ld r0, VCPU_PENDING_EXC(r4) 928 ld r0, VCPU_PENDING_EXC(r4)
924 rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63 929 rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
925 cmpdi cr1, r0, 0 930 cmpdi cr1, r0, 0
926 andi. r8, r11, MSR_EE 931 andi. r8, r11, MSR_EE
927 BEGIN_FTR_SECTION 932 BEGIN_FTR_SECTION
928 mfspr r8, SPRN_LPCR 933 mfspr r8, SPRN_LPCR
929 /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */ 934 /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
930 rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH 935 rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
931 mtspr SPRN_LPCR, r8 936 mtspr SPRN_LPCR, r8
932 isync 937 isync
933 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 938 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
934 beq 5f 939 beq 5f
935 li r0, BOOK3S_INTERRUPT_EXTERNAL 940 li r0, BOOK3S_INTERRUPT_EXTERNAL
936 bne cr1, 12f 941 bne cr1, 12f
937 mfspr r0, SPRN_DEC 942 mfspr r0, SPRN_DEC
938 cmpwi r0, 0 943 cmpwi r0, 0
939 li r0, BOOK3S_INTERRUPT_DECREMENTER 944 li r0, BOOK3S_INTERRUPT_DECREMENTER
940 bge 5f 945 bge 5f
941 946
942 12: mtspr SPRN_SRR0, r10 947 12: mtspr SPRN_SRR0, r10
943 mr r10,r0 948 mr r10,r0
944 mtspr SPRN_SRR1, r11 949 mtspr SPRN_SRR1, r11
945 mr r9, r4 950 mr r9, r4
946 bl kvmppc_msr_interrupt 951 bl kvmppc_msr_interrupt
947 5: 952 5:
948 953
949 /* 954 /*
950 * Required state: 955 * Required state:
951 * R4 = vcpu 956 * R4 = vcpu
952 * R10: value for HSRR0 957 * R10: value for HSRR0
953 * R11: value for HSRR1 958 * R11: value for HSRR1
954 * R13 = PACA 959 * R13 = PACA
955 */ 960 */
956 fast_guest_return: 961 fast_guest_return:
957 li r0,0 962 li r0,0
958 stb r0,VCPU_CEDED(r4) /* cancel cede */ 963 stb r0,VCPU_CEDED(r4) /* cancel cede */
959 mtspr SPRN_HSRR0,r10 964 mtspr SPRN_HSRR0,r10
960 mtspr SPRN_HSRR1,r11 965 mtspr SPRN_HSRR1,r11
961 966
962 /* Activate guest mode, so faults get handled by KVM */ 967 /* Activate guest mode, so faults get handled by KVM */
963 li r9, KVM_GUEST_MODE_GUEST_HV 968 li r9, KVM_GUEST_MODE_GUEST_HV
964 stb r9, HSTATE_IN_GUEST(r13) 969 stb r9, HSTATE_IN_GUEST(r13)
965 970
966 /* Enter guest */ 971 /* Enter guest */
967 972
968 BEGIN_FTR_SECTION 973 BEGIN_FTR_SECTION
969 ld r5, VCPU_CFAR(r4) 974 ld r5, VCPU_CFAR(r4)
970 mtspr SPRN_CFAR, r5 975 mtspr SPRN_CFAR, r5
971 END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 976 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
972 BEGIN_FTR_SECTION 977 BEGIN_FTR_SECTION
973 ld r0, VCPU_PPR(r4) 978 ld r0, VCPU_PPR(r4)
974 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 979 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
975 980
976 ld r5, VCPU_LR(r4) 981 ld r5, VCPU_LR(r4)
977 lwz r6, VCPU_CR(r4) 982 lwz r6, VCPU_CR(r4)
978 mtlr r5 983 mtlr r5
979 mtcr r6 984 mtcr r6
980 985
981 ld r1, VCPU_GPR(R1)(r4) 986 ld r1, VCPU_GPR(R1)(r4)
982 ld r2, VCPU_GPR(R2)(r4) 987 ld r2, VCPU_GPR(R2)(r4)
983 ld r3, VCPU_GPR(R3)(r4) 988 ld r3, VCPU_GPR(R3)(r4)
984 ld r5, VCPU_GPR(R5)(r4) 989 ld r5, VCPU_GPR(R5)(r4)
985 ld r6, VCPU_GPR(R6)(r4) 990 ld r6, VCPU_GPR(R6)(r4)
986 ld r7, VCPU_GPR(R7)(r4) 991 ld r7, VCPU_GPR(R7)(r4)
987 ld r8, VCPU_GPR(R8)(r4) 992 ld r8, VCPU_GPR(R8)(r4)
988 ld r9, VCPU_GPR(R9)(r4) 993 ld r9, VCPU_GPR(R9)(r4)
989 ld r10, VCPU_GPR(R10)(r4) 994 ld r10, VCPU_GPR(R10)(r4)
990 ld r11, VCPU_GPR(R11)(r4) 995 ld r11, VCPU_GPR(R11)(r4)
991 ld r12, VCPU_GPR(R12)(r4) 996 ld r12, VCPU_GPR(R12)(r4)
992 ld r13, VCPU_GPR(R13)(r4) 997 ld r13, VCPU_GPR(R13)(r4)
993 998
994 BEGIN_FTR_SECTION 999 BEGIN_FTR_SECTION
995 mtspr SPRN_PPR, r0 1000 mtspr SPRN_PPR, r0
996 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1001 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
997 ld r0, VCPU_GPR(R0)(r4) 1002 ld r0, VCPU_GPR(R0)(r4)
998 ld r4, VCPU_GPR(R4)(r4) 1003 ld r4, VCPU_GPR(R4)(r4)
999 1004
1000 hrfid 1005 hrfid
1001 b . 1006 b .
1002 1007
1003 /****************************************************************************** 1008 /******************************************************************************
1004 * * 1009 * *
1005 * Exit code * 1010 * Exit code *
1006 * * 1011 * *
1007 *****************************************************************************/ 1012 *****************************************************************************/
1008 1013
1009 /* 1014 /*
1010 * We come here from the first-level interrupt handlers. 1015 * We come here from the first-level interrupt handlers.
1011 */ 1016 */
1012 .globl kvmppc_interrupt_hv 1017 .globl kvmppc_interrupt_hv
1013 kvmppc_interrupt_hv: 1018 kvmppc_interrupt_hv:
1014 /* 1019 /*
1015 * Register contents: 1020 * Register contents:
1016 * R12 = interrupt vector 1021 * R12 = interrupt vector
1017 * R13 = PACA 1022 * R13 = PACA
1018 * guest CR, R12 saved in shadow VCPU SCRATCH1/0 1023 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
1019 * guest R13 saved in SPRN_SCRATCH0 1024 * guest R13 saved in SPRN_SCRATCH0
1020 */ 1025 */
1021 std r9, HSTATE_SCRATCH2(r13) 1026 std r9, HSTATE_SCRATCH2(r13)
1022 1027
1023 lbz r9, HSTATE_IN_GUEST(r13) 1028 lbz r9, HSTATE_IN_GUEST(r13)
1024 cmpwi r9, KVM_GUEST_MODE_HOST_HV 1029 cmpwi r9, KVM_GUEST_MODE_HOST_HV
1025 beq kvmppc_bad_host_intr 1030 beq kvmppc_bad_host_intr
1026 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE 1031 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1027 cmpwi r9, KVM_GUEST_MODE_GUEST 1032 cmpwi r9, KVM_GUEST_MODE_GUEST
1028 ld r9, HSTATE_SCRATCH2(r13) 1033 ld r9, HSTATE_SCRATCH2(r13)
1029 beq kvmppc_interrupt_pr 1034 beq kvmppc_interrupt_pr
1030 #endif 1035 #endif
1031 /* We're now back in the host but in guest MMU context */ 1036 /* We're now back in the host but in guest MMU context */
1032 li r9, KVM_GUEST_MODE_HOST_HV 1037 li r9, KVM_GUEST_MODE_HOST_HV
1033 stb r9, HSTATE_IN_GUEST(r13) 1038 stb r9, HSTATE_IN_GUEST(r13)
1034 1039
1035 ld r9, HSTATE_KVM_VCPU(r13) 1040 ld r9, HSTATE_KVM_VCPU(r13)
1036 1041
1037 /* Save registers */ 1042 /* Save registers */
1038 1043
1039 std r0, VCPU_GPR(R0)(r9) 1044 std r0, VCPU_GPR(R0)(r9)
1040 std r1, VCPU_GPR(R1)(r9) 1045 std r1, VCPU_GPR(R1)(r9)
1041 std r2, VCPU_GPR(R2)(r9) 1046 std r2, VCPU_GPR(R2)(r9)
1042 std r3, VCPU_GPR(R3)(r9) 1047 std r3, VCPU_GPR(R3)(r9)
1043 std r4, VCPU_GPR(R4)(r9) 1048 std r4, VCPU_GPR(R4)(r9)
1044 std r5, VCPU_GPR(R5)(r9) 1049 std r5, VCPU_GPR(R5)(r9)
1045 std r6, VCPU_GPR(R6)(r9) 1050 std r6, VCPU_GPR(R6)(r9)
1046 std r7, VCPU_GPR(R7)(r9) 1051 std r7, VCPU_GPR(R7)(r9)
1047 std r8, VCPU_GPR(R8)(r9) 1052 std r8, VCPU_GPR(R8)(r9)
1048 ld r0, HSTATE_SCRATCH2(r13) 1053 ld r0, HSTATE_SCRATCH2(r13)
1049 std r0, VCPU_GPR(R9)(r9) 1054 std r0, VCPU_GPR(R9)(r9)
1050 std r10, VCPU_GPR(R10)(r9) 1055 std r10, VCPU_GPR(R10)(r9)
1051 std r11, VCPU_GPR(R11)(r9) 1056 std r11, VCPU_GPR(R11)(r9)
1052 ld r3, HSTATE_SCRATCH0(r13) 1057 ld r3, HSTATE_SCRATCH0(r13)
1053 lwz r4, HSTATE_SCRATCH1(r13) 1058 lwz r4, HSTATE_SCRATCH1(r13)
1054 std r3, VCPU_GPR(R12)(r9) 1059 std r3, VCPU_GPR(R12)(r9)
1055 stw r4, VCPU_CR(r9) 1060 stw r4, VCPU_CR(r9)
1056 BEGIN_FTR_SECTION 1061 BEGIN_FTR_SECTION
1057 ld r3, HSTATE_CFAR(r13) 1062 ld r3, HSTATE_CFAR(r13)
1058 std r3, VCPU_CFAR(r9) 1063 std r3, VCPU_CFAR(r9)
1059 END_FTR_SECTION_IFSET(CPU_FTR_CFAR) 1064 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1060 BEGIN_FTR_SECTION 1065 BEGIN_FTR_SECTION
1061 ld r4, HSTATE_PPR(r13) 1066 ld r4, HSTATE_PPR(r13)
1062 std r4, VCPU_PPR(r9) 1067 std r4, VCPU_PPR(r9)
1063 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 1068 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1064 1069
1065 /* Restore R1/R2 so we can handle faults */ 1070 /* Restore R1/R2 so we can handle faults */
1066 ld r1, HSTATE_HOST_R1(r13) 1071 ld r1, HSTATE_HOST_R1(r13)
1067 ld r2, PACATOC(r13) 1072 ld r2, PACATOC(r13)
1068 1073
1069 mfspr r10, SPRN_SRR0 1074 mfspr r10, SPRN_SRR0
1070 mfspr r11, SPRN_SRR1 1075 mfspr r11, SPRN_SRR1
1071 std r10, VCPU_SRR0(r9) 1076 std r10, VCPU_SRR0(r9)
1072 std r11, VCPU_SRR1(r9) 1077 std r11, VCPU_SRR1(r9)
1073 andi. r0, r12, 2 /* need to read HSRR0/1? */ 1078 andi. r0, r12, 2 /* need to read HSRR0/1? */
1074 beq 1f 1079 beq 1f
1075 mfspr r10, SPRN_HSRR0 1080 mfspr r10, SPRN_HSRR0
1076 mfspr r11, SPRN_HSRR1 1081 mfspr r11, SPRN_HSRR1
1077 clrrdi r12, r12, 2 1082 clrrdi r12, r12, 2
1078 1: std r10, VCPU_PC(r9) 1083 1: std r10, VCPU_PC(r9)
1079 std r11, VCPU_MSR(r9) 1084 std r11, VCPU_MSR(r9)
1080 1085
1081 GET_SCRATCH0(r3) 1086 GET_SCRATCH0(r3)
1082 mflr r4 1087 mflr r4
1083 std r3, VCPU_GPR(R13)(r9) 1088 std r3, VCPU_GPR(R13)(r9)
1084 std r4, VCPU_LR(r9) 1089 std r4, VCPU_LR(r9)
1085 1090
1086 stw r12,VCPU_TRAP(r9) 1091 stw r12,VCPU_TRAP(r9)
1087 1092
1088 /* Save HEIR (HV emulation assist reg) in last_inst 1093 /* Save HEIR (HV emulation assist reg) in last_inst
1089 if this is an HEI (HV emulation interrupt, e40) */ 1094 if this is an HEI (HV emulation interrupt, e40) */
1090 li r3,KVM_INST_FETCH_FAILED 1095 li r3,KVM_INST_FETCH_FAILED
1091 BEGIN_FTR_SECTION 1096 BEGIN_FTR_SECTION
1092 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST 1097 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1093 bne 11f 1098 bne 11f
1094 mfspr r3,SPRN_HEIR 1099 mfspr r3,SPRN_HEIR
1095 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1100 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1096 11: stw r3,VCPU_LAST_INST(r9) 1101 11: stw r3,VCPU_LAST_INST(r9)
1097 1102
1098 /* these are volatile across C function calls */ 1103 /* these are volatile across C function calls */
1099 mfctr r3 1104 mfctr r3
1100 mfxer r4 1105 mfxer r4
1101 std r3, VCPU_CTR(r9) 1106 std r3, VCPU_CTR(r9)
1102 stw r4, VCPU_XER(r9) 1107 stw r4, VCPU_XER(r9)
1103 1108
1104 BEGIN_FTR_SECTION 1109 BEGIN_FTR_SECTION
1105 /* If this is a page table miss then see if it's theirs or ours */ 1110 /* If this is a page table miss then see if it's theirs or ours */
1106 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 1111 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1107 beq kvmppc_hdsi 1112 beq kvmppc_hdsi
1108 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE 1113 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1109 beq kvmppc_hisi 1114 beq kvmppc_hisi
1110 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1115 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1111 1116
1112 /* See if this is a leftover HDEC interrupt */ 1117 /* See if this is a leftover HDEC interrupt */
1113 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1118 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1114 bne 2f 1119 bne 2f
1115 mfspr r3,SPRN_HDEC 1120 mfspr r3,SPRN_HDEC
1116 cmpwi r3,0 1121 cmpwi r3,0
1117 bge ignore_hdec 1122 bge ignore_hdec
1118 2: 1123 2:
1119 /* See if this is an hcall we can handle in real mode */ 1124 /* See if this is an hcall we can handle in real mode */
1120 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL 1125 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1121 beq hcall_try_real_mode 1126 beq hcall_try_real_mode
1122 1127
1123 /* Only handle external interrupts here on arch 206 and later */ 1128 /* Only handle external interrupts here on arch 206 and later */
1124 BEGIN_FTR_SECTION 1129 BEGIN_FTR_SECTION
1125 b ext_interrupt_to_host 1130 b ext_interrupt_to_host
1126 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) 1131 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1127 1132
1128 /* External interrupt ? */ 1133 /* External interrupt ? */
1129 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL 1134 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1130 bne+ ext_interrupt_to_host 1135 bne+ ext_interrupt_to_host
1131 1136
1132 /* External interrupt, first check for host_ipi. If this is 1137 /* External interrupt, first check for host_ipi. If this is
1133 * set, we know the host wants us out so let's do it now 1138 * set, we know the host wants us out so let's do it now
1134 */ 1139 */
1135 bl kvmppc_read_intr 1140 bl kvmppc_read_intr
1136 cmpdi r3, 0 1141 cmpdi r3, 0
1137 bgt ext_interrupt_to_host 1142 bgt ext_interrupt_to_host
1138 1143
1139 /* Check if any CPU is heading out to the host, if so head out too */ 1144 /* Check if any CPU is heading out to the host, if so head out too */
1140 ld r5, HSTATE_KVM_VCORE(r13) 1145 ld r5, HSTATE_KVM_VCORE(r13)
1141 lwz r0, VCORE_ENTRY_EXIT(r5) 1146 lwz r0, VCORE_ENTRY_EXIT(r5)
1142 cmpwi r0, 0x100 1147 cmpwi r0, 0x100
1143 bge ext_interrupt_to_host 1148 bge ext_interrupt_to_host
1144 1149
1145 /* Return to guest after delivering any pending interrupt */ 1150 /* Return to guest after delivering any pending interrupt */
1146 mr r4, r9 1151 mr r4, r9
1147 b deliver_guest_interrupt 1152 b deliver_guest_interrupt
1148 1153
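
The branch structure just above (call kvmppc_read_intr, then look at the vcore entry/exit word) can be pictured roughly as the C below; the struct and function names are illustrative, not the kernel's:

/* Illustrative sketch only; names are not the kernel's. */
struct vcore_min { unsigned int entry_exit_count; };

static int external_irq_forces_exit(int read_intr_ret, const struct vcore_min *vc)
{
        if (read_intr_ret > 0)                  /* host IPI or an interrupt meant for the host */
                return 1;                       /* take the ext_interrupt_to_host path */
        if (vc->entry_exit_count >= 0x100)      /* some thread is already heading out */
                return 1;
        return 0;                               /* deliver any pending irq and re-enter the guest */
}
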
1149 ext_interrupt_to_host: 1154 ext_interrupt_to_host:
1150 1155
1151 guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ 1156 guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1152 /* Save more register state */ 1157 /* Save more register state */
1153 mfdar r6 1158 mfdar r6
1154 mfdsisr r7 1159 mfdsisr r7
1155 std r6, VCPU_DAR(r9) 1160 std r6, VCPU_DAR(r9)
1156 stw r7, VCPU_DSISR(r9) 1161 stw r7, VCPU_DSISR(r9)
1157 BEGIN_FTR_SECTION 1162 BEGIN_FTR_SECTION
1158 /* don't overwrite fault_dar/fault_dsisr if HDSI */ 1163 /* don't overwrite fault_dar/fault_dsisr if HDSI */
1159 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE 1164 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
1160 beq 6f 1165 beq 6f
1161 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1166 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1162 std r6, VCPU_FAULT_DAR(r9) 1167 std r6, VCPU_FAULT_DAR(r9)
1163 stw r7, VCPU_FAULT_DSISR(r9) 1168 stw r7, VCPU_FAULT_DSISR(r9)
1164 1169
1165 /* See if it is a machine check */ 1170 /* See if it is a machine check */
1166 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK 1171 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1167 beq machine_check_realmode 1172 beq machine_check_realmode
1168 mc_cont: 1173 mc_cont:
1169 1174
1170 /* Save guest CTRL register, set runlatch to 1 */ 1175 /* Save guest CTRL register, set runlatch to 1 */
1171 6: mfspr r6,SPRN_CTRLF 1176 6: mfspr r6,SPRN_CTRLF
1172 stw r6,VCPU_CTRL(r9) 1177 stw r6,VCPU_CTRL(r9)
1173 andi. r0,r6,1 1178 andi. r0,r6,1
1174 bne 4f 1179 bne 4f
1175 ori r6,r6,1 1180 ori r6,r6,1
1176 mtspr SPRN_CTRLT,r6 1181 mtspr SPRN_CTRLT,r6
1177 4: 1182 4:
1178 /* Read the guest SLB and save it away */ 1183 /* Read the guest SLB and save it away */
1179 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */ 1184 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1180 mtctr r0 1185 mtctr r0
1181 li r6,0 1186 li r6,0
1182 addi r7,r9,VCPU_SLB 1187 addi r7,r9,VCPU_SLB
1183 li r5,0 1188 li r5,0
1184 1: slbmfee r8,r6 1189 1: slbmfee r8,r6
1185 andis. r0,r8,SLB_ESID_V@h 1190 andis. r0,r8,SLB_ESID_V@h
1186 beq 2f 1191 beq 2f
1187 add r8,r8,r6 /* put index in */ 1192 add r8,r8,r6 /* put index in */
1188 slbmfev r3,r6 1193 slbmfev r3,r6
1189 std r8,VCPU_SLB_E(r7) 1194 std r8,VCPU_SLB_E(r7)
1190 std r3,VCPU_SLB_V(r7) 1195 std r3,VCPU_SLB_V(r7)
1191 addi r7,r7,VCPU_SLB_SIZE 1196 addi r7,r7,VCPU_SLB_SIZE
1192 addi r5,r5,1 1197 addi r5,r5,1
1193 2: addi r6,r6,1 1198 2: addi r6,r6,1
1194 bdnz 1b 1199 bdnz 1b
1195 stw r5,VCPU_SLB_MAX(r9) 1200 stw r5,VCPU_SLB_MAX(r9)
1196 1201
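
The slbmfee/slbmfev loop above saves only the valid guest SLB entries and records how many there were. A C sketch of it, where slbmfee()/slbmfev() stand in for the instructions and the constant mirrors the kernel's SLB_ESID_V valid bit:

#define SLB_ESID_V 0x08000000UL              /* valid bit, as tested by the andis. above */

struct guest_slbe { unsigned long esid, vsid; };

extern unsigned long slbmfee(int index);     /* stand-ins for the slbmfee/slbmfev */
extern unsigned long slbmfev(int index);     /* instructions; purely illustrative */

static int save_guest_slb(struct guest_slbe *out, int slb_nr)
{
        int i, n = 0;

        for (i = 0; i < slb_nr; i++) {
                unsigned long e = slbmfee(i);
                if (!(e & SLB_ESID_V))
                        continue;            /* skip invalid entries */
                out[n].esid = e | i;         /* "put index in", as the asm comment says */
                out[n].vsid = slbmfev(i);
                n++;
        }
        return n;                            /* stored as VCPU_SLB_MAX above */
}
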
1197 /* 1202 /*
1198 * Save the guest PURR/SPURR 1203 * Save the guest PURR/SPURR
1199 */ 1204 */
1200 BEGIN_FTR_SECTION 1205 BEGIN_FTR_SECTION
1201 mfspr r5,SPRN_PURR 1206 mfspr r5,SPRN_PURR
1202 mfspr r6,SPRN_SPURR 1207 mfspr r6,SPRN_SPURR
1203 ld r7,VCPU_PURR(r9) 1208 ld r7,VCPU_PURR(r9)
1204 ld r8,VCPU_SPURR(r9) 1209 ld r8,VCPU_SPURR(r9)
1205 std r5,VCPU_PURR(r9) 1210 std r5,VCPU_PURR(r9)
1206 std r6,VCPU_SPURR(r9) 1211 std r6,VCPU_SPURR(r9)
1207 subf r5,r7,r5 1212 subf r5,r7,r5
1208 subf r6,r8,r6 1213 subf r6,r8,r6
1209 1214
1210 /* 1215 /*
1211 * Restore host PURR/SPURR and add guest times 1216 * Restore host PURR/SPURR and add guest times
1212 * so that the time in the guest gets accounted. 1217 * so that the time in the guest gets accounted.
1213 */ 1218 */
1214 ld r3,HSTATE_PURR(r13) 1219 ld r3,HSTATE_PURR(r13)
1215 ld r4,HSTATE_SPURR(r13) 1220 ld r4,HSTATE_SPURR(r13)
1216 add r3,r3,r5 1221 add r3,r3,r5
1217 add r4,r4,r6 1222 add r4,r4,r6
1218 mtspr SPRN_PURR,r3 1223 mtspr SPRN_PURR,r3
1219 mtspr SPRN_SPURR,r4 1224 mtspr SPRN_SPURR,r4
1220 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201) 1225 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
1221 1226
1222 /* Save DEC */ 1227 /* Save DEC */
1223 mfspr r5,SPRN_DEC 1228 mfspr r5,SPRN_DEC
1224 mftb r6 1229 mftb r6
1225 extsw r5,r5 1230 extsw r5,r5
1226 add r5,r5,r6 1231 add r5,r5,r6
1227 /* r5 is a guest timebase value here, convert to host TB */ 1232 /* r5 is a guest timebase value here, convert to host TB */
1228 ld r3,HSTATE_KVM_VCORE(r13) 1233 ld r3,HSTATE_KVM_VCORE(r13)
1229 ld r4,VCORE_TB_OFFSET(r3) 1234 ld r4,VCORE_TB_OFFSET(r3)
1230 subf r5,r4,r5 1235 subf r5,r4,r5
1231 std r5,VCPU_DEC_EXPIRES(r9) 1236 std r5,VCPU_DEC_EXPIRES(r9)
1232 1237
1233 BEGIN_FTR_SECTION 1238 BEGIN_FTR_SECTION
1234 b 8f 1239 b 8f
1235 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) 1240 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1236 /* Save POWER8-specific registers */ 1241 /* Save POWER8-specific registers */
1237 mfspr r5, SPRN_IAMR 1242 mfspr r5, SPRN_IAMR
1238 mfspr r6, SPRN_PSPB 1243 mfspr r6, SPRN_PSPB
1239 mfspr r7, SPRN_FSCR 1244 mfspr r7, SPRN_FSCR
1240 std r5, VCPU_IAMR(r9) 1245 std r5, VCPU_IAMR(r9)
1241 stw r6, VCPU_PSPB(r9) 1246 stw r6, VCPU_PSPB(r9)
1242 std r7, VCPU_FSCR(r9) 1247 std r7, VCPU_FSCR(r9)
1243 mfspr r5, SPRN_IC 1248 mfspr r5, SPRN_IC
1244 mfspr r6, SPRN_VTB 1249 mfspr r6, SPRN_VTB
1245 mfspr r7, SPRN_TAR 1250 mfspr r7, SPRN_TAR
1246 std r5, VCPU_IC(r9) 1251 std r5, VCPU_IC(r9)
1247 std r6, VCPU_VTB(r9) 1252 std r6, VCPU_VTB(r9)
1248 std r7, VCPU_TAR(r9) 1253 std r7, VCPU_TAR(r9)
1249 mfspr r8, SPRN_EBBHR 1254 mfspr r8, SPRN_EBBHR
1250 std r8, VCPU_EBBHR(r9) 1255 std r8, VCPU_EBBHR(r9)
1251 mfspr r5, SPRN_EBBRR 1256 mfspr r5, SPRN_EBBRR
1252 mfspr r6, SPRN_BESCR 1257 mfspr r6, SPRN_BESCR
1253 mfspr r7, SPRN_CSIGR 1258 mfspr r7, SPRN_CSIGR
1254 mfspr r8, SPRN_TACR 1259 mfspr r8, SPRN_TACR
1255 std r5, VCPU_EBBRR(r9) 1260 std r5, VCPU_EBBRR(r9)
1256 std r6, VCPU_BESCR(r9) 1261 std r6, VCPU_BESCR(r9)
1257 std r7, VCPU_CSIGR(r9) 1262 std r7, VCPU_CSIGR(r9)
1258 std r8, VCPU_TACR(r9) 1263 std r8, VCPU_TACR(r9)
1259 mfspr r5, SPRN_TCSCR 1264 mfspr r5, SPRN_TCSCR
1260 mfspr r6, SPRN_ACOP 1265 mfspr r6, SPRN_ACOP
1261 mfspr r7, SPRN_PID 1266 mfspr r7, SPRN_PID
1262 mfspr r8, SPRN_WORT 1267 mfspr r8, SPRN_WORT
1263 std r5, VCPU_TCSCR(r9) 1268 std r5, VCPU_TCSCR(r9)
1264 std r6, VCPU_ACOP(r9) 1269 std r6, VCPU_ACOP(r9)
1265 stw r7, VCPU_GUEST_PID(r9) 1270 stw r7, VCPU_GUEST_PID(r9)
1266 std r8, VCPU_WORT(r9) 1271 std r8, VCPU_WORT(r9)
1267 8: 1272 8:
1268 1273
1269 /* Save and reset AMR and UAMOR before turning on the MMU */ 1274 /* Save and reset AMR and UAMOR before turning on the MMU */
1270 BEGIN_FTR_SECTION 1275 BEGIN_FTR_SECTION
1271 mfspr r5,SPRN_AMR 1276 mfspr r5,SPRN_AMR
1272 mfspr r6,SPRN_UAMOR 1277 mfspr r6,SPRN_UAMOR
1273 std r5,VCPU_AMR(r9) 1278 std r5,VCPU_AMR(r9)
1274 std r6,VCPU_UAMOR(r9) 1279 std r6,VCPU_UAMOR(r9)
1275 li r6,0 1280 li r6,0
1276 mtspr SPRN_AMR,r6 1281 mtspr SPRN_AMR,r6
1277 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1282 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1278 1283
1279 /* Switch DSCR back to host value */ 1284 /* Switch DSCR back to host value */
1280 BEGIN_FTR_SECTION 1285 BEGIN_FTR_SECTION
1281 mfspr r8, SPRN_DSCR 1286 mfspr r8, SPRN_DSCR
1282 ld r7, HSTATE_DSCR(r13) 1287 ld r7, HSTATE_DSCR(r13)
1283 std r8, VCPU_DSCR(r9) 1288 std r8, VCPU_DSCR(r9)
1284 mtspr SPRN_DSCR, r7 1289 mtspr SPRN_DSCR, r7
1285 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1290 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1286 1291
1287 /* Save non-volatile GPRs */ 1292 /* Save non-volatile GPRs */
1288 std r14, VCPU_GPR(R14)(r9) 1293 std r14, VCPU_GPR(R14)(r9)
1289 std r15, VCPU_GPR(R15)(r9) 1294 std r15, VCPU_GPR(R15)(r9)
1290 std r16, VCPU_GPR(R16)(r9) 1295 std r16, VCPU_GPR(R16)(r9)
1291 std r17, VCPU_GPR(R17)(r9) 1296 std r17, VCPU_GPR(R17)(r9)
1292 std r18, VCPU_GPR(R18)(r9) 1297 std r18, VCPU_GPR(R18)(r9)
1293 std r19, VCPU_GPR(R19)(r9) 1298 std r19, VCPU_GPR(R19)(r9)
1294 std r20, VCPU_GPR(R20)(r9) 1299 std r20, VCPU_GPR(R20)(r9)
1295 std r21, VCPU_GPR(R21)(r9) 1300 std r21, VCPU_GPR(R21)(r9)
1296 std r22, VCPU_GPR(R22)(r9) 1301 std r22, VCPU_GPR(R22)(r9)
1297 std r23, VCPU_GPR(R23)(r9) 1302 std r23, VCPU_GPR(R23)(r9)
1298 std r24, VCPU_GPR(R24)(r9) 1303 std r24, VCPU_GPR(R24)(r9)
1299 std r25, VCPU_GPR(R25)(r9) 1304 std r25, VCPU_GPR(R25)(r9)
1300 std r26, VCPU_GPR(R26)(r9) 1305 std r26, VCPU_GPR(R26)(r9)
1301 std r27, VCPU_GPR(R27)(r9) 1306 std r27, VCPU_GPR(R27)(r9)
1302 std r28, VCPU_GPR(R28)(r9) 1307 std r28, VCPU_GPR(R28)(r9)
1303 std r29, VCPU_GPR(R29)(r9) 1308 std r29, VCPU_GPR(R29)(r9)
1304 std r30, VCPU_GPR(R30)(r9) 1309 std r30, VCPU_GPR(R30)(r9)
1305 std r31, VCPU_GPR(R31)(r9) 1310 std r31, VCPU_GPR(R31)(r9)
1306 1311
1307 /* Save SPRGs */ 1312 /* Save SPRGs */
1308 mfspr r3, SPRN_SPRG0 1313 mfspr r3, SPRN_SPRG0
1309 mfspr r4, SPRN_SPRG1 1314 mfspr r4, SPRN_SPRG1
1310 mfspr r5, SPRN_SPRG2 1315 mfspr r5, SPRN_SPRG2
1311 mfspr r6, SPRN_SPRG3 1316 mfspr r6, SPRN_SPRG3
1312 std r3, VCPU_SPRG0(r9) 1317 std r3, VCPU_SPRG0(r9)
1313 std r4, VCPU_SPRG1(r9) 1318 std r4, VCPU_SPRG1(r9)
1314 std r5, VCPU_SPRG2(r9) 1319 std r5, VCPU_SPRG2(r9)
1315 std r6, VCPU_SPRG3(r9) 1320 std r6, VCPU_SPRG3(r9)
1316 1321
1317 /* save FP state */ 1322 /* save FP state */
1318 mr r3, r9 1323 mr r3, r9
1319 bl kvmppc_save_fp 1324 bl kvmppc_save_fp
1320 1325
1321 /* Increment yield count if they have a VPA */ 1326 /* Increment yield count if they have a VPA */
1322 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1327 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1323 cmpdi r8, 0 1328 cmpdi r8, 0
1324 beq 25f 1329 beq 25f
1325 lwz r3, LPPACA_YIELDCOUNT(r8) 1330 lwz r3, LPPACA_YIELDCOUNT(r8)
1326 addi r3, r3, 1 1331 addi r3, r3, 1
1327 stw r3, LPPACA_YIELDCOUNT(r8) 1332 stw r3, LPPACA_YIELDCOUNT(r8)
1328 li r3, 1 1333 li r3, 1
1329 stb r3, VCPU_VPA_DIRTY(r9) 1334 stb r3, VCPU_VPA_DIRTY(r9)
1330 25: 1335 25:
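
A minimal C sketch of the yield-count bump just above, with a cut-down lppaca type; field and function names are illustrative:

struct lppaca_min { unsigned int yield_count; };   /* only the field touched here */

static void bump_yield_count(struct lppaca_min *vpa, unsigned char *vpa_dirty)
{
        if (!vpa)                 /* guest never registered a VPA */
                return;
        vpa->yield_count++;       /* lwz/addi/stw on LPPACA_YIELDCOUNT above */
        *vpa_dirty = 1;           /* stb to VCPU_VPA_DIRTY */
}
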
1331 /* Save PMU registers if requested */ 1336 /* Save PMU registers if requested */
1332 /* r8 and cr0.eq are live here */ 1337 /* r8 and cr0.eq are live here */
1333 li r3, 1 1338 li r3, 1
1334 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 1339 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1335 mfspr r4, SPRN_MMCR0 /* save MMCR0 */ 1340 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1336 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 1341 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
1337 mfspr r6, SPRN_MMCRA 1342 mfspr r6, SPRN_MMCRA
1338 BEGIN_FTR_SECTION 1343 BEGIN_FTR_SECTION
1339 /* On P7, clear MMCRA in order to disable SDAR updates */ 1344 /* On P7, clear MMCRA in order to disable SDAR updates */
1340 li r7, 0 1345 li r7, 0
1341 mtspr SPRN_MMCRA, r7 1346 mtspr SPRN_MMCRA, r7
1342 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1347 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1343 isync 1348 isync
1344 beq 21f /* if no VPA, save PMU stuff anyway */ 1349 beq 21f /* if no VPA, save PMU stuff anyway */
1345 lbz r7, LPPACA_PMCINUSE(r8) 1350 lbz r7, LPPACA_PMCINUSE(r8)
1346 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */ 1351 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1347 bne 21f 1352 bne 21f
1348 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 1353 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1349 b 22f 1354 b 22f
1350 21: mfspr r5, SPRN_MMCR1 1355 21: mfspr r5, SPRN_MMCR1
1351 mfspr r7, SPRN_SIAR 1356 mfspr r7, SPRN_SIAR
1352 mfspr r8, SPRN_SDAR 1357 mfspr r8, SPRN_SDAR
1353 std r4, VCPU_MMCR(r9) 1358 std r4, VCPU_MMCR(r9)
1354 std r5, VCPU_MMCR + 8(r9) 1359 std r5, VCPU_MMCR + 8(r9)
1355 std r6, VCPU_MMCR + 16(r9) 1360 std r6, VCPU_MMCR + 16(r9)
1356 std r7, VCPU_SIAR(r9) 1361 std r7, VCPU_SIAR(r9)
1357 std r8, VCPU_SDAR(r9) 1362 std r8, VCPU_SDAR(r9)
1358 mfspr r3, SPRN_PMC1 1363 mfspr r3, SPRN_PMC1
1359 mfspr r4, SPRN_PMC2 1364 mfspr r4, SPRN_PMC2
1360 mfspr r5, SPRN_PMC3 1365 mfspr r5, SPRN_PMC3
1361 mfspr r6, SPRN_PMC4 1366 mfspr r6, SPRN_PMC4
1362 mfspr r7, SPRN_PMC5 1367 mfspr r7, SPRN_PMC5
1363 mfspr r8, SPRN_PMC6 1368 mfspr r8, SPRN_PMC6
1364 BEGIN_FTR_SECTION 1369 BEGIN_FTR_SECTION
1365 mfspr r10, SPRN_PMC7 1370 mfspr r10, SPRN_PMC7
1366 mfspr r11, SPRN_PMC8 1371 mfspr r11, SPRN_PMC8
1367 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1372 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1368 stw r3, VCPU_PMC(r9) 1373 stw r3, VCPU_PMC(r9)
1369 stw r4, VCPU_PMC + 4(r9) 1374 stw r4, VCPU_PMC + 4(r9)
1370 stw r5, VCPU_PMC + 8(r9) 1375 stw r5, VCPU_PMC + 8(r9)
1371 stw r6, VCPU_PMC + 12(r9) 1376 stw r6, VCPU_PMC + 12(r9)
1372 stw r7, VCPU_PMC + 16(r9) 1377 stw r7, VCPU_PMC + 16(r9)
1373 stw r8, VCPU_PMC + 20(r9) 1378 stw r8, VCPU_PMC + 20(r9)
1374 BEGIN_FTR_SECTION 1379 BEGIN_FTR_SECTION
1375 stw r10, VCPU_PMC + 24(r9) 1380 stw r10, VCPU_PMC + 24(r9)
1376 stw r11, VCPU_PMC + 28(r9) 1381 stw r11, VCPU_PMC + 28(r9)
1377 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1382 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1378 BEGIN_FTR_SECTION 1383 BEGIN_FTR_SECTION
1379 mfspr r4, SPRN_MMCR2 1384 mfspr r4, SPRN_MMCR2
1380 mfspr r5, SPRN_SIER 1385 mfspr r5, SPRN_SIER
1381 mfspr r6, SPRN_SPMC1 1386 mfspr r6, SPRN_SPMC1
1382 mfspr r7, SPRN_SPMC2 1387 mfspr r7, SPRN_SPMC2
1383 mfspr r8, SPRN_MMCRS 1388 mfspr r8, SPRN_MMCRS
1384 std r4, VCPU_MMCR + 24(r9) 1389 std r4, VCPU_MMCR + 24(r9)
1385 std r5, VCPU_SIER(r9) 1390 std r5, VCPU_SIER(r9)
1386 stw r6, VCPU_PMC + 24(r9) 1391 stw r6, VCPU_PMC + 24(r9)
1387 stw r7, VCPU_PMC + 28(r9) 1392 stw r7, VCPU_PMC + 28(r9)
1388 std r8, VCPU_MMCR + 32(r9) 1393 std r8, VCPU_MMCR + 32(r9)
1389 lis r4, 0x8000 1394 lis r4, 0x8000
1390 mtspr SPRN_MMCRS, r4 1395 mtspr SPRN_MMCRS, r4
1391 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1396 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1392 22: 1397 22:
1393 /* Clear out SLB */ 1398 /* Clear out SLB */
1394 li r5,0 1399 li r5,0
1395 slbmte r5,r5 1400 slbmte r5,r5
1396 slbia 1401 slbia
1397 ptesync 1402 ptesync
1398 1403
1399 hdec_soon: /* r12 = trap, r13 = paca */ 1404 hdec_soon: /* r12 = trap, r13 = paca */
1400 BEGIN_FTR_SECTION 1405 BEGIN_FTR_SECTION
1401 b 32f 1406 b 32f
1402 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1407 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1403 /* 1408 /*
1404 * POWER7 guest -> host partition switch code. 1409 * POWER7 guest -> host partition switch code.
1405 * We don't have to lock against tlbies but we do 1410 * We don't have to lock against tlbies but we do
1406 * have to coordinate the hardware threads. 1411 * have to coordinate the hardware threads.
1407 */ 1412 */
1408 /* Increment the threads-exiting-guest count in the 0xff00 1413 /* Increment the threads-exiting-guest count in the 0xff00
1409 bits of vcore->entry_exit_count */ 1414 bits of vcore->entry_exit_count */
1410 ld r5,HSTATE_KVM_VCORE(r13) 1415 ld r5,HSTATE_KVM_VCORE(r13)
1411 addi r6,r5,VCORE_ENTRY_EXIT 1416 addi r6,r5,VCORE_ENTRY_EXIT
1412 41: lwarx r3,0,r6 1417 41: lwarx r3,0,r6
1413 addi r0,r3,0x100 1418 addi r0,r3,0x100
1414 stwcx. r0,0,r6 1419 stwcx. r0,0,r6
1415 bne 41b 1420 bne 41b
1416 isync /* order stwcx. vs. reading napping_threads */ 1421 isync /* order stwcx. vs. reading napping_threads */
1417 1422
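
The entry/exit word packs two counters: the low byte is the number of threads that entered the guest, and bits 8-15 count threads on their way out. A plain, non-atomic C sketch of the increment that the lwarx/stwcx. loop above performs:

static unsigned int note_thread_exiting(unsigned int *entry_exit_count)
{
        unsigned int old = *entry_exit_count;

        *entry_exit_count = old + 0x100;    /* bump the "exiting" byte */
        return old;                         /* old >= 0x100 means we are not the first out */
}
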
1418 /* 1423 /*
1419 * At this point we have an interrupt that we have to pass 1424 * At this point we have an interrupt that we have to pass
1420 * up to the kernel or qemu; we can't handle it in real mode. 1425 * up to the kernel or qemu; we can't handle it in real mode.
1421 * Thus we have to do a partition switch, so we have to 1426 * Thus we have to do a partition switch, so we have to
1422 * collect the other threads, if we are the first thread 1427 * collect the other threads, if we are the first thread
1423 * to take an interrupt. To do this, we set the HDEC to 0, 1428 * to take an interrupt. To do this, we set the HDEC to 0,
1424 * which causes an HDEC interrupt in all threads within 2ns 1429 * which causes an HDEC interrupt in all threads within 2ns
1425 * because the HDEC register is shared between all 4 threads. 1430 * because the HDEC register is shared between all 4 threads.
1426 * However, we don't need to bother if this is an HDEC 1431 * However, we don't need to bother if this is an HDEC
1427 * interrupt, since the other threads will already be on their 1432 * interrupt, since the other threads will already be on their
1428 * way here in that case. 1433 * way here in that case.
1429 */ 1434 */
1430 cmpwi r3,0x100 /* Are we the first here? */ 1435 cmpwi r3,0x100 /* Are we the first here? */
1431 bge 43f 1436 bge 43f
1432 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1437 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1433 beq 40f 1438 beq 40f
1434 li r0,0 1439 li r0,0
1435 mtspr SPRN_HDEC,r0 1440 mtspr SPRN_HDEC,r0
1436 40: 1441 40:
1437 /* 1442 /*
1438 * Send an IPI to any napping threads, since an HDEC interrupt 1443 * Send an IPI to any napping threads, since an HDEC interrupt
1439 * doesn't wake CPUs up from nap. 1444 * doesn't wake CPUs up from nap.
1440 */ 1445 */
1441 lwz r3,VCORE_NAPPING_THREADS(r5) 1446 lwz r3,VCORE_NAPPING_THREADS(r5)
1442 lbz r4,HSTATE_PTID(r13) 1447 lbz r4,HSTATE_PTID(r13)
1443 li r0,1 1448 li r0,1
1444 sld r0,r0,r4 1449 sld r0,r0,r4
1445 andc. r3,r3,r0 /* no sense IPI'ing ourselves */ 1450 andc. r3,r3,r0 /* no sense IPI'ing ourselves */
1446 beq 43f 1451 beq 43f
1447 /* Order entry/exit update vs. IPIs */ 1452 /* Order entry/exit update vs. IPIs */
1448 sync 1453 sync
1449 mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ 1454 mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
1450 subf r6,r4,r13 1455 subf r6,r4,r13
1451 42: andi. r0,r3,1 1456 42: andi. r0,r3,1
1452 beq 44f 1457 beq 44f
1453 ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */ 1458 ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
1454 li r0,IPI_PRIORITY 1459 li r0,IPI_PRIORITY
1455 li r7,XICS_MFRR 1460 li r7,XICS_MFRR
1456 stbcix r0,r7,r8 /* trigger the IPI */ 1461 stbcix r0,r7,r8 /* trigger the IPI */
1457 44: srdi. r3,r3,1 1462 44: srdi. r3,r3,1
1458 addi r6,r6,PACA_SIZE 1463 addi r6,r6,PACA_SIZE
1459 bne 42b 1464 bne 42b
1460 1465
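
The loop above walks the napping_threads bitmap and pokes each napping sibling's XICS MFRR so it wakes up and exits too. A rough C equivalent, with poke_xics_mfrr() and the priority constant as illustrative stand-ins for the stbcix to HSTATE_XICS_PHYS + XICS_MFRR:

extern void poke_xics_mfrr(int thread, unsigned char prio);  /* stand-in, not a kernel API */
#define IPI_PRIO_EXAMPLE 4        /* illustrative; the asm uses the kernel's IPI_PRIORITY */

static void ipi_napping_threads(unsigned long napping, int my_thread, int nthreads)
{
        int t;

        napping &= ~(1UL << my_thread);          /* "no sense IPI'ing ourselves" */
        for (t = 0; t < nthreads; t++)
                if (napping & (1UL << t))
                        poke_xics_mfrr(t, IPI_PRIO_EXAMPLE);
}
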
1461 secondary_too_late: 1466 secondary_too_late:
1462 /* Secondary threads wait for primary to do partition switch */ 1467 /* Secondary threads wait for primary to do partition switch */
1463 43: ld r5,HSTATE_KVM_VCORE(r13) 1468 43: ld r5,HSTATE_KVM_VCORE(r13)
1464 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1469 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1465 lbz r3,HSTATE_PTID(r13) 1470 lbz r3,HSTATE_PTID(r13)
1466 cmpwi r3,0 1471 cmpwi r3,0
1467 beq 15f 1472 beq 15f
1468 HMT_LOW 1473 HMT_LOW
1469 13: lbz r3,VCORE_IN_GUEST(r5) 1474 13: lbz r3,VCORE_IN_GUEST(r5)
1470 cmpwi r3,0 1475 cmpwi r3,0
1471 bne 13b 1476 bne 13b
1472 HMT_MEDIUM 1477 HMT_MEDIUM
1473 b 16f 1478 b 16f
1474 1479
1475 /* Primary thread waits for all the secondaries to exit guest */ 1480 /* Primary thread waits for all the secondaries to exit guest */
1476 15: lwz r3,VCORE_ENTRY_EXIT(r5) 1481 15: lwz r3,VCORE_ENTRY_EXIT(r5)
1477 srwi r0,r3,8 1482 srwi r0,r3,8
1478 clrldi r3,r3,56 1483 clrldi r3,r3,56
1479 cmpw r3,r0 1484 cmpw r3,r0
1480 bne 15b 1485 bne 15b
1481 isync 1486 isync
1482 1487
1483 /* Primary thread switches back to host partition */ 1488 /* Primary thread switches back to host partition */
1484 ld r6,KVM_HOST_SDR1(r4) 1489 ld r6,KVM_HOST_SDR1(r4)
1485 lwz r7,KVM_HOST_LPID(r4) 1490 lwz r7,KVM_HOST_LPID(r4)
1486 li r8,LPID_RSVD /* switch to reserved LPID */ 1491 li r8,LPID_RSVD /* switch to reserved LPID */
1487 mtspr SPRN_LPID,r8 1492 mtspr SPRN_LPID,r8
1488 ptesync 1493 ptesync
1489 mtspr SPRN_SDR1,r6 /* switch to partition page table */ 1494 mtspr SPRN_SDR1,r6 /* switch to partition page table */
1490 mtspr SPRN_LPID,r7 1495 mtspr SPRN_LPID,r7
1491 isync 1496 isync
1492 1497
1493 BEGIN_FTR_SECTION 1498 BEGIN_FTR_SECTION
1494 /* DPDES is shared between threads */ 1499 /* DPDES is shared between threads */
1495 mfspr r7, SPRN_DPDES 1500 mfspr r7, SPRN_DPDES
1496 std r7, VCORE_DPDES(r5) 1501 std r7, VCORE_DPDES(r5)
1497 /* clear DPDES so we don't get guest doorbells in the host */ 1502 /* clear DPDES so we don't get guest doorbells in the host */
1498 li r8, 0 1503 li r8, 0
1499 mtspr SPRN_DPDES, r8 1504 mtspr SPRN_DPDES, r8
1500 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1505 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1501 1506
1502 /* Subtract timebase offset from timebase */ 1507 /* Subtract timebase offset from timebase */
1503 ld r8,VCORE_TB_OFFSET(r5) 1508 ld r8,VCORE_TB_OFFSET(r5)
1504 cmpdi r8,0 1509 cmpdi r8,0
1505 beq 17f 1510 beq 17f
1506 mftb r6 /* current guest timebase */ 1511 mftb r6 /* current guest timebase */
1507 subf r8,r8,r6 1512 subf r8,r8,r6
1508 mtspr SPRN_TBU40,r8 /* update upper 40 bits */ 1513 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1509 mftb r7 /* check if lower 24 bits overflowed */ 1514 mftb r7 /* check if lower 24 bits overflowed */
1510 clrldi r6,r6,40 1515 clrldi r6,r6,40
1511 clrldi r7,r7,40 1516 clrldi r7,r7,40
1512 cmpld r7,r6 1517 cmpld r7,r6
1513 bge 17f 1518 bge 17f
1514 addis r8,r8,0x100 /* if so, increment upper 40 bits */ 1519 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1515 mtspr SPRN_TBU40,r8 1520 mtspr SPRN_TBU40,r8
1516 1521
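
The TBU40 dance above compensates for the fact that writing SPRN_TBU40 replaces only the upper 40 bits of the timebase while the low 24 bits keep counting: if the low bits wrapped between the mftb and the mtspr, the carry into bit 24 was lost and must be re-applied. Sketched in C, with mftb()/mtspr_tbu40() standing in for the instructions:

extern unsigned long mftb(void);               /* stand-ins for the mftb and       */
extern void mtspr_tbu40(unsigned long v);      /* mtspr SPRN_TBU40 instructions    */

static void subtract_tb_offset_on_exit(unsigned long tb_offset)
{
        unsigned long guest_tb = mftb();
        unsigned long host_tb  = guest_tb - tb_offset;

        mtspr_tbu40(host_tb);                  /* only the upper 40 bits take effect */
        if ((mftb() & 0xffffffUL) < (guest_tb & 0xffffffUL))
                mtspr_tbu40(host_tb + (1UL << 24));   /* low 24 bits wrapped: redo the carry */
}
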
1517 /* Reset PCR */ 1522 /* Reset PCR */
1518 17: ld r0, VCORE_PCR(r5) 1523 17: ld r0, VCORE_PCR(r5)
1519 cmpdi r0, 0 1524 cmpdi r0, 0
1520 beq 18f 1525 beq 18f
1521 li r0, 0 1526 li r0, 0
1522 mtspr SPRN_PCR, r0 1527 mtspr SPRN_PCR, r0
1523 18: 1528 18:
1524 /* Signal secondary CPUs to continue */ 1529 /* Signal secondary CPUs to continue */
1525 stb r0,VCORE_IN_GUEST(r5) 1530 stb r0,VCORE_IN_GUEST(r5)
1526 lis r8,0x7fff /* MAX_INT@h */ 1531 lis r8,0x7fff /* MAX_INT@h */
1527 mtspr SPRN_HDEC,r8 1532 mtspr SPRN_HDEC,r8
1528 1533
1529 16: ld r8,KVM_HOST_LPCR(r4) 1534 16: ld r8,KVM_HOST_LPCR(r4)
1530 mtspr SPRN_LPCR,r8 1535 mtspr SPRN_LPCR,r8
1531 isync 1536 isync
1532 b 33f 1537 b 33f
1533 1538
1534 /* 1539 /*
1535 * PPC970 guest -> host partition switch code. 1540 * PPC970 guest -> host partition switch code.
1536 * We have to lock against concurrent tlbies, and 1541 * We have to lock against concurrent tlbies, and
1537 * we have to flush the whole TLB. 1542 * we have to flush the whole TLB.
1538 */ 1543 */
1539 32: ld r5,HSTATE_KVM_VCORE(r13) 1544 32: ld r5,HSTATE_KVM_VCORE(r13)
1540 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ 1545 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1541 1546
1542 /* Take the guest's tlbie_lock */ 1547 /* Take the guest's tlbie_lock */
1543 #ifdef __BIG_ENDIAN__ 1548 #ifdef __BIG_ENDIAN__
1544 lwz r8,PACA_LOCK_TOKEN(r13) 1549 lwz r8,PACA_LOCK_TOKEN(r13)
1545 #else 1550 #else
1546 lwz r8,PACAPACAINDEX(r13) 1551 lwz r8,PACAPACAINDEX(r13)
1547 #endif 1552 #endif
1548 addi r3,r4,KVM_TLBIE_LOCK 1553 addi r3,r4,KVM_TLBIE_LOCK
1549 24: lwarx r0,0,r3 1554 24: lwarx r0,0,r3
1550 cmpwi r0,0 1555 cmpwi r0,0
1551 bne 24b 1556 bne 24b
1552 stwcx. r8,0,r3 1557 stwcx. r8,0,r3
1553 bne 24b 1558 bne 24b
1554 isync 1559 isync
1555 1560
1556 ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */ 1561 ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
1557 li r0,0x18f 1562 li r0,0x18f
1558 rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ 1563 rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
1559 or r0,r7,r0 1564 or r0,r7,r0
1560 ptesync 1565 ptesync
1561 sync 1566 sync
1562 mtspr SPRN_HID4,r0 /* switch to reserved LPID */ 1567 mtspr SPRN_HID4,r0 /* switch to reserved LPID */
1563 isync 1568 isync
1564 li r0,0 1569 li r0,0
1565 stw r0,0(r3) /* drop guest tlbie_lock */ 1570 stw r0,0(r3) /* drop guest tlbie_lock */
1566 1571
1567 /* invalidate the whole TLB */ 1572 /* invalidate the whole TLB */
1568 li r0,256 1573 li r0,256
1569 mtctr r0 1574 mtctr r0
1570 li r6,0 1575 li r6,0
1571 25: tlbiel r6 1576 25: tlbiel r6
1572 addi r6,r6,0x1000 1577 addi r6,r6,0x1000
1573 bdnz 25b 1578 bdnz 25b
1574 ptesync 1579 ptesync
1575 1580
1576 /* take native_tlbie_lock */ 1581 /* take native_tlbie_lock */
1577 ld r3,toc_tlbie_lock@toc(2) 1582 ld r3,toc_tlbie_lock@toc(2)
1578 24: lwarx r0,0,r3 1583 24: lwarx r0,0,r3
1579 cmpwi r0,0 1584 cmpwi r0,0
1580 bne 24b 1585 bne 24b
1581 stwcx. r8,0,r3 1586 stwcx. r8,0,r3
1582 bne 24b 1587 bne 24b
1583 isync 1588 isync
1584 1589
1585 ld r6,KVM_HOST_SDR1(r4) 1590 ld r6,KVM_HOST_SDR1(r4)
1586 mtspr SPRN_SDR1,r6 /* switch to host page table */ 1591 mtspr SPRN_SDR1,r6 /* switch to host page table */
1587 1592
1588 /* Set up host HID4 value */ 1593 /* Set up host HID4 value */
1589 sync 1594 sync
1590 mtspr SPRN_HID4,r7 1595 mtspr SPRN_HID4,r7
1591 isync 1596 isync
1592 li r0,0 1597 li r0,0
1593 stw r0,0(r3) /* drop native_tlbie_lock */ 1598 stw r0,0(r3) /* drop native_tlbie_lock */
1594 1599
1595 lis r8,0x7fff /* MAX_INT@h */ 1600 lis r8,0x7fff /* MAX_INT@h */
1596 mtspr SPRN_HDEC,r8 1601 mtspr SPRN_HDEC,r8
1597 1602
1598 /* Disable HDEC interrupts */ 1603 /* Disable HDEC interrupts */
1599 mfspr r0,SPRN_HID0 1604 mfspr r0,SPRN_HID0
1600 li r3,0 1605 li r3,0
1601 rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1 1606 rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
1602 sync 1607 sync
1603 mtspr SPRN_HID0,r0 1608 mtspr SPRN_HID0,r0
1604 mfspr r0,SPRN_HID0 1609 mfspr r0,SPRN_HID0
1605 mfspr r0,SPRN_HID0 1610 mfspr r0,SPRN_HID0
1606 mfspr r0,SPRN_HID0 1611 mfspr r0,SPRN_HID0
1607 mfspr r0,SPRN_HID0 1612 mfspr r0,SPRN_HID0
1608 mfspr r0,SPRN_HID0 1613 mfspr r0,SPRN_HID0
1609 mfspr r0,SPRN_HID0 1614 mfspr r0,SPRN_HID0
1610 1615
1611 /* load host SLB entries */ 1616 /* load host SLB entries */
1612 33: ld r8,PACA_SLBSHADOWPTR(r13) 1617 33: ld r8,PACA_SLBSHADOWPTR(r13)
1613 1618
1614 .rept SLB_NUM_BOLTED 1619 .rept SLB_NUM_BOLTED
1615 ld r5,SLBSHADOW_SAVEAREA(r8) 1620 ld r5,SLBSHADOW_SAVEAREA(r8)
1616 ld r6,SLBSHADOW_SAVEAREA+8(r8) 1621 ld r6,SLBSHADOW_SAVEAREA+8(r8)
1617 andis. r7,r5,SLB_ESID_V@h 1622 andis. r7,r5,SLB_ESID_V@h
1618 beq 1f 1623 beq 1f
1619 slbmte r6,r5 1624 slbmte r6,r5
1620 1: addi r8,r8,16 1625 1: addi r8,r8,16
1621 .endr 1626 .endr
1622 1627
1623 /* Unset guest mode */ 1628 /* Unset guest mode */
1624 li r0, KVM_GUEST_MODE_NONE 1629 li r0, KVM_GUEST_MODE_NONE
1625 stb r0, HSTATE_IN_GUEST(r13) 1630 stb r0, HSTATE_IN_GUEST(r13)
1626 1631
1627 ld r0, 112+PPC_LR_STKOFF(r1) 1632 ld r0, 112+PPC_LR_STKOFF(r1)
1628 addi r1, r1, 112 1633 addi r1, r1, 112
1629 mtlr r0 1634 mtlr r0
1630 blr 1635 blr
1631 1636
1632 /* 1637 /*
1633 * Check whether an HDSI is an HPTE not found fault or something else. 1638 * Check whether an HDSI is an HPTE not found fault or something else.
1634 * If it is an HPTE not found fault that is due to the guest accessing 1639 * If it is an HPTE not found fault that is due to the guest accessing
1635 * a page that they have mapped but which we have paged out, then 1640 * a page that they have mapped but which we have paged out, then
1636 * we continue on with the guest exit path. In all other cases, 1641 * we continue on with the guest exit path. In all other cases,
1637 * reflect the HDSI to the guest as a DSI. 1642 * reflect the HDSI to the guest as a DSI.
1638 */ 1643 */
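
The choice is driven by the return value of kvmppc_hpte_hv_fault(); a C classification of the cmpdi checks in the handler below (the enum names are illustrative, not the kernel's):

enum hdsi_action { RETRY_INSN, EXIT_TO_KERNEL, EMULATE_MMIO, REFLECT_DSI };

static enum hdsi_action classify_hpte_fault(long r)
{
        if (r == 0)  return RETRY_INSN;      /* fault resolved: just retry the access    */
        if (r == -1) return EXIT_TO_KERNEL;  /* too hard for real mode                   */
        if (r == -2) return EMULATE_MMIO;    /* need the instruction word for MMIO       */
        return REFLECT_DSI;                  /* r becomes the DSISR of a synthetic DSI   */
}
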
1639 kvmppc_hdsi: 1644 kvmppc_hdsi:
1640 mfspr r4, SPRN_HDAR 1645 mfspr r4, SPRN_HDAR
1641 mfspr r6, SPRN_HDSISR 1646 mfspr r6, SPRN_HDSISR
1642 /* HPTE not found fault or protection fault? */ 1647 /* HPTE not found fault or protection fault? */
1643 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h 1648 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
1644 beq 1f /* if not, send it to the guest */ 1649 beq 1f /* if not, send it to the guest */
1645 andi. r0, r11, MSR_DR /* data relocation enabled? */ 1650 andi. r0, r11, MSR_DR /* data relocation enabled? */
1646 beq 3f 1651 beq 3f
1647 clrrdi r0, r4, 28 1652 clrrdi r0, r4, 28
1648 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 1653 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
1649 bne 1f /* if no SLB entry found */ 1654 bne 1f /* if no SLB entry found */
1650 4: std r4, VCPU_FAULT_DAR(r9) 1655 4: std r4, VCPU_FAULT_DAR(r9)
1651 stw r6, VCPU_FAULT_DSISR(r9) 1656 stw r6, VCPU_FAULT_DSISR(r9)
1652 1657
1653 /* Search the hash table. */ 1658 /* Search the hash table. */
1654 mr r3, r9 /* vcpu pointer */ 1659 mr r3, r9 /* vcpu pointer */
1655 li r7, 1 /* data fault */ 1660 li r7, 1 /* data fault */
1656 bl .kvmppc_hpte_hv_fault 1661 bl .kvmppc_hpte_hv_fault
1657 ld r9, HSTATE_KVM_VCPU(r13) 1662 ld r9, HSTATE_KVM_VCPU(r13)
1658 ld r10, VCPU_PC(r9) 1663 ld r10, VCPU_PC(r9)
1659 ld r11, VCPU_MSR(r9) 1664 ld r11, VCPU_MSR(r9)
1660 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE 1665 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1661 cmpdi r3, 0 /* retry the instruction */ 1666 cmpdi r3, 0 /* retry the instruction */
1662 beq 6f 1667 beq 6f
1663 cmpdi r3, -1 /* handle in kernel mode */ 1668 cmpdi r3, -1 /* handle in kernel mode */
1664 beq guest_exit_cont 1669 beq guest_exit_cont
1665 cmpdi r3, -2 /* MMIO emulation; need instr word */ 1670 cmpdi r3, -2 /* MMIO emulation; need instr word */
1666 beq 2f 1671 beq 2f
1667 1672
1668 /* Synthesize a DSI for the guest */ 1673 /* Synthesize a DSI for the guest */
1669 ld r4, VCPU_FAULT_DAR(r9) 1674 ld r4, VCPU_FAULT_DAR(r9)
1670 mr r6, r3 1675 mr r6, r3
1671 1: mtspr SPRN_DAR, r4 1676 1: mtspr SPRN_DAR, r4
1672 mtspr SPRN_DSISR, r6 1677 mtspr SPRN_DSISR, r6
1673 mtspr SPRN_SRR0, r10 1678 mtspr SPRN_SRR0, r10
1674 mtspr SPRN_SRR1, r11 1679 mtspr SPRN_SRR1, r11
1675 li r10, BOOK3S_INTERRUPT_DATA_STORAGE 1680 li r10, BOOK3S_INTERRUPT_DATA_STORAGE
1676 bl kvmppc_msr_interrupt 1681 bl kvmppc_msr_interrupt
1677 fast_interrupt_c_return: 1682 fast_interrupt_c_return:
1678 6: ld r7, VCPU_CTR(r9) 1683 6: ld r7, VCPU_CTR(r9)
1679 lwz r8, VCPU_XER(r9) 1684 lwz r8, VCPU_XER(r9)
1680 mtctr r7 1685 mtctr r7
1681 mtxer r8 1686 mtxer r8
1682 mr r4, r9 1687 mr r4, r9
1683 b fast_guest_return 1688 b fast_guest_return
1684 1689
1685 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */ 1690 3: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
1686 ld r5, KVM_VRMA_SLB_V(r5) 1691 ld r5, KVM_VRMA_SLB_V(r5)
1687 b 4b 1692 b 4b
1688 1693
1689 /* If this is for emulated MMIO, load the instruction word */ 1694 /* If this is for emulated MMIO, load the instruction word */
1690 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */ 1695 2: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
1691 1696
1692 /* Set guest mode to 'jump over instruction' so if lwz faults 1697 /* Set guest mode to 'jump over instruction' so if lwz faults
1693 * we'll just continue at the next IP. */ 1698 * we'll just continue at the next IP. */
1694 li r0, KVM_GUEST_MODE_SKIP 1699 li r0, KVM_GUEST_MODE_SKIP
1695 stb r0, HSTATE_IN_GUEST(r13) 1700 stb r0, HSTATE_IN_GUEST(r13)
1696 1701
1697 /* Do the access with MSR:DR enabled */ 1702 /* Do the access with MSR:DR enabled */
1698 mfmsr r3 1703 mfmsr r3
1699 ori r4, r3, MSR_DR /* Enable paging for data */ 1704 ori r4, r3, MSR_DR /* Enable paging for data */
1700 mtmsrd r4 1705 mtmsrd r4
1701 lwz r8, 0(r10) 1706 lwz r8, 0(r10)
1702 mtmsrd r3 1707 mtmsrd r3
1703 1708
1704 /* Store the result */ 1709 /* Store the result */
1705 stw r8, VCPU_LAST_INST(r9) 1710 stw r8, VCPU_LAST_INST(r9)
1706 1711
1707 /* Unset guest mode. */ 1712 /* Unset guest mode. */
1708 li r0, KVM_GUEST_MODE_HOST_HV 1713 li r0, KVM_GUEST_MODE_HOST_HV
1709 stb r0, HSTATE_IN_GUEST(r13) 1714 stb r0, HSTATE_IN_GUEST(r13)
1710 b guest_exit_cont 1715 b guest_exit_cont
1711 1716
1712 /* 1717 /*
1713 * Similarly for an HISI, reflect it to the guest as an ISI unless 1718 * Similarly for an HISI, reflect it to the guest as an ISI unless
1714 * it is an HPTE not found fault for a page that we have paged out. 1719 * it is an HPTE not found fault for a page that we have paged out.
1715 */ 1720 */
1716 kvmppc_hisi: 1721 kvmppc_hisi:
1717 andis. r0, r11, SRR1_ISI_NOPT@h 1722 andis. r0, r11, SRR1_ISI_NOPT@h
1718 beq 1f 1723 beq 1f
1719 andi. r0, r11, MSR_IR /* instruction relocation enabled? */ 1724 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
1720 beq 3f 1725 beq 3f
1721 clrrdi r0, r10, 28 1726 clrrdi r0, r10, 28
1722 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */ 1727 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
1723 bne 1f /* if no SLB entry found */ 1728 bne 1f /* if no SLB entry found */
1724 4: 1729 4:
1725 /* Search the hash table. */ 1730 /* Search the hash table. */
1726 mr r3, r9 /* vcpu pointer */ 1731 mr r3, r9 /* vcpu pointer */
1727 mr r4, r10 1732 mr r4, r10
1728 mr r6, r11 1733 mr r6, r11
1729 li r7, 0 /* instruction fault */ 1734 li r7, 0 /* instruction fault */
1730 bl .kvmppc_hpte_hv_fault 1735 bl .kvmppc_hpte_hv_fault
1731 ld r9, HSTATE_KVM_VCPU(r13) 1736 ld r9, HSTATE_KVM_VCPU(r13)
1732 ld r10, VCPU_PC(r9) 1737 ld r10, VCPU_PC(r9)
1733 ld r11, VCPU_MSR(r9) 1738 ld r11, VCPU_MSR(r9)
1734 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE 1739 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1735 cmpdi r3, 0 /* retry the instruction */ 1740 cmpdi r3, 0 /* retry the instruction */
1736 beq fast_interrupt_c_return 1741 beq fast_interrupt_c_return
1737 cmpdi r3, -1 /* handle in kernel mode */ 1742 cmpdi r3, -1 /* handle in kernel mode */
1738 beq guest_exit_cont 1743 beq guest_exit_cont
1739 1744
1740 /* Synthesize an ISI for the guest */ 1745 /* Synthesize an ISI for the guest */
1741 mr r11, r3 1746 mr r11, r3
1742 1: mtspr SPRN_SRR0, r10 1747 1: mtspr SPRN_SRR0, r10
1743 mtspr SPRN_SRR1, r11 1748 mtspr SPRN_SRR1, r11
1744 li r10, BOOK3S_INTERRUPT_INST_STORAGE 1749 li r10, BOOK3S_INTERRUPT_INST_STORAGE
1745 bl kvmppc_msr_interrupt 1750 bl kvmppc_msr_interrupt
1746 b fast_interrupt_c_return 1751 b fast_interrupt_c_return
1747 1752
1748 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ 1753 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
1749 ld r5, KVM_VRMA_SLB_V(r6) 1754 ld r5, KVM_VRMA_SLB_V(r6)
1750 b 4b 1755 b 4b
1751 1756
1752 /* 1757 /*
1753 * Try to handle an hcall in real mode. 1758 * Try to handle an hcall in real mode.
1754 * Returns to the guest if we handle it, or continues on up to 1759 * Returns to the guest if we handle it, or continues on up to
1755 * the kernel if we can't (i.e. if we don't have a handler for 1760 * the kernel if we can't (i.e. if we don't have a handler for
1756 * it, or if the handler returns H_TOO_HARD). 1761 * it, or if the handler returns H_TOO_HARD).
1757 */ 1762 */
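
The dispatch that follows is table-driven: the hcall number (always a multiple of 4) indexes hcall_real_table, whose 32-bit entries are offsets from the table base, with 0 meaning no real-mode handler. A hedged C sketch of that lookup (not the kernel's code):

struct kvm_vcpu;                               /* opaque in this sketch */

static long try_real_mode_hcall(unsigned long nr, const int *table,
                                unsigned long table_bytes, const char *table_base,
                                struct kvm_vcpu *vcpu)
{
        long (*fn)(struct kvm_vcpu *);
        int off;

        nr &= ~3UL;                            /* clrrdi r3,r3,2 */
        if (nr >= table_bytes)                 /* at or past hcall_real_table_end */
                return -1;                     /* no entry: punt to the kernel */
        off = table[nr / 4];                   /* lwax: one 32-bit word per hcall number */
        if (!off)
                return -1;                     /* no real-mode handler registered */
        fn = (long (*)(struct kvm_vcpu *))(table_base + off);
        return fn(vcpu);                       /* H_TOO_HARD here => hcall_real_fallback */
}
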
1758 .globl hcall_try_real_mode 1763 .globl hcall_try_real_mode
1759 hcall_try_real_mode: 1764 hcall_try_real_mode:
1760 ld r3,VCPU_GPR(R3)(r9) 1765 ld r3,VCPU_GPR(R3)(r9)
1761 andi. r0,r11,MSR_PR 1766 andi. r0,r11,MSR_PR
1762 /* sc 1 from userspace - reflect to guest syscall */ 1767 /* sc 1 from userspace - reflect to guest syscall */
1763 bne sc_1_fast_return 1768 bne sc_1_fast_return
1764 clrrdi r3,r3,2 1769 clrrdi r3,r3,2
1765 cmpldi r3,hcall_real_table_end - hcall_real_table 1770 cmpldi r3,hcall_real_table_end - hcall_real_table
1766 bge guest_exit_cont 1771 bge guest_exit_cont
1767 LOAD_REG_ADDR(r4, hcall_real_table) 1772 LOAD_REG_ADDR(r4, hcall_real_table)
1768 lwax r3,r3,r4 1773 lwax r3,r3,r4
1769 cmpwi r3,0 1774 cmpwi r3,0
1770 beq guest_exit_cont 1775 beq guest_exit_cont
1771 add r3,r3,r4 1776 add r3,r3,r4
1772 mtctr r3 1777 mtctr r3
1773 mr r3,r9 /* get vcpu pointer */ 1778 mr r3,r9 /* get vcpu pointer */
1774 ld r4,VCPU_GPR(R4)(r9) 1779 ld r4,VCPU_GPR(R4)(r9)
1775 bctrl 1780 bctrl
1776 cmpdi r3,H_TOO_HARD 1781 cmpdi r3,H_TOO_HARD
1777 beq hcall_real_fallback 1782 beq hcall_real_fallback
1778 ld r4,HSTATE_KVM_VCPU(r13) 1783 ld r4,HSTATE_KVM_VCPU(r13)
1779 std r3,VCPU_GPR(R3)(r4) 1784 std r3,VCPU_GPR(R3)(r4)
1780 ld r10,VCPU_PC(r4) 1785 ld r10,VCPU_PC(r4)
1781 ld r11,VCPU_MSR(r4) 1786 ld r11,VCPU_MSR(r4)
1782 b fast_guest_return 1787 b fast_guest_return
1783 1788
1784 sc_1_fast_return: 1789 sc_1_fast_return:
1785 mtspr SPRN_SRR0,r10 1790 mtspr SPRN_SRR0,r10
1786 mtspr SPRN_SRR1,r11 1791 mtspr SPRN_SRR1,r11
1787 li r10, BOOK3S_INTERRUPT_SYSCALL 1792 li r10, BOOK3S_INTERRUPT_SYSCALL
1788 bl kvmppc_msr_interrupt 1793 bl kvmppc_msr_interrupt
1789 mr r4,r9 1794 mr r4,r9
1790 b fast_guest_return 1795 b fast_guest_return
1791 1796
1792 /* We've attempted a real mode hcall, but it's punted it back 1797 /* We've attempted a real mode hcall, but it's punted it back
1793 * to userspace. We need to restore some clobbered volatiles 1798 * to userspace. We need to restore some clobbered volatiles
1794 * before resuming the pass-it-to-qemu path */ 1799 * before resuming the pass-it-to-qemu path */
1795 hcall_real_fallback: 1800 hcall_real_fallback:
1796 li r12,BOOK3S_INTERRUPT_SYSCALL 1801 li r12,BOOK3S_INTERRUPT_SYSCALL
1797 ld r9, HSTATE_KVM_VCPU(r13) 1802 ld r9, HSTATE_KVM_VCPU(r13)
1798 1803
1799 b guest_exit_cont 1804 b guest_exit_cont
1800 1805
1801 .globl hcall_real_table 1806 .globl hcall_real_table
1802 hcall_real_table: 1807 hcall_real_table:
1803 .long 0 /* 0 - unused */ 1808 .long 0 /* 0 - unused */
1804 .long .kvmppc_h_remove - hcall_real_table 1809 .long .kvmppc_h_remove - hcall_real_table
1805 .long .kvmppc_h_enter - hcall_real_table 1810 .long .kvmppc_h_enter - hcall_real_table
1806 .long .kvmppc_h_read - hcall_real_table 1811 .long .kvmppc_h_read - hcall_real_table
1807 .long 0 /* 0x10 - H_CLEAR_MOD */ 1812 .long 0 /* 0x10 - H_CLEAR_MOD */
1808 .long 0 /* 0x14 - H_CLEAR_REF */ 1813 .long 0 /* 0x14 - H_CLEAR_REF */
1809 .long .kvmppc_h_protect - hcall_real_table 1814 .long .kvmppc_h_protect - hcall_real_table
1810 .long .kvmppc_h_get_tce - hcall_real_table 1815 .long .kvmppc_h_get_tce - hcall_real_table
1811 .long .kvmppc_h_put_tce - hcall_real_table 1816 .long .kvmppc_h_put_tce - hcall_real_table
1812 .long 0 /* 0x24 - H_SET_SPRG0 */ 1817 .long 0 /* 0x24 - H_SET_SPRG0 */
1813 .long .kvmppc_h_set_dabr - hcall_real_table 1818 .long .kvmppc_h_set_dabr - hcall_real_table
1814 .long 0 /* 0x2c */ 1819 .long 0 /* 0x2c */
1815 .long 0 /* 0x30 */ 1820 .long 0 /* 0x30 */
1816 .long 0 /* 0x34 */ 1821 .long 0 /* 0x34 */
1817 .long 0 /* 0x38 */ 1822 .long 0 /* 0x38 */
1818 .long 0 /* 0x3c */ 1823 .long 0 /* 0x3c */
1819 .long 0 /* 0x40 */ 1824 .long 0 /* 0x40 */
1820 .long 0 /* 0x44 */ 1825 .long 0 /* 0x44 */
1821 .long 0 /* 0x48 */ 1826 .long 0 /* 0x48 */
1822 .long 0 /* 0x4c */ 1827 .long 0 /* 0x4c */
1823 .long 0 /* 0x50 */ 1828 .long 0 /* 0x50 */
1824 .long 0 /* 0x54 */ 1829 .long 0 /* 0x54 */
1825 .long 0 /* 0x58 */ 1830 .long 0 /* 0x58 */
1826 .long 0 /* 0x5c */ 1831 .long 0 /* 0x5c */
1827 .long 0 /* 0x60 */ 1832 .long 0 /* 0x60 */
1828 #ifdef CONFIG_KVM_XICS 1833 #ifdef CONFIG_KVM_XICS
1829 .long .kvmppc_rm_h_eoi - hcall_real_table 1834 .long .kvmppc_rm_h_eoi - hcall_real_table
1830 .long .kvmppc_rm_h_cppr - hcall_real_table 1835 .long .kvmppc_rm_h_cppr - hcall_real_table
1831 .long .kvmppc_rm_h_ipi - hcall_real_table 1836 .long .kvmppc_rm_h_ipi - hcall_real_table
1832 .long 0 /* 0x70 - H_IPOLL */ 1837 .long 0 /* 0x70 - H_IPOLL */
1833 .long .kvmppc_rm_h_xirr - hcall_real_table 1838 .long .kvmppc_rm_h_xirr - hcall_real_table
1834 #else 1839 #else
1835 .long 0 /* 0x64 - H_EOI */ 1840 .long 0 /* 0x64 - H_EOI */
1836 .long 0 /* 0x68 - H_CPPR */ 1841 .long 0 /* 0x68 - H_CPPR */
1837 .long 0 /* 0x6c - H_IPI */ 1842 .long 0 /* 0x6c - H_IPI */
1838 .long 0 /* 0x70 - H_IPOLL */ 1843 .long 0 /* 0x70 - H_IPOLL */
1839 .long 0 /* 0x74 - H_XIRR */ 1844 .long 0 /* 0x74 - H_XIRR */
1840 #endif 1845 #endif
1841 .long 0 /* 0x78 */ 1846 .long 0 /* 0x78 */
1842 .long 0 /* 0x7c */ 1847 .long 0 /* 0x7c */
1843 .long 0 /* 0x80 */ 1848 .long 0 /* 0x80 */
1844 .long 0 /* 0x84 */ 1849 .long 0 /* 0x84 */
1845 .long 0 /* 0x88 */ 1850 .long 0 /* 0x88 */
1846 .long 0 /* 0x8c */ 1851 .long 0 /* 0x8c */
1847 .long 0 /* 0x90 */ 1852 .long 0 /* 0x90 */
1848 .long 0 /* 0x94 */ 1853 .long 0 /* 0x94 */
1849 .long 0 /* 0x98 */ 1854 .long 0 /* 0x98 */
1850 .long 0 /* 0x9c */ 1855 .long 0 /* 0x9c */
1851 .long 0 /* 0xa0 */ 1856 .long 0 /* 0xa0 */
1852 .long 0 /* 0xa4 */ 1857 .long 0 /* 0xa4 */
1853 .long 0 /* 0xa8 */ 1858 .long 0 /* 0xa8 */
1854 .long 0 /* 0xac */ 1859 .long 0 /* 0xac */
1855 .long 0 /* 0xb0 */ 1860 .long 0 /* 0xb0 */
1856 .long 0 /* 0xb4 */ 1861 .long 0 /* 0xb4 */
1857 .long 0 /* 0xb8 */ 1862 .long 0 /* 0xb8 */
1858 .long 0 /* 0xbc */ 1863 .long 0 /* 0xbc */
1859 .long 0 /* 0xc0 */ 1864 .long 0 /* 0xc0 */
1860 .long 0 /* 0xc4 */ 1865 .long 0 /* 0xc4 */
1861 .long 0 /* 0xc8 */ 1866 .long 0 /* 0xc8 */
1862 .long 0 /* 0xcc */ 1867 .long 0 /* 0xcc */
1863 .long 0 /* 0xd0 */ 1868 .long 0 /* 0xd0 */
1864 .long 0 /* 0xd4 */ 1869 .long 0 /* 0xd4 */
1865 .long 0 /* 0xd8 */ 1870 .long 0 /* 0xd8 */
1866 .long 0 /* 0xdc */ 1871 .long 0 /* 0xdc */
1867 .long .kvmppc_h_cede - hcall_real_table 1872 .long .kvmppc_h_cede - hcall_real_table
1868 .long 0 /* 0xe4 */ 1873 .long 0 /* 0xe4 */
1869 .long 0 /* 0xe8 */ 1874 .long 0 /* 0xe8 */
1870 .long 0 /* 0xec */ 1875 .long 0 /* 0xec */
1871 .long 0 /* 0xf0 */ 1876 .long 0 /* 0xf0 */
1872 .long 0 /* 0xf4 */ 1877 .long 0 /* 0xf4 */
1873 .long 0 /* 0xf8 */ 1878 .long 0 /* 0xf8 */
1874 .long 0 /* 0xfc */ 1879 .long 0 /* 0xfc */
1875 .long 0 /* 0x100 */ 1880 .long 0 /* 0x100 */
1876 .long 0 /* 0x104 */ 1881 .long 0 /* 0x104 */
1877 .long 0 /* 0x108 */ 1882 .long 0 /* 0x108 */
1878 .long 0 /* 0x10c */ 1883 .long 0 /* 0x10c */
1879 .long 0 /* 0x110 */ 1884 .long 0 /* 0x110 */
1880 .long 0 /* 0x114 */ 1885 .long 0 /* 0x114 */
1881 .long 0 /* 0x118 */ 1886 .long 0 /* 0x118 */
1882 .long 0 /* 0x11c */ 1887 .long 0 /* 0x11c */
1883 .long 0 /* 0x120 */ 1888 .long 0 /* 0x120 */
1884 .long .kvmppc_h_bulk_remove - hcall_real_table 1889 .long .kvmppc_h_bulk_remove - hcall_real_table
1885 .long 0 /* 0x128 */ 1890 .long 0 /* 0x128 */
1886 .long 0 /* 0x12c */ 1891 .long 0 /* 0x12c */
1887 .long 0 /* 0x130 */ 1892 .long 0 /* 0x130 */
1888 .long .kvmppc_h_set_xdabr - hcall_real_table 1893 .long .kvmppc_h_set_xdabr - hcall_real_table
1889 hcall_real_table_end: 1894 hcall_real_table_end:
1890 1895
1891 ignore_hdec: 1896 ignore_hdec:
1892 mr r4,r9 1897 mr r4,r9
1893 b fast_guest_return 1898 b fast_guest_return
1894 1899
1895 _GLOBAL(kvmppc_h_set_xdabr) 1900 _GLOBAL(kvmppc_h_set_xdabr)
1896 andi. r0, r5, DABRX_USER | DABRX_KERNEL 1901 andi. r0, r5, DABRX_USER | DABRX_KERNEL
1897 beq 6f 1902 beq 6f
1898 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI 1903 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
1899 andc. r0, r5, r0 1904 andc. r0, r5, r0
1900 beq 3f 1905 beq 3f
1901 6: li r3, H_PARAMETER 1906 6: li r3, H_PARAMETER
1902 blr 1907 blr
1903 1908
1904 _GLOBAL(kvmppc_h_set_dabr) 1909 _GLOBAL(kvmppc_h_set_dabr)
1905 li r5, DABRX_USER | DABRX_KERNEL 1910 li r5, DABRX_USER | DABRX_KERNEL
1906 3: 1911 3:
1907 BEGIN_FTR_SECTION 1912 BEGIN_FTR_SECTION
1908 b 2f 1913 b 2f
1909 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1914 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1910 std r4,VCPU_DABR(r3) 1915 std r4,VCPU_DABR(r3)
1911 stw r5, VCPU_DABRX(r3) 1916 stw r5, VCPU_DABRX(r3)
1912 mtspr SPRN_DABRX, r5 1917 mtspr SPRN_DABRX, r5
1913 /* Work around P7 bug where DABR can get corrupted on mtspr */ 1918 /* Work around P7 bug where DABR can get corrupted on mtspr */
1914 1: mtspr SPRN_DABR,r4 1919 1: mtspr SPRN_DABR,r4
1915 mfspr r5, SPRN_DABR 1920 mfspr r5, SPRN_DABR
1916 cmpd r4, r5 1921 cmpd r4, r5
1917 bne 1b 1922 bne 1b
1918 isync 1923 isync
1919 li r3,0 1924 li r3,0
1920 blr 1925 blr
1921 1926
1922 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ 1927 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
1923 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW 1928 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
1924 rlwimi r5, r4, 1, DAWRX_WT 1929 rlwimi r5, r4, 1, DAWRX_WT
1925 clrrdi r4, r4, 3 1930 clrrdi r4, r4, 3
1926 std r4, VCPU_DAWR(r3) 1931 std r4, VCPU_DAWR(r3)
1927 std r5, VCPU_DAWRX(r3) 1932 std r5, VCPU_DAWRX(r3)
1928 mtspr SPRN_DAWR, r4 1933 mtspr SPRN_DAWR, r4
1929 mtspr SPRN_DAWRX, r5 1934 mtspr SPRN_DAWRX, r5
1930 li r3, 0 1935 li r3, 0
1931 blr 1936 blr
1932 1937
1933 _GLOBAL(kvmppc_h_cede) 1938 _GLOBAL(kvmppc_h_cede)
1934 ori r11,r11,MSR_EE 1939 ori r11,r11,MSR_EE
1935 std r11,VCPU_MSR(r3) 1940 std r11,VCPU_MSR(r3)
1936 li r0,1 1941 li r0,1
1937 stb r0,VCPU_CEDED(r3) 1942 stb r0,VCPU_CEDED(r3)
1938 sync /* order setting ceded vs. testing prodded */ 1943 sync /* order setting ceded vs. testing prodded */
1939 lbz r5,VCPU_PRODDED(r3) 1944 lbz r5,VCPU_PRODDED(r3)
1940 cmpwi r5,0 1945 cmpwi r5,0
1941 bne kvm_cede_prodded 1946 bne kvm_cede_prodded
1942 li r0,0 /* set trap to 0 to say hcall is handled */ 1947 li r0,0 /* set trap to 0 to say hcall is handled */
1943 stw r0,VCPU_TRAP(r3) 1948 stw r0,VCPU_TRAP(r3)
1944 li r0,H_SUCCESS 1949 li r0,H_SUCCESS
1945 std r0,VCPU_GPR(R3)(r3) 1950 std r0,VCPU_GPR(R3)(r3)
1946 BEGIN_FTR_SECTION 1951 BEGIN_FTR_SECTION
1947 b kvm_cede_exit /* just send it up to host on 970 */ 1952 b kvm_cede_exit /* just send it up to host on 970 */
1948 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) 1953 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1949 1954
1950 /* 1955 /*
1951 * Set our bit in the bitmask of napping threads unless all the 1956 * Set our bit in the bitmask of napping threads unless all the
1952 * other threads are already napping, in which case we send this 1957 * other threads are already napping, in which case we send this
1953 * up to the host. 1958 * up to the host.
1954 */ 1959 */
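
In C terms, with threads_in_guest being the low byte of entry_exit_count, the check below amounts to something like this (the asm performs the update atomically with lwarx/stwcx.; names are illustrative):

static int note_thread_napping(unsigned int *napping_threads, int my_thread,
                               unsigned int threads_in_guest)
{
        unsigned int mask = *napping_threads | (1u << my_thread);

        if ((unsigned int)__builtin_popcount(mask) >= threads_in_guest)
                return -1;                /* everyone would be napping: cede goes to the host */
        *napping_threads = mask;          /* done atomically in the asm */
        return 0;
}
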
1955 ld r5,HSTATE_KVM_VCORE(r13) 1960 ld r5,HSTATE_KVM_VCORE(r13)
1956 lbz r6,HSTATE_PTID(r13) 1961 lbz r6,HSTATE_PTID(r13)
1957 lwz r8,VCORE_ENTRY_EXIT(r5) 1962 lwz r8,VCORE_ENTRY_EXIT(r5)
1958 clrldi r8,r8,56 1963 clrldi r8,r8,56
1959 li r0,1 1964 li r0,1
1960 sld r0,r0,r6 1965 sld r0,r0,r6
1961 addi r6,r5,VCORE_NAPPING_THREADS 1966 addi r6,r5,VCORE_NAPPING_THREADS
1962 31: lwarx r4,0,r6 1967 31: lwarx r4,0,r6
1963 or r4,r4,r0 1968 or r4,r4,r0
1964 PPC_POPCNTW(R7,R4) 1969 PPC_POPCNTW(R7,R4)
1965 cmpw r7,r8 1970 cmpw r7,r8
1966 bge kvm_cede_exit 1971 bge kvm_cede_exit
1967 stwcx. r4,0,r6 1972 stwcx. r4,0,r6
1968 bne 31b 1973 bne 31b
1969 /* order napping_threads update vs testing entry_exit_count */ 1974 /* order napping_threads update vs testing entry_exit_count */
1970 isync 1975 isync
1971 li r0,NAPPING_CEDE 1976 li r0,NAPPING_CEDE
1972 stb r0,HSTATE_NAPPING(r13) 1977 stb r0,HSTATE_NAPPING(r13)
1973 lwz r7,VCORE_ENTRY_EXIT(r5) 1978 lwz r7,VCORE_ENTRY_EXIT(r5)
1974 cmpwi r7,0x100 1979 cmpwi r7,0x100
1975 bge 33f /* another thread already exiting */ 1980 bge 33f /* another thread already exiting */
1976 1981
1977 /* 1982 /*
1978 * Although not specifically required by the architecture, POWER7 1983 * Although not specifically required by the architecture, POWER7
1979 * preserves the following registers in nap mode, even if an SMT mode 1984 * preserves the following registers in nap mode, even if an SMT mode
1980 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3, 1985 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
1981 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR. 1986 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
1982 */ 1987 */
1983 /* Save non-volatile GPRs */ 1988 /* Save non-volatile GPRs */
1984 std r14, VCPU_GPR(R14)(r3) 1989 std r14, VCPU_GPR(R14)(r3)
1985 std r15, VCPU_GPR(R15)(r3) 1990 std r15, VCPU_GPR(R15)(r3)
1986 std r16, VCPU_GPR(R16)(r3) 1991 std r16, VCPU_GPR(R16)(r3)
1987 std r17, VCPU_GPR(R17)(r3) 1992 std r17, VCPU_GPR(R17)(r3)
1988 std r18, VCPU_GPR(R18)(r3) 1993 std r18, VCPU_GPR(R18)(r3)
1989 std r19, VCPU_GPR(R19)(r3) 1994 std r19, VCPU_GPR(R19)(r3)
1990 std r20, VCPU_GPR(R20)(r3) 1995 std r20, VCPU_GPR(R20)(r3)
1991 std r21, VCPU_GPR(R21)(r3) 1996 std r21, VCPU_GPR(R21)(r3)
1992 std r22, VCPU_GPR(R22)(r3) 1997 std r22, VCPU_GPR(R22)(r3)
1993 std r23, VCPU_GPR(R23)(r3) 1998 std r23, VCPU_GPR(R23)(r3)
1994 std r24, VCPU_GPR(R24)(r3) 1999 std r24, VCPU_GPR(R24)(r3)
1995 std r25, VCPU_GPR(R25)(r3) 2000 std r25, VCPU_GPR(R25)(r3)
1996 std r26, VCPU_GPR(R26)(r3) 2001 std r26, VCPU_GPR(R26)(r3)
1997 std r27, VCPU_GPR(R27)(r3) 2002 std r27, VCPU_GPR(R27)(r3)
1998 std r28, VCPU_GPR(R28)(r3) 2003 std r28, VCPU_GPR(R28)(r3)
1999 std r29, VCPU_GPR(R29)(r3) 2004 std r29, VCPU_GPR(R29)(r3)
2000 std r30, VCPU_GPR(R30)(r3) 2005 std r30, VCPU_GPR(R30)(r3)
2001 std r31, VCPU_GPR(R31)(r3) 2006 std r31, VCPU_GPR(R31)(r3)
2002 2007
2003 /* save FP state */ 2008 /* save FP state */
2004 bl kvmppc_save_fp 2009 bl kvmppc_save_fp
2005 2010
2006 /* 2011 /*
2007 * Take a nap until a decrementer or external or doorbell interrupt 2012 * Take a nap until a decrementer or external or doorbell interrupt
2008 * occurs, with PECE1, PECE0 and PECEDP set in LPCR 2013 * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
2014 * runlatch bit before napping.
2009 */ 2015 */
2016 mfspr r2, SPRN_CTRLF
2017 clrrdi r2, r2, 1
2018 mtspr SPRN_CTRLT, r2
2019
2010 li r0,1 2020 li r0,1
2011 stb r0,HSTATE_HWTHREAD_REQ(r13) 2021 stb r0,HSTATE_HWTHREAD_REQ(r13)
2012 mfspr r5,SPRN_LPCR 2022 mfspr r5,SPRN_LPCR
2013 ori r5,r5,LPCR_PECE0 | LPCR_PECE1 2023 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
2014 BEGIN_FTR_SECTION 2024 BEGIN_FTR_SECTION
2015 oris r5,r5,LPCR_PECEDP@h 2025 oris r5,r5,LPCR_PECEDP@h
2016 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2026 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2017 mtspr SPRN_LPCR,r5 2027 mtspr SPRN_LPCR,r5
2018 isync 2028 isync
2019 li r0, 0 2029 li r0, 0
2020 std r0, HSTATE_SCRATCH0(r13) 2030 std r0, HSTATE_SCRATCH0(r13)
2021 ptesync 2031 ptesync
2022 ld r0, HSTATE_SCRATCH0(r13) 2032 ld r0, HSTATE_SCRATCH0(r13)
2023 1: cmpd r0, r0 2033 1: cmpd r0, r0
2024 bne 1b 2034 bne 1b
2025 nap 2035 nap
2026 b . 2036 b .
2027 2037
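
The three instructions added above (mfspr from CTRLF, clrrdi, mtspr to CTRLT) are what this commit is about: dropping the runlatch bit before the thread naps so it is no longer reported as doing useful work. A C sketch, with the SPR accessors as illustrative stand-ins:

#define CTRL_RUNLATCH 0x1UL                    /* low bit of the CTRL register */

extern unsigned long mfspr_ctrlf(void);        /* stand-ins for mfspr SPRN_CTRLF / */
extern void mtspr_ctrlt(unsigned long v);      /* mtspr SPRN_CTRLT                 */

static void runlatch_off_before_nap(void)
{
        unsigned long ctrl = mfspr_ctrlf();    /* mfspr r2, SPRN_CTRLF */

        ctrl &= ~CTRL_RUNLATCH;                /* clrrdi r2, r2, 1     */
        mtspr_ctrlt(ctrl);                     /* mtspr SPRN_CTRLT, r2 */
}

Clearing the bit here means a vcpu that has ceded (or has no guest to run) sits in nap flagged as idle, which is exactly what the commit message asks for; the existing guest-exit path already sets the runlatch back to 1 when the thread resumes real work.
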
2028 33: mr r4, r3 2038 33: mr r4, r3
2029 li r3, 0 2039 li r3, 0
2030 li r12, 0 2040 li r12, 0
2031 b 34f 2041 b 34f
2032 2042
2033 kvm_end_cede: 2043 kvm_end_cede:
2034 /* get vcpu pointer */ 2044 /* get vcpu pointer */
2035 ld r4, HSTATE_KVM_VCPU(r13) 2045 ld r4, HSTATE_KVM_VCPU(r13)
2036 2046
2037 /* Woken by external or decrementer interrupt */ 2047 /* Woken by external or decrementer interrupt */
2038 ld r1, HSTATE_HOST_R1(r13) 2048 ld r1, HSTATE_HOST_R1(r13)
2039 2049
2040 /* load up FP state */ 2050 /* load up FP state */
2041 bl kvmppc_load_fp 2051 bl kvmppc_load_fp
2042 2052
2043 /* Load NV GPRS */ 2053 /* Load NV GPRS */
2044 ld r14, VCPU_GPR(R14)(r4) 2054 ld r14, VCPU_GPR(R14)(r4)
2045 ld r15, VCPU_GPR(R15)(r4) 2055 ld r15, VCPU_GPR(R15)(r4)
2046 ld r16, VCPU_GPR(R16)(r4) 2056 ld r16, VCPU_GPR(R16)(r4)
2047 ld r17, VCPU_GPR(R17)(r4) 2057 ld r17, VCPU_GPR(R17)(r4)
2048 ld r18, VCPU_GPR(R18)(r4) 2058 ld r18, VCPU_GPR(R18)(r4)
2049 ld r19, VCPU_GPR(R19)(r4) 2059 ld r19, VCPU_GPR(R19)(r4)
2050 ld r20, VCPU_GPR(R20)(r4) 2060 ld r20, VCPU_GPR(R20)(r4)
2051 ld r21, VCPU_GPR(R21)(r4) 2061 ld r21, VCPU_GPR(R21)(r4)
2052 ld r22, VCPU_GPR(R22)(r4) 2062 ld r22, VCPU_GPR(R22)(r4)
2053 ld r23, VCPU_GPR(R23)(r4) 2063 ld r23, VCPU_GPR(R23)(r4)
2054 ld r24, VCPU_GPR(R24)(r4) 2064 ld r24, VCPU_GPR(R24)(r4)
2055 ld r25, VCPU_GPR(R25)(r4) 2065 ld r25, VCPU_GPR(R25)(r4)
2056 ld r26, VCPU_GPR(R26)(r4) 2066 ld r26, VCPU_GPR(R26)(r4)
2057 ld r27, VCPU_GPR(R27)(r4) 2067 ld r27, VCPU_GPR(R27)(r4)
2058 ld r28, VCPU_GPR(R28)(r4) 2068 ld r28, VCPU_GPR(R28)(r4)
2059 ld r29, VCPU_GPR(R29)(r4) 2069 ld r29, VCPU_GPR(R29)(r4)
2060 ld r30, VCPU_GPR(R30)(r4) 2070 ld r30, VCPU_GPR(R30)(r4)
2061 ld r31, VCPU_GPR(R31)(r4) 2071 ld r31, VCPU_GPR(R31)(r4)
2062 2072
2063 /* Check the wake reason in SRR1 to see why we got here */ 2073 /* Check the wake reason in SRR1 to see why we got here */
2064 bl kvmppc_check_wake_reason 2074 bl kvmppc_check_wake_reason
2065 2075
2066 /* clear our bit in vcore->napping_threads */ 2076 /* clear our bit in vcore->napping_threads */
2067 34: ld r5,HSTATE_KVM_VCORE(r13) 2077 34: ld r5,HSTATE_KVM_VCORE(r13)
2068 lbz r7,HSTATE_PTID(r13) 2078 lbz r7,HSTATE_PTID(r13)
2069 li r0,1 2079 li r0,1
2070 sld r0,r0,r7 2080 sld r0,r0,r7
2071 addi r6,r5,VCORE_NAPPING_THREADS 2081 addi r6,r5,VCORE_NAPPING_THREADS
2072 32: lwarx r7,0,r6 2082 32: lwarx r7,0,r6
2073 andc r7,r7,r0 2083 andc r7,r7,r0
2074 stwcx. r7,0,r6 2084 stwcx. r7,0,r6
2075 bne 32b 2085 bne 32b
2076 li r0,0 2086 li r0,0
2077 stb r0,HSTATE_NAPPING(r13) 2087 stb r0,HSTATE_NAPPING(r13)
2078 2088
2079 /* See if the wake reason means we need to exit */ 2089 /* See if the wake reason means we need to exit */
2080 stw r12, VCPU_TRAP(r4) 2090 stw r12, VCPU_TRAP(r4)
2081 mr r9, r4 2091 mr r9, r4
2082 cmpdi r3, 0 2092 cmpdi r3, 0
2083 bgt guest_exit_cont 2093 bgt guest_exit_cont
2084 2094
2085 /* see if any other thread is already exiting */ 2095 /* see if any other thread is already exiting */
2086 lwz r0,VCORE_ENTRY_EXIT(r5) 2096 lwz r0,VCORE_ENTRY_EXIT(r5)
2087 cmpwi r0,0x100 2097 cmpwi r0,0x100
2088 bge guest_exit_cont 2098 bge guest_exit_cont
2089 2099
2090 b kvmppc_cede_reentry /* if not go back to guest */ 2100 b kvmppc_cede_reentry /* if not go back to guest */
2091 2101
2092 /* cede when already previously prodded case */ 2102 /* cede when already previously prodded case */
2093 kvm_cede_prodded: 2103 kvm_cede_prodded:
2094 li r0,0 2104 li r0,0
2095 stb r0,VCPU_PRODDED(r3) 2105 stb r0,VCPU_PRODDED(r3)
2096 sync /* order testing prodded vs. clearing ceded */ 2106 sync /* order testing prodded vs. clearing ceded */
2097 stb r0,VCPU_CEDED(r3) 2107 stb r0,VCPU_CEDED(r3)
2098 li r3,H_SUCCESS 2108 li r3,H_SUCCESS
2099 blr 2109 blr
2100 2110
2101 /* we've ceded but we want to give control to the host */ 2111 /* we've ceded but we want to give control to the host */
2102 kvm_cede_exit: 2112 kvm_cede_exit:
2103 b hcall_real_fallback 2113 b hcall_real_fallback
2104 2114
2105 /* Try to handle a machine check in real mode */ 2115 /* Try to handle a machine check in real mode */
2106 machine_check_realmode: 2116 machine_check_realmode:
2107 mr r3, r9 /* get vcpu pointer */ 2117 mr r3, r9 /* get vcpu pointer */
2108 bl .kvmppc_realmode_machine_check 2118 bl .kvmppc_realmode_machine_check
2109 nop 2119 nop
2110 cmpdi r3, 0 /* continue exiting from guest? */ 2120 cmpdi r3, 0 /* continue exiting from guest? */
2111 ld r9, HSTATE_KVM_VCPU(r13) 2121 ld r9, HSTATE_KVM_VCPU(r13)
2112 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK 2122 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
2113 beq mc_cont 2123 beq mc_cont
2114 /* If not, deliver a machine check. SRR0/1 are already set */ 2124 /* If not, deliver a machine check. SRR0/1 are already set */
2115 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK 2125 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
2116 bl kvmppc_msr_interrupt 2126 bl kvmppc_msr_interrupt
2117 b fast_interrupt_c_return 2127 b fast_interrupt_c_return
2118 2128
2119 /* 2129 /*
2120 * Check the reason we woke from nap, and take appropriate action. 2130 * Check the reason we woke from nap, and take appropriate action.
2121 * Returns: 2131 * Returns:
2122 * 0 if nothing needs to be done 2132 * 0 if nothing needs to be done
2123 * 1 if something happened that needs to be handled by the host 2133 * 1 if something happened that needs to be handled by the host
2124 * -1 if there was a guest wakeup (IPI) 2134 * -1 if there was a guest wakeup (IPI)
2125 * 2135 *
2126 * Also sets r12 to the interrupt vector for any interrupt that needs 2136 * Also sets r12 to the interrupt vector for any interrupt that needs
2127 * to be handled now by the host (0x500 for external interrupt), or zero. 2137 * to be handled now by the host (0x500 for external interrupt), or zero.
2128 */ 2138 */
2129 kvmppc_check_wake_reason: 2139 kvmppc_check_wake_reason:
2130 mfspr r6, SPRN_SRR1 2140 mfspr r6, SPRN_SRR1
2131 BEGIN_FTR_SECTION 2141 BEGIN_FTR_SECTION
2132 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ 2142 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2133 FTR_SECTION_ELSE 2143 FTR_SECTION_ELSE
2134 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ 2144 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2135 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) 2145 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2136 cmpwi r6, 8 /* was it an external interrupt? */ 2146 cmpwi r6, 8 /* was it an external interrupt? */
2137 li r12, BOOK3S_INTERRUPT_EXTERNAL 2147 li r12, BOOK3S_INTERRUPT_EXTERNAL
2138 beq kvmppc_read_intr /* if so, see what it was */ 2148 beq kvmppc_read_intr /* if so, see what it was */
2139 li r3, 0 2149 li r3, 0
2140 li r12, 0 2150 li r12, 0
2141 cmpwi r6, 6 /* was it the decrementer? */ 2151 cmpwi r6, 6 /* was it the decrementer? */
2142 beq 0f 2152 beq 0f
2143 BEGIN_FTR_SECTION 2153 BEGIN_FTR_SECTION
2144 cmpwi r6, 5 /* privileged doorbell? */ 2154 cmpwi r6, 5 /* privileged doorbell? */
2145 beq 0f 2155 beq 0f
2146 cmpwi r6, 3 /* hypervisor doorbell? */ 2156 cmpwi r6, 3 /* hypervisor doorbell? */
2147 beq 3f 2157 beq 3f
2148 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 2158 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2149 li r3, 1 /* anything else, return 1 */ 2159 li r3, 1 /* anything else, return 1 */
2150 0: blr 2160 0: blr
2151 2161
2152 /* hypervisor doorbell */ 2162 /* hypervisor doorbell */
2153 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL 2163 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
2154 li r3, 1 2164 li r3, 1
2155 blr 2165 blr
2156 2166
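In C terms, kvmppc_check_wake_reason above decodes the wake-reason field of SRR1 and maps it onto the three return values documented in the comment. A rough sketch, assuming the field sits at SRR1 bits 21:18 (which is what the rlwinm shift corresponds to) and using 0x500/0xe80 as illustrative vector numbers; the tail branch into kvmppc_read_intr is collapsed to a plain "exit to host" here:

#define SKETCH_INTR_EXTERNAL	0x500	/* matches the "0x500" in the comment above */
#define SKETCH_INTR_H_DOORBELL	0xe80	/* assumed vector number, for illustration */

/* returns 0 = nothing to do, 1 = host must handle it, -1 = guest IPI;
 * *trap receives the interrupt vector to report, or 0 */
static int check_wake_reason(unsigned long srr1, int is_arch_207s, int *trap)
{
	/* 4-bit wake-reason field on P8, 3-bit field on P7 */
	unsigned int reason = (srr1 >> 18) & (is_arch_207s ? 0xfu : 0xeu);

	*trap = 0;
	if (reason == 8) {			/* external interrupt */
		*trap = SKETCH_INTR_EXTERNAL;
		return 1;			/* real code defers to kvmppc_read_intr */
	}
	if (reason == 6)			/* decrementer */
		return 0;
	if (is_arch_207s && reason == 5)	/* privileged doorbell */
		return 0;
	if (is_arch_207s && reason == 3) {	/* hypervisor doorbell */
		*trap = SKETCH_INTR_H_DOORBELL;
		return 1;
	}
	return 1;				/* anything else: exit to the host */
}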
2157 /* 2167 /*
2158 * Determine what sort of external interrupt is pending (if any). 2168 * Determine what sort of external interrupt is pending (if any).
2159 * Returns: 2169 * Returns:
2160 * 0 if no interrupt is pending 2170 * 0 if no interrupt is pending
2161 * 1 if an interrupt is pending that needs to be handled by the host 2171 * 1 if an interrupt is pending that needs to be handled by the host
2162 * -1 if there was a guest wakeup IPI (which has now been cleared) 2172 * -1 if there was a guest wakeup IPI (which has now been cleared)
2163 */ 2173 */
2164 kvmppc_read_intr: 2174 kvmppc_read_intr:
2165 /* see if a host IPI is pending */ 2175 /* see if a host IPI is pending */
2166 li r3, 1 2176 li r3, 1
2167 lbz r0, HSTATE_HOST_IPI(r13) 2177 lbz r0, HSTATE_HOST_IPI(r13)
2168 cmpwi r0, 0 2178 cmpwi r0, 0
2169 bne 1f 2179 bne 1f
2170 2180
2171 /* Now read the interrupt from the ICP */ 2181 /* Now read the interrupt from the ICP */
2172 ld r6, HSTATE_XICS_PHYS(r13) 2182 ld r6, HSTATE_XICS_PHYS(r13)
2173 li r7, XICS_XIRR 2183 li r7, XICS_XIRR
2174 cmpdi r6, 0 2184 cmpdi r6, 0
2175 beq- 1f 2185 beq- 1f
2176 lwzcix r0, r6, r7 2186 lwzcix r0, r6, r7
2177 rlwinm. r3, r0, 0, 0xffffff 2187 rlwinm. r3, r0, 0, 0xffffff
2178 sync 2188 sync
2179 beq 1f /* if nothing pending in the ICP */ 2189 beq 1f /* if nothing pending in the ICP */
2180 2190
2181 /* We found something in the ICP... 2191 /* We found something in the ICP...
2182 * 2192 *
2183 * If it's not an IPI, stash it in the PACA and return to 2193 * If it's not an IPI, stash it in the PACA and return to
2184 * the host; we don't (yet) handle directing real external 2194 * the host; we don't (yet) handle directing real external
2185 * interrupts directly to the guest 2195 * interrupts directly to the guest
2186 */ 2196 */
2187 cmpwi r3, XICS_IPI /* if there is, is it an IPI? */ 2197 cmpwi r3, XICS_IPI /* if there is, is it an IPI? */
2188 bne 42f 2198 bne 42f
2189 2199
2190 /* It's an IPI, clear the MFRR and EOI it */ 2200 /* It's an IPI, clear the MFRR and EOI it */
2191 li r3, 0xff 2201 li r3, 0xff
2192 li r8, XICS_MFRR 2202 li r8, XICS_MFRR
2193 stbcix r3, r6, r8 /* clear the IPI */ 2203 stbcix r3, r6, r8 /* clear the IPI */
2194 stwcix r0, r6, r7 /* EOI it */ 2204 stwcix r0, r6, r7 /* EOI it */
2195 sync 2205 sync
2196 2206
2197 /* We need to re-check host IPI now in case it got set in the 2207 /* We need to re-check host IPI now in case it got set in the
2198 * meantime. If it's clear, we bounce the interrupt to the 2208 * meantime. If it's clear, we bounce the interrupt to the
2199 * guest 2209 * guest
2200 */ 2210 */
2201 lbz r0, HSTATE_HOST_IPI(r13) 2211 lbz r0, HSTATE_HOST_IPI(r13)
2202 cmpwi r0, 0 2212 cmpwi r0, 0
2203 bne- 43f 2213 bne- 43f
2204 2214
2205 /* OK, it's an IPI for us */ 2215 /* OK, it's an IPI for us */
2206 li r3, -1 2216 li r3, -1
2207 1: blr 2217 1: blr
2208 2218
2209 42: /* It's not an IPI and it's for the host: stash it in the PACA 2219 42: /* It's not an IPI and it's for the host: stash it in the PACA
2210 * before exit; it will be picked up by the host ICP driver 2220 * before exit; it will be picked up by the host ICP driver
2211 */ 2221 */
2212 stw r0, HSTATE_SAVED_XIRR(r13) 2222 stw r0, HSTATE_SAVED_XIRR(r13)
2213 li r3, 1 2223 li r3, 1
2214 b 1b 2224 b 1b
2215 2225
2216 43: /* We raced with the host, we need to resend that IPI, bummer */ 2226 43: /* We raced with the host, we need to resend that IPI, bummer */
2217 li r0, IPI_PRIORITY 2227 li r0, IPI_PRIORITY
2218 stbcix r0, r6, r8 /* set the IPI */ 2228 stbcix r0, r6, r8 /* set the IPI */
2219 sync 2229 sync
2220 li r3, 1 2230 li r3, 1
2221 b 1b 2231 b 1b
2222 2232
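The ICP poll above can likewise be read as straight-line C. The sketch below uses a hypothetical icp_ops accessor struct in place of the cache-inhibited lwzcix/stbcix/stwcix accesses, and assumes 2 and 4 for XICS_IPI and IPI_PRIORITY; only the control flow is meant to mirror kvmppc_read_intr.

/* hypothetical MMIO accessors; not a real kernel API */
struct icp_ops {
	unsigned int (*read_xirr)(void);
	void (*write_xirr)(unsigned int xirr);		/* EOI */
	void (*write_mfrr)(unsigned char priority);
};

#define SKETCH_XICS_IPI		2	/* assumed value of XICS_IPI */
#define SKETCH_IPI_PRIORITY	4	/* assumed value of IPI_PRIORITY */

static int read_intr(const struct icp_ops *icp,
		     volatile unsigned char *host_ipi,
		     unsigned int *saved_xirr)
{
	unsigned int xirr, source;

	if (*host_ipi)
		return 1;		/* host IPI pending: exit to the host */
	if (!icp)
		return 1;		/* no ICP mapped (HSTATE_XICS_PHYS == 0) */

	xirr = icp->read_xirr();
	source = xirr & 0xffffff;
	if (!source)
		return 0;		/* nothing pending in the ICP */

	if (source != SKETCH_XICS_IPI) {
		*saved_xirr = xirr;	/* stash it for the host ICP driver */
		return 1;
	}

	icp->write_mfrr(0xff);		/* clear the IPI */
	icp->write_xirr(xirr);		/* EOI it */

	if (*host_ipi) {		/* raced with the host: resend the IPI */
		icp->write_mfrr(SKETCH_IPI_PRIORITY);
		return 1;
	}
	return -1;			/* guest wakeup IPI, now cleared */
}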
2223 /* 2233 /*
2224 * Save away FP, VMX and VSX registers. 2234 * Save away FP, VMX and VSX registers.
2225 * r3 = vcpu pointer 2235 * r3 = vcpu pointer
2226 * N.B. r30 and r31 are volatile across this function, 2236 * N.B. r30 and r31 are volatile across this function,
2227 * thus it is not callable from C. 2237 * thus it is not callable from C.
2228 */ 2238 */
2229 kvmppc_save_fp: 2239 kvmppc_save_fp:
2230 mflr r30 2240 mflr r30
2231 mr r31,r3 2241 mr r31,r3
2232 mfmsr r5 2242 mfmsr r5
2233 ori r8,r5,MSR_FP 2243 ori r8,r5,MSR_FP
2234 #ifdef CONFIG_ALTIVEC 2244 #ifdef CONFIG_ALTIVEC
2235 BEGIN_FTR_SECTION 2245 BEGIN_FTR_SECTION
2236 oris r8,r8,MSR_VEC@h 2246 oris r8,r8,MSR_VEC@h
2237 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2247 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2238 #endif 2248 #endif
2239 #ifdef CONFIG_VSX 2249 #ifdef CONFIG_VSX
2240 BEGIN_FTR_SECTION 2250 BEGIN_FTR_SECTION
2241 oris r8,r8,MSR_VSX@h 2251 oris r8,r8,MSR_VSX@h
2242 END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2252 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2243 #endif 2253 #endif
2244 mtmsrd r8 2254 mtmsrd r8
2245 isync 2255 isync
2246 addi r3,r3,VCPU_FPRS 2256 addi r3,r3,VCPU_FPRS
2247 bl .store_fp_state 2257 bl .store_fp_state
2248 #ifdef CONFIG_ALTIVEC 2258 #ifdef CONFIG_ALTIVEC
2249 BEGIN_FTR_SECTION 2259 BEGIN_FTR_SECTION
2250 addi r3,r31,VCPU_VRS 2260 addi r3,r31,VCPU_VRS
2251 bl .store_vr_state 2261 bl .store_vr_state
2252 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2262 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2253 #endif 2263 #endif
2254 mfspr r6,SPRN_VRSAVE 2264 mfspr r6,SPRN_VRSAVE
2255 stw r6,VCPU_VRSAVE(r31) 2265 stw r6,VCPU_VRSAVE(r31)
2256 mtlr r30 2266 mtlr r30
2257 blr 2267 blr
2258 2268
2259 /* 2269 /*
2260 * Load up FP, VMX and VSX registers 2270 * Load up FP, VMX and VSX registers
2261 * r4 = vcpu pointer 2271 * r4 = vcpu pointer
2262 * N.B. r30 and r31 are volatile across this function, 2272 * N.B. r30 and r31 are volatile across this function,
2263 * thus it is not callable from C. 2273 * thus it is not callable from C.
2264 */ 2274 */
2265 kvmppc_load_fp: 2275 kvmppc_load_fp:
2266 mflr r30 2276 mflr r30
2267 mr r31,r4 2277 mr r31,r4
2268 mfmsr r9 2278 mfmsr r9
2269 ori r8,r9,MSR_FP 2279 ori r8,r9,MSR_FP
2270 #ifdef CONFIG_ALTIVEC 2280 #ifdef CONFIG_ALTIVEC
2271 BEGIN_FTR_SECTION 2281 BEGIN_FTR_SECTION
2272 oris r8,r8,MSR_VEC@h 2282 oris r8,r8,MSR_VEC@h
2273 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2283 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2274 #endif 2284 #endif
2275 #ifdef CONFIG_VSX 2285 #ifdef CONFIG_VSX
2276 BEGIN_FTR_SECTION 2286 BEGIN_FTR_SECTION
2277 oris r8,r8,MSR_VSX@h 2287 oris r8,r8,MSR_VSX@h
2278 END_FTR_SECTION_IFSET(CPU_FTR_VSX) 2288 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2279 #endif 2289 #endif
2280 mtmsrd r8 2290 mtmsrd r8
2281 isync 2291 isync
2282 addi r3,r4,VCPU_FPRS 2292 addi r3,r4,VCPU_FPRS
2283 bl .load_fp_state 2293 bl .load_fp_state
2284 #ifdef CONFIG_ALTIVEC 2294 #ifdef CONFIG_ALTIVEC
2285 BEGIN_FTR_SECTION 2295 BEGIN_FTR_SECTION
2286 addi r3,r31,VCPU_VRS 2296 addi r3,r31,VCPU_VRS
2287 bl .load_vr_state 2297 bl .load_vr_state
2288 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 2298 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2289 #endif 2299 #endif
2290 lwz r7,VCPU_VRSAVE(r31) 2300 lwz r7,VCPU_VRSAVE(r31)
2291 mtspr SPRN_VRSAVE,r7 2301 mtspr SPRN_VRSAVE,r7
2292 mtlr r30 2302 mtlr r30
2293 mr r4,r31 2303 mr r4,r31
2294 blr 2304 blr
2295 2305
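Both kvmppc_save_fp and kvmppc_load_fp first turn on the FP (and, where available, VMX/VSX) facilities in the MSR before touching the corresponding register state; the mtmsrd/isync pair makes the new MSR take effect before the loads and stores. A small sketch of the mask they build, with placeholder bit values (the real definitions come from asm/reg.h):

#define SK_MSR_FP	(1ul << 13)	/* illustrative placeholders */
#define SK_MSR_VEC	(1ul << 25)
#define SK_MSR_VSX	(1ul << 23)

static unsigned long fp_unit_enable(unsigned long msr,
				    int has_altivec, int has_vsx)
{
	msr |= SK_MSR_FP;
	if (has_altivec)
		msr |= SK_MSR_VEC;	/* oris r8,r8,MSR_VEC@h */
	if (has_vsx)
		msr |= SK_MSR_VSX;	/* oris r8,r8,MSR_VSX@h */
	return msr;			/* then mtmsrd r8 ; isync */
}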
2296 /* 2306 /*
2297 * We come here if we get any exception or interrupt while we are 2307 * We come here if we get any exception or interrupt while we are
2298 * executing host real mode code while in guest MMU context. 2308 * executing host real mode code while in guest MMU context.
2299 * For now just spin, but we should do something better. 2309 * For now just spin, but we should do something better.
2300 */ 2310 */
2301 kvmppc_bad_host_intr: 2311 kvmppc_bad_host_intr:
2302 b . 2312 b .
2303 2313
2304 /* 2314 /*
2305 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken 2315 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
2306 * from VCPU_INTR_MSR and is modified based on the required TM state changes. 2316 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
2307 * r11 has the guest MSR value (in/out) 2317 * r11 has the guest MSR value (in/out)
2308 * r9 has a vcpu pointer (in) 2318 * r9 has a vcpu pointer (in)
2309 * r0 is used as a scratch register 2319 * r0 is used as a scratch register
2310 */ 2320 */
2311 kvmppc_msr_interrupt: 2321 kvmppc_msr_interrupt:
2312 rldicl r0, r11, 64 - MSR_TS_S_LG, 62 2322 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
2313 cmpwi r0, 2 /* Check if we are in transactional state.. */ 2323 cmpwi r0, 2 /* Check if we are in transactional state.. */
2314 ld r11, VCPU_INTR_MSR(r9) 2324 ld r11, VCPU_INTR_MSR(r9)
2315 bne 1f 2325 bne 1f
2316 /* ... if transactional, change to suspended */ 2326 /* ... if transactional, change to suspended */
2317 li r0, 1 2327 li r0, 1
2318 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG 2328 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
2319 blr 2329 blr
2320 2330
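kvmppc_msr_interrupt above can be summarized as: take the new MSR from VCPU_INTR_MSR, but carry over the guest's transactional-state (TS) bits, demoting "transactional" to "suspended" the way a real interrupt delivery would. A C sketch, assuming the TS field sits at bits 34:33 (MSR_TS_S_LG = 33):

#define SK_MSR_TS_S_LG	33	/* assumed bit position of the TS field */

static unsigned long msr_interrupt(unsigned long guest_msr,
				   unsigned long intr_msr)
{
	unsigned long ts = (guest_msr >> SK_MSR_TS_S_LG) & 3;	/* 0b10 = transactional */

	if (ts == 2)
		ts = 1;						/* demote to suspended */
	intr_msr &= ~(3ul << SK_MSR_TS_S_LG);
	return intr_msr | (ts << SK_MSR_TS_S_LG);		/* rldimi r11,r0,... */
}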