Commit 623347ab1c9885c235f90a5b9b75f8d4494b76c1

Authored by James Hogan
Committed by Greg Kroah-Hartman
1 parent 645177f349

KVM: MIPS: Don't leak FPU/DSP to guest

commit f798217dfd038af981a18bbe4bc57027a08bb182 upstream.

The FPU and DSP are enabled via the CP0 Status CU1 and MX bits by
kvm_mips_set_c0_status() on a guest exit, presumably in case there is
active state that needs saving if pre-emption occurs. However, neither of
these bits is cleared again when returning to the guest.

This effectively gives the guest access to the FPU/DSP hardware after
the first guest exit even though it is not aware of its presence,
allowing FP instructions in guest user code to intermittently execute
natively instead of trapping into the guest OS for emulation. It will
then read & manipulate the hardware FP registers which technically
belong to the user process (e.g. QEMU), or are stale from another user
process. It can also crash the guest OS by causing an FP exception, for
which a guest exception handler won't have been registered.
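
For context, the two Coprocessor 0 Status bits involved gate FPU and DSP
access roughly as follows; the values below are quoted as defined in the
kernel's asm/mipsregs.h purely to make the bit names concrete:

    /* arch/mips/include/asm/mipsregs.h (illustrative excerpt) */
    #define ST0_MX	0x01000000	/* Status.MX (bit 24): DSP ASE enable */
    #define ST0_CU1	0x20000000	/* Status.CU1 (bit 29): coprocessor 1 (FPU) enable */

While either bit is left set with the guest running in user mode, the
corresponding FPU/DSP instructions execute on the real hardware instead of
trapping so that the exception can be delivered to the guest OS for
emulation.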

First, let's save and disable the FPU (and MSA) state with lose_fpu(1)
before entering the guest. This simplifies the problem, especially for
when guest FPU/MSA support is added in the future, and prevents FR=1 FPU
state being live when the FR bit gets cleared for the guest, which
according to the architecture causes the contents of the FPU and vector
registers to become UNPREDICTABLE.
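
In the run loop this boils down to a single call just before interrupts are
disabled for guest entry (the hunk is visible in the kvm_mips.c diff below).
The comment paraphrases what lose_fpu(1) does; the real helper lives in
asm/fpu.h and also covers MSA and preemption handling:

    /* kvm_arch_vcpu_ioctl_run(), immediately before guest entry */
    lose_fpu(1);	/* if any FPU (or MSA) state is live, save the FP/vector
    			 * registers and FCSR into current->thread.fpu, then
    			 * disable the FPU so nothing remains live in hardware */

    local_irq_disable();
    ...
    r = __kvm_mips_vcpu_run(run, vcpu);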

We can then safely remove the enabling of the FPU in
kvm_mips_set_c0_status(), since there should never be any active FPU or
MSA state to save at pre-emption, which should plug the FPU leak.

DSP state is always live rather than being lazily restored, so for that
it is simpler to just clear the MX bit again when re-entering the guest.
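
The remaining hunks are small. In kvm_mips_set_c0_status() (further down in
kvm_mips.c, beyond the excerpt shown below) the cpu_has_fpu branch that set
CU1 is dropped, leaving the function looking approximately like this (a
paraphrase of the result, not a verbatim quote of the patched source):

    void kvm_mips_set_c0_status(void)
    {
    	uint32_t status = read_c0_status();

    	if (cpu_has_dsp)
    		status |= (ST0_MX);	/* DSP state is always live */

    	write_c0_status(status);
    	ehb();
    }

On the guest re-entry path, the matching kvm_locore.S change (visible in the
diff below) masks ST0_MX off along with ST0_CU0 when rebuilding the guest's
Status value, so the DSP is disabled again before the eret back to the guest.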

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Sanjay Lal <sanjayl@kymasys.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: kvm@vger.kernel.org
Cc: linux-mips@linux-mips.org
Cc: <stable@vger.kernel.org> # v3.10+: 044f0f03eca0: MIPS: KVM: Deliver guest interrupts
Cc: <stable@vger.kernel.org> # v3.10+
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Showing 2 changed files with 4 additions and 4 deletions

arch/mips/kvm/kvm_locore.S
1 /* 1 /*
2 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Main entry point for the guest, exception handling. 6 * Main entry point for the guest, exception handling.
7 * 7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com> 9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */ 10 */
11 11
12 #include <asm/asm.h> 12 #include <asm/asm.h>
13 #include <asm/asmmacro.h> 13 #include <asm/asmmacro.h>
14 #include <asm/regdef.h> 14 #include <asm/regdef.h>
15 #include <asm/mipsregs.h> 15 #include <asm/mipsregs.h>
16 #include <asm/stackframe.h> 16 #include <asm/stackframe.h>
17 #include <asm/asm-offsets.h> 17 #include <asm/asm-offsets.h>
18 18
19 19
20 #define _C_LABEL(x) x 20 #define _C_LABEL(x) x
21 #define MIPSX(name) mips32_ ## name 21 #define MIPSX(name) mips32_ ## name
22 #define CALLFRAME_SIZ 32 22 #define CALLFRAME_SIZ 32
23 23
24 /* 24 /*
25 * VECTOR 25 * VECTOR
26 * exception vector entrypoint 26 * exception vector entrypoint
27 */ 27 */
28 #define VECTOR(x, regmask) \ 28 #define VECTOR(x, regmask) \
29 .ent _C_LABEL(x),0; \ 29 .ent _C_LABEL(x),0; \
30 EXPORT(x); 30 EXPORT(x);
31 31
32 #define VECTOR_END(x) \ 32 #define VECTOR_END(x) \
33 EXPORT(x); 33 EXPORT(x);
34 34
35 /* Overload, Danger Will Robinson!! */ 35 /* Overload, Danger Will Robinson!! */
36 #define PT_HOST_ASID PT_BVADDR 36 #define PT_HOST_ASID PT_BVADDR
37 #define PT_HOST_USERLOCAL PT_EPC 37 #define PT_HOST_USERLOCAL PT_EPC
38 38
39 #define CP0_DDATA_LO $28,3 39 #define CP0_DDATA_LO $28,3
40 #define CP0_EBASE $15,1 40 #define CP0_EBASE $15,1
41 41
42 #define CP0_INTCTL $12,1 42 #define CP0_INTCTL $12,1
43 #define CP0_SRSCTL $12,2 43 #define CP0_SRSCTL $12,2
44 #define CP0_SRSMAP $12,3 44 #define CP0_SRSMAP $12,3
45 #define CP0_HWRENA $7,0 45 #define CP0_HWRENA $7,0
46 46
47 /* Resume Flags */ 47 /* Resume Flags */
48 #define RESUME_FLAG_HOST (1<<1) /* Resume host? */ 48 #define RESUME_FLAG_HOST (1<<1) /* Resume host? */
49 49
50 #define RESUME_GUEST 0 50 #define RESUME_GUEST 0
51 #define RESUME_HOST RESUME_FLAG_HOST 51 #define RESUME_HOST RESUME_FLAG_HOST
52 52
53 /* 53 /*
54 * __kvm_mips_vcpu_run: entry point to the guest 54 * __kvm_mips_vcpu_run: entry point to the guest
55 * a0: run 55 * a0: run
56 * a1: vcpu 56 * a1: vcpu
57 */ 57 */
58 .set noreorder 58 .set noreorder
59 .set noat 59 .set noat
60 60
61 FEXPORT(__kvm_mips_vcpu_run) 61 FEXPORT(__kvm_mips_vcpu_run)
62 /* k0/k1 not being used in host kernel context */ 62 /* k0/k1 not being used in host kernel context */
63 INT_ADDIU k1, sp, -PT_SIZE 63 INT_ADDIU k1, sp, -PT_SIZE
64 LONG_S $0, PT_R0(k1) 64 LONG_S $0, PT_R0(k1)
65 LONG_S $1, PT_R1(k1) 65 LONG_S $1, PT_R1(k1)
66 LONG_S $2, PT_R2(k1) 66 LONG_S $2, PT_R2(k1)
67 LONG_S $3, PT_R3(k1) 67 LONG_S $3, PT_R3(k1)
68 68
69 LONG_S $4, PT_R4(k1) 69 LONG_S $4, PT_R4(k1)
70 LONG_S $5, PT_R5(k1) 70 LONG_S $5, PT_R5(k1)
71 LONG_S $6, PT_R6(k1) 71 LONG_S $6, PT_R6(k1)
72 LONG_S $7, PT_R7(k1) 72 LONG_S $7, PT_R7(k1)
73 73
74 LONG_S $8, PT_R8(k1) 74 LONG_S $8, PT_R8(k1)
75 LONG_S $9, PT_R9(k1) 75 LONG_S $9, PT_R9(k1)
76 LONG_S $10, PT_R10(k1) 76 LONG_S $10, PT_R10(k1)
77 LONG_S $11, PT_R11(k1) 77 LONG_S $11, PT_R11(k1)
78 LONG_S $12, PT_R12(k1) 78 LONG_S $12, PT_R12(k1)
79 LONG_S $13, PT_R13(k1) 79 LONG_S $13, PT_R13(k1)
80 LONG_S $14, PT_R14(k1) 80 LONG_S $14, PT_R14(k1)
81 LONG_S $15, PT_R15(k1) 81 LONG_S $15, PT_R15(k1)
82 LONG_S $16, PT_R16(k1) 82 LONG_S $16, PT_R16(k1)
83 LONG_S $17, PT_R17(k1) 83 LONG_S $17, PT_R17(k1)
84 84
85 LONG_S $18, PT_R18(k1) 85 LONG_S $18, PT_R18(k1)
86 LONG_S $19, PT_R19(k1) 86 LONG_S $19, PT_R19(k1)
87 LONG_S $20, PT_R20(k1) 87 LONG_S $20, PT_R20(k1)
88 LONG_S $21, PT_R21(k1) 88 LONG_S $21, PT_R21(k1)
89 LONG_S $22, PT_R22(k1) 89 LONG_S $22, PT_R22(k1)
90 LONG_S $23, PT_R23(k1) 90 LONG_S $23, PT_R23(k1)
91 LONG_S $24, PT_R24(k1) 91 LONG_S $24, PT_R24(k1)
92 LONG_S $25, PT_R25(k1) 92 LONG_S $25, PT_R25(k1)
93 93
94 /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */ 94 /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
95 95
96 LONG_S $28, PT_R28(k1) 96 LONG_S $28, PT_R28(k1)
97 LONG_S $29, PT_R29(k1) 97 LONG_S $29, PT_R29(k1)
98 LONG_S $30, PT_R30(k1) 98 LONG_S $30, PT_R30(k1)
99 LONG_S $31, PT_R31(k1) 99 LONG_S $31, PT_R31(k1)
100 100
101 /* Save hi/lo */ 101 /* Save hi/lo */
102 mflo v0 102 mflo v0
103 LONG_S v0, PT_LO(k1) 103 LONG_S v0, PT_LO(k1)
104 mfhi v1 104 mfhi v1
105 LONG_S v1, PT_HI(k1) 105 LONG_S v1, PT_HI(k1)
106 106
107 /* Save host status */ 107 /* Save host status */
108 mfc0 v0, CP0_STATUS 108 mfc0 v0, CP0_STATUS
109 LONG_S v0, PT_STATUS(k1) 109 LONG_S v0, PT_STATUS(k1)
110 110
111 /* Save host ASID, shove it into the BVADDR location */ 111 /* Save host ASID, shove it into the BVADDR location */
112 mfc0 v1, CP0_ENTRYHI 112 mfc0 v1, CP0_ENTRYHI
113 andi v1, 0xff 113 andi v1, 0xff
114 LONG_S v1, PT_HOST_ASID(k1) 114 LONG_S v1, PT_HOST_ASID(k1)
115 115
116 /* Save DDATA_LO, will be used to store pointer to vcpu */ 116 /* Save DDATA_LO, will be used to store pointer to vcpu */
117 mfc0 v1, CP0_DDATA_LO 117 mfc0 v1, CP0_DDATA_LO
118 LONG_S v1, PT_HOST_USERLOCAL(k1) 118 LONG_S v1, PT_HOST_USERLOCAL(k1)
119 119
120 /* DDATA_LO has pointer to vcpu */ 120 /* DDATA_LO has pointer to vcpu */
121 mtc0 a1, CP0_DDATA_LO 121 mtc0 a1, CP0_DDATA_LO
122 122
123 /* Offset into vcpu->arch */ 123 /* Offset into vcpu->arch */
124 INT_ADDIU k1, a1, VCPU_HOST_ARCH 124 INT_ADDIU k1, a1, VCPU_HOST_ARCH
125 125
126 /* 126 /*
127 * Save the host stack to VCPU, used for exception processing 127 * Save the host stack to VCPU, used for exception processing
128 * when we exit from the Guest 128 * when we exit from the Guest
129 */ 129 */
130 LONG_S sp, VCPU_HOST_STACK(k1) 130 LONG_S sp, VCPU_HOST_STACK(k1)
131 131
132 /* Save the kernel gp as well */ 132 /* Save the kernel gp as well */
133 LONG_S gp, VCPU_HOST_GP(k1) 133 LONG_S gp, VCPU_HOST_GP(k1)
134 134
135 /* Setup status register for running the guest in UM, interrupts are disabled */ 135 /* Setup status register for running the guest in UM, interrupts are disabled */
136 li k0, (ST0_EXL | KSU_USER | ST0_BEV) 136 li k0, (ST0_EXL | KSU_USER | ST0_BEV)
137 mtc0 k0, CP0_STATUS 137 mtc0 k0, CP0_STATUS
138 ehb 138 ehb
139 139
140 /* load up the new EBASE */ 140 /* load up the new EBASE */
141 LONG_L k0, VCPU_GUEST_EBASE(k1) 141 LONG_L k0, VCPU_GUEST_EBASE(k1)
142 mtc0 k0, CP0_EBASE 142 mtc0 k0, CP0_EBASE
143 143
144 /* 144 /*
145 * Now that the new EBASE has been loaded, unset BEV, set 145 * Now that the new EBASE has been loaded, unset BEV, set
146 * interrupt mask as it was but make sure that timer interrupts 146 * interrupt mask as it was but make sure that timer interrupts
147 * are enabled 147 * are enabled
148 */ 148 */
149 li k0, (ST0_EXL | KSU_USER | ST0_IE) 149 li k0, (ST0_EXL | KSU_USER | ST0_IE)
150 andi v0, v0, ST0_IM 150 andi v0, v0, ST0_IM
151 or k0, k0, v0 151 or k0, k0, v0
152 mtc0 k0, CP0_STATUS 152 mtc0 k0, CP0_STATUS
153 ehb 153 ehb
154 154
155 155
156 /* Set Guest EPC */ 156 /* Set Guest EPC */
157 LONG_L t0, VCPU_PC(k1) 157 LONG_L t0, VCPU_PC(k1)
158 mtc0 t0, CP0_EPC 158 mtc0 t0, CP0_EPC
159 159
160 FEXPORT(__kvm_mips_load_asid) 160 FEXPORT(__kvm_mips_load_asid)
161 /* Set the ASID for the Guest Kernel */ 161 /* Set the ASID for the Guest Kernel */
162 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 162 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
163 /* addresses shift to 0x80000000 */ 163 /* addresses shift to 0x80000000 */
164 bltz t0, 1f /* If kernel */ 164 bltz t0, 1f /* If kernel */
165 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 165 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
166 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 166 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
167 1: 167 1:
168 /* t1: contains the base of the ASID array, need to get the cpu id */ 168 /* t1: contains the base of the ASID array, need to get the cpu id */
169 LONG_L t2, TI_CPU($28) /* smp_processor_id */ 169 LONG_L t2, TI_CPU($28) /* smp_processor_id */
170 INT_SLL t2, t2, 2 /* x4 */ 170 INT_SLL t2, t2, 2 /* x4 */
171 REG_ADDU t3, t1, t2 171 REG_ADDU t3, t1, t2
172 LONG_L k0, (t3) 172 LONG_L k0, (t3)
173 andi k0, k0, 0xff 173 andi k0, k0, 0xff
174 mtc0 k0, CP0_ENTRYHI 174 mtc0 k0, CP0_ENTRYHI
175 ehb 175 ehb
176 176
177 /* Disable RDHWR access */ 177 /* Disable RDHWR access */
178 mtc0 zero, CP0_HWRENA 178 mtc0 zero, CP0_HWRENA
179 179
180 /* Now load up the Guest Context from VCPU */ 180 /* Now load up the Guest Context from VCPU */
181 LONG_L $1, VCPU_R1(k1) 181 LONG_L $1, VCPU_R1(k1)
182 LONG_L $2, VCPU_R2(k1) 182 LONG_L $2, VCPU_R2(k1)
183 LONG_L $3, VCPU_R3(k1) 183 LONG_L $3, VCPU_R3(k1)
184 184
185 LONG_L $4, VCPU_R4(k1) 185 LONG_L $4, VCPU_R4(k1)
186 LONG_L $5, VCPU_R5(k1) 186 LONG_L $5, VCPU_R5(k1)
187 LONG_L $6, VCPU_R6(k1) 187 LONG_L $6, VCPU_R6(k1)
188 LONG_L $7, VCPU_R7(k1) 188 LONG_L $7, VCPU_R7(k1)
189 189
190 LONG_L $8, VCPU_R8(k1) 190 LONG_L $8, VCPU_R8(k1)
191 LONG_L $9, VCPU_R9(k1) 191 LONG_L $9, VCPU_R9(k1)
192 LONG_L $10, VCPU_R10(k1) 192 LONG_L $10, VCPU_R10(k1)
193 LONG_L $11, VCPU_R11(k1) 193 LONG_L $11, VCPU_R11(k1)
194 LONG_L $12, VCPU_R12(k1) 194 LONG_L $12, VCPU_R12(k1)
195 LONG_L $13, VCPU_R13(k1) 195 LONG_L $13, VCPU_R13(k1)
196 LONG_L $14, VCPU_R14(k1) 196 LONG_L $14, VCPU_R14(k1)
197 LONG_L $15, VCPU_R15(k1) 197 LONG_L $15, VCPU_R15(k1)
198 LONG_L $16, VCPU_R16(k1) 198 LONG_L $16, VCPU_R16(k1)
199 LONG_L $17, VCPU_R17(k1) 199 LONG_L $17, VCPU_R17(k1)
200 LONG_L $18, VCPU_R18(k1) 200 LONG_L $18, VCPU_R18(k1)
201 LONG_L $19, VCPU_R19(k1) 201 LONG_L $19, VCPU_R19(k1)
202 LONG_L $20, VCPU_R20(k1) 202 LONG_L $20, VCPU_R20(k1)
203 LONG_L $21, VCPU_R21(k1) 203 LONG_L $21, VCPU_R21(k1)
204 LONG_L $22, VCPU_R22(k1) 204 LONG_L $22, VCPU_R22(k1)
205 LONG_L $23, VCPU_R23(k1) 205 LONG_L $23, VCPU_R23(k1)
206 LONG_L $24, VCPU_R24(k1) 206 LONG_L $24, VCPU_R24(k1)
207 LONG_L $25, VCPU_R25(k1) 207 LONG_L $25, VCPU_R25(k1)
208 208
209 /* k0/k1 loaded up later */ 209 /* k0/k1 loaded up later */
210 210
211 LONG_L $28, VCPU_R28(k1) 211 LONG_L $28, VCPU_R28(k1)
212 LONG_L $29, VCPU_R29(k1) 212 LONG_L $29, VCPU_R29(k1)
213 LONG_L $30, VCPU_R30(k1) 213 LONG_L $30, VCPU_R30(k1)
214 LONG_L $31, VCPU_R31(k1) 214 LONG_L $31, VCPU_R31(k1)
215 215
216 /* Restore hi/lo */ 216 /* Restore hi/lo */
217 LONG_L k0, VCPU_LO(k1) 217 LONG_L k0, VCPU_LO(k1)
218 mtlo k0 218 mtlo k0
219 219
220 LONG_L k0, VCPU_HI(k1) 220 LONG_L k0, VCPU_HI(k1)
221 mthi k0 221 mthi k0
222 222
223 FEXPORT(__kvm_mips_load_k0k1) 223 FEXPORT(__kvm_mips_load_k0k1)
224 /* Restore the guest's k0/k1 registers */ 224 /* Restore the guest's k0/k1 registers */
225 LONG_L k0, VCPU_R26(k1) 225 LONG_L k0, VCPU_R26(k1)
226 LONG_L k1, VCPU_R27(k1) 226 LONG_L k1, VCPU_R27(k1)
227 227
228 /* Jump to guest */ 228 /* Jump to guest */
229 eret 229 eret
230 230
231 VECTOR(MIPSX(exception), unknown) 231 VECTOR(MIPSX(exception), unknown)
232 /* 232 /*
233 * Find out what mode we came from and jump to the proper handler. 233 * Find out what mode we came from and jump to the proper handler.
234 */ 234 */
235 mtc0 k0, CP0_ERROREPC #01: Save guest k0 235 mtc0 k0, CP0_ERROREPC #01: Save guest k0
236 ehb #02: 236 ehb #02:
237 237
238 mfc0 k0, CP0_EBASE #02: Get EBASE 238 mfc0 k0, CP0_EBASE #02: Get EBASE
239 INT_SRL k0, k0, 10 #03: Get rid of CPUNum 239 INT_SRL k0, k0, 10 #03: Get rid of CPUNum
240 INT_SLL k0, k0, 10 #04 240 INT_SLL k0, k0, 10 #04
241 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 241 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
242 INT_ADDIU k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000 242 INT_ADDIU k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
243 j k0 #07: jump to the function 243 j k0 #07: jump to the function
244 nop #08: branch delay slot 244 nop #08: branch delay slot
245 VECTOR_END(MIPSX(exceptionEnd)) 245 VECTOR_END(MIPSX(exceptionEnd))
246 .end MIPSX(exception) 246 .end MIPSX(exception)
247 247
248 /* 248 /*
249 * Generic Guest exception handler. We end up here when the guest 249 * Generic Guest exception handler. We end up here when the guest
250 * does something that causes a trap to kernel mode. 250 * does something that causes a trap to kernel mode.
251 * 251 *
252 */ 252 */
253 NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) 253 NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
254 /* Get the VCPU pointer from DDTATA_LO */ 254 /* Get the VCPU pointer from DDTATA_LO */
255 mfc0 k1, CP0_DDATA_LO 255 mfc0 k1, CP0_DDATA_LO
256 INT_ADDIU k1, k1, VCPU_HOST_ARCH 256 INT_ADDIU k1, k1, VCPU_HOST_ARCH
257 257
258 /* Start saving Guest context to VCPU */ 258 /* Start saving Guest context to VCPU */
259 LONG_S $0, VCPU_R0(k1) 259 LONG_S $0, VCPU_R0(k1)
260 LONG_S $1, VCPU_R1(k1) 260 LONG_S $1, VCPU_R1(k1)
261 LONG_S $2, VCPU_R2(k1) 261 LONG_S $2, VCPU_R2(k1)
262 LONG_S $3, VCPU_R3(k1) 262 LONG_S $3, VCPU_R3(k1)
263 LONG_S $4, VCPU_R4(k1) 263 LONG_S $4, VCPU_R4(k1)
264 LONG_S $5, VCPU_R5(k1) 264 LONG_S $5, VCPU_R5(k1)
265 LONG_S $6, VCPU_R6(k1) 265 LONG_S $6, VCPU_R6(k1)
266 LONG_S $7, VCPU_R7(k1) 266 LONG_S $7, VCPU_R7(k1)
267 LONG_S $8, VCPU_R8(k1) 267 LONG_S $8, VCPU_R8(k1)
268 LONG_S $9, VCPU_R9(k1) 268 LONG_S $9, VCPU_R9(k1)
269 LONG_S $10, VCPU_R10(k1) 269 LONG_S $10, VCPU_R10(k1)
270 LONG_S $11, VCPU_R11(k1) 270 LONG_S $11, VCPU_R11(k1)
271 LONG_S $12, VCPU_R12(k1) 271 LONG_S $12, VCPU_R12(k1)
272 LONG_S $13, VCPU_R13(k1) 272 LONG_S $13, VCPU_R13(k1)
273 LONG_S $14, VCPU_R14(k1) 273 LONG_S $14, VCPU_R14(k1)
274 LONG_S $15, VCPU_R15(k1) 274 LONG_S $15, VCPU_R15(k1)
275 LONG_S $16, VCPU_R16(k1) 275 LONG_S $16, VCPU_R16(k1)
276 LONG_S $17, VCPU_R17(k1) 276 LONG_S $17, VCPU_R17(k1)
277 LONG_S $18, VCPU_R18(k1) 277 LONG_S $18, VCPU_R18(k1)
278 LONG_S $19, VCPU_R19(k1) 278 LONG_S $19, VCPU_R19(k1)
279 LONG_S $20, VCPU_R20(k1) 279 LONG_S $20, VCPU_R20(k1)
280 LONG_S $21, VCPU_R21(k1) 280 LONG_S $21, VCPU_R21(k1)
281 LONG_S $22, VCPU_R22(k1) 281 LONG_S $22, VCPU_R22(k1)
282 LONG_S $23, VCPU_R23(k1) 282 LONG_S $23, VCPU_R23(k1)
283 LONG_S $24, VCPU_R24(k1) 283 LONG_S $24, VCPU_R24(k1)
284 LONG_S $25, VCPU_R25(k1) 284 LONG_S $25, VCPU_R25(k1)
285 285
286 /* Guest k0/k1 saved later */ 286 /* Guest k0/k1 saved later */
287 287
288 LONG_S $28, VCPU_R28(k1) 288 LONG_S $28, VCPU_R28(k1)
289 LONG_S $29, VCPU_R29(k1) 289 LONG_S $29, VCPU_R29(k1)
290 LONG_S $30, VCPU_R30(k1) 290 LONG_S $30, VCPU_R30(k1)
291 LONG_S $31, VCPU_R31(k1) 291 LONG_S $31, VCPU_R31(k1)
292 292
293 /* We need to save hi/lo and restore them on 293 /* We need to save hi/lo and restore them on
294 * the way out 294 * the way out
295 */ 295 */
296 mfhi t0 296 mfhi t0
297 LONG_S t0, VCPU_HI(k1) 297 LONG_S t0, VCPU_HI(k1)
298 298
299 mflo t0 299 mflo t0
300 LONG_S t0, VCPU_LO(k1) 300 LONG_S t0, VCPU_LO(k1)
301 301
302 /* Finally save guest k0/k1 to VCPU */ 302 /* Finally save guest k0/k1 to VCPU */
303 mfc0 t0, CP0_ERROREPC 303 mfc0 t0, CP0_ERROREPC
304 LONG_S t0, VCPU_R26(k1) 304 LONG_S t0, VCPU_R26(k1)
305 305
306 /* Get GUEST k1 and save it in VCPU */ 306 /* Get GUEST k1 and save it in VCPU */
307 PTR_LI t1, ~0x2ff 307 PTR_LI t1, ~0x2ff
308 mfc0 t0, CP0_EBASE 308 mfc0 t0, CP0_EBASE
309 and t0, t0, t1 309 and t0, t0, t1
310 LONG_L t0, 0x3000(t0) 310 LONG_L t0, 0x3000(t0)
311 LONG_S t0, VCPU_R27(k1) 311 LONG_S t0, VCPU_R27(k1)
312 312
313 /* Now that context has been saved, we can use other registers */ 313 /* Now that context has been saved, we can use other registers */
314 314
315 /* Restore vcpu */ 315 /* Restore vcpu */
316 mfc0 a1, CP0_DDATA_LO 316 mfc0 a1, CP0_DDATA_LO
317 move s1, a1 317 move s1, a1
318 318
319 /* Restore run (vcpu->run) */ 319 /* Restore run (vcpu->run) */
320 LONG_L a0, VCPU_RUN(a1) 320 LONG_L a0, VCPU_RUN(a1)
321 /* Save pointer to run in s0, will be saved by the compiler */ 321 /* Save pointer to run in s0, will be saved by the compiler */
322 move s0, a0 322 move s0, a0
323 323
324 /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to 324 /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
325 * process the exception */ 325 * process the exception */
326 mfc0 k0,CP0_EPC 326 mfc0 k0,CP0_EPC
327 LONG_S k0, VCPU_PC(k1) 327 LONG_S k0, VCPU_PC(k1)
328 328
329 mfc0 k0, CP0_BADVADDR 329 mfc0 k0, CP0_BADVADDR
330 LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1) 330 LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
331 331
332 mfc0 k0, CP0_CAUSE 332 mfc0 k0, CP0_CAUSE
333 LONG_S k0, VCPU_HOST_CP0_CAUSE(k1) 333 LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
334 334
335 mfc0 k0, CP0_ENTRYHI 335 mfc0 k0, CP0_ENTRYHI
336 LONG_S k0, VCPU_HOST_ENTRYHI(k1) 336 LONG_S k0, VCPU_HOST_ENTRYHI(k1)
337 337
338 /* Now restore the host state just enough to run the handlers */ 338 /* Now restore the host state just enough to run the handlers */
339 339
340 /* Swtich EBASE to the one used by Linux */ 340 /* Swtich EBASE to the one used by Linux */
341 /* load up the host EBASE */ 341 /* load up the host EBASE */
342 mfc0 v0, CP0_STATUS 342 mfc0 v0, CP0_STATUS
343 343
344 .set at 344 .set at
345 or k0, v0, ST0_BEV 345 or k0, v0, ST0_BEV
346 .set noat 346 .set noat
347 347
348 mtc0 k0, CP0_STATUS 348 mtc0 k0, CP0_STATUS
349 ehb 349 ehb
350 350
351 LONG_L k0, VCPU_HOST_EBASE(k1) 351 LONG_L k0, VCPU_HOST_EBASE(k1)
352 mtc0 k0,CP0_EBASE 352 mtc0 k0,CP0_EBASE
353 353
354 354
355 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ 355 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
356 .set at 356 .set at
357 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) 357 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
358 or v0, v0, ST0_CU0 358 or v0, v0, ST0_CU0
359 .set noat 359 .set noat
360 mtc0 v0, CP0_STATUS 360 mtc0 v0, CP0_STATUS
361 ehb 361 ehb
362 362
363 /* Load up host GP */ 363 /* Load up host GP */
364 LONG_L gp, VCPU_HOST_GP(k1) 364 LONG_L gp, VCPU_HOST_GP(k1)
365 365
366 /* Need a stack before we can jump to "C" */ 366 /* Need a stack before we can jump to "C" */
367 LONG_L sp, VCPU_HOST_STACK(k1) 367 LONG_L sp, VCPU_HOST_STACK(k1)
368 368
369 /* Saved host state */ 369 /* Saved host state */
370 INT_ADDIU sp, sp, -PT_SIZE 370 INT_ADDIU sp, sp, -PT_SIZE
371 371
372 /* XXXKYMA do we need to load the host ASID, maybe not because the 372 /* XXXKYMA do we need to load the host ASID, maybe not because the
373 * kernel entries are marked GLOBAL, need to verify 373 * kernel entries are marked GLOBAL, need to verify
374 */ 374 */
375 375
376 /* Restore host DDATA_LO */ 376 /* Restore host DDATA_LO */
377 LONG_L k0, PT_HOST_USERLOCAL(sp) 377 LONG_L k0, PT_HOST_USERLOCAL(sp)
378 mtc0 k0, CP0_DDATA_LO 378 mtc0 k0, CP0_DDATA_LO
379 379
380 /* Restore RDHWR access */ 380 /* Restore RDHWR access */
381 PTR_LI k0, 0x2000000F 381 PTR_LI k0, 0x2000000F
382 mtc0 k0, CP0_HWRENA 382 mtc0 k0, CP0_HWRENA
383 383
384 /* Jump to handler */ 384 /* Jump to handler */
385 FEXPORT(__kvm_mips_jump_to_handler) 385 FEXPORT(__kvm_mips_jump_to_handler)
386 /* XXXKYMA: not sure if this is safe, how large is the stack?? 386 /* XXXKYMA: not sure if this is safe, how large is the stack??
387 * Now jump to the kvm_mips_handle_exit() to see if we can deal 387 * Now jump to the kvm_mips_handle_exit() to see if we can deal
388 * with this in the kernel */ 388 * with this in the kernel */
389 PTR_LA t9, kvm_mips_handle_exit 389 PTR_LA t9, kvm_mips_handle_exit
390 jalr.hb t9 390 jalr.hb t9
391 INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */ 391 INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */
392 392
393 /* Return from handler Make sure interrupts are disabled */ 393 /* Return from handler Make sure interrupts are disabled */
394 di 394 di
395 ehb 395 ehb
396 396
397 /* XXXKYMA: k0/k1 could have been blown away if we processed 397 /* XXXKYMA: k0/k1 could have been blown away if we processed
398 * an exception while we were handling the exception from the 398 * an exception while we were handling the exception from the
399 * guest, reload k1 399 * guest, reload k1
400 */ 400 */
401 401
402 move k1, s1 402 move k1, s1
403 INT_ADDIU k1, k1, VCPU_HOST_ARCH 403 INT_ADDIU k1, k1, VCPU_HOST_ARCH
404 404
405 /* Check return value, should tell us if we are returning to the 405 /* Check return value, should tell us if we are returning to the
406 * host (handle I/O etc)or resuming the guest 406 * host (handle I/O etc)or resuming the guest
407 */ 407 */
408 andi t0, v0, RESUME_HOST 408 andi t0, v0, RESUME_HOST
409 bnez t0, __kvm_mips_return_to_host 409 bnez t0, __kvm_mips_return_to_host
410 nop 410 nop
411 411
412 __kvm_mips_return_to_guest: 412 __kvm_mips_return_to_guest:
413 /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */ 413 /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
414 mtc0 s1, CP0_DDATA_LO 414 mtc0 s1, CP0_DDATA_LO
415 415
416 /* Load up the Guest EBASE to minimize the window where BEV is set */ 416 /* Load up the Guest EBASE to minimize the window where BEV is set */
417 LONG_L t0, VCPU_GUEST_EBASE(k1) 417 LONG_L t0, VCPU_GUEST_EBASE(k1)
418 418
419 /* Switch EBASE back to the one used by KVM */ 419 /* Switch EBASE back to the one used by KVM */
420 mfc0 v1, CP0_STATUS 420 mfc0 v1, CP0_STATUS
421 .set at 421 .set at
422 or k0, v1, ST0_BEV 422 or k0, v1, ST0_BEV
423 .set noat 423 .set noat
424 mtc0 k0, CP0_STATUS 424 mtc0 k0, CP0_STATUS
425 ehb 425 ehb
426 mtc0 t0, CP0_EBASE 426 mtc0 t0, CP0_EBASE
427 427
428 /* Setup status register for running guest in UM */ 428 /* Setup status register for running guest in UM */
429 .set at 429 .set at
430 or v1, v1, (ST0_EXL | KSU_USER | ST0_IE) 430 or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
431 and v1, v1, ~ST0_CU0 431 and v1, v1, ~(ST0_CU0 | ST0_MX)
432 .set noat 432 .set noat
433 mtc0 v1, CP0_STATUS 433 mtc0 v1, CP0_STATUS
434 ehb 434 ehb
435 435
436 /* Set Guest EPC */ 436 /* Set Guest EPC */
437 LONG_L t0, VCPU_PC(k1) 437 LONG_L t0, VCPU_PC(k1)
438 mtc0 t0, CP0_EPC 438 mtc0 t0, CP0_EPC
439 439
440 /* Set the ASID for the Guest Kernel */ 440 /* Set the ASID for the Guest Kernel */
441 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 441 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
442 /* addresses shift to 0x80000000 */ 442 /* addresses shift to 0x80000000 */
443 bltz t0, 1f /* If kernel */ 443 bltz t0, 1f /* If kernel */
444 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 444 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
445 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 445 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
446 1: 446 1:
447 /* t1: contains the base of the ASID array, need to get the cpu id */ 447 /* t1: contains the base of the ASID array, need to get the cpu id */
448 LONG_L t2, TI_CPU($28) /* smp_processor_id */ 448 LONG_L t2, TI_CPU($28) /* smp_processor_id */
449 INT_SLL t2, t2, 2 /* x4 */ 449 INT_SLL t2, t2, 2 /* x4 */
450 REG_ADDU t3, t1, t2 450 REG_ADDU t3, t1, t2
451 LONG_L k0, (t3) 451 LONG_L k0, (t3)
452 andi k0, k0, 0xff 452 andi k0, k0, 0xff
453 mtc0 k0,CP0_ENTRYHI 453 mtc0 k0,CP0_ENTRYHI
454 ehb 454 ehb
455 455
456 /* Disable RDHWR access */ 456 /* Disable RDHWR access */
457 mtc0 zero, CP0_HWRENA 457 mtc0 zero, CP0_HWRENA
458 458
459 /* load the guest context from VCPU and return */ 459 /* load the guest context from VCPU and return */
460 LONG_L $0, VCPU_R0(k1) 460 LONG_L $0, VCPU_R0(k1)
461 LONG_L $1, VCPU_R1(k1) 461 LONG_L $1, VCPU_R1(k1)
462 LONG_L $2, VCPU_R2(k1) 462 LONG_L $2, VCPU_R2(k1)
463 LONG_L $3, VCPU_R3(k1) 463 LONG_L $3, VCPU_R3(k1)
464 LONG_L $4, VCPU_R4(k1) 464 LONG_L $4, VCPU_R4(k1)
465 LONG_L $5, VCPU_R5(k1) 465 LONG_L $5, VCPU_R5(k1)
466 LONG_L $6, VCPU_R6(k1) 466 LONG_L $6, VCPU_R6(k1)
467 LONG_L $7, VCPU_R7(k1) 467 LONG_L $7, VCPU_R7(k1)
468 LONG_L $8, VCPU_R8(k1) 468 LONG_L $8, VCPU_R8(k1)
469 LONG_L $9, VCPU_R9(k1) 469 LONG_L $9, VCPU_R9(k1)
470 LONG_L $10, VCPU_R10(k1) 470 LONG_L $10, VCPU_R10(k1)
471 LONG_L $11, VCPU_R11(k1) 471 LONG_L $11, VCPU_R11(k1)
472 LONG_L $12, VCPU_R12(k1) 472 LONG_L $12, VCPU_R12(k1)
473 LONG_L $13, VCPU_R13(k1) 473 LONG_L $13, VCPU_R13(k1)
474 LONG_L $14, VCPU_R14(k1) 474 LONG_L $14, VCPU_R14(k1)
475 LONG_L $15, VCPU_R15(k1) 475 LONG_L $15, VCPU_R15(k1)
476 LONG_L $16, VCPU_R16(k1) 476 LONG_L $16, VCPU_R16(k1)
477 LONG_L $17, VCPU_R17(k1) 477 LONG_L $17, VCPU_R17(k1)
478 LONG_L $18, VCPU_R18(k1) 478 LONG_L $18, VCPU_R18(k1)
479 LONG_L $19, VCPU_R19(k1) 479 LONG_L $19, VCPU_R19(k1)
480 LONG_L $20, VCPU_R20(k1) 480 LONG_L $20, VCPU_R20(k1)
481 LONG_L $21, VCPU_R21(k1) 481 LONG_L $21, VCPU_R21(k1)
482 LONG_L $22, VCPU_R22(k1) 482 LONG_L $22, VCPU_R22(k1)
483 LONG_L $23, VCPU_R23(k1) 483 LONG_L $23, VCPU_R23(k1)
484 LONG_L $24, VCPU_R24(k1) 484 LONG_L $24, VCPU_R24(k1)
485 LONG_L $25, VCPU_R25(k1) 485 LONG_L $25, VCPU_R25(k1)
486 486
487 /* $/k1 loaded later */ 487 /* $/k1 loaded later */
488 LONG_L $28, VCPU_R28(k1) 488 LONG_L $28, VCPU_R28(k1)
489 LONG_L $29, VCPU_R29(k1) 489 LONG_L $29, VCPU_R29(k1)
490 LONG_L $30, VCPU_R30(k1) 490 LONG_L $30, VCPU_R30(k1)
491 LONG_L $31, VCPU_R31(k1) 491 LONG_L $31, VCPU_R31(k1)
492 492
493 FEXPORT(__kvm_mips_skip_guest_restore) 493 FEXPORT(__kvm_mips_skip_guest_restore)
494 LONG_L k0, VCPU_HI(k1) 494 LONG_L k0, VCPU_HI(k1)
495 mthi k0 495 mthi k0
496 496
497 LONG_L k0, VCPU_LO(k1) 497 LONG_L k0, VCPU_LO(k1)
498 mtlo k0 498 mtlo k0
499 499
500 LONG_L k0, VCPU_R26(k1) 500 LONG_L k0, VCPU_R26(k1)
501 LONG_L k1, VCPU_R27(k1) 501 LONG_L k1, VCPU_R27(k1)
502 502
503 eret 503 eret
504 504
505 __kvm_mips_return_to_host: 505 __kvm_mips_return_to_host:
506 /* EBASE is already pointing to Linux */ 506 /* EBASE is already pointing to Linux */
507 LONG_L k1, VCPU_HOST_STACK(k1) 507 LONG_L k1, VCPU_HOST_STACK(k1)
508 INT_ADDIU k1,k1, -PT_SIZE 508 INT_ADDIU k1,k1, -PT_SIZE
509 509
510 /* Restore host DDATA_LO */ 510 /* Restore host DDATA_LO */
511 LONG_L k0, PT_HOST_USERLOCAL(k1) 511 LONG_L k0, PT_HOST_USERLOCAL(k1)
512 mtc0 k0, CP0_DDATA_LO 512 mtc0 k0, CP0_DDATA_LO
513 513
514 /* Restore host ASID */ 514 /* Restore host ASID */
515 LONG_L k0, PT_HOST_ASID(sp) 515 LONG_L k0, PT_HOST_ASID(sp)
516 andi k0, 0xff 516 andi k0, 0xff
517 mtc0 k0,CP0_ENTRYHI 517 mtc0 k0,CP0_ENTRYHI
518 ehb 518 ehb
519 519
520 /* Load context saved on the host stack */ 520 /* Load context saved on the host stack */
521 LONG_L $0, PT_R0(k1) 521 LONG_L $0, PT_R0(k1)
522 LONG_L $1, PT_R1(k1) 522 LONG_L $1, PT_R1(k1)
523 523
524 /* r2/v0 is the return code, shift it down by 2 (arithmetic) 524 /* r2/v0 is the return code, shift it down by 2 (arithmetic)
525 * to recover the err code */ 525 * to recover the err code */
526 INT_SRA k0, v0, 2 526 INT_SRA k0, v0, 2
527 move $2, k0 527 move $2, k0
528 528
529 LONG_L $3, PT_R3(k1) 529 LONG_L $3, PT_R3(k1)
530 LONG_L $4, PT_R4(k1) 530 LONG_L $4, PT_R4(k1)
531 LONG_L $5, PT_R5(k1) 531 LONG_L $5, PT_R5(k1)
532 LONG_L $6, PT_R6(k1) 532 LONG_L $6, PT_R6(k1)
533 LONG_L $7, PT_R7(k1) 533 LONG_L $7, PT_R7(k1)
534 LONG_L $8, PT_R8(k1) 534 LONG_L $8, PT_R8(k1)
535 LONG_L $9, PT_R9(k1) 535 LONG_L $9, PT_R9(k1)
536 LONG_L $10, PT_R10(k1) 536 LONG_L $10, PT_R10(k1)
537 LONG_L $11, PT_R11(k1) 537 LONG_L $11, PT_R11(k1)
538 LONG_L $12, PT_R12(k1) 538 LONG_L $12, PT_R12(k1)
539 LONG_L $13, PT_R13(k1) 539 LONG_L $13, PT_R13(k1)
540 LONG_L $14, PT_R14(k1) 540 LONG_L $14, PT_R14(k1)
541 LONG_L $15, PT_R15(k1) 541 LONG_L $15, PT_R15(k1)
542 LONG_L $16, PT_R16(k1) 542 LONG_L $16, PT_R16(k1)
543 LONG_L $17, PT_R17(k1) 543 LONG_L $17, PT_R17(k1)
544 LONG_L $18, PT_R18(k1) 544 LONG_L $18, PT_R18(k1)
545 LONG_L $19, PT_R19(k1) 545 LONG_L $19, PT_R19(k1)
546 LONG_L $20, PT_R20(k1) 546 LONG_L $20, PT_R20(k1)
547 LONG_L $21, PT_R21(k1) 547 LONG_L $21, PT_R21(k1)
548 LONG_L $22, PT_R22(k1) 548 LONG_L $22, PT_R22(k1)
549 LONG_L $23, PT_R23(k1) 549 LONG_L $23, PT_R23(k1)
550 LONG_L $24, PT_R24(k1) 550 LONG_L $24, PT_R24(k1)
551 LONG_L $25, PT_R25(k1) 551 LONG_L $25, PT_R25(k1)
552 552
553 /* Host k0/k1 were not saved */ 553 /* Host k0/k1 were not saved */
554 554
555 LONG_L $28, PT_R28(k1) 555 LONG_L $28, PT_R28(k1)
556 LONG_L $29, PT_R29(k1) 556 LONG_L $29, PT_R29(k1)
557 LONG_L $30, PT_R30(k1) 557 LONG_L $30, PT_R30(k1)
558 558
559 LONG_L k0, PT_HI(k1) 559 LONG_L k0, PT_HI(k1)
560 mthi k0 560 mthi k0
561 561
562 LONG_L k0, PT_LO(k1) 562 LONG_L k0, PT_LO(k1)
563 mtlo k0 563 mtlo k0
564 564
565 /* Restore RDHWR access */ 565 /* Restore RDHWR access */
566 PTR_LI k0, 0x2000000F 566 PTR_LI k0, 0x2000000F
567 mtc0 k0, CP0_HWRENA 567 mtc0 k0, CP0_HWRENA
568 568
569 569
570 /* Restore RA, which is the address we will return to */ 570 /* Restore RA, which is the address we will return to */
571 LONG_L ra, PT_R31(k1) 571 LONG_L ra, PT_R31(k1)
572 j ra 572 j ra
573 nop 573 nop
574 574
575 VECTOR_END(MIPSX(GuestExceptionEnd)) 575 VECTOR_END(MIPSX(GuestExceptionEnd))
576 .end MIPSX(GuestException) 576 .end MIPSX(GuestException)
577 577
578 MIPSX(exceptions): 578 MIPSX(exceptions):
579 #### 579 ####
580 ##### The exception handlers. 580 ##### The exception handlers.
581 ##### 581 #####
582 .word _C_LABEL(MIPSX(GuestException)) # 0 582 .word _C_LABEL(MIPSX(GuestException)) # 0
583 .word _C_LABEL(MIPSX(GuestException)) # 1 583 .word _C_LABEL(MIPSX(GuestException)) # 1
584 .word _C_LABEL(MIPSX(GuestException)) # 2 584 .word _C_LABEL(MIPSX(GuestException)) # 2
585 .word _C_LABEL(MIPSX(GuestException)) # 3 585 .word _C_LABEL(MIPSX(GuestException)) # 3
586 .word _C_LABEL(MIPSX(GuestException)) # 4 586 .word _C_LABEL(MIPSX(GuestException)) # 4
587 .word _C_LABEL(MIPSX(GuestException)) # 5 587 .word _C_LABEL(MIPSX(GuestException)) # 5
588 .word _C_LABEL(MIPSX(GuestException)) # 6 588 .word _C_LABEL(MIPSX(GuestException)) # 6
589 .word _C_LABEL(MIPSX(GuestException)) # 7 589 .word _C_LABEL(MIPSX(GuestException)) # 7
590 .word _C_LABEL(MIPSX(GuestException)) # 8 590 .word _C_LABEL(MIPSX(GuestException)) # 8
591 .word _C_LABEL(MIPSX(GuestException)) # 9 591 .word _C_LABEL(MIPSX(GuestException)) # 9
592 .word _C_LABEL(MIPSX(GuestException)) # 10 592 .word _C_LABEL(MIPSX(GuestException)) # 10
593 .word _C_LABEL(MIPSX(GuestException)) # 11 593 .word _C_LABEL(MIPSX(GuestException)) # 11
594 .word _C_LABEL(MIPSX(GuestException)) # 12 594 .word _C_LABEL(MIPSX(GuestException)) # 12
595 .word _C_LABEL(MIPSX(GuestException)) # 13 595 .word _C_LABEL(MIPSX(GuestException)) # 13
596 .word _C_LABEL(MIPSX(GuestException)) # 14 596 .word _C_LABEL(MIPSX(GuestException)) # 14
597 .word _C_LABEL(MIPSX(GuestException)) # 15 597 .word _C_LABEL(MIPSX(GuestException)) # 15
598 .word _C_LABEL(MIPSX(GuestException)) # 16 598 .word _C_LABEL(MIPSX(GuestException)) # 16
599 .word _C_LABEL(MIPSX(GuestException)) # 17 599 .word _C_LABEL(MIPSX(GuestException)) # 17
600 .word _C_LABEL(MIPSX(GuestException)) # 18 600 .word _C_LABEL(MIPSX(GuestException)) # 18
601 .word _C_LABEL(MIPSX(GuestException)) # 19 601 .word _C_LABEL(MIPSX(GuestException)) # 19
602 .word _C_LABEL(MIPSX(GuestException)) # 20 602 .word _C_LABEL(MIPSX(GuestException)) # 20
603 .word _C_LABEL(MIPSX(GuestException)) # 21 603 .word _C_LABEL(MIPSX(GuestException)) # 21
604 .word _C_LABEL(MIPSX(GuestException)) # 22 604 .word _C_LABEL(MIPSX(GuestException)) # 22
605 .word _C_LABEL(MIPSX(GuestException)) # 23 605 .word _C_LABEL(MIPSX(GuestException)) # 23
606 .word _C_LABEL(MIPSX(GuestException)) # 24 606 .word _C_LABEL(MIPSX(GuestException)) # 24
607 .word _C_LABEL(MIPSX(GuestException)) # 25 607 .word _C_LABEL(MIPSX(GuestException)) # 25
608 .word _C_LABEL(MIPSX(GuestException)) # 26 608 .word _C_LABEL(MIPSX(GuestException)) # 26
609 .word _C_LABEL(MIPSX(GuestException)) # 27 609 .word _C_LABEL(MIPSX(GuestException)) # 27
610 .word _C_LABEL(MIPSX(GuestException)) # 28 610 .word _C_LABEL(MIPSX(GuestException)) # 28
611 .word _C_LABEL(MIPSX(GuestException)) # 29 611 .word _C_LABEL(MIPSX(GuestException)) # 29
612 .word _C_LABEL(MIPSX(GuestException)) # 30 612 .word _C_LABEL(MIPSX(GuestException)) # 30
613 .word _C_LABEL(MIPSX(GuestException)) # 31 613 .word _C_LABEL(MIPSX(GuestException)) # 31
614 614
615 615
616 /* This routine makes changes to the instruction stream effective to the hardware. 616 /* This routine makes changes to the instruction stream effective to the hardware.
617 * It should be called after the instruction stream is written. 617 * It should be called after the instruction stream is written.
618 * On return, the new instructions are effective. 618 * On return, the new instructions are effective.
619 * Inputs: 619 * Inputs:
620 * a0 = Start address of new instruction stream 620 * a0 = Start address of new instruction stream
621 * a1 = Size, in bytes, of new instruction stream 621 * a1 = Size, in bytes, of new instruction stream
622 */ 622 */
623 623
624 #define HW_SYNCI_Step $1 624 #define HW_SYNCI_Step $1
625 LEAF(MIPSX(SyncICache)) 625 LEAF(MIPSX(SyncICache))
626 .set push 626 .set push
627 .set mips32r2 627 .set mips32r2
628 beq a1, zero, 20f 628 beq a1, zero, 20f
629 nop 629 nop
630 REG_ADDU a1, a0, a1 630 REG_ADDU a1, a0, a1
631 rdhwr v0, HW_SYNCI_Step 631 rdhwr v0, HW_SYNCI_Step
632 beq v0, zero, 20f 632 beq v0, zero, 20f
633 nop 633 nop
634 10: 634 10:
635 synci 0(a0) 635 synci 0(a0)
636 REG_ADDU a0, a0, v0 636 REG_ADDU a0, a0, v0
637 sltu v1, a0, a1 637 sltu v1, a0, a1
638 bne v1, zero, 10b 638 bne v1, zero, 10b
639 nop 639 nop
640 sync 640 sync
641 20: 641 20:
642 jr.hb ra 642 jr.hb ra
643 nop 643 nop
644 .set pop 644 .set pop
645 END(MIPSX(SyncICache)) 645 END(MIPSX(SyncICache))
646 646
arch/mips/kvm/kvm_mips.c
1 /* 1 /*
2 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * KVM/MIPS: MIPS specific KVM APIs 6 * KVM/MIPS: MIPS specific KVM APIs
7 * 7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com> 9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10 */ 10 */
11 11
12 #include <linux/errno.h> 12 #include <linux/errno.h>
13 #include <linux/err.h> 13 #include <linux/err.h>
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/vmalloc.h> 15 #include <linux/vmalloc.h>
16 #include <linux/fs.h> 16 #include <linux/fs.h>
17 #include <linux/bootmem.h> 17 #include <linux/bootmem.h>
18 #include <asm/fpu.h>
18 #include <asm/page.h> 19 #include <asm/page.h>
19 #include <asm/cacheflush.h> 20 #include <asm/cacheflush.h>
20 #include <asm/mmu_context.h> 21 #include <asm/mmu_context.h>
21 22
22 #include <linux/kvm_host.h> 23 #include <linux/kvm_host.h>
23 24
24 #include "kvm_mips_int.h" 25 #include "kvm_mips_int.h"
25 #include "kvm_mips_comm.h" 26 #include "kvm_mips_comm.h"
26 27
27 #define CREATE_TRACE_POINTS 28 #define CREATE_TRACE_POINTS
28 #include "trace.h" 29 #include "trace.h"
29 30
30 #ifndef VECTORSPACING 31 #ifndef VECTORSPACING
31 #define VECTORSPACING 0x100 /* for EI/VI mode */ 32 #define VECTORSPACING 0x100 /* for EI/VI mode */
32 #endif 33 #endif
33 34
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 35 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35 struct kvm_stats_debugfs_item debugfs_entries[] = { 36 struct kvm_stats_debugfs_item debugfs_entries[] = {
36 { "wait", VCPU_STAT(wait_exits) }, 37 { "wait", VCPU_STAT(wait_exits) },
37 { "cache", VCPU_STAT(cache_exits) }, 38 { "cache", VCPU_STAT(cache_exits) },
38 { "signal", VCPU_STAT(signal_exits) }, 39 { "signal", VCPU_STAT(signal_exits) },
39 { "interrupt", VCPU_STAT(int_exits) }, 40 { "interrupt", VCPU_STAT(int_exits) },
40 { "cop_unsuable", VCPU_STAT(cop_unusable_exits) }, 41 { "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
41 { "tlbmod", VCPU_STAT(tlbmod_exits) }, 42 { "tlbmod", VCPU_STAT(tlbmod_exits) },
42 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) }, 43 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
43 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) }, 44 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
44 { "addrerr_st", VCPU_STAT(addrerr_st_exits) }, 45 { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
45 { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) }, 46 { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
46 { "syscall", VCPU_STAT(syscall_exits) }, 47 { "syscall", VCPU_STAT(syscall_exits) },
47 { "resvd_inst", VCPU_STAT(resvd_inst_exits) }, 48 { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
48 { "break_inst", VCPU_STAT(break_inst_exits) }, 49 { "break_inst", VCPU_STAT(break_inst_exits) },
49 { "flush_dcache", VCPU_STAT(flush_dcache_exits) }, 50 { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
50 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 51 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
51 {NULL} 52 {NULL}
52 }; 53 };
53 54
54 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) 55 static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
55 { 56 {
56 int i; 57 int i;
57 for_each_possible_cpu(i) { 58 for_each_possible_cpu(i) {
58 vcpu->arch.guest_kernel_asid[i] = 0; 59 vcpu->arch.guest_kernel_asid[i] = 0;
59 vcpu->arch.guest_user_asid[i] = 0; 60 vcpu->arch.guest_user_asid[i] = 0;
60 } 61 }
61 return 0; 62 return 0;
62 } 63 }
63 64
64 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) 65 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
65 { 66 {
66 return gfn; 67 return gfn;
67 } 68 }
68 69
69 /* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we 70 /* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we
70 * are "runnable" if interrupts are pending 71 * are "runnable" if interrupts are pending
71 */ 72 */
72 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 73 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
73 { 74 {
74 return !!(vcpu->arch.pending_exceptions); 75 return !!(vcpu->arch.pending_exceptions);
75 } 76 }
76 77
77 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 78 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
78 { 79 {
79 return 1; 80 return 1;
80 } 81 }
81 82
82 int kvm_arch_hardware_enable(void *garbage) 83 int kvm_arch_hardware_enable(void *garbage)
83 { 84 {
84 return 0; 85 return 0;
85 } 86 }
86 87
87 void kvm_arch_hardware_disable(void *garbage) 88 void kvm_arch_hardware_disable(void *garbage)
88 { 89 {
89 } 90 }
90 91
91 int kvm_arch_hardware_setup(void) 92 int kvm_arch_hardware_setup(void)
92 { 93 {
93 return 0; 94 return 0;
94 } 95 }
95 96
96 void kvm_arch_hardware_unsetup(void) 97 void kvm_arch_hardware_unsetup(void)
97 { 98 {
98 } 99 }
99 100
100 void kvm_arch_check_processor_compat(void *rtn) 101 void kvm_arch_check_processor_compat(void *rtn)
101 { 102 {
102 int *r = (int *)rtn; 103 int *r = (int *)rtn;
103 *r = 0; 104 *r = 0;
104 return; 105 return;
105 } 106 }
106 107
107 static void kvm_mips_init_tlbs(struct kvm *kvm) 108 static void kvm_mips_init_tlbs(struct kvm *kvm)
108 { 109 {
109 unsigned long wired; 110 unsigned long wired;
110 111
111 /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */ 112 /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
112 wired = read_c0_wired(); 113 wired = read_c0_wired();
113 write_c0_wired(wired + 1); 114 write_c0_wired(wired + 1);
114 mtc0_tlbw_hazard(); 115 mtc0_tlbw_hazard();
115 kvm->arch.commpage_tlb = wired; 116 kvm->arch.commpage_tlb = wired;
116 117
117 kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(), 118 kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
118 kvm->arch.commpage_tlb); 119 kvm->arch.commpage_tlb);
119 } 120 }
120 121
121 static void kvm_mips_init_vm_percpu(void *arg) 122 static void kvm_mips_init_vm_percpu(void *arg)
122 { 123 {
123 struct kvm *kvm = (struct kvm *)arg; 124 struct kvm *kvm = (struct kvm *)arg;
124 125
125 kvm_mips_init_tlbs(kvm); 126 kvm_mips_init_tlbs(kvm);
126 kvm_mips_callbacks->vm_init(kvm); 127 kvm_mips_callbacks->vm_init(kvm);
127 128
128 } 129 }
129 130
130 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 131 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
131 { 132 {
132 if (atomic_inc_return(&kvm_mips_instance) == 1) { 133 if (atomic_inc_return(&kvm_mips_instance) == 1) {
133 kvm_info("%s: 1st KVM instance, setup host TLB parameters\n", 134 kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
134 __func__); 135 __func__);
135 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); 136 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
136 } 137 }
137 138
138 139
139 return 0; 140 return 0;
140 } 141 }
141 142
142 void kvm_mips_free_vcpus(struct kvm *kvm) 143 void kvm_mips_free_vcpus(struct kvm *kvm)
143 { 144 {
144 unsigned int i; 145 unsigned int i;
145 struct kvm_vcpu *vcpu; 146 struct kvm_vcpu *vcpu;
146 147
147 /* Put the pages we reserved for the guest pmap */ 148 /* Put the pages we reserved for the guest pmap */
148 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) { 149 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
149 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) 150 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
150 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]); 151 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
151 } 152 }
152 kfree(kvm->arch.guest_pmap); 153 kfree(kvm->arch.guest_pmap);
153 154
154 kvm_for_each_vcpu(i, vcpu, kvm) { 155 kvm_for_each_vcpu(i, vcpu, kvm) {
155 kvm_arch_vcpu_free(vcpu); 156 kvm_arch_vcpu_free(vcpu);
156 } 157 }
157 158
158 mutex_lock(&kvm->lock); 159 mutex_lock(&kvm->lock);
159 160
160 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) 161 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
161 kvm->vcpus[i] = NULL; 162 kvm->vcpus[i] = NULL;
162 163
163 atomic_set(&kvm->online_vcpus, 0); 164 atomic_set(&kvm->online_vcpus, 0);
164 165
165 mutex_unlock(&kvm->lock); 166 mutex_unlock(&kvm->lock);
166 } 167 }
167 168
168 void kvm_arch_sync_events(struct kvm *kvm) 169 void kvm_arch_sync_events(struct kvm *kvm)
169 { 170 {
170 } 171 }
171 172
172 static void kvm_mips_uninit_tlbs(void *arg) 173 static void kvm_mips_uninit_tlbs(void *arg)
173 { 174 {
174 /* Restore wired count */ 175 /* Restore wired count */
175 write_c0_wired(0); 176 write_c0_wired(0);
176 mtc0_tlbw_hazard(); 177 mtc0_tlbw_hazard();
177 /* Clear out all the TLBs */ 178 /* Clear out all the TLBs */
178 kvm_local_flush_tlb_all(); 179 kvm_local_flush_tlb_all();
179 } 180 }
180 181
181 void kvm_arch_destroy_vm(struct kvm *kvm) 182 void kvm_arch_destroy_vm(struct kvm *kvm)
182 { 183 {
183 kvm_mips_free_vcpus(kvm); 184 kvm_mips_free_vcpus(kvm);
184 185
185 /* If this is the last instance, restore wired count */ 186 /* If this is the last instance, restore wired count */
186 if (atomic_dec_return(&kvm_mips_instance) == 0) { 187 if (atomic_dec_return(&kvm_mips_instance) == 0) {
187 kvm_info("%s: last KVM instance, restoring TLB parameters\n", 188 kvm_info("%s: last KVM instance, restoring TLB parameters\n",
188 __func__); 189 __func__);
189 on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1); 190 on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
190 } 191 }
191 } 192 }
192 193
193 long 194 long
194 kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 195 kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
195 { 196 {
196 return -ENOIOCTLCMD; 197 return -ENOIOCTLCMD;
197 } 198 }
198 199
199 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 200 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
200 struct kvm_memory_slot *dont) 201 struct kvm_memory_slot *dont)
201 { 202 {
202 } 203 }
203 204
204 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 205 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
205 unsigned long npages) 206 unsigned long npages)
206 { 207 {
207 return 0; 208 return 0;
208 } 209 }
209 210
210 void kvm_arch_memslots_updated(struct kvm *kvm) 211 void kvm_arch_memslots_updated(struct kvm *kvm)
211 { 212 {
212 } 213 }
213 214
214 int kvm_arch_prepare_memory_region(struct kvm *kvm, 215 int kvm_arch_prepare_memory_region(struct kvm *kvm,
215 struct kvm_memory_slot *memslot, 216 struct kvm_memory_slot *memslot,
216 struct kvm_userspace_memory_region *mem, 217 struct kvm_userspace_memory_region *mem,
217 enum kvm_mr_change change) 218 enum kvm_mr_change change)
218 { 219 {
219 return 0; 220 return 0;
220 } 221 }
221 222
222 void kvm_arch_commit_memory_region(struct kvm *kvm, 223 void kvm_arch_commit_memory_region(struct kvm *kvm,
223 struct kvm_userspace_memory_region *mem, 224 struct kvm_userspace_memory_region *mem,
224 const struct kvm_memory_slot *old, 225 const struct kvm_memory_slot *old,
225 enum kvm_mr_change change) 226 enum kvm_mr_change change)
226 { 227 {
227 unsigned long npages = 0; 228 unsigned long npages = 0;
228 int i, err = 0; 229 int i, err = 0;
229 230
230 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", 231 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
231 __func__, kvm, mem->slot, mem->guest_phys_addr, 232 __func__, kvm, mem->slot, mem->guest_phys_addr,
232 mem->memory_size, mem->userspace_addr); 233 mem->memory_size, mem->userspace_addr);
233 234
234 /* Setup Guest PMAP table */ 235 /* Setup Guest PMAP table */
235 if (!kvm->arch.guest_pmap) { 236 if (!kvm->arch.guest_pmap) {
236 if (mem->slot == 0) 237 if (mem->slot == 0)
237 npages = mem->memory_size >> PAGE_SHIFT; 238 npages = mem->memory_size >> PAGE_SHIFT;
238 239
239 if (npages) { 240 if (npages) {
240 kvm->arch.guest_pmap_npages = npages; 241 kvm->arch.guest_pmap_npages = npages;
241 kvm->arch.guest_pmap = 242 kvm->arch.guest_pmap =
242 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL); 243 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
243 244
244 if (!kvm->arch.guest_pmap) { 245 if (!kvm->arch.guest_pmap) {
245 kvm_err("Failed to allocate guest PMAP"); 246 kvm_err("Failed to allocate guest PMAP");
246 err = -ENOMEM; 247 err = -ENOMEM;
247 goto out; 248 goto out;
248 } 249 }
249 250
250 kvm_info 251 kvm_info
251 ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", 252 ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
252 npages, kvm->arch.guest_pmap); 253 npages, kvm->arch.guest_pmap);
253 254
254 /* Now setup the page table */ 255 /* Now setup the page table */
255 for (i = 0; i < npages; i++) { 256 for (i = 0; i < npages; i++) {
256 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; 257 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
257 } 258 }
258 } 259 }
259 } 260 }
260 out: 261 out:
261 return; 262 return;
262 } 263 }
263 264
264 void kvm_arch_flush_shadow_all(struct kvm *kvm) 265 void kvm_arch_flush_shadow_all(struct kvm *kvm)
265 { 266 {
266 } 267 }
267 268
268 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 269 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
269 struct kvm_memory_slot *slot) 270 struct kvm_memory_slot *slot)
270 { 271 {
271 } 272 }
272 273
273 void kvm_arch_flush_shadow(struct kvm *kvm) 274 void kvm_arch_flush_shadow(struct kvm *kvm)
274 { 275 {
275 } 276 }
276 277
277 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) 278 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
278 { 279 {
279 extern char mips32_exception[], mips32_exceptionEnd[]; 280 extern char mips32_exception[], mips32_exceptionEnd[];
280 extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; 281 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
281 int err, size, offset; 282 int err, size, offset;
282 void *gebase; 283 void *gebase;
283 int i; 284 int i;
284 285
285 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); 286 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
286 287
287 if (!vcpu) { 288 if (!vcpu) {
288 err = -ENOMEM; 289 err = -ENOMEM;
289 goto out; 290 goto out;
290 } 291 }
291 292
292 err = kvm_vcpu_init(vcpu, kvm, id); 293 err = kvm_vcpu_init(vcpu, kvm, id);
293 294
294 if (err) 295 if (err)
295 goto out_free_cpu; 296 goto out_free_cpu;
296 297
297 kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); 298 kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
298 299
299 /* Allocate space for host mode exception handlers that handle 300 /* Allocate space for host mode exception handlers that handle
300 * guest mode exits 301 * guest mode exits
301 */ 302 */
302 if (cpu_has_veic || cpu_has_vint) { 303 if (cpu_has_veic || cpu_has_vint) {
303 size = 0x200 + VECTORSPACING * 64; 304 size = 0x200 + VECTORSPACING * 64;
304 } else { 305 } else {
305 size = 0x4000; 306 size = 0x4000;
306 } 307 }
307 308
308 /* Save Linux EBASE */ 309 /* Save Linux EBASE */
309 vcpu->arch.host_ebase = (void *)read_c0_ebase(); 310 vcpu->arch.host_ebase = (void *)read_c0_ebase();
310 311
311 gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL); 312 gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
312 313
313 if (!gebase) { 314 if (!gebase) {
314 err = -ENOMEM; 315 err = -ENOMEM;
315 goto out_free_cpu; 316 goto out_free_cpu;
316 } 317 }
317 kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n", 318 kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
318 ALIGN(size, PAGE_SIZE), gebase); 319 ALIGN(size, PAGE_SIZE), gebase);
319 320
320 /* Save new ebase */ 321 /* Save new ebase */
321 vcpu->arch.guest_ebase = gebase; 322 vcpu->arch.guest_ebase = gebase;
322 323
323 /* Copy L1 Guest Exception handler to correct offset */ 324 /* Copy L1 Guest Exception handler to correct offset */
324 325
325 /* TLB Refill, EXL = 0 */ 326 /* TLB Refill, EXL = 0 */
326 memcpy(gebase, mips32_exception, 327 memcpy(gebase, mips32_exception,
327 mips32_exceptionEnd - mips32_exception); 328 mips32_exceptionEnd - mips32_exception);
328 329
329 /* General Exception Entry point */ 330 /* General Exception Entry point */
330 memcpy(gebase + 0x180, mips32_exception, 331 memcpy(gebase + 0x180, mips32_exception,
331 mips32_exceptionEnd - mips32_exception); 332 mips32_exceptionEnd - mips32_exception);
332 333
333 /* For vectored interrupts poke the exception code @ all offsets 0-7 */ 334 /* For vectored interrupts poke the exception code @ all offsets 0-7 */
334 for (i = 0; i < 8; i++) { 335 for (i = 0; i < 8; i++) {
335 kvm_debug("L1 Vectored handler @ %p\n", 336 kvm_debug("L1 Vectored handler @ %p\n",
336 gebase + 0x200 + (i * VECTORSPACING)); 337 gebase + 0x200 + (i * VECTORSPACING));
337 memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception, 338 memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
338 mips32_exceptionEnd - mips32_exception); 339 mips32_exceptionEnd - mips32_exception);
339 } 340 }
340 341
341 /* General handler, relocate to unmapped space for sanity's sake */ 342 /* General handler, relocate to unmapped space for sanity's sake */
342 offset = 0x2000; 343 offset = 0x2000;
343 kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n", 344 kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
344 gebase + offset, 345 gebase + offset,
345 mips32_GuestExceptionEnd - mips32_GuestException); 346 mips32_GuestExceptionEnd - mips32_GuestException);
346 347
347 memcpy(gebase + offset, mips32_GuestException, 348 memcpy(gebase + offset, mips32_GuestException,
348 mips32_GuestExceptionEnd - mips32_GuestException); 349 mips32_GuestExceptionEnd - mips32_GuestException);
349 350
350 /* Invalidate the icache for these ranges */ 351 /* Invalidate the icache for these ranges */
351 mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE)); 352 mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
352 353
353 /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */ 354 /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
354 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); 355 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
355 356
356 if (!vcpu->arch.kseg0_commpage) { 357 if (!vcpu->arch.kseg0_commpage) {
357 err = -ENOMEM; 358 err = -ENOMEM;
358 goto out_free_gebase; 359 goto out_free_gebase;
359 } 360 }
360 361
361 kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); 362 kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
362 kvm_mips_commpage_init(vcpu); 363 kvm_mips_commpage_init(vcpu);
363 364
364 /* Init */ 365 /* Init */
365 vcpu->arch.last_sched_cpu = -1; 366 vcpu->arch.last_sched_cpu = -1;
366 367
367 /* Start off the timer */ 368 /* Start off the timer */
368 kvm_mips_emulate_count(vcpu); 369 kvm_mips_emulate_count(vcpu);
369 370
370 return vcpu; 371 return vcpu;
371 372
372 out_free_gebase: 373 out_free_gebase:
373 kfree(gebase); 374 kfree(gebase);
374 375
375 out_free_cpu: 376 out_free_cpu:
376 kfree(vcpu); 377 kfree(vcpu);
377 378
378 out: 379 out:
379 return ERR_PTR(err); 380 return ERR_PTR(err);
380 } 381 }
381 382
382 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 383 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
383 { 384 {
384 hrtimer_cancel(&vcpu->arch.comparecount_timer); 385 hrtimer_cancel(&vcpu->arch.comparecount_timer);
385 386
386 kvm_vcpu_uninit(vcpu); 387 kvm_vcpu_uninit(vcpu);
387 388
388 kvm_mips_dump_stats(vcpu); 389 kvm_mips_dump_stats(vcpu);
389 390
390 kfree(vcpu->arch.guest_ebase); 391 kfree(vcpu->arch.guest_ebase);
391 kfree(vcpu->arch.kseg0_commpage); 392 kfree(vcpu->arch.kseg0_commpage);
392 kfree(vcpu); 393 kfree(vcpu);
393 } 394 }
394 395
395 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 396 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
396 { 397 {
397 kvm_arch_vcpu_free(vcpu); 398 kvm_arch_vcpu_free(vcpu);
398 } 399 }
399 400
400 int 401 int
401 kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 402 kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
402 struct kvm_guest_debug *dbg) 403 struct kvm_guest_debug *dbg)
403 { 404 {
404 return -ENOIOCTLCMD; 405 return -ENOIOCTLCMD;
405 } 406 }
406 407
407 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) 408 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
408 { 409 {
409 int r = 0; 410 int r = 0;
410 sigset_t sigsaved; 411 sigset_t sigsaved;
411 412
412 if (vcpu->sigset_active) 413 if (vcpu->sigset_active)
413 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 414 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
414 415
415 if (vcpu->mmio_needed) { 416 if (vcpu->mmio_needed) {
416 if (!vcpu->mmio_is_write) 417 if (!vcpu->mmio_is_write)
417 kvm_mips_complete_mmio_load(vcpu, run); 418 kvm_mips_complete_mmio_load(vcpu, run);
418 vcpu->mmio_needed = 0; 419 vcpu->mmio_needed = 0;
419 } 420 }
420 421
422 lose_fpu(1);
423
421 local_irq_disable(); 424 local_irq_disable();
422 /* Check if we have any exceptions/interrupts pending */ 425 /* Check if we have any exceptions/interrupts pending */
423 kvm_mips_deliver_interrupts(vcpu, 426 kvm_mips_deliver_interrupts(vcpu,
424 kvm_read_c0_guest_cause(vcpu->arch.cop0)); 427 kvm_read_c0_guest_cause(vcpu->arch.cop0));
425 428
426 kvm_guest_enter(); 429 kvm_guest_enter();
427 430
428 r = __kvm_mips_vcpu_run(run, vcpu); 431 r = __kvm_mips_vcpu_run(run, vcpu);
429 432
430 kvm_guest_exit(); 433 kvm_guest_exit();
431 local_irq_enable(); 434 local_irq_enable();
432 435
433 if (vcpu->sigset_active) 436 if (vcpu->sigset_active)
434 sigprocmask(SIG_SETMASK, &sigsaved, NULL); 437 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
435 438
436 return r; 439 return r;
437 } 440 }
438 441
439 int 442 int
440 kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) 443 kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
441 { 444 {
442 int intr = (int)irq->irq; 445 int intr = (int)irq->irq;
443 struct kvm_vcpu *dvcpu = NULL; 446 struct kvm_vcpu *dvcpu = NULL;
444 447
445 if (intr == 3 || intr == -3 || intr == 4 || intr == -4) 448 if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
446 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu, 449 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
447 (int)intr); 450 (int)intr);
448 451
449 if (irq->cpu == -1) 452 if (irq->cpu == -1)
450 dvcpu = vcpu; 453 dvcpu = vcpu;
451 else 454 else
452 dvcpu = vcpu->kvm->vcpus[irq->cpu]; 455 dvcpu = vcpu->kvm->vcpus[irq->cpu];
453 456
454 if (intr == 2 || intr == 3 || intr == 4) { 457 if (intr == 2 || intr == 3 || intr == 4) {
455 kvm_mips_callbacks->queue_io_int(dvcpu, irq); 458 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
456 459
457 } else if (intr == -2 || intr == -3 || intr == -4) { 460 } else if (intr == -2 || intr == -3 || intr == -4) {
458 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); 461 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
459 } else { 462 } else {
460 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__, 463 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
461 irq->cpu, irq->irq); 464 irq->cpu, irq->irq);
462 return -EINVAL; 465 return -EINVAL;
463 } 466 }
464 467
465 dvcpu->arch.wait = 0; 468 dvcpu->arch.wait = 0;
466 469
467 if (waitqueue_active(&dvcpu->wq)) { 470 if (waitqueue_active(&dvcpu->wq)) {
468 wake_up_interruptible(&dvcpu->wq); 471 wake_up_interruptible(&dvcpu->wq);
469 } 472 }
470 473
471 return 0; 474 return 0;
472 } 475 }
473 476
474 int 477 int
475 kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 478 kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
476 struct kvm_mp_state *mp_state) 479 struct kvm_mp_state *mp_state)
477 { 480 {
478 return -ENOIOCTLCMD; 481 return -ENOIOCTLCMD;
479 } 482 }
480 483
481 int 484 int
482 kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 485 kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
483 struct kvm_mp_state *mp_state) 486 struct kvm_mp_state *mp_state)
484 { 487 {
485 return -ENOIOCTLCMD; 488 return -ENOIOCTLCMD;
486 } 489 }
487 490
488 #define MIPS_CP0_32(_R, _S) \ 491 #define MIPS_CP0_32(_R, _S) \
489 (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S))) 492 (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
490 493
491 #define MIPS_CP0_64(_R, _S) \ 494 #define MIPS_CP0_64(_R, _S) \
492 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S))) 495 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
493 496
494 #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) 497 #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
495 #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0) 498 #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
496 #define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0) 499 #define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
497 #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0) 500 #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
498 #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2) 501 #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
499 #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0) 502 #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
500 #define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1) 503 #define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
501 #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0) 504 #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
502 #define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0) 505 #define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
503 #define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0) 506 #define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
504 #define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0) 507 #define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
505 #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0) 508 #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
506 #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0) 509 #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
507 #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) 510 #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
508 #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) 511 #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
509 #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1) 512 #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
510 #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) 513 #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
511 #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) 514 #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
512 #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) 515 #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
513 #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) 516 #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
514 #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) 517 #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
515 #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) 518 #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
516 #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) 519 #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
517 520
518 static u64 kvm_mips_get_one_regs[] = { 521 static u64 kvm_mips_get_one_regs[] = {
519 KVM_REG_MIPS_R0, 522 KVM_REG_MIPS_R0,
520 KVM_REG_MIPS_R1, 523 KVM_REG_MIPS_R1,
521 KVM_REG_MIPS_R2, 524 KVM_REG_MIPS_R2,
522 KVM_REG_MIPS_R3, 525 KVM_REG_MIPS_R3,
523 KVM_REG_MIPS_R4, 526 KVM_REG_MIPS_R4,
524 KVM_REG_MIPS_R5, 527 KVM_REG_MIPS_R5,
525 KVM_REG_MIPS_R6, 528 KVM_REG_MIPS_R6,
526 KVM_REG_MIPS_R7, 529 KVM_REG_MIPS_R7,
527 KVM_REG_MIPS_R8, 530 KVM_REG_MIPS_R8,
528 KVM_REG_MIPS_R9, 531 KVM_REG_MIPS_R9,
529 KVM_REG_MIPS_R10, 532 KVM_REG_MIPS_R10,
530 KVM_REG_MIPS_R11, 533 KVM_REG_MIPS_R11,
531 KVM_REG_MIPS_R12, 534 KVM_REG_MIPS_R12,
532 KVM_REG_MIPS_R13, 535 KVM_REG_MIPS_R13,
533 KVM_REG_MIPS_R14, 536 KVM_REG_MIPS_R14,
534 KVM_REG_MIPS_R15, 537 KVM_REG_MIPS_R15,
535 KVM_REG_MIPS_R16, 538 KVM_REG_MIPS_R16,
536 KVM_REG_MIPS_R17, 539 KVM_REG_MIPS_R17,
537 KVM_REG_MIPS_R18, 540 KVM_REG_MIPS_R18,
538 KVM_REG_MIPS_R19, 541 KVM_REG_MIPS_R19,
539 KVM_REG_MIPS_R20, 542 KVM_REG_MIPS_R20,
540 KVM_REG_MIPS_R21, 543 KVM_REG_MIPS_R21,
541 KVM_REG_MIPS_R22, 544 KVM_REG_MIPS_R22,
542 KVM_REG_MIPS_R23, 545 KVM_REG_MIPS_R23,
543 KVM_REG_MIPS_R24, 546 KVM_REG_MIPS_R24,
544 KVM_REG_MIPS_R25, 547 KVM_REG_MIPS_R25,
545 KVM_REG_MIPS_R26, 548 KVM_REG_MIPS_R26,
546 KVM_REG_MIPS_R27, 549 KVM_REG_MIPS_R27,
547 KVM_REG_MIPS_R28, 550 KVM_REG_MIPS_R28,
548 KVM_REG_MIPS_R29, 551 KVM_REG_MIPS_R29,
549 KVM_REG_MIPS_R30, 552 KVM_REG_MIPS_R30,
550 KVM_REG_MIPS_R31, 553 KVM_REG_MIPS_R31,
551 554
552 KVM_REG_MIPS_HI, 555 KVM_REG_MIPS_HI,
553 KVM_REG_MIPS_LO, 556 KVM_REG_MIPS_LO,
554 KVM_REG_MIPS_PC, 557 KVM_REG_MIPS_PC,
555 558
556 KVM_REG_MIPS_CP0_INDEX, 559 KVM_REG_MIPS_CP0_INDEX,
557 KVM_REG_MIPS_CP0_CONTEXT, 560 KVM_REG_MIPS_CP0_CONTEXT,
558 KVM_REG_MIPS_CP0_PAGEMASK, 561 KVM_REG_MIPS_CP0_PAGEMASK,
559 KVM_REG_MIPS_CP0_WIRED, 562 KVM_REG_MIPS_CP0_WIRED,
560 KVM_REG_MIPS_CP0_BADVADDR, 563 KVM_REG_MIPS_CP0_BADVADDR,
561 KVM_REG_MIPS_CP0_ENTRYHI, 564 KVM_REG_MIPS_CP0_ENTRYHI,
562 KVM_REG_MIPS_CP0_STATUS, 565 KVM_REG_MIPS_CP0_STATUS,
563 KVM_REG_MIPS_CP0_CAUSE, 566 KVM_REG_MIPS_CP0_CAUSE,
564 /* EPC set via kvm_regs, et al. */ 567 /* EPC set via kvm_regs, et al. */
565 KVM_REG_MIPS_CP0_CONFIG, 568 KVM_REG_MIPS_CP0_CONFIG,
566 KVM_REG_MIPS_CP0_CONFIG1, 569 KVM_REG_MIPS_CP0_CONFIG1,
567 KVM_REG_MIPS_CP0_CONFIG2, 570 KVM_REG_MIPS_CP0_CONFIG2,
568 KVM_REG_MIPS_CP0_CONFIG3, 571 KVM_REG_MIPS_CP0_CONFIG3,
569 KVM_REG_MIPS_CP0_CONFIG7, 572 KVM_REG_MIPS_CP0_CONFIG7,
570 KVM_REG_MIPS_CP0_ERROREPC 573 KVM_REG_MIPS_CP0_ERROREPC
571 }; 574 };
572 575
573 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, 576 static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
574 const struct kvm_one_reg *reg) 577 const struct kvm_one_reg *reg)
575 { 578 {
576 struct mips_coproc *cop0 = vcpu->arch.cop0; 579 struct mips_coproc *cop0 = vcpu->arch.cop0;
577 s64 v; 580 s64 v;
578 581
579 switch (reg->id) { 582 switch (reg->id) {
580 case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: 583 case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
581 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; 584 v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
582 break; 585 break;
583 case KVM_REG_MIPS_HI: 586 case KVM_REG_MIPS_HI:
584 v = (long)vcpu->arch.hi; 587 v = (long)vcpu->arch.hi;
585 break; 588 break;
586 case KVM_REG_MIPS_LO: 589 case KVM_REG_MIPS_LO:
587 v = (long)vcpu->arch.lo; 590 v = (long)vcpu->arch.lo;
588 break; 591 break;
589 case KVM_REG_MIPS_PC: 592 case KVM_REG_MIPS_PC:
590 v = (long)vcpu->arch.pc; 593 v = (long)vcpu->arch.pc;
591 break; 594 break;
592 595
593 case KVM_REG_MIPS_CP0_INDEX: 596 case KVM_REG_MIPS_CP0_INDEX:
594 v = (long)kvm_read_c0_guest_index(cop0); 597 v = (long)kvm_read_c0_guest_index(cop0);
595 break; 598 break;
596 case KVM_REG_MIPS_CP0_CONTEXT: 599 case KVM_REG_MIPS_CP0_CONTEXT:
597 v = (long)kvm_read_c0_guest_context(cop0); 600 v = (long)kvm_read_c0_guest_context(cop0);
598 break; 601 break;
599 case KVM_REG_MIPS_CP0_PAGEMASK: 602 case KVM_REG_MIPS_CP0_PAGEMASK:
600 v = (long)kvm_read_c0_guest_pagemask(cop0); 603 v = (long)kvm_read_c0_guest_pagemask(cop0);
601 break; 604 break;
602 case KVM_REG_MIPS_CP0_WIRED: 605 case KVM_REG_MIPS_CP0_WIRED:
603 v = (long)kvm_read_c0_guest_wired(cop0); 606 v = (long)kvm_read_c0_guest_wired(cop0);
604 break; 607 break;
605 case KVM_REG_MIPS_CP0_BADVADDR: 608 case KVM_REG_MIPS_CP0_BADVADDR:
606 v = (long)kvm_read_c0_guest_badvaddr(cop0); 609 v = (long)kvm_read_c0_guest_badvaddr(cop0);
607 break; 610 break;
608 case KVM_REG_MIPS_CP0_ENTRYHI: 611 case KVM_REG_MIPS_CP0_ENTRYHI:
609 v = (long)kvm_read_c0_guest_entryhi(cop0); 612 v = (long)kvm_read_c0_guest_entryhi(cop0);
610 break; 613 break;
611 case KVM_REG_MIPS_CP0_STATUS: 614 case KVM_REG_MIPS_CP0_STATUS:
612 v = (long)kvm_read_c0_guest_status(cop0); 615 v = (long)kvm_read_c0_guest_status(cop0);
613 break; 616 break;
614 case KVM_REG_MIPS_CP0_CAUSE: 617 case KVM_REG_MIPS_CP0_CAUSE:
615 v = (long)kvm_read_c0_guest_cause(cop0); 618 v = (long)kvm_read_c0_guest_cause(cop0);
616 break; 619 break;
617 case KVM_REG_MIPS_CP0_ERROREPC: 620 case KVM_REG_MIPS_CP0_ERROREPC:
618 v = (long)kvm_read_c0_guest_errorepc(cop0); 621 v = (long)kvm_read_c0_guest_errorepc(cop0);
619 break; 622 break;
620 case KVM_REG_MIPS_CP0_CONFIG: 623 case KVM_REG_MIPS_CP0_CONFIG:
621 v = (long)kvm_read_c0_guest_config(cop0); 624 v = (long)kvm_read_c0_guest_config(cop0);
622 break; 625 break;
623 case KVM_REG_MIPS_CP0_CONFIG1: 626 case KVM_REG_MIPS_CP0_CONFIG1:
624 v = (long)kvm_read_c0_guest_config1(cop0); 627 v = (long)kvm_read_c0_guest_config1(cop0);
625 break; 628 break;
626 case KVM_REG_MIPS_CP0_CONFIG2: 629 case KVM_REG_MIPS_CP0_CONFIG2:
627 v = (long)kvm_read_c0_guest_config2(cop0); 630 v = (long)kvm_read_c0_guest_config2(cop0);
628 break; 631 break;
629 case KVM_REG_MIPS_CP0_CONFIG3: 632 case KVM_REG_MIPS_CP0_CONFIG3:
630 v = (long)kvm_read_c0_guest_config3(cop0); 633 v = (long)kvm_read_c0_guest_config3(cop0);
631 break; 634 break;
632 case KVM_REG_MIPS_CP0_CONFIG7: 635 case KVM_REG_MIPS_CP0_CONFIG7:
633 v = (long)kvm_read_c0_guest_config7(cop0); 636 v = (long)kvm_read_c0_guest_config7(cop0);
634 break; 637 break;
635 default: 638 default:
636 return -EINVAL; 639 return -EINVAL;
637 } 640 }
638 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { 641 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
639 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; 642 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
640 return put_user(v, uaddr64); 643 return put_user(v, uaddr64);
641 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { 644 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
642 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; 645 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
643 u32 v32 = (u32)v; 646 u32 v32 = (u32)v;
644 return put_user(v32, uaddr32); 647 return put_user(v32, uaddr32);
645 } else { 648 } else {
646 return -EINVAL; 649 return -EINVAL;
647 } 650 }
648 } 651 }
649 652
650 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, 653 static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
651 const struct kvm_one_reg *reg) 654 const struct kvm_one_reg *reg)
652 { 655 {
653 struct mips_coproc *cop0 = vcpu->arch.cop0; 656 struct mips_coproc *cop0 = vcpu->arch.cop0;
654 u64 v; 657 u64 v;
655 658
656 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { 659 if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
657 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; 660 u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
658 661
659 if (get_user(v, uaddr64) != 0) 662 if (get_user(v, uaddr64) != 0)
660 return -EFAULT; 663 return -EFAULT;
661 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { 664 } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
662 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; 665 u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
663 s32 v32; 666 s32 v32;
664 667
665 if (get_user(v32, uaddr32) != 0) 668 if (get_user(v32, uaddr32) != 0)
666 return -EFAULT; 669 return -EFAULT;
667 v = (s64)v32; 670 v = (s64)v32;
668 } else { 671 } else {
669 return -EINVAL; 672 return -EINVAL;
670 } 673 }
671 674
672 switch (reg->id) { 675 switch (reg->id) {
673 case KVM_REG_MIPS_R0: 676 case KVM_REG_MIPS_R0:
674 /* Silently ignore requests to set $0 */ 677 /* Silently ignore requests to set $0 */
675 break; 678 break;
676 case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31: 679 case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
677 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; 680 vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
678 break; 681 break;
679 case KVM_REG_MIPS_HI: 682 case KVM_REG_MIPS_HI:
680 vcpu->arch.hi = v; 683 vcpu->arch.hi = v;
681 break; 684 break;
682 case KVM_REG_MIPS_LO: 685 case KVM_REG_MIPS_LO:
683 vcpu->arch.lo = v; 686 vcpu->arch.lo = v;
684 break; 687 break;
685 case KVM_REG_MIPS_PC: 688 case KVM_REG_MIPS_PC:
686 vcpu->arch.pc = v; 689 vcpu->arch.pc = v;
687 break; 690 break;
688 691
689 case KVM_REG_MIPS_CP0_INDEX: 692 case KVM_REG_MIPS_CP0_INDEX:
690 kvm_write_c0_guest_index(cop0, v); 693 kvm_write_c0_guest_index(cop0, v);
691 break; 694 break;
692 case KVM_REG_MIPS_CP0_CONTEXT: 695 case KVM_REG_MIPS_CP0_CONTEXT:
693 kvm_write_c0_guest_context(cop0, v); 696 kvm_write_c0_guest_context(cop0, v);
694 break; 697 break;
695 case KVM_REG_MIPS_CP0_PAGEMASK: 698 case KVM_REG_MIPS_CP0_PAGEMASK:
696 kvm_write_c0_guest_pagemask(cop0, v); 699 kvm_write_c0_guest_pagemask(cop0, v);
697 break; 700 break;
698 case KVM_REG_MIPS_CP0_WIRED: 701 case KVM_REG_MIPS_CP0_WIRED:
699 kvm_write_c0_guest_wired(cop0, v); 702 kvm_write_c0_guest_wired(cop0, v);
700 break; 703 break;
701 case KVM_REG_MIPS_CP0_BADVADDR: 704 case KVM_REG_MIPS_CP0_BADVADDR:
702 kvm_write_c0_guest_badvaddr(cop0, v); 705 kvm_write_c0_guest_badvaddr(cop0, v);
703 break; 706 break;
704 case KVM_REG_MIPS_CP0_ENTRYHI: 707 case KVM_REG_MIPS_CP0_ENTRYHI:
705 kvm_write_c0_guest_entryhi(cop0, v); 708 kvm_write_c0_guest_entryhi(cop0, v);
706 break; 709 break;
707 case KVM_REG_MIPS_CP0_STATUS: 710 case KVM_REG_MIPS_CP0_STATUS:
708 kvm_write_c0_guest_status(cop0, v); 711 kvm_write_c0_guest_status(cop0, v);
709 break; 712 break;
710 case KVM_REG_MIPS_CP0_CAUSE: 713 case KVM_REG_MIPS_CP0_CAUSE:
711 kvm_write_c0_guest_cause(cop0, v); 714 kvm_write_c0_guest_cause(cop0, v);
712 break; 715 break;
713 case KVM_REG_MIPS_CP0_ERROREPC: 716 case KVM_REG_MIPS_CP0_ERROREPC:
714 kvm_write_c0_guest_errorepc(cop0, v); 717 kvm_write_c0_guest_errorepc(cop0, v);
715 break; 718 break;
716 default: 719 default:
717 return -EINVAL; 720 return -EINVAL;
718 } 721 }
719 return 0; 722 return 0;
720 } 723 }
721 724
722 long 725 long
723 kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 726 kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
724 { 727 {
725 struct kvm_vcpu *vcpu = filp->private_data; 728 struct kvm_vcpu *vcpu = filp->private_data;
726 void __user *argp = (void __user *)arg; 729 void __user *argp = (void __user *)arg;
727 long r; 730 long r;
728 731
729 switch (ioctl) { 732 switch (ioctl) {
730 case KVM_SET_ONE_REG: 733 case KVM_SET_ONE_REG:
731 case KVM_GET_ONE_REG: { 734 case KVM_GET_ONE_REG: {
732 struct kvm_one_reg reg; 735 struct kvm_one_reg reg;
733 if (copy_from_user(&reg, argp, sizeof(reg))) 736 if (copy_from_user(&reg, argp, sizeof(reg)))
734 return -EFAULT; 737 return -EFAULT;
735 if (ioctl == KVM_SET_ONE_REG) 738 if (ioctl == KVM_SET_ONE_REG)
736 return kvm_mips_set_reg(vcpu, &reg); 739 return kvm_mips_set_reg(vcpu, &reg);
737 else 740 else
738 return kvm_mips_get_reg(vcpu, &reg); 741 return kvm_mips_get_reg(vcpu, &reg);
739 } 742 }
740 case KVM_GET_REG_LIST: { 743 case KVM_GET_REG_LIST: {
741 struct kvm_reg_list __user *user_list = argp; 744 struct kvm_reg_list __user *user_list = argp;
742 u64 __user *reg_dest; 745 u64 __user *reg_dest;
743 struct kvm_reg_list reg_list; 746 struct kvm_reg_list reg_list;
744 unsigned n; 747 unsigned n;
745 748
746 if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) 749 if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
747 return -EFAULT; 750 return -EFAULT;
748 n = reg_list.n; 751 n = reg_list.n;
749 reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs); 752 reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
750 if (copy_to_user(user_list, &reg_list, sizeof(reg_list))) 753 if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
751 return -EFAULT; 754 return -EFAULT;
752 if (n < reg_list.n) 755 if (n < reg_list.n)
753 return -E2BIG; 756 return -E2BIG;
754 reg_dest = user_list->reg; 757 reg_dest = user_list->reg;
755 if (copy_to_user(reg_dest, kvm_mips_get_one_regs, 758 if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
756 sizeof(kvm_mips_get_one_regs))) 759 sizeof(kvm_mips_get_one_regs)))
757 return -EFAULT; 760 return -EFAULT;
758 return 0; 761 return 0;
759 } 762 }
760 case KVM_NMI: 763 case KVM_NMI:
761 /* Treat the NMI as a CPU reset */ 764 /* Treat the NMI as a CPU reset */
762 r = kvm_mips_reset_vcpu(vcpu); 765 r = kvm_mips_reset_vcpu(vcpu);
763 break; 766 break;
764 case KVM_INTERRUPT: 767 case KVM_INTERRUPT:
765 { 768 {
766 struct kvm_mips_interrupt irq; 769 struct kvm_mips_interrupt irq;
767 r = -EFAULT; 770 r = -EFAULT;
768 if (copy_from_user(&irq, argp, sizeof(irq))) 771 if (copy_from_user(&irq, argp, sizeof(irq)))
769 goto out; 772 goto out;
770 773
771 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, 774 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
772 irq.irq); 775 irq.irq);
773 776
774 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); 777 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
775 break; 778 break;
776 } 779 }
777 default: 780 default:
778 r = -ENOIOCTLCMD; 781 r = -ENOIOCTLCMD;
779 } 782 }
780 783
781 out: 784 out:
782 return r; 785 return r;
783 } 786 }
784 787
785 /* 788 /*
786 * Get (and clear) the dirty memory log for a memory slot. 789 * Get (and clear) the dirty memory log for a memory slot.
787 */ 790 */
788 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) 791 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
789 { 792 {
790 struct kvm_memory_slot *memslot; 793 struct kvm_memory_slot *memslot;
791 unsigned long ga, ga_end; 794 unsigned long ga, ga_end;
792 int is_dirty = 0; 795 int is_dirty = 0;
793 int r; 796 int r;
794 unsigned long n; 797 unsigned long n;
795 798
796 mutex_lock(&kvm->slots_lock); 799 mutex_lock(&kvm->slots_lock);
797 800
798 r = kvm_get_dirty_log(kvm, log, &is_dirty); 801 r = kvm_get_dirty_log(kvm, log, &is_dirty);
799 if (r) 802 if (r)
800 goto out; 803 goto out;
801 804
802 /* If nothing is dirty, don't bother messing with page tables. */ 805 /* If nothing is dirty, don't bother messing with page tables. */
803 if (is_dirty) { 806 if (is_dirty) {
804 memslot = &kvm->memslots->memslots[log->slot]; 807 memslot = &kvm->memslots->memslots[log->slot];
805 808
806 ga = memslot->base_gfn << PAGE_SHIFT; 809 ga = memslot->base_gfn << PAGE_SHIFT;
807 ga_end = ga + (memslot->npages << PAGE_SHIFT); 810 ga_end = ga + (memslot->npages << PAGE_SHIFT);
808 811
809 printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, 812 printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
810 ga_end); 813 ga_end);
811 814
812 n = kvm_dirty_bitmap_bytes(memslot); 815 n = kvm_dirty_bitmap_bytes(memslot);
813 memset(memslot->dirty_bitmap, 0, n); 816 memset(memslot->dirty_bitmap, 0, n);
814 } 817 }
815 818
816 r = 0; 819 r = 0;
817 out: 820 out:
818 mutex_unlock(&kvm->slots_lock); 821 mutex_unlock(&kvm->slots_lock);
819 return r; 822 return r;
820 823
821 } 824 }
822 825
823 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) 826 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
824 { 827 {
825 long r; 828 long r;
826 829
827 switch (ioctl) { 830 switch (ioctl) {
828 default: 831 default:
829 r = -ENOIOCTLCMD; 832 r = -ENOIOCTLCMD;
830 } 833 }
831 834
832 return r; 835 return r;
833 } 836 }
834 837
835 int kvm_arch_init(void *opaque) 838 int kvm_arch_init(void *opaque)
836 { 839 {
837 int ret; 840 int ret;
838 841
839 if (kvm_mips_callbacks) { 842 if (kvm_mips_callbacks) {
840 kvm_err("kvm: module already exists\n"); 843 kvm_err("kvm: module already exists\n");
841 return -EEXIST; 844 return -EEXIST;
842 } 845 }
843 846
844 ret = kvm_mips_emulation_init(&kvm_mips_callbacks); 847 ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
845 848
846 return ret; 849 return ret;
847 } 850 }
848 851
849 void kvm_arch_exit(void) 852 void kvm_arch_exit(void)
850 { 853 {
851 kvm_mips_callbacks = NULL; 854 kvm_mips_callbacks = NULL;
852 } 855 }
853 856
854 int 857 int
855 kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 858 kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
856 { 859 {
857 return -ENOIOCTLCMD; 860 return -ENOIOCTLCMD;
858 } 861 }
859 862
860 int 863 int
861 kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 864 kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
862 { 865 {
863 return -ENOIOCTLCMD; 866 return -ENOIOCTLCMD;
864 } 867 }
865 868
866 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 869 int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
867 { 870 {
868 return 0; 871 return 0;
869 } 872 }
870 873
871 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 874 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
872 { 875 {
873 return -ENOIOCTLCMD; 876 return -ENOIOCTLCMD;
874 } 877 }
875 878
876 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 879 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
877 { 880 {
878 return -ENOIOCTLCMD; 881 return -ENOIOCTLCMD;
879 } 882 }
880 883
881 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 884 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
882 { 885 {
883 return VM_FAULT_SIGBUS; 886 return VM_FAULT_SIGBUS;
884 } 887 }
885 888
886 int kvm_dev_ioctl_check_extension(long ext) 889 int kvm_dev_ioctl_check_extension(long ext)
887 { 890 {
888 int r; 891 int r;
889 892
890 switch (ext) { 893 switch (ext) {
891 case KVM_CAP_ONE_REG: 894 case KVM_CAP_ONE_REG:
892 r = 1; 895 r = 1;
893 break; 896 break;
894 case KVM_CAP_COALESCED_MMIO: 897 case KVM_CAP_COALESCED_MMIO:
895 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 898 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
896 break; 899 break;
897 default: 900 default:
898 r = 0; 901 r = 0;
899 break; 902 break;
900 } 903 }
901 return r; 904 return r;
902 } 905 }
903 906
904 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) 907 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
905 { 908 {
906 return kvm_mips_pending_timer(vcpu); 909 return kvm_mips_pending_timer(vcpu);
907 } 910 }
908 911
909 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) 912 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
910 { 913 {
911 int i; 914 int i;
912 struct mips_coproc *cop0; 915 struct mips_coproc *cop0;
913 916
914 if (!vcpu) 917 if (!vcpu)
915 return -1; 918 return -1;
916 919
917 printk("VCPU Register Dump:\n"); 920 printk("VCPU Register Dump:\n");
918 printk("\tpc = 0x%08lx\n", vcpu->arch.pc);; 921 printk("\tpc = 0x%08lx\n", vcpu->arch.pc);;
919 printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); 922 printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
920 923
921 for (i = 0; i < 32; i += 4) { 924 for (i = 0; i < 32; i += 4) {
922 printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, 925 printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
923 vcpu->arch.gprs[i], 926 vcpu->arch.gprs[i],
924 vcpu->arch.gprs[i + 1], 927 vcpu->arch.gprs[i + 1],
925 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); 928 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
926 } 929 }
927 printk("\thi: 0x%08lx\n", vcpu->arch.hi); 930 printk("\thi: 0x%08lx\n", vcpu->arch.hi);
928 printk("\tlo: 0x%08lx\n", vcpu->arch.lo); 931 printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
929 932
930 cop0 = vcpu->arch.cop0; 933 cop0 = vcpu->arch.cop0;
931 printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n", 934 printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
932 kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0)); 935 kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
933 936
934 printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); 937 printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
935 938
936 return 0; 939 return 0;
937 } 940 }
938 941
939 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 942 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
940 { 943 {
941 int i; 944 int i;
942 945
943 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) 946 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
944 vcpu->arch.gprs[i] = regs->gpr[i]; 947 vcpu->arch.gprs[i] = regs->gpr[i];
945 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ 948 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
946 vcpu->arch.hi = regs->hi; 949 vcpu->arch.hi = regs->hi;
947 vcpu->arch.lo = regs->lo; 950 vcpu->arch.lo = regs->lo;
948 vcpu->arch.pc = regs->pc; 951 vcpu->arch.pc = regs->pc;
949 952
950 return 0; 953 return 0;
951 } 954 }
952 955
953 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 956 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
954 { 957 {
955 int i; 958 int i;
956 959
957 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) 960 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
958 regs->gpr[i] = vcpu->arch.gprs[i]; 961 regs->gpr[i] = vcpu->arch.gprs[i];
959 962
960 regs->hi = vcpu->arch.hi; 963 regs->hi = vcpu->arch.hi;
961 regs->lo = vcpu->arch.lo; 964 regs->lo = vcpu->arch.lo;
962 regs->pc = vcpu->arch.pc; 965 regs->pc = vcpu->arch.pc;
963 966
964 return 0; 967 return 0;
965 } 968 }
966 969
967 void kvm_mips_comparecount_func(unsigned long data) 970 void kvm_mips_comparecount_func(unsigned long data)
968 { 971 {
969 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; 972 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
970 973
971 kvm_mips_callbacks->queue_timer_int(vcpu); 974 kvm_mips_callbacks->queue_timer_int(vcpu);
972 975
973 vcpu->arch.wait = 0; 976 vcpu->arch.wait = 0;
974 if (waitqueue_active(&vcpu->wq)) { 977 if (waitqueue_active(&vcpu->wq)) {
975 wake_up_interruptible(&vcpu->wq); 978 wake_up_interruptible(&vcpu->wq);
976 } 979 }
977 } 980 }
978 981
979 /* 982 /*
980 * low level hrtimer wake routine. 983 * low level hrtimer wake routine.
981 */ 984 */
982 enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) 985 enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
983 { 986 {
984 struct kvm_vcpu *vcpu; 987 struct kvm_vcpu *vcpu;
985 988
986 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); 989 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
987 kvm_mips_comparecount_func((unsigned long) vcpu); 990 kvm_mips_comparecount_func((unsigned long) vcpu);
988 hrtimer_forward_now(&vcpu->arch.comparecount_timer, 991 hrtimer_forward_now(&vcpu->arch.comparecount_timer,
989 ktime_set(0, MS_TO_NS(10))); 992 ktime_set(0, MS_TO_NS(10)));
990 return HRTIMER_RESTART; 993 return HRTIMER_RESTART;
991 } 994 }
992 995
993 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 996 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
994 { 997 {
995 kvm_mips_callbacks->vcpu_init(vcpu); 998 kvm_mips_callbacks->vcpu_init(vcpu);
996 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, 999 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
997 HRTIMER_MODE_REL); 1000 HRTIMER_MODE_REL);
998 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; 1001 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
999 return 0; 1002 return 0;
1000 } 1003 }
1001 1004
1002 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 1005 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
1003 { 1006 {
1004 return; 1007 return;
1005 } 1008 }
1006 1009
1007 int 1010 int
1008 kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) 1011 kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
1009 { 1012 {
1010 return 0; 1013 return 0;
1011 } 1014 }
1012 1015
1013 /* Initial guest state */ 1016 /* Initial guest state */
1014 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 1017 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1015 { 1018 {
1016 return kvm_mips_callbacks->vcpu_setup(vcpu); 1019 return kvm_mips_callbacks->vcpu_setup(vcpu);
1017 } 1020 }
1018 1021
1019 static 1022 static
1020 void kvm_mips_set_c0_status(void) 1023 void kvm_mips_set_c0_status(void)
1021 { 1024 {
1022 uint32_t status = read_c0_status(); 1025 uint32_t status = read_c0_status();
1023
1024 if (cpu_has_fpu)
1025 status |= (ST0_CU1);
1026 1026
1027 if (cpu_has_dsp) 1027 if (cpu_has_dsp)
1028 status |= (ST0_MX); 1028 status |= (ST0_MX);
1029 1029
1030 write_c0_status(status); 1030 write_c0_status(status);
1031 ehb(); 1031 ehb();
1032 } 1032 }
1033 1033
1034 /* 1034 /*
1035 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) 1035 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
1036 */ 1036 */
1037 int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) 1037 int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1038 { 1038 {
1039 uint32_t cause = vcpu->arch.host_cp0_cause; 1039 uint32_t cause = vcpu->arch.host_cp0_cause;
1040 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; 1040 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1041 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; 1041 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
1042 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; 1042 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1043 enum emulation_result er = EMULATE_DONE; 1043 enum emulation_result er = EMULATE_DONE;
1044 int ret = RESUME_GUEST; 1044 int ret = RESUME_GUEST;
1045 1045
1046 /* Set a default exit reason */ 1046 /* Set a default exit reason */
1047 run->exit_reason = KVM_EXIT_UNKNOWN; 1047 run->exit_reason = KVM_EXIT_UNKNOWN;
1048 run->ready_for_interrupt_injection = 1; 1048 run->ready_for_interrupt_injection = 1;
1049 1049
1050 /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */ 1050 /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
1051 kvm_mips_set_c0_status(); 1051 kvm_mips_set_c0_status();
1052 1052
1053 local_irq_enable(); 1053 local_irq_enable();
1054 1054
1055 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", 1055 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
1056 cause, opc, run, vcpu); 1056 cause, opc, run, vcpu);
1057 1057
1058 /* Do a privilege check, if in UM most of these exit conditions end up 1058 /* Do a privilege check, if in UM most of these exit conditions end up
1059 * causing an exception to be delivered to the Guest Kernel 1059 * causing an exception to be delivered to the Guest Kernel
1060 */ 1060 */
1061 er = kvm_mips_check_privilege(cause, opc, run, vcpu); 1061 er = kvm_mips_check_privilege(cause, opc, run, vcpu);
1062 if (er == EMULATE_PRIV_FAIL) { 1062 if (er == EMULATE_PRIV_FAIL) {
1063 goto skip_emul; 1063 goto skip_emul;
1064 } else if (er == EMULATE_FAIL) { 1064 } else if (er == EMULATE_FAIL) {
1065 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1065 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1066 ret = RESUME_HOST; 1066 ret = RESUME_HOST;
1067 goto skip_emul; 1067 goto skip_emul;
1068 } 1068 }
1069 1069
1070 switch (exccode) { 1070 switch (exccode) {
1071 case T_INT: 1071 case T_INT:
1072 kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc); 1072 kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
1073 1073
1074 ++vcpu->stat.int_exits; 1074 ++vcpu->stat.int_exits;
1075 trace_kvm_exit(vcpu, INT_EXITS); 1075 trace_kvm_exit(vcpu, INT_EXITS);
1076 1076
1077 if (need_resched()) { 1077 if (need_resched()) {
1078 cond_resched(); 1078 cond_resched();
1079 } 1079 }
1080 1080
1081 ret = RESUME_GUEST; 1081 ret = RESUME_GUEST;
1082 break; 1082 break;
1083 1083
1084 case T_COP_UNUSABLE: 1084 case T_COP_UNUSABLE:
1085 kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc); 1085 kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
1086 1086
1087 ++vcpu->stat.cop_unusable_exits; 1087 ++vcpu->stat.cop_unusable_exits;
1088 trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); 1088 trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
1089 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); 1089 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
1090 /* XXXKYMA: Might need to return to user space */ 1090 /* XXXKYMA: Might need to return to user space */
1091 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) { 1091 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
1092 ret = RESUME_HOST; 1092 ret = RESUME_HOST;
1093 } 1093 }
1094 break; 1094 break;
1095 1095
1096 case T_TLB_MOD: 1096 case T_TLB_MOD:
1097 ++vcpu->stat.tlbmod_exits; 1097 ++vcpu->stat.tlbmod_exits;
1098 trace_kvm_exit(vcpu, TLBMOD_EXITS); 1098 trace_kvm_exit(vcpu, TLBMOD_EXITS);
1099 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); 1099 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
1100 break; 1100 break;
1101 1101
1102 case T_TLB_ST_MISS: 1102 case T_TLB_ST_MISS:
1103 kvm_debug 1103 kvm_debug
1104 ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", 1104 ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
1105 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, 1105 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
1106 badvaddr); 1106 badvaddr);
1107 1107
1108 ++vcpu->stat.tlbmiss_st_exits; 1108 ++vcpu->stat.tlbmiss_st_exits;
1109 trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); 1109 trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
1110 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); 1110 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
1111 break; 1111 break;
1112 1112
1113 case T_TLB_LD_MISS: 1113 case T_TLB_LD_MISS:
1114 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n", 1114 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
1115 cause, opc, badvaddr); 1115 cause, opc, badvaddr);
1116 1116
1117 ++vcpu->stat.tlbmiss_ld_exits; 1117 ++vcpu->stat.tlbmiss_ld_exits;
1118 trace_kvm_exit(vcpu, TLBMISS_LD_EXITS); 1118 trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
1119 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); 1119 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
1120 break; 1120 break;
1121 1121
1122 case T_ADDR_ERR_ST: 1122 case T_ADDR_ERR_ST:
1123 ++vcpu->stat.addrerr_st_exits; 1123 ++vcpu->stat.addrerr_st_exits;
1124 trace_kvm_exit(vcpu, ADDRERR_ST_EXITS); 1124 trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
1125 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); 1125 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
1126 break; 1126 break;
1127 1127
1128 case T_ADDR_ERR_LD: 1128 case T_ADDR_ERR_LD:
1129 ++vcpu->stat.addrerr_ld_exits; 1129 ++vcpu->stat.addrerr_ld_exits;
1130 trace_kvm_exit(vcpu, ADDRERR_LD_EXITS); 1130 trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
1131 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); 1131 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
1132 break; 1132 break;
1133 1133
1134 case T_SYSCALL: 1134 case T_SYSCALL:
1135 ++vcpu->stat.syscall_exits; 1135 ++vcpu->stat.syscall_exits;
1136 trace_kvm_exit(vcpu, SYSCALL_EXITS); 1136 trace_kvm_exit(vcpu, SYSCALL_EXITS);
1137 ret = kvm_mips_callbacks->handle_syscall(vcpu); 1137 ret = kvm_mips_callbacks->handle_syscall(vcpu);
1138 break; 1138 break;
1139 1139
1140 case T_RES_INST: 1140 case T_RES_INST:
1141 ++vcpu->stat.resvd_inst_exits; 1141 ++vcpu->stat.resvd_inst_exits;
1142 trace_kvm_exit(vcpu, RESVD_INST_EXITS); 1142 trace_kvm_exit(vcpu, RESVD_INST_EXITS);
1143 ret = kvm_mips_callbacks->handle_res_inst(vcpu); 1143 ret = kvm_mips_callbacks->handle_res_inst(vcpu);
1144 break; 1144 break;
1145 1145
1146 case T_BREAK: 1146 case T_BREAK:
1147 ++vcpu->stat.break_inst_exits; 1147 ++vcpu->stat.break_inst_exits;
1148 trace_kvm_exit(vcpu, BREAK_INST_EXITS); 1148 trace_kvm_exit(vcpu, BREAK_INST_EXITS);
1149 ret = kvm_mips_callbacks->handle_break(vcpu); 1149 ret = kvm_mips_callbacks->handle_break(vcpu);
1150 break; 1150 break;
1151 1151
1152 default: 1152 default:
1153 kvm_err 1153 kvm_err
1154 ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", 1154 ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
1155 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, 1155 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
1156 kvm_read_c0_guest_status(vcpu->arch.cop0)); 1156 kvm_read_c0_guest_status(vcpu->arch.cop0));
1157 kvm_arch_vcpu_dump_regs(vcpu); 1157 kvm_arch_vcpu_dump_regs(vcpu);
1158 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1158 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1159 ret = RESUME_HOST; 1159 ret = RESUME_HOST;
1160 break; 1160 break;
1161 1161
1162 } 1162 }
1163 1163
1164 skip_emul: 1164 skip_emul:
1165 local_irq_disable(); 1165 local_irq_disable();
1166 1166
1167 if (er == EMULATE_DONE && !(ret & RESUME_HOST)) 1167 if (er == EMULATE_DONE && !(ret & RESUME_HOST))
1168 kvm_mips_deliver_interrupts(vcpu, cause); 1168 kvm_mips_deliver_interrupts(vcpu, cause);
1169 1169
1170 if (!(ret & RESUME_HOST)) { 1170 if (!(ret & RESUME_HOST)) {
1171 /* Only check for signals if not already exiting to userspace */ 1171 /* Only check for signals if not already exiting to userspace */
1172 if (signal_pending(current)) { 1172 if (signal_pending(current)) {
1173 run->exit_reason = KVM_EXIT_INTR; 1173 run->exit_reason = KVM_EXIT_INTR;
1174 ret = (-EINTR << 2) | RESUME_HOST; 1174 ret = (-EINTR << 2) | RESUME_HOST;
1175 ++vcpu->stat.signal_exits; 1175 ++vcpu->stat.signal_exits;
1176 trace_kvm_exit(vcpu, SIGNAL_EXITS); 1176 trace_kvm_exit(vcpu, SIGNAL_EXITS);
1177 } 1177 }
1178 } 1178 }
1179 1179
1180 return ret; 1180 return ret;
1181 } 1181 }
1182 1182
1183 int __init kvm_mips_init(void) 1183 int __init kvm_mips_init(void)
1184 { 1184 {
1185 int ret; 1185 int ret;
1186 1186
1187 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); 1187 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
1188 1188
1189 if (ret) 1189 if (ret)
1190 return ret; 1190 return ret;
1191 1191
1192 /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs. 1192 /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
1193 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c) 1193 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
1194 * to avoid the possibility of double faulting. The issue is that the TLB code 1194 * to avoid the possibility of double faulting. The issue is that the TLB code
1195 * references routines that are part of the KVM module, 1195 * references routines that are part of the KVM module,
1196 * which are only available once the module is loaded. 1196 * which are only available once the module is loaded.
1197 */ 1197 */
1198 kvm_mips_gfn_to_pfn = gfn_to_pfn; 1198 kvm_mips_gfn_to_pfn = gfn_to_pfn;
1199 kvm_mips_release_pfn_clean = kvm_release_pfn_clean; 1199 kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
1200 kvm_mips_is_error_pfn = is_error_pfn; 1200 kvm_mips_is_error_pfn = is_error_pfn;
1201 1201
1202 pr_info("KVM/MIPS Initialized\n"); 1202 pr_info("KVM/MIPS Initialized\n");
1203 return 0; 1203 return 0;
1204 } 1204 }
1205 1205
1206 void __exit kvm_mips_exit(void) 1206 void __exit kvm_mips_exit(void)
1207 { 1207 {
1208 kvm_exit(); 1208 kvm_exit();
1209 1209
1210 kvm_mips_gfn_to_pfn = NULL; 1210 kvm_mips_gfn_to_pfn = NULL;
1211 kvm_mips_release_pfn_clean = NULL; 1211 kvm_mips_release_pfn_clean = NULL;
1212 kvm_mips_is_error_pfn = NULL; 1212 kvm_mips_is_error_pfn = NULL;
1213 1213
1214 pr_info("KVM/MIPS unloaded\n"); 1214 pr_info("KVM/MIPS unloaded\n");
1215 } 1215 }
1216 1216
1217 module_init(kvm_mips_init); 1217 module_init(kvm_mips_init);
1218 module_exit(kvm_mips_exit); 1218 module_exit(kvm_mips_exit);
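
A minimal userspace sketch (not part of this commit) of how the KVM_GET_ONE_REG path shown above is typically exercised: it reads the guest CP0 Status register using the same register ID encoding this file defines (MIPS_CP0_32(12, 0)). The vcpu file descriptor is assumed to have been created elsewhere, and the MIPS_CP0_32 / KVM_REG_MIPS_CP0_STATUS macros are re-declared locally here, mirroring the definitions in the diff; KVM_REG_MIPS, KVM_REG_SIZE_U32, struct kvm_one_reg and KVM_GET_ONE_REG come from <linux/kvm.h>.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Local mirror of the in-kernel encoding used by kvm_mips_get_reg():
 * KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * reg + sel) */
#define MIPS_CP0_32(_R, _S) \
	(KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
#define KVM_REG_MIPS_CP0_STATUS	MIPS_CP0_32(12, 0)

/* Returns the ioctl result (0 on success); on success *status holds the
 * 32-bit value the kernel put_user()s for a KVM_REG_SIZE_U32 register. */
static int read_guest_cp0_status(int vcpu_fd, uint32_t *status)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_CP0_STATUS,
		.addr = (uintptr_t)status,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

The same kvm_one_reg structure works in the other direction with KVM_SET_ONE_REG, which dispatches to kvm_mips_set_reg() above; 64-bit registers such as KVM_REG_MIPS_CP0_ENTRYHI use the MIPS_CP0_64 encoding and a u64 buffer instead.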