Commit a6fa60f5614352eb53fc57b23f1984fed4e71eac
Committed by Greg Kroah-Hartman
1 parent 9e01c02f98
Exists in smarct4x-processor-sdk-linux-03.00.00.04 and in 2 other branches

MIPS: KVM: Fix timer IRQ race when writing CP0_Compare
commit b45bacd2d048f405c7760e5cc9b60dd67708734f upstream.

Writing CP0_Compare clears the timer interrupt pending bit (CP0_Cause.TI), but this wasn't being done atomically. If a timer interrupt raced with the write of the guest CP0_Compare, the timer interrupt could end up being pending even though the new CP0_Compare is nowhere near CP0_Count.

We were already updating the hrtimer expiry with kvm_mips_update_hrtimer(), which used both kvm_mips_freeze_hrtimer() and kvm_mips_resume_hrtimer(). Close the race window by expanding out kvm_mips_update_hrtimer(), and clearing CP0_Cause.TI and setting CP0_Compare between the freeze and resume.

Since the pending timer interrupt should not be cleared when CP0_Compare is written via the KVM user API, an ack argument is added to distinguish the source of the write.

Fixes: e30492bbe95a ("MIPS: KVM: Rewrite count/compare timer emulation")
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
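For illustration, the shape of the fix is roughly the sketch below: the hrtimer is frozen first, CP0_Cause.TI is acked and CP0_Compare written while it is frozen, and only then is the hrtimer resumed against the new expiry, so no timer interrupt can slip in between. This is a simplified sketch of the upstream change to kvm_mips_write_compare(), not the verbatim patch; the helpers kvm_mips_count_disabled(), kvm_mips_freeze_hrtimer() and kvm_mips_resume_hrtimer() come from arch/mips/kvm/emulate.c, and the dequeue_timer_int callback clears both the queued interrupt and CP0_Cause.TI.

void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;
	ktime_t now;
	int dc;

	/* If CP0_Compare is unchanged, the write can only be acking TI */
	if (kvm_read_c0_guest_compare(cop0) == compare) {
		if (ack)
			kvm_mips_callbacks->dequeue_timer_int(vcpu);
		return;
	}

	/* Freeze the hrtimer; it handles timer interrupts <= the frozen count */
	dc = kvm_mips_count_disabled(vcpu);
	if (!dc)
		now = kvm_mips_freeze_hrtimer(vcpu, &count);

	/*
	 * Ack the pending timer interrupt only for guest MTC0 writes; writes
	 * via the KVM user API pass ack == false and leave a pending TI alone.
	 */
	if (ack)
		kvm_mips_callbacks->dequeue_timer_int(vcpu);

	kvm_write_c0_guest_compare(cop0, compare);

	/* Resume the hrtimer; it handles timer interrupts > the frozen count */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

Callers distinguish the two sources of the write: the guest MTC0 emulation passes ack = true, while the KVM_SET_ONE_REG path for KVM_REG_MIPS_CP0_COMPARE passes ack = false, so restoring guest state from user space cannot lose a pending timer interrupt.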
Showing 3 changed files with 29 additions and 36 deletions.
arch/mips/include/asm/kvm_host.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)


#define KVM_MAX_VCPUS		1
#define KVM_USER_MEM_SLOTS	8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_COALESCED_MMIO_PAGE_OFFSET	1
#define KVM_HALT_POLL_NS_DEFAULT	500000


/* Special address that contains the comm page, used for reducing # of traps */
#define KVM_GUEST_COMMPAGE_ADDR	0x0

#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))

#define KVM_GUEST_KUSEG		0x00000000UL
#define KVM_GUEST_KSEG0		0x40000000UL
#define KVM_GUEST_KSEG23	0x60000000UL
#define KVM_GUEST_KSEGX(a)	((_ACAST32_(a)) & 0x60000000)
#define KVM_GUEST_CPHYSADDR(a)	((_ACAST32_(a)) & 0x1fffffff)

#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/*
 * Map an address to a certain kernel segment
 */
#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

#define KVM_INVALID_PAGE	0xdeadbeef
#define KVM_INVALID_INST	0xdeadbeef
#define KVM_INVALID_ADDR	0xdeadbeef

#define KVM_MALTA_GUEST_RTC_ADDR	0xb8000070UL

#define GUEST_TICKS_PER_JIFFY	(40000000/HZ)
#define MS_TO_NS(x)		(x * 1E6L)

#define CAUSEB_DC	27
#define CAUSEF_DC	(_ULCAST_(1) << 27)

extern atomic_t kvm_mips_instance;
extern pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
extern void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
extern bool (*kvm_mips_is_error_pfn)(pfn_t pfn);

struct kvm_vm_stat {
	u32 remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u32 wait_exits;
	u32 cache_exits;
	u32 signal_exits;
	u32 int_exits;
	u32 cop_unusable_exits;
	u32 tlbmod_exits;
	u32 tlbmiss_ld_exits;
	u32 tlbmiss_st_exits;
	u32 addrerr_st_exits;
	u32 addrerr_ld_exits;
	u32 syscall_exits;
	u32 resvd_inst_exits;
	u32 break_inst_exits;
	u32 trap_inst_exits;
	u32 msa_fpe_exits;
	u32 fpe_exits;
	u32 msa_disabled_exits;
	u32 flush_dcache_exits;
	u32 halt_successful_poll;
	u32 halt_attempted_poll;
	u32 halt_wakeup;
};

enum kvm_mips_exit_types {
	WAIT_EXITS,
	CACHE_EXITS,
	SIGNAL_EXITS,
	INT_EXITS,
	COP_UNUSABLE_EXITS,
	TLBMOD_EXITS,
	TLBMISS_LD_EXITS,
	TLBMISS_ST_EXITS,
	ADDRERR_ST_EXITS,
	ADDRERR_LD_EXITS,
	SYSCALL_EXITS,
	RESVD_INST_EXITS,
	BREAK_INST_EXITS,
	TRAP_INST_EXITS,
	MSA_FPE_EXITS,
	FPE_EXITS,
	MSA_DISABLED_EXITS,
	FLUSH_DCACHE_EXITS,
	MAX_KVM_MIPS_EXIT_TYPES
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
	/* Guest GVA->HPA page table */
	unsigned long *guest_pmap;
	unsigned long guest_pmap_npages;

	/* Wired host TLB used for the commpage */
	int commpage_tlb;
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

/* Config0 register bits */
#define CP0C0_M		31
#define CP0C0_K23	28
#define CP0C0_KU	25
#define CP0C0_MDU	20
#define CP0C0_MM	17
#define CP0C0_BM	16
#define CP0C0_BE	15
#define CP0C0_AT	13
#define CP0C0_AR	10
#define CP0C0_MT	7
#define CP0C0_VI	3
#define CP0C0_K0	0

/* Config1 register bits */
#define CP0C1_M		31
#define CP0C1_MMU	25
#define CP0C1_IS	22
#define CP0C1_IL	19
#define CP0C1_IA	16
#define CP0C1_DS	13
#define CP0C1_DL	10
#define CP0C1_DA	7
#define CP0C1_C2	6
#define CP0C1_MD	5
#define CP0C1_PC	4
#define CP0C1_WR	3
#define CP0C1_CA	2
#define CP0C1_EP	1
#define CP0C1_FP	0

/* Config2 Register bits */
#define CP0C2_M		31
#define CP0C2_TU	28
#define CP0C2_TS	24
#define CP0C2_TL	20
#define CP0C2_TA	16
#define CP0C2_SU	12
#define CP0C2_SS	8
#define CP0C2_SL	4
#define CP0C2_SA	0

/* Config3 Register bits */
#define CP0C3_M			31
#define CP0C3_ISA_ON_EXC	16
#define CP0C3_ULRI		13
#define CP0C3_DSPP		10
#define CP0C3_LPA		7
#define CP0C3_VEIC		6
#define CP0C3_VInt		5
#define CP0C3_SP		4
#define CP0C3_MT		2
#define CP0C3_SM		1
#define CP0C3_TL		0

/* MMU types, the first four entries have the same layout as the
   CP0C0_MT field. */
enum mips_mmu_types {
	MMU_TYPE_NONE,
	MMU_TYPE_R4000,
	MMU_TYPE_RESERVED,
	MMU_TYPE_FMT,
	MMU_TYPE_R3000,
	MMU_TYPE_R6000,
	MMU_TYPE_R8000
};

/*
 * Trap codes
 */
#define T_INT			0	/* Interrupt pending */
#define T_TLB_MOD		1	/* TLB modified fault */
#define T_TLB_LD_MISS		2	/* TLB miss on load or ifetch */
#define T_TLB_ST_MISS		3	/* TLB miss on a store */
#define T_ADDR_ERR_LD		4	/* Address error on a load or ifetch */
#define T_ADDR_ERR_ST		5	/* Address error on a store */
#define T_BUS_ERR_IFETCH	6	/* Bus error on an ifetch */
#define T_BUS_ERR_LD_ST		7	/* Bus error on a load or store */
#define T_SYSCALL		8	/* System call */
#define T_BREAK			9	/* Breakpoint */
#define T_RES_INST		10	/* Reserved instruction exception */
#define T_COP_UNUSABLE		11	/* Coprocessor unusable */
#define T_OVFLOW		12	/* Arithmetic overflow */

/*
 * Trap definitions added for r4000 port.
 */
#define T_TRAP			13	/* Trap instruction */
#define T_VCEI			14	/* Virtual coherency exception */
#define T_MSAFPE		14	/* MSA floating point exception */
#define T_FPE			15	/* Floating point exception */
#define T_MSADIS		21	/* MSA disabled exception */
#define T_WATCH			23	/* Watch address reference */
#define T_VCED			31	/* Virtual coherency data */

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
};

#define MIPS3_PG_G	0x00000001	/* Global; ignore ASID if in lo0 & lo1 */
#define MIPS3_PG_V	0x00000002	/* Valid */
#define MIPS3_PG_NV	0x00000000
#define MIPS3_PG_D	0x00000004	/* Dirty */

#define mips3_paddr_to_tlbpfn(x) \
	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)

#define MIPS3_PG_SHIFT	6
#define MIPS3_PG_FRAME	0x3fffffc0

#define VPN2_MASK	0xffffe000
#define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) && \
				 ((x).tlb_lo1 & MIPS3_PG_G))
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & ASID_MASK)
#define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT)) \
				 ? ((x).tlb_lo1 & MIPS3_PG_V) \
				 : ((x).tlb_lo0 & MIPS3_PG_V))
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) == \
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) || \
				 TLB_ASID(x) == ((y) & ASID_MASK))

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo0;
	long tlb_lo1;
};

#define KVM_MIPS_FPU_FPU	0x1
#define KVM_MIPS_FPU_MSA	0x2

#define KVM_MIPS_GUEST_TLB_SIZE	64
struct kvm_vcpu_arch {
	void *host_ebase, *guest_ebase;
	unsigned long host_stack;
	unsigned long host_gp;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_cause;
	unsigned long host_cp0_epc;
	unsigned long host_cp0_entryhi;
	uint32_t guest_inst;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which FPU state is loaded (KVM_MIPS_FPU_*) */
	unsigned int fpu_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	u32 io_gpr;		/* GPR used as IO source/target */

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	uint32_t count_ctl;
	/* Count bias from the raw time */
	uint32_t count_bias;
	/* Frequency of timer in Hz */
	uint32_t count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	unsigned long pending_load_cause;

	/* Save/Restore the entryhi register when we are preempted/scheduled back in */
	unsigned long preempt_entryhi;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Cached guest kernel/user ASIDs */
	uint32_t guest_user_asid[NR_CPUS];
	uint32_t guest_kernel_asid[NR_CPUS];
	struct mm_struct guest_kernel_mm, guest_user_mm;

	int last_sched_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};


#define kvm_read_c0_guest_index(cop0)		(cop0->reg[MIPS_CP0_TLB_INDEX][0])
#define kvm_write_c0_guest_index(cop0, val)	(cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
#define kvm_read_c0_guest_entrylo0(cop0)	(cop0->reg[MIPS_CP0_TLB_LO0][0])
#define kvm_read_c0_guest_entrylo1(cop0)	(cop0->reg[MIPS_CP0_TLB_LO1][0])
#define kvm_read_c0_guest_context(cop0)		(cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
#define kvm_write_c0_guest_context(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
#define kvm_read_c0_guest_userlocal(cop0)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
#define kvm_write_c0_guest_userlocal(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
#define kvm_read_c0_guest_pagemask(cop0)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
#define kvm_write_c0_guest_pagemask(cop0, val)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
#define kvm_read_c0_guest_wired(cop0)		(cop0->reg[MIPS_CP0_TLB_WIRED][0])
#define kvm_write_c0_guest_wired(cop0, val)	(cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
#define kvm_read_c0_guest_hwrena(cop0)		(cop0->reg[MIPS_CP0_HWRENA][0])
#define kvm_write_c0_guest_hwrena(cop0, val)	(cop0->reg[MIPS_CP0_HWRENA][0] = (val))
#define kvm_read_c0_guest_badvaddr(cop0)	(cop0->reg[MIPS_CP0_BAD_VADDR][0])
#define kvm_write_c0_guest_badvaddr(cop0, val)	(cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
#define kvm_read_c0_guest_count(cop0)		(cop0->reg[MIPS_CP0_COUNT][0])
#define kvm_write_c0_guest_count(cop0, val)	(cop0->reg[MIPS_CP0_COUNT][0] = (val))
#define kvm_read_c0_guest_entryhi(cop0)		(cop0->reg[MIPS_CP0_TLB_HI][0])
#define kvm_write_c0_guest_entryhi(cop0, val)	(cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
#define kvm_read_c0_guest_compare(cop0)		(cop0->reg[MIPS_CP0_COMPARE][0])
#define kvm_write_c0_guest_compare(cop0, val)	(cop0->reg[MIPS_CP0_COMPARE][0] = (val))
#define kvm_read_c0_guest_status(cop0)		(cop0->reg[MIPS_CP0_STATUS][0])
#define kvm_write_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] = (val))
#define kvm_read_c0_guest_intctl(cop0)		(cop0->reg[MIPS_CP0_STATUS][1])
#define kvm_write_c0_guest_intctl(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][1] = (val))
#define kvm_read_c0_guest_cause(cop0)		(cop0->reg[MIPS_CP0_CAUSE][0])
#define kvm_write_c0_guest_cause(cop0, val)	(cop0->reg[MIPS_CP0_CAUSE][0] = (val))
#define kvm_read_c0_guest_epc(cop0)		(cop0->reg[MIPS_CP0_EXC_PC][0])
#define kvm_write_c0_guest_epc(cop0, val)	(cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
#define kvm_read_c0_guest_prid(cop0)		(cop0->reg[MIPS_CP0_PRID][0])
#define kvm_write_c0_guest_prid(cop0, val)	(cop0->reg[MIPS_CP0_PRID][0] = (val))
#define kvm_read_c0_guest_ebase(cop0)		(cop0->reg[MIPS_CP0_PRID][1])
#define kvm_write_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] = (val))
#define kvm_read_c0_guest_config(cop0)		(cop0->reg[MIPS_CP0_CONFIG][0])
#define kvm_read_c0_guest_config1(cop0)		(cop0->reg[MIPS_CP0_CONFIG][1])
#define kvm_read_c0_guest_config2(cop0)		(cop0->reg[MIPS_CP0_CONFIG][2])
#define kvm_read_c0_guest_config3(cop0)		(cop0->reg[MIPS_CP0_CONFIG][3])
#define kvm_read_c0_guest_config4(cop0)		(cop0->reg[MIPS_CP0_CONFIG][4])
#define kvm_read_c0_guest_config5(cop0)		(cop0->reg[MIPS_CP0_CONFIG][5])
#define kvm_read_c0_guest_config7(cop0)		(cop0->reg[MIPS_CP0_CONFIG][7])
#define kvm_write_c0_guest_config(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][0] = (val))
#define kvm_write_c0_guest_config1(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][1] = (val))
#define kvm_write_c0_guest_config2(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][2] = (val))
#define kvm_write_c0_guest_config3(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][3] = (val))
#define kvm_write_c0_guest_config4(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][4] = (val))
#define kvm_write_c0_guest_config5(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][5] = (val))
#define kvm_write_c0_guest_config7(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0)	(cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val)	(cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))

/*
 * Some of the guest registers may be modified asynchronously (e.g. from a
 * hrtimer callback in hard irq context) and therefore need stronger atomicity
 * guarantees than other registers.
 */

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}

#define kvm_set_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] |= (val))
#define kvm_clear_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))

/* Cause can be modified asynchronously from hardirq hrtimer callback */
#define kvm_set_c0_guest_cause(cop0, val) \
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_clear_c0_guest_cause(cop0, val) \
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_change_c0_guest_cause(cop0, change, val) \
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
					change, val)

#define kvm_set_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] |= (val))
#define kvm_clear_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
#define kvm_change_c0_guest_ebase(cop0, change, val)		\
{								\
	kvm_clear_c0_guest_ebase(cop0, change);			\
	kvm_set_c0_guest_ebase(cop0, ((val) & (change)));	\
}

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}

struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*vm_init)(struct kvm *kvm);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   uint32_t cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 uint32_t cause);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_get_regs)(struct kvm_vcpu *vcpu);
	int (*vcpu_set_regs)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* Trampoline ASM routine to start running in "Guest" context */
extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);

uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);

uint32_t kvm_get_commpage_asid(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
					   struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
					      struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
						struct kvm_mips_tlb *tlb,
						unsigned long *hpa0,
						unsigned long *hpa1);

extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
						     uint32_t *opc,
						     struct kvm_run *run,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
						    uint32_t *opc,
						    struct kvm_run *run,
						    struct kvm_vcpu *vcpu);

extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
extern void kvm_mips_flush_host_tlb(int skip_kseg0);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);

extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);
extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
							   unsigned long gva);
extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
				    struct kvm_vcpu *vcpu);
extern void kvm_local_flush_tlb_all(void);
extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);

/* Emulation */
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);

extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
						   uint32_t *opc,
						   struct kvm_run *run,
						   struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
						      uint32_t *opc,
						      struct kvm_run *run,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
							 uint32_t *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
							uint32_t *opc,
							struct kvm_run *run,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
							  uint32_t *opc,
							  struct kvm_run *run,
							  struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
							 uint32_t *opc,
							 struct kvm_run *run,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
738 | uint32_t *opc, | 738 | uint32_t *opc, |
739 | struct kvm_run *run, | 739 | struct kvm_run *run, |
740 | struct kvm_vcpu *vcpu); | 740 | struct kvm_vcpu *vcpu); |
741 | 741 | ||
742 | extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, | 742 | extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, |
743 | uint32_t *opc, | 743 | uint32_t *opc, |
744 | struct kvm_run *run, | 744 | struct kvm_run *run, |
745 | struct kvm_vcpu *vcpu); | 745 | struct kvm_vcpu *vcpu); |
746 | 746 | ||
747 | extern enum emulation_result kvm_mips_handle_ri(unsigned long cause, | 747 | extern enum emulation_result kvm_mips_handle_ri(unsigned long cause, |
748 | uint32_t *opc, | 748 | uint32_t *opc, |
749 | struct kvm_run *run, | 749 | struct kvm_run *run, |
750 | struct kvm_vcpu *vcpu); | 750 | struct kvm_vcpu *vcpu); |
751 | 751 | ||
752 | extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, | 752 | extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, |
753 | uint32_t *opc, | 753 | uint32_t *opc, |
754 | struct kvm_run *run, | 754 | struct kvm_run *run, |
755 | struct kvm_vcpu *vcpu); | 755 | struct kvm_vcpu *vcpu); |
756 | 756 | ||
757 | extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, | 757 | extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, |
758 | uint32_t *opc, | 758 | uint32_t *opc, |
759 | struct kvm_run *run, | 759 | struct kvm_run *run, |
760 | struct kvm_vcpu *vcpu); | 760 | struct kvm_vcpu *vcpu); |
761 | 761 | ||
762 | extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause, | 762 | extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause, |
763 | uint32_t *opc, | 763 | uint32_t *opc, |
764 | struct kvm_run *run, | 764 | struct kvm_run *run, |
765 | struct kvm_vcpu *vcpu); | 765 | struct kvm_vcpu *vcpu); |
766 | 766 | ||
767 | extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause, | 767 | extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause, |
768 | uint32_t *opc, | 768 | uint32_t *opc, |
769 | struct kvm_run *run, | 769 | struct kvm_run *run, |
770 | struct kvm_vcpu *vcpu); | 770 | struct kvm_vcpu *vcpu); |
771 | 771 | ||
772 | extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause, | 772 | extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause, |
773 | uint32_t *opc, | 773 | uint32_t *opc, |
774 | struct kvm_run *run, | 774 | struct kvm_run *run, |
775 | struct kvm_vcpu *vcpu); | 775 | struct kvm_vcpu *vcpu); |
776 | 776 | ||
777 | extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause, | 777 | extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause, |
778 | uint32_t *opc, | 778 | uint32_t *opc, |
779 | struct kvm_run *run, | 779 | struct kvm_run *run, |
780 | struct kvm_vcpu *vcpu); | 780 | struct kvm_vcpu *vcpu); |
781 | 781 | ||
782 | extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, | 782 | extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, |
783 | struct kvm_run *run); | 783 | struct kvm_run *run); |
784 | 784 | ||
785 | uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu); | 785 | uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu); |
786 | void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count); | 786 | void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count); |
787 | void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare); | 787 | void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack); |
788 | void kvm_mips_init_count(struct kvm_vcpu *vcpu); | 788 | void kvm_mips_init_count(struct kvm_vcpu *vcpu); |
789 | int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl); | 789 | int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl); |
790 | int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume); | 790 | int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume); |
791 | int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz); | 791 | int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz); |
792 | void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu); | 792 | void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu); |
793 | void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu); | 793 | void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu); |
794 | enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu); | 794 | enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu); |
795 | 795 | ||
796 | enum emulation_result kvm_mips_check_privilege(unsigned long cause, | 796 | enum emulation_result kvm_mips_check_privilege(unsigned long cause, |
797 | uint32_t *opc, | 797 | uint32_t *opc, |
798 | struct kvm_run *run, | 798 | struct kvm_run *run, |
799 | struct kvm_vcpu *vcpu); | 799 | struct kvm_vcpu *vcpu); |
800 | 800 | ||
801 | enum emulation_result kvm_mips_emulate_cache(uint32_t inst, | 801 | enum emulation_result kvm_mips_emulate_cache(uint32_t inst, |
802 | uint32_t *opc, | 802 | uint32_t *opc, |
803 | uint32_t cause, | 803 | uint32_t cause, |
804 | struct kvm_run *run, | 804 | struct kvm_run *run, |
805 | struct kvm_vcpu *vcpu); | 805 | struct kvm_vcpu *vcpu); |
806 | enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, | 806 | enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, |
807 | uint32_t *opc, | 807 | uint32_t *opc, |
808 | uint32_t cause, | 808 | uint32_t cause, |
809 | struct kvm_run *run, | 809 | struct kvm_run *run, |
810 | struct kvm_vcpu *vcpu); | 810 | struct kvm_vcpu *vcpu); |
811 | enum emulation_result kvm_mips_emulate_store(uint32_t inst, | 811 | enum emulation_result kvm_mips_emulate_store(uint32_t inst, |
812 | uint32_t cause, | 812 | uint32_t cause, |
813 | struct kvm_run *run, | 813 | struct kvm_run *run, |
814 | struct kvm_vcpu *vcpu); | 814 | struct kvm_vcpu *vcpu); |
815 | enum emulation_result kvm_mips_emulate_load(uint32_t inst, | 815 | enum emulation_result kvm_mips_emulate_load(uint32_t inst, |
816 | uint32_t cause, | 816 | uint32_t cause, |
817 | struct kvm_run *run, | 817 | struct kvm_run *run, |
818 | struct kvm_vcpu *vcpu); | 818 | struct kvm_vcpu *vcpu); |
819 | 819 | ||
820 | unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu); | 820 | unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu); |
821 | unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu); | 821 | unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu); |
822 | unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu); | 822 | unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu); |
823 | unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu); | 823 | unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu); |
824 | 824 | ||
825 | /* Dynamic binary translation */ | 825 | /* Dynamic binary translation */ |
826 | extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, | 826 | extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, |
827 | struct kvm_vcpu *vcpu); | 827 | struct kvm_vcpu *vcpu); |
828 | extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, | 828 | extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, |
829 | struct kvm_vcpu *vcpu); | 829 | struct kvm_vcpu *vcpu); |
830 | extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, | 830 | extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, |
831 | struct kvm_vcpu *vcpu); | 831 | struct kvm_vcpu *vcpu); |
832 | extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, | 832 | extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, |
833 | struct kvm_vcpu *vcpu); | 833 | struct kvm_vcpu *vcpu); |
834 | 834 | ||
835 | /* Misc */ | 835 | /* Misc */ |
836 | extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu); | 836 | extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu); |
837 | extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); | 837 | extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); |
838 | 838 | ||
839 | static inline void kvm_arch_hardware_disable(void) {} | 839 | static inline void kvm_arch_hardware_disable(void) {} |
840 | static inline void kvm_arch_hardware_unsetup(void) {} | 840 | static inline void kvm_arch_hardware_unsetup(void) {} |
841 | static inline void kvm_arch_sync_events(struct kvm *kvm) {} | 841 | static inline void kvm_arch_sync_events(struct kvm *kvm) {} |
842 | static inline void kvm_arch_free_memslot(struct kvm *kvm, | 842 | static inline void kvm_arch_free_memslot(struct kvm *kvm, |
843 | struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} | 843 | struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {} |
844 | static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {} | 844 | static inline void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots) {} |
845 | static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} | 845 | static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {} |
846 | static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | 846 | static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm, |
847 | struct kvm_memory_slot *slot) {} | 847 | struct kvm_memory_slot *slot) {} |
848 | static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} | 848 | static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} |
849 | static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} | 849 | static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} |
850 | static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} | 850 | static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} |
851 | static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} | 851 | static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} |
852 | 852 | ||
853 | #endif /* __MIPS_KVM_HOST_H__ */ | 853 | #endif /* __MIPS_KVM_HOST_H__ */ |
854 | 854 |
arch/mips/kvm/emulate.c
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Instruction/Exception emulation | 6 | * KVM/MIPS: Instruction/Exception emulation |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/ktime.h> | 14 | #include <linux/ktime.h> |
15 | #include <linux/kvm_host.h> | 15 | #include <linux/kvm_host.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/vmalloc.h> | 17 | #include <linux/vmalloc.h> |
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/bootmem.h> | 19 | #include <linux/bootmem.h> |
20 | #include <linux/random.h> | 20 | #include <linux/random.h> |
21 | #include <asm/page.h> | 21 | #include <asm/page.h> |
22 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
23 | #include <asm/cpu-info.h> | 23 | #include <asm/cpu-info.h> |
24 | #include <asm/mmu_context.h> | 24 | #include <asm/mmu_context.h> |
25 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
26 | #include <asm/inst.h> | 26 | #include <asm/inst.h> |
27 | 27 | ||
28 | #undef CONFIG_MIPS_MT | 28 | #undef CONFIG_MIPS_MT |
29 | #include <asm/r4kcache.h> | 29 | #include <asm/r4kcache.h> |
30 | #define CONFIG_MIPS_MT | 30 | #define CONFIG_MIPS_MT |
31 | 31 | ||
32 | #include "opcode.h" | 32 | #include "opcode.h" |
33 | #include "interrupt.h" | 33 | #include "interrupt.h" |
34 | #include "commpage.h" | 34 | #include "commpage.h" |
35 | 35 | ||
36 | #include "trace.h" | 36 | #include "trace.h" |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * Compute the return address, emulating the branch if required. | 39 | * Compute the return address, emulating the branch if required. |
40 | * This function should be called only when in a branch delay slot. | 40 | * This function should be called only when in a branch delay slot. |
41 | */ | 41 | */ |
42 | unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | 42 | unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, |
43 | unsigned long instpc) | 43 | unsigned long instpc) |
44 | { | 44 | { |
45 | unsigned int dspcontrol; | 45 | unsigned int dspcontrol; |
46 | union mips_instruction insn; | 46 | union mips_instruction insn; |
47 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 47 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
48 | long epc = instpc; | 48 | long epc = instpc; |
49 | long nextpc = KVM_INVALID_INST; | 49 | long nextpc = KVM_INVALID_INST; |
50 | 50 | ||
51 | if (epc & 3) | 51 | if (epc & 3) |
52 | goto unaligned; | 52 | goto unaligned; |
53 | 53 | ||
54 | /* Read the instruction */ | 54 | /* Read the instruction */ |
55 | insn.word = kvm_get_inst((uint32_t *) epc, vcpu); | 55 | insn.word = kvm_get_inst((uint32_t *) epc, vcpu); |
56 | 56 | ||
57 | if (insn.word == KVM_INVALID_INST) | 57 | if (insn.word == KVM_INVALID_INST) |
58 | return KVM_INVALID_INST; | 58 | return KVM_INVALID_INST; |
59 | 59 | ||
60 | switch (insn.i_format.opcode) { | 60 | switch (insn.i_format.opcode) { |
61 | /* jr and jalr are in r_format format. */ | 61 | /* jr and jalr are in r_format format. */ |
62 | case spec_op: | 62 | case spec_op: |
63 | switch (insn.r_format.func) { | 63 | switch (insn.r_format.func) { |
64 | case jalr_op: | 64 | case jalr_op: |
65 | arch->gprs[insn.r_format.rd] = epc + 8; | 65 | arch->gprs[insn.r_format.rd] = epc + 8; |
66 | /* Fall through */ | 66 | /* Fall through */ |
67 | case jr_op: | 67 | case jr_op: |
68 | nextpc = arch->gprs[insn.r_format.rs]; | 68 | nextpc = arch->gprs[insn.r_format.rs]; |
69 | break; | 69 | break; |
70 | } | 70 | } |
71 | break; | 71 | break; |
72 | 72 | ||
73 | /* | 73 | /* |
74 | * This group contains: | 74 | * This group contains: |
75 | * bltz_op, bgez_op, bltzl_op, bgezl_op, | 75 | * bltz_op, bgez_op, bltzl_op, bgezl_op, |
76 | * bltzal_op, bgezal_op, bltzall_op, bgezall_op. | 76 | * bltzal_op, bgezal_op, bltzall_op, bgezall_op. |
77 | */ | 77 | */ |
78 | case bcond_op: | 78 | case bcond_op: |
79 | switch (insn.i_format.rt) { | 79 | switch (insn.i_format.rt) { |
80 | case bltz_op: | 80 | case bltz_op: |
81 | case bltzl_op: | 81 | case bltzl_op: |
82 | if ((long)arch->gprs[insn.i_format.rs] < 0) | 82 | if ((long)arch->gprs[insn.i_format.rs] < 0) |
83 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 83 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
84 | else | 84 | else |
85 | epc += 8; | 85 | epc += 8; |
86 | nextpc = epc; | 86 | nextpc = epc; |
87 | break; | 87 | break; |
88 | 88 | ||
89 | case bgez_op: | 89 | case bgez_op: |
90 | case bgezl_op: | 90 | case bgezl_op: |
91 | if ((long)arch->gprs[insn.i_format.rs] >= 0) | 91 | if ((long)arch->gprs[insn.i_format.rs] >= 0) |
92 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 92 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
93 | else | 93 | else |
94 | epc += 8; | 94 | epc += 8; |
95 | nextpc = epc; | 95 | nextpc = epc; |
96 | break; | 96 | break; |
97 | 97 | ||
98 | case bltzal_op: | 98 | case bltzal_op: |
99 | case bltzall_op: | 99 | case bltzall_op: |
100 | arch->gprs[31] = epc + 8; | 100 | arch->gprs[31] = epc + 8; |
101 | if ((long)arch->gprs[insn.i_format.rs] < 0) | 101 | if ((long)arch->gprs[insn.i_format.rs] < 0) |
102 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 102 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
103 | else | 103 | else |
104 | epc += 8; | 104 | epc += 8; |
105 | nextpc = epc; | 105 | nextpc = epc; |
106 | break; | 106 | break; |
107 | 107 | ||
108 | case bgezal_op: | 108 | case bgezal_op: |
109 | case bgezall_op: | 109 | case bgezall_op: |
110 | arch->gprs[31] = epc + 8; | 110 | arch->gprs[31] = epc + 8; |
111 | if ((long)arch->gprs[insn.i_format.rs] >= 0) | 111 | if ((long)arch->gprs[insn.i_format.rs] >= 0) |
112 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 112 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
113 | else | 113 | else |
114 | epc += 8; | 114 | epc += 8; |
115 | nextpc = epc; | 115 | nextpc = epc; |
116 | break; | 116 | break; |
117 | case bposge32_op: | 117 | case bposge32_op: |
118 | if (!cpu_has_dsp) | 118 | if (!cpu_has_dsp) |
119 | goto sigill; | 119 | goto sigill; |
120 | 120 | ||
121 | dspcontrol = rddsp(0x01); | 121 | dspcontrol = rddsp(0x01); |
122 | 122 | ||
123 | if (dspcontrol >= 32) | 123 | if (dspcontrol >= 32) |
124 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 124 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
125 | else | 125 | else |
126 | epc += 8; | 126 | epc += 8; |
127 | nextpc = epc; | 127 | nextpc = epc; |
128 | break; | 128 | break; |
129 | } | 129 | } |
130 | break; | 130 | break; |
131 | 131 | ||
132 | /* These are unconditional and in j_format. */ | 132 | /* These are unconditional and in j_format. */ |
133 | case jal_op: | 133 | case jal_op: |
134 | arch->gprs[31] = instpc + 8; | 134 | arch->gprs[31] = instpc + 8; |
135 | case j_op: | 135 | case j_op: |
136 | epc += 4; | 136 | epc += 4; |
137 | epc >>= 28; | 137 | epc >>= 28; |
138 | epc <<= 28; | 138 | epc <<= 28; |
139 | epc |= (insn.j_format.target << 2); | 139 | epc |= (insn.j_format.target << 2); |
140 | nextpc = epc; | 140 | nextpc = epc; |
141 | break; | 141 | break; |
142 | 142 | ||
143 | /* These are conditional and in i_format. */ | 143 | /* These are conditional and in i_format. */ |
144 | case beq_op: | 144 | case beq_op: |
145 | case beql_op: | 145 | case beql_op: |
146 | if (arch->gprs[insn.i_format.rs] == | 146 | if (arch->gprs[insn.i_format.rs] == |
147 | arch->gprs[insn.i_format.rt]) | 147 | arch->gprs[insn.i_format.rt]) |
148 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 148 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
149 | else | 149 | else |
150 | epc += 8; | 150 | epc += 8; |
151 | nextpc = epc; | 151 | nextpc = epc; |
152 | break; | 152 | break; |
153 | 153 | ||
154 | case bne_op: | 154 | case bne_op: |
155 | case bnel_op: | 155 | case bnel_op: |
156 | if (arch->gprs[insn.i_format.rs] != | 156 | if (arch->gprs[insn.i_format.rs] != |
157 | arch->gprs[insn.i_format.rt]) | 157 | arch->gprs[insn.i_format.rt]) |
158 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 158 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
159 | else | 159 | else |
160 | epc += 8; | 160 | epc += 8; |
161 | nextpc = epc; | 161 | nextpc = epc; |
162 | break; | 162 | break; |
163 | 163 | ||
164 | case blez_op: /* not really i_format */ | 164 | case blez_op: /* not really i_format */ |
165 | case blezl_op: | 165 | case blezl_op: |
166 | /* rt field assumed to be zero */ | 166 | /* rt field assumed to be zero */ |
167 | if ((long)arch->gprs[insn.i_format.rs] <= 0) | 167 | if ((long)arch->gprs[insn.i_format.rs] <= 0) |
168 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 168 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
169 | else | 169 | else |
170 | epc += 8; | 170 | epc += 8; |
171 | nextpc = epc; | 171 | nextpc = epc; |
172 | break; | 172 | break; |
173 | 173 | ||
174 | case bgtz_op: | 174 | case bgtz_op: |
175 | case bgtzl_op: | 175 | case bgtzl_op: |
176 | /* rt field assumed to be zero */ | 176 | /* rt field assumed to be zero */ |
177 | if ((long)arch->gprs[insn.i_format.rs] > 0) | 177 | if ((long)arch->gprs[insn.i_format.rs] > 0) |
178 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 178 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
179 | else | 179 | else |
180 | epc += 8; | 180 | epc += 8; |
181 | nextpc = epc; | 181 | nextpc = epc; |
182 | break; | 182 | break; |
183 | 183 | ||
184 | /* And now the FPA/cp1 branch instructions. */ | 184 | /* And now the FPA/cp1 branch instructions. */ |
185 | case cop1_op: | 185 | case cop1_op: |
186 | kvm_err("%s: unsupported cop1_op\n", __func__); | 186 | kvm_err("%s: unsupported cop1_op\n", __func__); |
187 | break; | 187 | break; |
188 | } | 188 | } |
189 | 189 | ||
190 | return nextpc; | 190 | return nextpc; |
191 | 191 | ||
192 | unaligned: | 192 | unaligned: |
193 | kvm_err("%s: unaligned epc\n", __func__); | 193 | kvm_err("%s: unaligned epc\n", __func__); |
194 | return nextpc; | 194 | return nextpc; |
195 | 195 | ||
196 | sigill: | 196 | sigill: |
197 | kvm_err("%s: DSP branch but not DSP ASE\n", __func__); | 197 | kvm_err("%s: DSP branch but not DSP ASE\n", __func__); |
198 | return nextpc; | 198 | return nextpc; |
199 | } | 199 | } |
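
To make the branch arithmetic above concrete: every taken I-format branch targets the delay-slot address plus a sign-extended word offset, and every not-taken path skips both the branch and its delay slot. A minimal standalone sketch (plain C for illustration, not kernel code):

    #include <stdint.h>

    /* Taken branch: target = delay-slot address + (signed 16-bit
     * immediate scaled to a word offset) -- the repeated
     * "epc + 4 + (simmediate << 2)" pattern above. */
    static long branch_target(long epc, int16_t simmediate)
    {
            return epc + 4 + ((long)simmediate << 2);
    }

    /* Not taken: fall through past the branch and its delay slot. */
    static long branch_fallthrough(long epc)
    {
            return epc + 8;
    }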
200 | 200 | ||
201 | enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) | 201 | enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) |
202 | { | 202 | { |
203 | unsigned long branch_pc; | 203 | unsigned long branch_pc; |
204 | enum emulation_result er = EMULATE_DONE; | 204 | enum emulation_result er = EMULATE_DONE; |
205 | 205 | ||
206 | if (cause & CAUSEF_BD) { | 206 | if (cause & CAUSEF_BD) { |
207 | branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc); | 207 | branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc); |
208 | if (branch_pc == KVM_INVALID_INST) { | 208 | if (branch_pc == KVM_INVALID_INST) { |
209 | er = EMULATE_FAIL; | 209 | er = EMULATE_FAIL; |
210 | } else { | 210 | } else { |
211 | vcpu->arch.pc = branch_pc; | 211 | vcpu->arch.pc = branch_pc; |
212 | kvm_debug("BD update_pc(): New PC: %#lx\n", | 212 | kvm_debug("BD update_pc(): New PC: %#lx\n", |
213 | vcpu->arch.pc); | 213 | vcpu->arch.pc); |
214 | } | 214 | } |
215 | } else | 215 | } else |
216 | vcpu->arch.pc += 4; | 216 | vcpu->arch.pc += 4; |
217 | 217 | ||
218 | kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); | 218 | kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); |
219 | 219 | ||
220 | return er; | 220 | return er; |
221 | } | 221 | } |
222 | 222 | ||
223 | /** | 223 | /** |
224 | * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled. | 224 | * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled. |
225 | * @vcpu: Virtual CPU. | 225 | * @vcpu: Virtual CPU. |
226 | * | 226 | * |
227 | * Returns: 1 if the CP0_Count timer is disabled by either the guest | 227 | * Returns: 1 if the CP0_Count timer is disabled by either the guest |
228 | * CP0_Cause.DC bit or the count_ctl.DC bit. | 228 | * CP0_Cause.DC bit or the count_ctl.DC bit. |
229 | * 0 otherwise (in which case CP0_Count timer is running). | 229 | * 0 otherwise (in which case CP0_Count timer is running). |
230 | */ | 230 | */ |
231 | static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) | 231 | static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) |
232 | { | 232 | { |
233 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 233 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
234 | 234 | ||
235 | return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || | 235 | return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || |
236 | (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); | 236 | (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); |
237 | } | 237 | } |
238 | 238 | ||
239 | /** | 239 | /** |
240 | * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count. | 240 | * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count. |
241 | * | 241 | * |
242 | * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias. | 242 | * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias. |
243 | * | 243 | * |
244 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). | 244 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). |
245 | */ | 245 | */ |
246 | static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now) | 246 | static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now) |
247 | { | 247 | { |
248 | s64 now_ns, periods; | 248 | s64 now_ns, periods; |
249 | u64 delta; | 249 | u64 delta; |
250 | 250 | ||
251 | now_ns = ktime_to_ns(now); | 251 | now_ns = ktime_to_ns(now); |
252 | delta = now_ns + vcpu->arch.count_dyn_bias; | 252 | delta = now_ns + vcpu->arch.count_dyn_bias; |
253 | 253 | ||
254 | if (delta >= vcpu->arch.count_period) { | 254 | if (delta >= vcpu->arch.count_period) { |
255 | /* If delta is out of safe range the bias needs adjusting */ | 255 | /* If delta is out of safe range the bias needs adjusting */ |
256 | periods = div64_s64(now_ns, vcpu->arch.count_period); | 256 | periods = div64_s64(now_ns, vcpu->arch.count_period); |
257 | vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period; | 257 | vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period; |
258 | /* Recalculate delta with new bias */ | 258 | /* Recalculate delta with new bias */ |
259 | delta = now_ns + vcpu->arch.count_dyn_bias; | 259 | delta = now_ns + vcpu->arch.count_dyn_bias; |
260 | } | 260 | } |
261 | 261 | ||
262 | /* | 262 | /* |
263 | * We've ensured that: | 263 | * We've ensured that: |
264 | * delta < count_period | 264 | * delta < count_period |
265 | * | 265 | * |
266 | * Therefore the intermediate delta*count_hz will never overflow since | 266 | * Therefore the intermediate delta*count_hz will never overflow since |
267 | * at the boundary condition: | 267 | * at the boundary condition: |
268 | * delta = count_period | 268 | * delta = count_period |
269 | * delta = NSEC_PER_SEC * 2^32 / count_hz | 269 | * delta = NSEC_PER_SEC * 2^32 / count_hz |
270 | * delta * count_hz = NSEC_PER_SEC * 2^32 | 270 | * delta * count_hz = NSEC_PER_SEC * 2^32 |
271 | */ | 271 | */ |
272 | return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC); | 272 | return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC); |
273 | } | 273 | } |
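
The overflow claim in the comment above can be checked in isolation. A self-contained sketch of the same bias-and-scale computation (illustrative only: plain integer division stands in for the kernel's div64 helpers):

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Scale a monotonic nanosecond timestamp to a 32-bit count at
     * count_hz. The dynamic bias keeps delta in [0, count_period),
     * so delta * count_hz <= NSEC_PER_SEC << 32, which fits in 64 bits. */
    static uint32_t ns_to_count(int64_t now_ns, uint64_t count_hz,
                                int64_t *dyn_bias)
    {
            uint64_t count_period = (NSEC_PER_SEC << 32) / count_hz;
            uint64_t delta = now_ns + *dyn_bias;

            if (delta >= count_period) {
                    /* Bias out whole periods to bring delta back in range */
                    int64_t periods = now_ns / (int64_t)count_period;
                    *dyn_bias = -periods * (int64_t)count_period;
                    delta = now_ns + *dyn_bias;
            }
            return (uint32_t)(delta * count_hz / NSEC_PER_SEC);
    }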
274 | 274 | ||
275 | /** | 275 | /** |
276 | * kvm_mips_count_time() - Get effective current time. | 276 | * kvm_mips_count_time() - Get effective current time. |
277 | * @vcpu: Virtual CPU. | 277 | * @vcpu: Virtual CPU. |
278 | * | 278 | * |
279 | * Get effective monotonic ktime. This is usually a straightforward ktime_get(), | 279 | * Get effective monotonic ktime. This is usually a straightforward ktime_get(), |
280 | * except when the master disable bit is set in count_ctl, in which case it is | 280 | * except when the master disable bit is set in count_ctl, in which case it is |
281 | * count_resume, i.e. the time that the count was disabled. | 281 | * count_resume, i.e. the time that the count was disabled. |
282 | * | 282 | * |
283 | * Returns: Effective monotonic ktime for CP0_Count. | 283 | * Returns: Effective monotonic ktime for CP0_Count. |
284 | */ | 284 | */ |
285 | static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu) | 285 | static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu) |
286 | { | 286 | { |
287 | if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) | 287 | if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) |
288 | return vcpu->arch.count_resume; | 288 | return vcpu->arch.count_resume; |
289 | 289 | ||
290 | return ktime_get(); | 290 | return ktime_get(); |
291 | } | 291 | } |
292 | 292 | ||
293 | /** | 293 | /** |
294 | * kvm_mips_read_count_running() - Read the current count value as if running. | 294 | * kvm_mips_read_count_running() - Read the current count value as if running. |
295 | * @vcpu: Virtual CPU. | 295 | * @vcpu: Virtual CPU. |
296 | * @now: Kernel time to read CP0_Count at. | 296 | * @now: Kernel time to read CP0_Count at. |
297 | * | 297 | * |
298 | * Returns the current guest CP0_Count register value at time @now, and handles | 298 | * Returns the current guest CP0_Count register value at time @now, and handles |
299 | * any timer interrupt that has become pending but hasn't been handled yet. | 299 | * any timer interrupt that has become pending but hasn't been handled yet. |
300 | * | 300 | * |
301 | * Returns: The current value of the guest CP0_Count register. | 301 | * Returns: The current value of the guest CP0_Count register. |
302 | */ | 302 | */ |
303 | static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) | 303 | static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now) |
304 | { | 304 | { |
305 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 305 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
306 | ktime_t expires, threshold; | 306 | ktime_t expires, threshold; |
307 | uint32_t count, compare; | 307 | uint32_t count, compare; |
308 | int running; | 308 | int running; |
309 | 309 | ||
310 | /* Calculate the biased and scaled guest CP0_Count */ | 310 | /* Calculate the biased and scaled guest CP0_Count */ |
311 | count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); | 311 | count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); |
312 | compare = kvm_read_c0_guest_compare(cop0); | 312 | compare = kvm_read_c0_guest_compare(cop0); |
313 | 313 | ||
314 | /* | 314 | /* |
315 | * Find whether CP0_Count has reached the closest timer interrupt. If | 315 | * Find whether CP0_Count has reached the closest timer interrupt. If |
316 | * not, we shouldn't inject it. | 316 | * not, we shouldn't inject it. |
317 | */ | 317 | */ |
318 | if ((int32_t)(count - compare) < 0) | 318 | if ((int32_t)(count - compare) < 0) |
319 | return count; | 319 | return count; |
320 | 320 | ||
321 | /* | 321 | /* |
322 | * The CP0_Count we're going to return has already reached the closest | 322 | * The CP0_Count we're going to return has already reached the closest |
323 | * timer interrupt. Quickly check if it really is a new interrupt by | 323 | * timer interrupt. Quickly check if it really is a new interrupt by |
324 | * looking at whether the interval until the hrtimer expiry time is | 324 | * looking at whether the interval until the hrtimer expiry time is |
325 | * less than 1/4 of the timer period. | 325 | * less than 1/4 of the timer period. |
326 | */ | 326 | */ |
327 | expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); | 327 | expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); |
328 | threshold = ktime_add_ns(now, vcpu->arch.count_period / 4); | 328 | threshold = ktime_add_ns(now, vcpu->arch.count_period / 4); |
329 | if (ktime_before(expires, threshold)) { | 329 | if (ktime_before(expires, threshold)) { |
330 | /* | 330 | /* |
331 | * Cancel it while we handle it so there's no chance of | 331 | * Cancel it while we handle it so there's no chance of |
332 | * interference with the timeout handler. | 332 | * interference with the timeout handler. |
333 | */ | 333 | */ |
334 | running = hrtimer_cancel(&vcpu->arch.comparecount_timer); | 334 | running = hrtimer_cancel(&vcpu->arch.comparecount_timer); |
335 | 335 | ||
336 | /* Nothing should be waiting on the timeout */ | 336 | /* Nothing should be waiting on the timeout */ |
337 | kvm_mips_callbacks->queue_timer_int(vcpu); | 337 | kvm_mips_callbacks->queue_timer_int(vcpu); |
338 | 338 | ||
339 | /* | 339 | /* |
340 | * Restart the timer if it was running based on the expiry time | 340 | * Restart the timer if it was running based on the expiry time |
341 | * we read, so that we don't push it back 2 periods. | 341 | * we read, so that we don't push it back 2 periods. |
342 | */ | 342 | */ |
343 | if (running) { | 343 | if (running) { |
344 | expires = ktime_add_ns(expires, | 344 | expires = ktime_add_ns(expires, |
345 | vcpu->arch.count_period); | 345 | vcpu->arch.count_period); |
346 | hrtimer_start(&vcpu->arch.comparecount_timer, expires, | 346 | hrtimer_start(&vcpu->arch.comparecount_timer, expires, |
347 | HRTIMER_MODE_ABS); | 347 | HRTIMER_MODE_ABS); |
348 | } | 348 | } |
349 | } | 349 | } |
350 | 350 | ||
351 | return count; | 351 | return count; |
352 | } | 352 | } |
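
The (int32_t)(count - compare) < 0 test above is the usual modular-arithmetic idiom for asking whether a wrapping 32-bit counter has reached a target; it stays correct across the 2^32 wrap:

    #include <stdint.h>

    /* count has reached compare iff the modular distance,
     * reinterpreted as signed, is non-negative:
     *   count = 0x00000001, compare = 0x00000002  ->  not reached
     *   count = 0x00000001, compare = 0xfffffffe  ->  reached (wrapped)
     */
    static int count_reached(uint32_t count, uint32_t compare)
    {
            return (int32_t)(count - compare) >= 0;
    }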
353 | 353 | ||
354 | /** | 354 | /** |
355 | * kvm_mips_read_count() - Read the current count value. | 355 | * kvm_mips_read_count() - Read the current count value. |
356 | * @vcpu: Virtual CPU. | 356 | * @vcpu: Virtual CPU. |
357 | * | 357 | * |
358 | * Read the current guest CP0_Count value, taking into account whether the timer | 358 | * Read the current guest CP0_Count value, taking into account whether the timer |
359 | * is stopped. | 359 | * is stopped. |
360 | * | 360 | * |
361 | * Returns: The current guest CP0_Count value. | 361 | * Returns: The current guest CP0_Count value. |
362 | */ | 362 | */ |
363 | uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu) | 363 | uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu) |
364 | { | 364 | { |
365 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 365 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
366 | 366 | ||
367 | /* If count disabled just read static copy of count */ | 367 | /* If count disabled just read static copy of count */ |
368 | if (kvm_mips_count_disabled(vcpu)) | 368 | if (kvm_mips_count_disabled(vcpu)) |
369 | return kvm_read_c0_guest_count(cop0); | 369 | return kvm_read_c0_guest_count(cop0); |
370 | 370 | ||
371 | return kvm_mips_read_count_running(vcpu, ktime_get()); | 371 | return kvm_mips_read_count_running(vcpu, ktime_get()); |
372 | } | 372 | } |
373 | 373 | ||
374 | /** | 374 | /** |
375 | * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer. | 375 | * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer. |
376 | * @vcpu: Virtual CPU. | 376 | * @vcpu: Virtual CPU. |
377 | * @count: Output pointer for CP0_Count value at point of freeze. | 377 | * @count: Output pointer for CP0_Count value at point of freeze. |
378 | * | 378 | * |
379 | * Freeze the hrtimer safely and return both the ktime and the CP0_Count value | 379 | * Freeze the hrtimer safely and return both the ktime and the CP0_Count value |
380 | * at the point it was frozen. It is guaranteed that any pending interrupts at | 380 | * at the point it was frozen. It is guaranteed that any pending interrupts at |
381 | * the point it was frozen are handled, and none after that point. | 381 | * the point it was frozen are handled, and none after that point. |
382 | * | 382 | * |
383 | * This is useful where the time/CP0_Count is needed in the calculation of the | 383 | * This is useful where the time/CP0_Count is needed in the calculation of the |
384 | * new parameters. | 384 | * new parameters. |
385 | * | 385 | * |
386 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). | 386 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). |
387 | * | 387 | * |
388 | * Returns: The ktime at the point of freeze. | 388 | * Returns: The ktime at the point of freeze. |
389 | */ | 389 | */ |
390 | static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, | 390 | static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, |
391 | uint32_t *count) | 391 | uint32_t *count) |
392 | { | 392 | { |
393 | ktime_t now; | 393 | ktime_t now; |
394 | 394 | ||
395 | /* stop hrtimer before finding time */ | 395 | /* stop hrtimer before finding time */ |
396 | hrtimer_cancel(&vcpu->arch.comparecount_timer); | 396 | hrtimer_cancel(&vcpu->arch.comparecount_timer); |
397 | now = ktime_get(); | 397 | now = ktime_get(); |
398 | 398 | ||
399 | /* find count at this point and handle pending hrtimer */ | 399 | /* find count at this point and handle pending hrtimer */ |
400 | *count = kvm_mips_read_count_running(vcpu, now); | 400 | *count = kvm_mips_read_count_running(vcpu, now); |
401 | 401 | ||
402 | return now; | 402 | return now; |
403 | } | 403 | } |
404 | 404 | ||
405 | /** | 405 | /** |
406 | * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. | 406 | * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. |
407 | * @vcpu: Virtual CPU. | 407 | * @vcpu: Virtual CPU. |
408 | * @now: ktime at point of resume. | 408 | * @now: ktime at point of resume. |
409 | * @count: CP0_Count at point of resume. | 409 | * @count: CP0_Count at point of resume. |
410 | * | 410 | * |
411 | * Resumes the timer and updates the timer expiry based on @now and @count. | 411 | * Resumes the timer and updates the timer expiry based on @now and @count. |
412 | * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer | 412 | * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer |
413 | * parameters need to be changed. | 413 | * parameters need to be changed. |
414 | * | 414 | * |
415 | * It is guaranteed that a timer interrupt immediately after resume will be | 415 | * It is guaranteed that a timer interrupt immediately after resume will be |
416 | * handled, but not if CP0_Compare is exactly at @count. That case is already | 416 | * handled, but not if CP0_Compare is exactly at @count. That case is already |
417 | * handled by kvm_mips_freeze_hrtimer(). | 417 | * handled by kvm_mips_freeze_hrtimer(). |
418 | * | 418 | * |
419 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). | 419 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). |
420 | */ | 420 | */ |
421 | static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, | 421 | static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu, |
422 | ktime_t now, uint32_t count) | 422 | ktime_t now, uint32_t count) |
423 | { | 423 | { |
424 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 424 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
425 | uint32_t compare; | 425 | uint32_t compare; |
426 | u64 delta; | 426 | u64 delta; |
427 | ktime_t expire; | 427 | ktime_t expire; |
428 | 428 | ||
429 | /* Calculate timeout (wrap 0 to 2^32) */ | 429 | /* Calculate timeout (wrap 0 to 2^32) */ |
430 | compare = kvm_read_c0_guest_compare(cop0); | 430 | compare = kvm_read_c0_guest_compare(cop0); |
431 | delta = (u64)(uint32_t)(compare - count - 1) + 1; | 431 | delta = (u64)(uint32_t)(compare - count - 1) + 1; |
432 | delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); | 432 | delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); |
433 | expire = ktime_add_ns(now, delta); | 433 | expire = ktime_add_ns(now, delta); |
434 | 434 | ||
435 | /* Update hrtimer to use new timeout */ | 435 | /* Update hrtimer to use new timeout */ |
436 | hrtimer_cancel(&vcpu->arch.comparecount_timer); | 436 | hrtimer_cancel(&vcpu->arch.comparecount_timer); |
437 | hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS); | 437 | hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS); |
438 | } | 438 | } |
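
The timeout computation above deliberately maps a zero distance from count to compare onto a full 2^32-tick period rather than an immediate expiry, since a compare equal to the frozen count has already been handled on the freeze side. In isolation:

    #include <stdint.h>

    /* Ticks until CP0_Compare is next hit, wrapping 0 up to 2^32:
     *   compare - count == 1  ->  delta == 1
     *   compare - count == 0  ->  delta == 1ULL << 32 (full period)
     */
    static uint64_t ticks_to_expiry(uint32_t compare, uint32_t count)
    {
            return (uint64_t)(uint32_t)(compare - count - 1) + 1;
    }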
439 | 439 | ||
440 | /** | 440 | /** |
441 | * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer. | ||
442 | * @vcpu: Virtual CPU. | ||
443 | * | ||
444 | * Recalculates and updates the expiry time of the hrtimer. This can be used | ||
445 | * after timer parameters have been altered in a way that does not depend on | ||
446 | * the time at which the change occurs (when it does, kvm_mips_freeze_hrtimer() | ||
447 | * and kvm_mips_resume_hrtimer() are used directly). | ||
448 | * | ||
449 | * It is guaranteed that no timer interrupts will be lost in the process. | ||
450 | * | ||
451 | * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running). | ||
452 | */ | ||
453 | static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu) | ||
454 | { | ||
455 | ktime_t now; | ||
456 | uint32_t count; | ||
457 | |||
458 | /* | ||
459 | * freeze_hrtimer() takes care of timer interrupts <= count, and | ||
460 | * resume_hrtimer() takes care of timer interrupts > count. | ||
461 | */ | ||
462 | now = kvm_mips_freeze_hrtimer(vcpu, &count); | ||
463 | kvm_mips_resume_hrtimer(vcpu, now, count); | ||
464 | } | ||
465 | |||
466 | /** | ||
467 | * kvm_mips_write_count() - Modify the count and update timer. | 441 | * kvm_mips_write_count() - Modify the count and update timer. |
468 | * @vcpu: Virtual CPU. | 442 | * @vcpu: Virtual CPU. |
469 | * @count: Guest CP0_Count value to set. | 443 | * @count: Guest CP0_Count value to set. |
470 | * | 444 | * |
471 | * Sets the CP0_Count value and updates the timer accordingly. | 445 | * Sets the CP0_Count value and updates the timer accordingly. |
472 | */ | 446 | */ |
473 | void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count) | 447 | void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count) |
474 | { | 448 | { |
475 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 449 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
476 | ktime_t now; | 450 | ktime_t now; |
477 | 451 | ||
478 | /* Calculate bias */ | 452 | /* Calculate bias */ |
479 | now = kvm_mips_count_time(vcpu); | 453 | now = kvm_mips_count_time(vcpu); |
480 | vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); | 454 | vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); |
481 | 455 | ||
482 | if (kvm_mips_count_disabled(vcpu)) | 456 | if (kvm_mips_count_disabled(vcpu)) |
483 | /* The timer's disabled, adjust the static count */ | 457 | /* The timer's disabled, adjust the static count */ |
484 | kvm_write_c0_guest_count(cop0, count); | 458 | kvm_write_c0_guest_count(cop0, count); |
485 | else | 459 | else |
486 | /* Update timeout */ | 460 | /* Update timeout */ |
487 | kvm_mips_resume_hrtimer(vcpu, now, count); | 461 | kvm_mips_resume_hrtimer(vcpu, now, count); |
488 | } | 462 | } |
489 | 463 | ||
490 | /** | 464 | /** |
491 | * kvm_mips_init_count() - Initialise timer. | 465 | * kvm_mips_init_count() - Initialise timer. |
492 | * @vcpu: Virtual CPU. | 466 | * @vcpu: Virtual CPU. |
493 | * | 467 | * |
494 | * Initialise the timer to a sensible frequency, namely 100 MHz, zero it, and set | 468 | * Initialise the timer to a sensible frequency, namely 100 MHz, zero it, and set |
495 | * it going if it's enabled. | 469 | * it going if it's enabled. |
496 | */ | 470 | */ |
497 | void kvm_mips_init_count(struct kvm_vcpu *vcpu) | 471 | void kvm_mips_init_count(struct kvm_vcpu *vcpu) |
498 | { | 472 | { |
499 | /* 100 MHz */ | 473 | /* 100 MHz */ |
500 | vcpu->arch.count_hz = 100*1000*1000; | 474 | vcpu->arch.count_hz = 100*1000*1000; |
501 | vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, | 475 | vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, |
502 | vcpu->arch.count_hz); | 476 | vcpu->arch.count_hz); |
503 | vcpu->arch.count_dyn_bias = 0; | 477 | vcpu->arch.count_dyn_bias = 0; |
504 | 478 | ||
505 | /* Starting at 0 */ | 479 | /* Starting at 0 */ |
506 | kvm_mips_write_count(vcpu, 0); | 480 | kvm_mips_write_count(vcpu, 0); |
507 | } | 481 | } |
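
At the default 100 MHz this gives a wrap period of roughly 43 seconds, which bounds how far any hrtimer expiry computed above can be pushed out:

    count_period = (NSEC_PER_SEC << 32) / count_hz
                 = (10^9 * 2^32) / 10^8 ns
                 = 10 * 2^32 ns
                 ~= 42.95 s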
508 | 482 | ||
509 | /** | 483 | /** |
510 | * kvm_mips_set_count_hz() - Update the frequency of the timer. | 484 | * kvm_mips_set_count_hz() - Update the frequency of the timer. |
511 | * @vcpu: Virtual CPU. | 485 | * @vcpu: Virtual CPU. |
512 | * @count_hz: Frequency of CP0_Count timer in Hz. | 486 | * @count_hz: Frequency of CP0_Count timer in Hz. |
513 | * | 487 | * |
514 | * Change the frequency of the CP0_Count timer. This is done atomically so that | 488 | * Change the frequency of the CP0_Count timer. This is done atomically so that |
515 | * CP0_Count is continuous and no timer interrupt is lost. | 489 | * CP0_Count is continuous and no timer interrupt is lost. |
516 | * | 490 | * |
517 | * Returns: -EINVAL if @count_hz is out of range. | 491 | * Returns: -EINVAL if @count_hz is out of range. |
518 | * 0 on success. | 492 | * 0 on success. |
519 | */ | 493 | */ |
520 | int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) | 494 | int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz) |
521 | { | 495 | { |
522 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 496 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
523 | int dc; | 497 | int dc; |
524 | ktime_t now; | 498 | ktime_t now; |
525 | u32 count; | 499 | u32 count; |
526 | 500 | ||
527 | /* ensure the frequency is in a sensible range... */ | 501 | /* ensure the frequency is in a sensible range... */ |
528 | if (count_hz <= 0 || count_hz > NSEC_PER_SEC) | 502 | if (count_hz <= 0 || count_hz > NSEC_PER_SEC) |
529 | return -EINVAL; | 503 | return -EINVAL; |
530 | /* ... and has actually changed */ | 504 | /* ... and has actually changed */ |
531 | if (vcpu->arch.count_hz == count_hz) | 505 | if (vcpu->arch.count_hz == count_hz) |
532 | return 0; | 506 | return 0; |
533 | 507 | ||
534 | /* Safely freeze timer so we can keep it continuous */ | 508 | /* Safely freeze timer so we can keep it continuous */ |
535 | dc = kvm_mips_count_disabled(vcpu); | 509 | dc = kvm_mips_count_disabled(vcpu); |
536 | if (dc) { | 510 | if (dc) { |
537 | now = kvm_mips_count_time(vcpu); | 511 | now = kvm_mips_count_time(vcpu); |
538 | count = kvm_read_c0_guest_count(cop0); | 512 | count = kvm_read_c0_guest_count(cop0); |
539 | } else { | 513 | } else { |
540 | now = kvm_mips_freeze_hrtimer(vcpu, &count); | 514 | now = kvm_mips_freeze_hrtimer(vcpu, &count); |
541 | } | 515 | } |
542 | 516 | ||
543 | /* Update the frequency */ | 517 | /* Update the frequency */ |
544 | vcpu->arch.count_hz = count_hz; | 518 | vcpu->arch.count_hz = count_hz; |
545 | vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); | 519 | vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); |
546 | vcpu->arch.count_dyn_bias = 0; | 520 | vcpu->arch.count_dyn_bias = 0; |
547 | 521 | ||
548 | /* Calculate adjusted bias so dynamic count is unchanged */ | 522 | /* Calculate adjusted bias so dynamic count is unchanged */ |
549 | vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); | 523 | vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); |
550 | 524 | ||
551 | /* Update and resume hrtimer */ | 525 | /* Update and resume hrtimer */ |
552 | if (!dc) | 526 | if (!dc) |
553 | kvm_mips_resume_hrtimer(vcpu, now, count); | 527 | kvm_mips_resume_hrtimer(vcpu, now, count); |
554 | return 0; | 528 | return 0; |
555 | } | 529 | } |
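
The continuity guarantee comes from recomputing the bias against the frozen count: the dynamic count at the freeze time is forced to the same value under the old and new frequency. Writing scale_hz(t) for the biased ktime-to-count scaling at frequency hz:

    count = count_bias_old + scale_old(now)      /* frozen value */
    count = count_bias_new + scale_new(now)      /* required after the change */
      =>  count_bias_new = count - scale_new(now)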
556 | 530 | ||
557 | /** | 531 | /** |
558 | * kvm_mips_write_compare() - Modify compare and update timer. | 532 | * kvm_mips_write_compare() - Modify compare and update timer. |
559 | * @vcpu: Virtual CPU. | 533 | * @vcpu: Virtual CPU. |
560 | * @compare: New CP0_Compare value. | 534 | * @compare: New CP0_Compare value. |
535 | * @ack: Whether to acknowledge timer interrupt. | ||
561 | * | 536 | * |
562 | * Update CP0_Compare to a new value and update the timeout. | 537 | * Update CP0_Compare to a new value and update the timeout. |
538 | * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure | ||
539 | * any pending timer interrupt is preserved. | ||
563 | */ | 540 | */ |
564 | void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare) | 541 | void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack) |
565 | { | 542 | { |
566 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 543 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
544 | int dc; | ||
545 | u32 old_compare = kvm_read_c0_guest_compare(cop0); | ||
546 | ktime_t now; | ||
547 | uint32_t count; | ||
567 | 548 | ||
568 | /* if unchanged, must just be an ack */ | 549 | /* if unchanged, must just be an ack */ |
569 | if (kvm_read_c0_guest_compare(cop0) == compare) | 550 | if (old_compare == compare) { |
551 | if (!ack) | ||
552 | return; | ||
553 | kvm_mips_callbacks->dequeue_timer_int(vcpu); | ||
554 | kvm_write_c0_guest_compare(cop0, compare); | ||
570 | return; | 555 | return; |
556 | } | ||
571 | 557 | ||
572 | /* Update compare */ | 558 | /* freeze_hrtimer() takes care of timer interrupts <= count */ |
559 | dc = kvm_mips_count_disabled(vcpu); | ||
560 | if (!dc) | ||
561 | now = kvm_mips_freeze_hrtimer(vcpu, &count); | ||
562 | |||
563 | if (ack) | ||
564 | kvm_mips_callbacks->dequeue_timer_int(vcpu); | ||
565 | |||
573 | kvm_write_c0_guest_compare(cop0, compare); | 566 | kvm_write_c0_guest_compare(cop0, compare); |
574 | 567 | ||
575 | /* Update timeout if count enabled */ | 568 | /* resume_hrtimer() takes care of timer interrupts > count */ |
576 | if (!kvm_mips_count_disabled(vcpu)) | 569 | if (!dc) |
577 | kvm_mips_update_hrtimer(vcpu); | 570 | kvm_mips_resume_hrtimer(vcpu, now, count); |
578 | } | 571 | } |
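
For context, the two caller flavours the new ack argument distinguishes look roughly like this (a sketch based on the commit description, not the verbatim call sites elsewhere in the patch): a guest MTC0 to CP0_Compare architecturally acknowledges the timer interrupt, while a write via the KVM user API must leave pending interrupt state alone.

    /* Guest write via emulated MTC0: acks CP0_Cause.TI */
    kvm_mips_write_compare(vcpu, vcpu->arch.gprs[rt], true);

    /* Userspace write via KVM_SET_ONE_REG (e.g. migration restore):
     * any pending timer interrupt must survive, so no ack. */
    kvm_mips_write_compare(vcpu, (uint32_t)v, false);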
579 | 572 | ||
580 | /** | 573 | /** |
581 | * kvm_mips_count_disable() - Disable count. | 574 | * kvm_mips_count_disable() - Disable count. |
582 | * @vcpu: Virtual CPU. | 575 | * @vcpu: Virtual CPU. |
583 | * | 576 | * |
584 | * Disable the CP0_Count timer. A timer interrupt on or before the final stop | 577 | * Disable the CP0_Count timer. A timer interrupt on or before the final stop |
585 | * time will be handled but not after. | 578 | * time will be handled but not after. |
586 | * | 579 | * |
587 | * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or | 580 | * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or |
588 | * count_ctl.DC has been set (count disabled). | 581 | * count_ctl.DC has been set (count disabled). |
589 | * | 582 | * |
590 | * Returns: The time that the timer was stopped. | 583 | * Returns: The time that the timer was stopped. |
591 | */ | 584 | */ |
592 | static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) | 585 | static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) |
593 | { | 586 | { |
594 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 587 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
595 | uint32_t count; | 588 | uint32_t count; |
596 | ktime_t now; | 589 | ktime_t now; |
597 | 590 | ||
598 | /* Stop hrtimer */ | 591 | /* Stop hrtimer */ |
599 | hrtimer_cancel(&vcpu->arch.comparecount_timer); | 592 | hrtimer_cancel(&vcpu->arch.comparecount_timer); |
600 | 593 | ||
601 | /* Set the static count from the dynamic count, handling pending TI */ | 594 | /* Set the static count from the dynamic count, handling pending TI */ |
602 | now = ktime_get(); | 595 | now = ktime_get(); |
603 | count = kvm_mips_read_count_running(vcpu, now); | 596 | count = kvm_mips_read_count_running(vcpu, now); |
604 | kvm_write_c0_guest_count(cop0, count); | 597 | kvm_write_c0_guest_count(cop0, count); |
605 | 598 | ||
606 | return now; | 599 | return now; |
607 | } | 600 | } |
608 | 601 | ||
609 | /** | 602 | /** |
610 | * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC. | 603 | * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC. |
611 | * @vcpu: Virtual CPU. | 604 | * @vcpu: Virtual CPU. |
612 | * | 605 | * |
613 | * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or | 606 | * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or |
614 | * before the final stop time will be handled if the timer isn't disabled by | 607 | * before the final stop time will be handled if the timer isn't disabled by |
615 | * count_ctl.DC, but not after. | 608 | * count_ctl.DC, but not after. |
616 | * | 609 | * |
617 | * Assumes CP0_Cause.DC is clear (count enabled). | 610 | * Assumes CP0_Cause.DC is clear (count enabled). |
618 | */ | 611 | */ |
619 | void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) | 612 | void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) |
620 | { | 613 | { |
621 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 614 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
622 | 615 | ||
623 | kvm_set_c0_guest_cause(cop0, CAUSEF_DC); | 616 | kvm_set_c0_guest_cause(cop0, CAUSEF_DC); |
624 | if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) | 617 | if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) |
625 | kvm_mips_count_disable(vcpu); | 618 | kvm_mips_count_disable(vcpu); |
626 | } | 619 | } |
627 | 620 | ||
628 | /** | 621 | /** |
629 | * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC. | 622 | * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC. |
630 | * @vcpu: Virtual CPU. | 623 | * @vcpu: Virtual CPU. |
631 | * | 624 | * |
632 | * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after | 625 | * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after |
633 | * the start time will be handled if the timer isn't disabled by count_ctl.DC, | 626 | * the start time will be handled if the timer isn't disabled by count_ctl.DC, |
634 | * potentially before even returning, so the caller should be careful with | 627 | * potentially before even returning, so the caller should be careful with |
635 | * ordering of CP0_Cause modifications so as not to lose it. | 628 | * ordering of CP0_Cause modifications so as not to lose it. |
636 | * | 629 | * |
637 | * Assumes CP0_Cause.DC is set (count disabled). | 630 | * Assumes CP0_Cause.DC is set (count disabled). |
638 | */ | 631 | */ |
639 | void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) | 632 | void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) |
640 | { | 633 | { |
641 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 634 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
642 | uint32_t count; | 635 | uint32_t count; |
643 | 636 | ||
644 | kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); | 637 | kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); |
645 | 638 | ||
646 | /* | 639 | /* |
647 | * Set the dynamic count to match the static count. | 640 | * Set the dynamic count to match the static count. |
648 | * This starts the hrtimer if count_ctl.DC allows it. | 641 | * This starts the hrtimer if count_ctl.DC allows it. |
649 | * Otherwise it conveniently updates the biases. | 642 | * Otherwise it conveniently updates the biases. |
650 | */ | 643 | */ |
651 | count = kvm_read_c0_guest_count(cop0); | 644 | count = kvm_read_c0_guest_count(cop0); |
652 | kvm_mips_write_count(vcpu, count); | 645 | kvm_mips_write_count(vcpu, count); |
653 | } | 646 | } |
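
A minimal sketch of the gating implied by the two functions above, assuming the two disable sources (CP0_Cause.DC and count_ctl.DC) act independently, as the guard in kvm_mips_count_disable_cause() suggests:

#include <stdbool.h>
#include <stdio.h>

/* The count is live only when neither disable source is set */
static bool count_running(bool cause_dc, bool ctl_dc)
{
        return !cause_dc && !ctl_dc;
}

int main(void)
{
        /* Clearing Cause.DC alone is not enough if count_ctl.DC is set */
        printf("%d\n", count_running(false, true));  /* 0 */
        printf("%d\n", count_running(false, false)); /* 1 */
        return 0;
}
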
654 | 647 | ||
655 | /** | 648 | /** |
656 | * kvm_mips_set_count_ctl() - Update the count control KVM register. | 649 | * kvm_mips_set_count_ctl() - Update the count control KVM register. |
657 | * @vcpu: Virtual CPU. | 650 | * @vcpu: Virtual CPU. |
658 | * @count_ctl: Count control register new value. | 651 | * @count_ctl: Count control register new value. |
659 | * | 652 | * |
660 | * Set the count control KVM register. The timer is updated accordingly. | 653 | * Set the count control KVM register. The timer is updated accordingly. |
661 | * | 654 | * |
662 | * Returns: -EINVAL if reserved bits are set. | 655 | * Returns: -EINVAL if reserved bits are set. |
663 | * 0 on success. | 656 | * 0 on success. |
664 | */ | 657 | */ |
665 | int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl) | 658 | int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl) |
666 | { | 659 | { |
667 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 660 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
668 | s64 changed = count_ctl ^ vcpu->arch.count_ctl; | 661 | s64 changed = count_ctl ^ vcpu->arch.count_ctl; |
669 | s64 delta; | 662 | s64 delta; |
670 | ktime_t expire, now; | 663 | ktime_t expire, now; |
671 | uint32_t count, compare; | 664 | uint32_t count, compare; |
672 | 665 | ||
673 | /* Only allow defined bits to be changed */ | 666 | /* Only allow defined bits to be changed */ |
674 | if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC)) | 667 | if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC)) |
675 | return -EINVAL; | 668 | return -EINVAL; |
676 | 669 | ||
677 | /* Apply new value */ | 670 | /* Apply new value */ |
678 | vcpu->arch.count_ctl = count_ctl; | 671 | vcpu->arch.count_ctl = count_ctl; |
679 | 672 | ||
680 | /* Master CP0_Count disable */ | 673 | /* Master CP0_Count disable */ |
681 | if (changed & KVM_REG_MIPS_COUNT_CTL_DC) { | 674 | if (changed & KVM_REG_MIPS_COUNT_CTL_DC) { |
682 | /* Is CP0_Cause.DC already disabling CP0_Count? */ | 675 | /* Is CP0_Cause.DC already disabling CP0_Count? */ |
683 | if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) { | 676 | if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) { |
684 | if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) | 677 | if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) |
685 | /* Just record the current time */ | 678 | /* Just record the current time */ |
686 | vcpu->arch.count_resume = ktime_get(); | 679 | vcpu->arch.count_resume = ktime_get(); |
687 | } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) { | 680 | } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) { |
688 | /* disable timer and record current time */ | 681 | /* disable timer and record current time */ |
689 | vcpu->arch.count_resume = kvm_mips_count_disable(vcpu); | 682 | vcpu->arch.count_resume = kvm_mips_count_disable(vcpu); |
690 | } else { | 683 | } else { |
691 | /* | 684 | /* |
692 | * Calculate timeout relative to static count at resume | 685 | * Calculate timeout relative to static count at resume |
693 | * time (wrap 0 to 2^32). | 686 | * time (wrap 0 to 2^32). |
694 | */ | 687 | */ |
695 | count = kvm_read_c0_guest_count(cop0); | 688 | count = kvm_read_c0_guest_count(cop0); |
696 | compare = kvm_read_c0_guest_compare(cop0); | 689 | compare = kvm_read_c0_guest_compare(cop0); |
697 | delta = (u64)(uint32_t)(compare - count - 1) + 1; | 690 | delta = (u64)(uint32_t)(compare - count - 1) + 1; |
698 | delta = div_u64(delta * NSEC_PER_SEC, | 691 | delta = div_u64(delta * NSEC_PER_SEC, |
699 | vcpu->arch.count_hz); | 692 | vcpu->arch.count_hz); |
700 | expire = ktime_add_ns(vcpu->arch.count_resume, delta); | 693 | expire = ktime_add_ns(vcpu->arch.count_resume, delta); |
701 | 694 | ||
702 | /* Handle pending interrupt */ | 695 | /* Handle pending interrupt */ |
703 | now = ktime_get(); | 696 | now = ktime_get(); |
704 | if (ktime_compare(now, expire) >= 0) | 697 | if (ktime_compare(now, expire) >= 0) |
705 | /* Nothing should be waiting on the timeout */ | 698 | /* Nothing should be waiting on the timeout */ |
706 | kvm_mips_callbacks->queue_timer_int(vcpu); | 699 | kvm_mips_callbacks->queue_timer_int(vcpu); |
707 | 700 | ||
708 | /* Resume hrtimer without changing bias */ | 701 | /* Resume hrtimer without changing bias */ |
709 | count = kvm_mips_read_count_running(vcpu, now); | 702 | count = kvm_mips_read_count_running(vcpu, now); |
710 | kvm_mips_resume_hrtimer(vcpu, now, count); | 703 | kvm_mips_resume_hrtimer(vcpu, now, count); |
711 | } | 704 | } |
712 | } | 705 | } |
713 | 706 | ||
714 | return 0; | 707 | return 0; |
715 | } | 708 | } |
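
A standalone sketch of the wrap arithmetic above; note how compare == count yields a full 2^32-tick period rather than zero (the 100 MHz count_hz is hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint64_t ticks_to_expiry(uint32_t count, uint32_t compare)
{
        /* (compare - count - 1) + 1 maps a zero distance to 2^32 */
        return (uint64_t)(uint32_t)(compare - count - 1) + 1;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)ticks_to_expiry(100, 200)); /* 100 */
        printf("%llu\n", (unsigned long long)ticks_to_expiry(200, 200)); /* 4294967296 */

        /* Converted to nanoseconds as above: delta * NSEC_PER_SEC / count_hz */
        uint64_t ns = ticks_to_expiry(200, 200) * 1000000000ULL / 100000000;
        printf("%llu ns\n", (unsigned long long)ns); /* ~42.9s at 100 MHz */
        return 0;
}
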
716 | 709 | ||
717 | /** | 710 | /** |
718 | * kvm_mips_set_count_resume() - Update the count resume KVM register. | 711 | * kvm_mips_set_count_resume() - Update the count resume KVM register. |
719 | * @vcpu: Virtual CPU. | 712 | * @vcpu: Virtual CPU. |
720 | * @count_resume: Count resume register new value. | 713 | * @count_resume: Count resume register new value. |
721 | * | 714 | * |
722 | * Set the count resume KVM register. | 715 | * Set the count resume KVM register. |
723 | * | 716 | * |
724 | * Returns: -EINVAL if out of valid range (0..now). | 717 | * Returns: -EINVAL if out of valid range (0..now). |
725 | * 0 on success. | 718 | * 0 on success. |
726 | */ | 719 | */ |
727 | int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume) | 720 | int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume) |
728 | { | 721 | { |
729 | /* | 722 | /* |
730 | * It doesn't make sense for the resume time to be in the future, as it | 723 | * It doesn't make sense for the resume time to be in the future, as it |
731 | * would be possible for the next interrupt to be more than a full | 724 | * would be possible for the next interrupt to be more than a full |
732 | * period in the future. | 725 | * period in the future. |
733 | */ | 726 | */ |
734 | if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get())) | 727 | if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get())) |
735 | return -EINVAL; | 728 | return -EINVAL; |
736 | 729 | ||
737 | vcpu->arch.count_resume = ns_to_ktime(count_resume); | 730 | vcpu->arch.count_resume = ns_to_ktime(count_resume); |
738 | return 0; | 731 | return 0; |
739 | } | 732 | } |
740 | 733 | ||
741 | /** | 734 | /** |
742 | * kvm_mips_count_timeout() - Push timer forward on timeout. | 735 | * kvm_mips_count_timeout() - Push timer forward on timeout. |
743 | * @vcpu: Virtual CPU. | 736 | * @vcpu: Virtual CPU. |
744 | * | 737 | * |
745 | * Handle an hrtimer event by pushing the hrtimer forward a period. | 738 | * Handle an hrtimer event by pushing the hrtimer forward a period. |
746 | * | 739 | * |
747 | * Returns: The hrtimer_restart value to return to the hrtimer subsystem. | 740 | * Returns: The hrtimer_restart value to return to the hrtimer subsystem. |
748 | */ | 741 | */ |
749 | enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu) | 742 | enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu) |
750 | { | 743 | { |
751 | /* Add the Count period to the current expiry time */ | 744 | /* Add the Count period to the current expiry time */ |
752 | hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer, | 745 | hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer, |
753 | vcpu->arch.count_period); | 746 | vcpu->arch.count_period); |
754 | return HRTIMER_RESTART; | 747 | return HRTIMER_RESTART; |
755 | } | 748 | } |
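
A sketch of the drift-free restart pattern this handler relies on: the next expiry is derived from the previous expiry, not from the time the handler happened to run, so handler latency does not accumulate (values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t period  = 10000000;  /* hypothetical 10ms Count period */
        uint64_t expires = 10000000;  /* first expiry */
        uint64_t now     = 13000000;  /* handler ran 3ms late */

        expires += period;            /* hrtimer_add_expires_ns() analogue */
        printf("next expiry: %llu (not %llu)\n",
               (unsigned long long)expires,
               (unsigned long long)(now + period));
        return 0;
}
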
756 | 749 | ||
757 | enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) | 750 | enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) |
758 | { | 751 | { |
759 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 752 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
760 | enum emulation_result er = EMULATE_DONE; | 753 | enum emulation_result er = EMULATE_DONE; |
761 | 754 | ||
762 | if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { | 755 | if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { |
763 | kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, | 756 | kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, |
764 | kvm_read_c0_guest_epc(cop0)); | 757 | kvm_read_c0_guest_epc(cop0)); |
765 | kvm_clear_c0_guest_status(cop0, ST0_EXL); | 758 | kvm_clear_c0_guest_status(cop0, ST0_EXL); |
766 | vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); | 759 | vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); |
767 | 760 | ||
768 | } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { | 761 | } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { |
769 | kvm_clear_c0_guest_status(cop0, ST0_ERL); | 762 | kvm_clear_c0_guest_status(cop0, ST0_ERL); |
770 | vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); | 763 | vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); |
771 | } else { | 764 | } else { |
772 | kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", | 765 | kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", |
773 | vcpu->arch.pc); | 766 | vcpu->arch.pc); |
774 | er = EMULATE_FAIL; | 767 | er = EMULATE_FAIL; |
775 | } | 768 | } |
776 | 769 | ||
777 | return er; | 770 | return er; |
778 | } | 771 | } |
779 | 772 | ||
780 | enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) | 773 | enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) |
781 | { | 774 | { |
782 | kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, | 775 | kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, |
783 | vcpu->arch.pending_exceptions); | 776 | vcpu->arch.pending_exceptions); |
784 | 777 | ||
785 | ++vcpu->stat.wait_exits; | 778 | ++vcpu->stat.wait_exits; |
786 | trace_kvm_exit(vcpu, WAIT_EXITS); | 779 | trace_kvm_exit(vcpu, WAIT_EXITS); |
787 | if (!vcpu->arch.pending_exceptions) { | 780 | if (!vcpu->arch.pending_exceptions) { |
788 | vcpu->arch.wait = 1; | 781 | vcpu->arch.wait = 1; |
789 | kvm_vcpu_block(vcpu); | 782 | kvm_vcpu_block(vcpu); |
790 | 783 | ||
791 | /* | 784 | /* |
792 | * If we are runnable, then definitely go off to user space to | 785 | * If we are runnable, then definitely go off to user space to |
793 | * check if any I/O interrupts are pending. | 786 | * check if any I/O interrupts are pending. |
794 | */ | 787 | */ |
795 | if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { | 788 | if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { |
796 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | 789 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); |
797 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | 790 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; |
798 | } | 791 | } |
799 | } | 792 | } |
800 | 793 | ||
801 | return EMULATE_DONE; | 794 | return EMULATE_DONE; |
802 | } | 795 | } |
803 | 796 | ||
804 | /* | 797 | /* |
805 | * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that | 798 | * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that |
806 | * we can catch this if things ever change | 799 | * we can catch this if things ever change |
807 | */ | 800 | */ |
808 | enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) | 801 | enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) |
809 | { | 802 | { |
810 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 803 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
811 | uint32_t pc = vcpu->arch.pc; | 804 | uint32_t pc = vcpu->arch.pc; |
812 | 805 | ||
813 | kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); | 806 | kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); |
814 | return EMULATE_FAIL; | 807 | return EMULATE_FAIL; |
815 | } | 808 | } |
816 | 809 | ||
817 | /* Write Guest TLB Entry @ Index */ | 810 | /* Write Guest TLB Entry @ Index */ |
818 | enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) | 811 | enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) |
819 | { | 812 | { |
820 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 813 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
821 | int index = kvm_read_c0_guest_index(cop0); | 814 | int index = kvm_read_c0_guest_index(cop0); |
822 | struct kvm_mips_tlb *tlb = NULL; | 815 | struct kvm_mips_tlb *tlb = NULL; |
823 | uint32_t pc = vcpu->arch.pc; | 816 | uint32_t pc = vcpu->arch.pc; |
824 | 817 | ||
825 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { | 818 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { |
826 | kvm_debug("%s: illegal index: %d\n", __func__, index); | 819 | kvm_debug("%s: illegal index: %d\n", __func__, index); |
827 | kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", | 820 | kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", |
828 | pc, index, kvm_read_c0_guest_entryhi(cop0), | 821 | pc, index, kvm_read_c0_guest_entryhi(cop0), |
829 | kvm_read_c0_guest_entrylo0(cop0), | 822 | kvm_read_c0_guest_entrylo0(cop0), |
830 | kvm_read_c0_guest_entrylo1(cop0), | 823 | kvm_read_c0_guest_entrylo1(cop0), |
831 | kvm_read_c0_guest_pagemask(cop0)); | 824 | kvm_read_c0_guest_pagemask(cop0)); |
832 | index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; | 825 | index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; |
833 | } | 826 | } |
834 | 827 | ||
835 | tlb = &vcpu->arch.guest_tlb[index]; | 828 | tlb = &vcpu->arch.guest_tlb[index]; |
836 | /* | 829 | /* |
837 | * Probe the shadow host TLB for the entry being overwritten, if one | 830 | * Probe the shadow host TLB for the entry being overwritten, if one |
838 | * matches, invalidate it | 831 | * matches, invalidate it |
839 | */ | 832 | */ |
840 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); | 833 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); |
841 | 834 | ||
842 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); | 835 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); |
843 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); | 836 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); |
844 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); | 837 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); |
845 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); | 838 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); |
846 | 839 | ||
847 | kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", | 840 | kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", |
848 | pc, index, kvm_read_c0_guest_entryhi(cop0), | 841 | pc, index, kvm_read_c0_guest_entryhi(cop0), |
849 | kvm_read_c0_guest_entrylo0(cop0), | 842 | kvm_read_c0_guest_entrylo0(cop0), |
850 | kvm_read_c0_guest_entrylo1(cop0), | 843 | kvm_read_c0_guest_entrylo1(cop0), |
851 | kvm_read_c0_guest_pagemask(cop0)); | 844 | kvm_read_c0_guest_pagemask(cop0)); |
852 | 845 | ||
853 | return EMULATE_DONE; | 846 | return EMULATE_DONE; |
854 | } | 847 | } |
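
A standalone sketch of the index sanitisation above, assuming a hypothetical 64-entry guest TLB; bit 31 corresponds to Index.P (probe failure):

#include <stdio.h>

#define GUEST_TLB_SIZE 64  /* hypothetical KVM_MIPS_GUEST_TLB_SIZE */

int main(void)
{
        /* P bit set and out of range (two's-complement representation) */
        int index = (int)0x80000041;

        /* Clear the P bit, then wrap into the guest TLB */
        index = (index & ~0x80000000) % GUEST_TLB_SIZE;
        printf("%d\n", index);  /* 0x41 % 64 = 1 */
        return 0;
}
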
855 | 848 | ||
856 | /* Write Guest TLB Entry @ Random Index */ | 849 | /* Write Guest TLB Entry @ Random Index */ |
857 | enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) | 850 | enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) |
858 | { | 851 | { |
859 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 852 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
860 | struct kvm_mips_tlb *tlb = NULL; | 853 | struct kvm_mips_tlb *tlb = NULL; |
861 | uint32_t pc = vcpu->arch.pc; | 854 | uint32_t pc = vcpu->arch.pc; |
862 | int index; | 855 | int index; |
863 | 856 | ||
864 | get_random_bytes(&index, sizeof(index)); | 857 | get_random_bytes(&index, sizeof(index)); |
865 | index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); | 858 | index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); |
866 | 859 | ||
867 | tlb = &vcpu->arch.guest_tlb[index]; | 860 | tlb = &vcpu->arch.guest_tlb[index]; |
868 | 861 | ||
869 | /* | 862 | /* |
870 | * Probe the shadow host TLB for the entry being overwritten, if one | 863 | * Probe the shadow host TLB for the entry being overwritten, if one |
871 | * matches, invalidate it | 864 | * matches, invalidate it |
872 | */ | 865 | */ |
873 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); | 866 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); |
874 | 867 | ||
875 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); | 868 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); |
876 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); | 869 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); |
877 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); | 870 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); |
878 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); | 871 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); |
879 | 872 | ||
880 | kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", | 873 | kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", |
881 | pc, index, kvm_read_c0_guest_entryhi(cop0), | 874 | pc, index, kvm_read_c0_guest_entryhi(cop0), |
882 | kvm_read_c0_guest_entrylo0(cop0), | 875 | kvm_read_c0_guest_entrylo0(cop0), |
883 | kvm_read_c0_guest_entrylo1(cop0)); | 876 | kvm_read_c0_guest_entrylo1(cop0)); |
884 | 877 | ||
885 | return EMULATE_DONE; | 878 | return EMULATE_DONE; |
886 | } | 879 | } |
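
A sketch of the masking above; it stays in range only because KVM_MIPS_GUEST_TLB_SIZE is assumed to be a power of two:

#include <stdio.h>

int main(void)
{
        unsigned int size = 64;  /* hypothetical power-of-two TLB size */
        int raw = -1234567;      /* any random bit pattern */

        /* Equivalent to raw mod size for power-of-two sizes */
        unsigned int index = (unsigned int)raw & (size - 1);
        printf("%u\n", index);   /* always in [0, 63] */
        return 0;
}
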
887 | 880 | ||
888 | enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) | 881 | enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) |
889 | { | 882 | { |
890 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 883 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
891 | long entryhi = kvm_read_c0_guest_entryhi(cop0); | 884 | long entryhi = kvm_read_c0_guest_entryhi(cop0); |
892 | uint32_t pc = vcpu->arch.pc; | 885 | uint32_t pc = vcpu->arch.pc; |
893 | int index = -1; | 886 | int index = -1; |
894 | 887 | ||
895 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); | 888 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); |
896 | 889 | ||
897 | kvm_write_c0_guest_index(cop0, index); | 890 | kvm_write_c0_guest_index(cop0, index); |
898 | 891 | ||
899 | kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, | 892 | kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, |
900 | index); | 893 | index); |
901 | 894 | ||
902 | return EMULATE_DONE; | 895 | return EMULATE_DONE; |
903 | } | 896 | } |
904 | 897 | ||
905 | /** | 898 | /** |
906 | * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1 | 899 | * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1 |
907 | * @vcpu: Virtual CPU. | 900 | * @vcpu: Virtual CPU. |
908 | * | 901 | * |
909 | * Finds the mask of bits which are writable in the guest's Config1 CP0 | 902 | * Finds the mask of bits which are writable in the guest's Config1 CP0 |
910 | * register, by userland (currently read-only to the guest). | 903 | * register, by userland (currently read-only to the guest). |
911 | */ | 904 | */ |
912 | unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu) | 905 | unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu) |
913 | { | 906 | { |
914 | unsigned int mask = 0; | 907 | unsigned int mask = 0; |
915 | 908 | ||
916 | /* Permit FPU to be present if FPU is supported */ | 909 | /* Permit FPU to be present if FPU is supported */ |
917 | if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) | 910 | if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) |
918 | mask |= MIPS_CONF1_FP; | 911 | mask |= MIPS_CONF1_FP; |
919 | 912 | ||
920 | return mask; | 913 | return mask; |
921 | } | 914 | } |
922 | 915 | ||
923 | /** | 916 | /** |
924 | * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3 | 917 | * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3 |
925 | * @vcpu: Virtual CPU. | 918 | * @vcpu: Virtual CPU. |
926 | * | 919 | * |
927 | * Finds the mask of bits which are writable in the guest's Config3 CP0 | 920 | * Finds the mask of bits which are writable in the guest's Config3 CP0 |
928 | * register, by userland (currently read-only to the guest). | 921 | * register, by userland (currently read-only to the guest). |
929 | */ | 922 | */ |
930 | unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) | 923 | unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) |
931 | { | 924 | { |
932 | /* Config4 is optional */ | 925 | /* Config4 is optional */ |
933 | unsigned int mask = MIPS_CONF_M; | 926 | unsigned int mask = MIPS_CONF_M; |
934 | 927 | ||
935 | /* Permit MSA to be present if MSA is supported */ | 928 | /* Permit MSA to be present if MSA is supported */ |
936 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) | 929 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) |
937 | mask |= MIPS_CONF3_MSA; | 930 | mask |= MIPS_CONF3_MSA; |
938 | 931 | ||
939 | return mask; | 932 | return mask; |
940 | } | 933 | } |
941 | 934 | ||
942 | /** | 935 | /** |
943 | * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4 | 936 | * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4 |
944 | * @vcpu: Virtual CPU. | 937 | * @vcpu: Virtual CPU. |
945 | * | 938 | * |
946 | * Finds the mask of bits which are writable in the guest's Config4 CP0 | 939 | * Finds the mask of bits which are writable in the guest's Config4 CP0 |
947 | * register, by userland (currently read-only to the guest). | 940 | * register, by userland (currently read-only to the guest). |
948 | */ | 941 | */ |
949 | unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) | 942 | unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) |
950 | { | 943 | { |
951 | /* Config5 is optional */ | 944 | /* Config5 is optional */ |
952 | return MIPS_CONF_M; | 945 | return MIPS_CONF_M; |
953 | } | 946 | } |
954 | 947 | ||
955 | /** | 948 | /** |
956 | * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5 | 949 | * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5 |
957 | * @vcpu: Virtual CPU. | 950 | * @vcpu: Virtual CPU. |
958 | * | 951 | * |
959 | * Finds the mask of bits which are writable in the guest's Config5 CP0 | 952 | * Finds the mask of bits which are writable in the guest's Config5 CP0 |
960 | * register, by the guest itself. | 953 | * register, by the guest itself. |
961 | */ | 954 | */ |
962 | unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu) | 955 | unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu) |
963 | { | 956 | { |
964 | unsigned int mask = 0; | 957 | unsigned int mask = 0; |
965 | 958 | ||
966 | /* Permit MSAEn changes if MSA supported and enabled */ | 959 | /* Permit MSAEn changes if MSA supported and enabled */ |
967 | if (kvm_mips_guest_has_msa(&vcpu->arch)) | 960 | if (kvm_mips_guest_has_msa(&vcpu->arch)) |
968 | mask |= MIPS_CONF5_MSAEN; | 961 | mask |= MIPS_CONF5_MSAEN; |
969 | 962 | ||
970 | /* | 963 | /* |
971 | * Permit guest FPU mode changes if FPU is enabled and the relevant | 964 | * Permit guest FPU mode changes if FPU is enabled and the relevant |
972 | * feature exists according to FIR register. | 965 | * feature exists according to FIR register. |
973 | */ | 966 | */ |
974 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) { | 967 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) { |
975 | if (cpu_has_fre) | 968 | if (cpu_has_fre) |
976 | mask |= MIPS_CONF5_FRE; | 969 | mask |= MIPS_CONF5_FRE; |
977 | /* We don't support UFR or UFE */ | 970 | /* We don't support UFR or UFE */ |
978 | } | 971 | } |
979 | 972 | ||
980 | return mask; | 973 | return mask; |
981 | } | 974 | } |
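
A standalone sketch of how these write masks are applied by callers (compare the Config5 handling in kvm_mips_emulate_CP0 below): XOR-ing the masked change into the old value updates writable bits and leaves the rest untouched (values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t old_val = 0x000000a5;
        uint32_t val     = 0xffffffff;  /* guest tries to set everything */
        uint32_t wrmask  = 0x00000101;  /* hypothetical writable bits */

        uint32_t change = (val ^ old_val) & wrmask;
        val = old_val ^ change;
        printf("%#x\n", (unsigned)val); /* 0x1a5: only masked bits flipped */
        return 0;
}
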
982 | 975 | ||
983 | enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, | 976 | enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, |
984 | uint32_t cause, struct kvm_run *run, | 977 | uint32_t cause, struct kvm_run *run, |
985 | struct kvm_vcpu *vcpu) | 978 | struct kvm_vcpu *vcpu) |
986 | { | 979 | { |
987 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 980 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
988 | enum emulation_result er = EMULATE_DONE; | 981 | enum emulation_result er = EMULATE_DONE; |
989 | int32_t rt, rd, copz, sel, co_bit, op; | 982 | int32_t rt, rd, copz, sel, co_bit, op; |
990 | uint32_t pc = vcpu->arch.pc; | 983 | uint32_t pc = vcpu->arch.pc; |
991 | unsigned long curr_pc; | 984 | unsigned long curr_pc; |
992 | 985 | ||
993 | /* | 986 | /* |
994 | * Update PC and hold onto current PC in case there is | 987 | * Update PC and hold onto current PC in case there is |
995 | * an error and we want to rollback the PC | 988 | * an error and we want to rollback the PC |
996 | */ | 989 | */ |
997 | curr_pc = vcpu->arch.pc; | 990 | curr_pc = vcpu->arch.pc; |
998 | er = update_pc(vcpu, cause); | 991 | er = update_pc(vcpu, cause); |
999 | if (er == EMULATE_FAIL) | 992 | if (er == EMULATE_FAIL) |
1000 | return er; | 993 | return er; |
1001 | 994 | ||
1002 | copz = (inst >> 21) & 0x1f; | 995 | copz = (inst >> 21) & 0x1f; |
1003 | rt = (inst >> 16) & 0x1f; | 996 | rt = (inst >> 16) & 0x1f; |
1004 | rd = (inst >> 11) & 0x1f; | 997 | rd = (inst >> 11) & 0x1f; |
1005 | sel = inst & 0x7; | 998 | sel = inst & 0x7; |
1006 | co_bit = (inst >> 25) & 1; | 999 | co_bit = (inst >> 25) & 1; |
1007 | 1000 | ||
1008 | if (co_bit) { | 1001 | if (co_bit) { |
1009 | op = (inst) & 0xff; | 1002 | op = (inst) & 0xff; |
1010 | 1003 | ||
1011 | switch (op) { | 1004 | switch (op) { |
1012 | case tlbr_op: /* Read indexed TLB entry */ | 1005 | case tlbr_op: /* Read indexed TLB entry */ |
1013 | er = kvm_mips_emul_tlbr(vcpu); | 1006 | er = kvm_mips_emul_tlbr(vcpu); |
1014 | break; | 1007 | break; |
1015 | case tlbwi_op: /* Write indexed */ | 1008 | case tlbwi_op: /* Write indexed */ |
1016 | er = kvm_mips_emul_tlbwi(vcpu); | 1009 | er = kvm_mips_emul_tlbwi(vcpu); |
1017 | break; | 1010 | break; |
1018 | case tlbwr_op: /* Write random */ | 1011 | case tlbwr_op: /* Write random */ |
1019 | er = kvm_mips_emul_tlbwr(vcpu); | 1012 | er = kvm_mips_emul_tlbwr(vcpu); |
1020 | break; | 1013 | break; |
1021 | case tlbp_op: /* TLB Probe */ | 1014 | case tlbp_op: /* TLB Probe */ |
1022 | er = kvm_mips_emul_tlbp(vcpu); | 1015 | er = kvm_mips_emul_tlbp(vcpu); |
1023 | break; | 1016 | break; |
1024 | case rfe_op: | 1017 | case rfe_op: |
1025 | kvm_err("!!!COP0_RFE!!!\n"); | 1018 | kvm_err("!!!COP0_RFE!!!\n"); |
1026 | break; | 1019 | break; |
1027 | case eret_op: | 1020 | case eret_op: |
1028 | er = kvm_mips_emul_eret(vcpu); | 1021 | er = kvm_mips_emul_eret(vcpu); |
1029 | goto dont_update_pc; | 1022 | goto dont_update_pc; |
1030 | break; | 1023 | break; |
1031 | case wait_op: | 1024 | case wait_op: |
1032 | er = kvm_mips_emul_wait(vcpu); | 1025 | er = kvm_mips_emul_wait(vcpu); |
1033 | break; | 1026 | break; |
1034 | } | 1027 | } |
1035 | } else { | 1028 | } else { |
1036 | switch (copz) { | 1029 | switch (copz) { |
1037 | case mfc_op: | 1030 | case mfc_op: |
1038 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS | 1031 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS |
1039 | cop0->stat[rd][sel]++; | 1032 | cop0->stat[rd][sel]++; |
1040 | #endif | 1033 | #endif |
1041 | /* Get reg */ | 1034 | /* Get reg */ |
1042 | if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { | 1035 | if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { |
1043 | vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu); | 1036 | vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu); |
1044 | } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { | 1037 | } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { |
1045 | vcpu->arch.gprs[rt] = 0x0; | 1038 | vcpu->arch.gprs[rt] = 0x0; |
1046 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1039 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1047 | kvm_mips_trans_mfc0(inst, opc, vcpu); | 1040 | kvm_mips_trans_mfc0(inst, opc, vcpu); |
1048 | #endif | 1041 | #endif |
1049 | } else { | 1042 | } else { |
1050 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; | 1043 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; |
1051 | 1044 | ||
1052 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1045 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1053 | kvm_mips_trans_mfc0(inst, opc, vcpu); | 1046 | kvm_mips_trans_mfc0(inst, opc, vcpu); |
1054 | #endif | 1047 | #endif |
1055 | } | 1048 | } |
1056 | 1049 | ||
1057 | kvm_debug | 1050 | kvm_debug |
1058 | ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n", | 1051 | ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n", |
1059 | pc, rd, sel, rt, vcpu->arch.gprs[rt]); | 1052 | pc, rd, sel, rt, vcpu->arch.gprs[rt]); |
1060 | 1053 | ||
1061 | break; | 1054 | break; |
1062 | 1055 | ||
1063 | case dmfc_op: | 1056 | case dmfc_op: |
1064 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; | 1057 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; |
1065 | break; | 1058 | break; |
1066 | 1059 | ||
1067 | case mtc_op: | 1060 | case mtc_op: |
1068 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS | 1061 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS |
1069 | cop0->stat[rd][sel]++; | 1062 | cop0->stat[rd][sel]++; |
1070 | #endif | 1063 | #endif |
1071 | if ((rd == MIPS_CP0_TLB_INDEX) | 1064 | if ((rd == MIPS_CP0_TLB_INDEX) |
1072 | && (vcpu->arch.gprs[rt] >= | 1065 | && (vcpu->arch.gprs[rt] >= |
1073 | KVM_MIPS_GUEST_TLB_SIZE)) { | 1066 | KVM_MIPS_GUEST_TLB_SIZE)) { |
1074 | kvm_err("Invalid TLB Index: %ld", | 1067 | kvm_err("Invalid TLB Index: %ld", |
1075 | vcpu->arch.gprs[rt]); | 1068 | vcpu->arch.gprs[rt]); |
1076 | er = EMULATE_FAIL; | 1069 | er = EMULATE_FAIL; |
1077 | break; | 1070 | break; |
1078 | } | 1071 | } |
1079 | #define C0_EBASE_CORE_MASK 0xff | 1072 | #define C0_EBASE_CORE_MASK 0xff |
1080 | if ((rd == MIPS_CP0_PRID) && (sel == 1)) { | 1073 | if ((rd == MIPS_CP0_PRID) && (sel == 1)) { |
1081 | /* Preserve CORE number */ | 1074 | /* Preserve CORE number */ |
1082 | kvm_change_c0_guest_ebase(cop0, | 1075 | kvm_change_c0_guest_ebase(cop0, |
1083 | ~(C0_EBASE_CORE_MASK), | 1076 | ~(C0_EBASE_CORE_MASK), |
1084 | vcpu->arch.gprs[rt]); | 1077 | vcpu->arch.gprs[rt]); |
1085 | kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n", | 1078 | kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n", |
1086 | kvm_read_c0_guest_ebase(cop0)); | 1079 | kvm_read_c0_guest_ebase(cop0)); |
1087 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { | 1080 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { |
1088 | uint32_t nasid = | 1081 | uint32_t nasid = |
1089 | vcpu->arch.gprs[rt] & ASID_MASK; | 1082 | vcpu->arch.gprs[rt] & ASID_MASK; |
1090 | if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && | 1083 | if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && |
1091 | ((kvm_read_c0_guest_entryhi(cop0) & | 1084 | ((kvm_read_c0_guest_entryhi(cop0) & |
1092 | ASID_MASK) != nasid)) { | 1085 | ASID_MASK) != nasid)) { |
1093 | kvm_debug("MTCz, change ASID from %#lx to %#lx\n", | 1086 | kvm_debug("MTCz, change ASID from %#lx to %#lx\n", |
1094 | kvm_read_c0_guest_entryhi(cop0) | 1087 | kvm_read_c0_guest_entryhi(cop0) |
1095 | & ASID_MASK, | 1088 | & ASID_MASK, |
1096 | vcpu->arch.gprs[rt] | 1089 | vcpu->arch.gprs[rt] |
1097 | & ASID_MASK); | 1090 | & ASID_MASK); |
1098 | 1091 | ||
1099 | /* Blow away the shadow host TLBs */ | 1092 | /* Blow away the shadow host TLBs */ |
1100 | kvm_mips_flush_host_tlb(1); | 1093 | kvm_mips_flush_host_tlb(1); |
1101 | } | 1094 | } |
1102 | kvm_write_c0_guest_entryhi(cop0, | 1095 | kvm_write_c0_guest_entryhi(cop0, |
1103 | vcpu->arch.gprs[rt]); | 1096 | vcpu->arch.gprs[rt]); |
1104 | } | 1097 | } |
1105 | /* Are we writing to COUNT */ | 1098 | /* Are we writing to COUNT */ |
1106 | else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { | 1099 | else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { |
1107 | kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); | 1100 | kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); |
1108 | goto done; | 1101 | goto done; |
1109 | } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { | 1102 | } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { |
1110 | kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n", | 1103 | kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n", |
1111 | pc, kvm_read_c0_guest_compare(cop0), | 1104 | pc, kvm_read_c0_guest_compare(cop0), |
1112 | vcpu->arch.gprs[rt]); | 1105 | vcpu->arch.gprs[rt]); |
1113 | 1106 | ||
1114 | /* If we are writing to COMPARE */ | 1107 | /* If we are writing to COMPARE */ |
1115 | /* Clear pending timer interrupt, if any */ | 1108 | /* Clear pending timer interrupt, if any */ |
1116 | kvm_mips_callbacks->dequeue_timer_int(vcpu); | ||
1117 | kvm_mips_write_compare(vcpu, | 1109 | kvm_mips_write_compare(vcpu, |
1118 | vcpu->arch.gprs[rt]); | 1110 | vcpu->arch.gprs[rt], |
1111 | true); | ||
1119 | } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { | 1112 | } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { |
1120 | unsigned int old_val, val, change; | 1113 | unsigned int old_val, val, change; |
1121 | 1114 | ||
1122 | old_val = kvm_read_c0_guest_status(cop0); | 1115 | old_val = kvm_read_c0_guest_status(cop0); |
1123 | val = vcpu->arch.gprs[rt]; | 1116 | val = vcpu->arch.gprs[rt]; |
1124 | change = val ^ old_val; | 1117 | change = val ^ old_val; |
1125 | 1118 | ||
1126 | /* Make sure that the NMI bit is never set */ | 1119 | /* Make sure that the NMI bit is never set */ |
1127 | val &= ~ST0_NMI; | 1120 | val &= ~ST0_NMI; |
1128 | 1121 | ||
1129 | /* | 1122 | /* |
1130 | * Don't allow CU1 or FR to be set unless FPU | 1123 | * Don't allow CU1 or FR to be set unless FPU |
1131 | * capability enabled and exists in guest | 1124 | * capability enabled and exists in guest |
1132 | * configuration. | 1125 | * configuration. |
1133 | */ | 1126 | */ |
1134 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | 1127 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) |
1135 | val &= ~(ST0_CU1 | ST0_FR); | 1128 | val &= ~(ST0_CU1 | ST0_FR); |
1136 | 1129 | ||
1137 | /* | 1130 | /* |
1138 | * Also don't allow FR to be set if host doesn't | 1131 | * Also don't allow FR to be set if host doesn't |
1139 | * support it. | 1132 | * support it. |
1140 | */ | 1133 | */ |
1141 | if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64)) | 1134 | if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64)) |
1142 | val &= ~ST0_FR; | 1135 | val &= ~ST0_FR; |
1143 | 1136 | ||
1144 | 1137 | ||
1145 | /* Handle changes in FPU mode */ | 1138 | /* Handle changes in FPU mode */ |
1146 | preempt_disable(); | 1139 | preempt_disable(); |
1147 | 1140 | ||
1148 | /* | 1141 | /* |
1149 | * FPU and Vector register state is made | 1142 | * FPU and Vector register state is made |
1150 | * UNPREDICTABLE by a change of FR, so don't | 1143 | * UNPREDICTABLE by a change of FR, so don't |
1151 | * even bother saving it. | 1144 | * even bother saving it. |
1152 | */ | 1145 | */ |
1153 | if (change & ST0_FR) | 1146 | if (change & ST0_FR) |
1154 | kvm_drop_fpu(vcpu); | 1147 | kvm_drop_fpu(vcpu); |
1155 | 1148 | ||
1156 | /* | 1149 | /* |
1157 | * If MSA state is already live, it is undefined | 1150 | * If MSA state is already live, it is undefined |
1158 | * how it interacts with FR=0 FPU state, and we | 1151 | * how it interacts with FR=0 FPU state, and we |
1159 | * don't want to hit reserved instruction | 1152 | * don't want to hit reserved instruction |
1160 | * exceptions trying to save the MSA state later | 1153 | * exceptions trying to save the MSA state later |
1161 | * when CU=1 && FR=1, so play it safe and save | 1154 | * when CU=1 && FR=1, so play it safe and save |
1162 | * it first. | 1155 | * it first. |
1163 | */ | 1156 | */ |
1164 | if (change & ST0_CU1 && !(val & ST0_FR) && | 1157 | if (change & ST0_CU1 && !(val & ST0_FR) && |
1165 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) | 1158 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) |
1166 | kvm_lose_fpu(vcpu); | 1159 | kvm_lose_fpu(vcpu); |
1167 | 1160 | ||
1168 | /* | 1161 | /* |
1169 | * Propagate CU1 (FPU enable) changes | 1162 | * Propagate CU1 (FPU enable) changes |
1170 | * immediately if the FPU context is already | 1163 | * immediately if the FPU context is already |
1171 | * loaded. When disabling we leave the context | 1164 | * loaded. When disabling we leave the context |
1172 | * loaded so it can be quickly enabled again in | 1165 | * loaded so it can be quickly enabled again in |
1173 | * the near future. | 1166 | * the near future. |
1174 | */ | 1167 | */ |
1175 | if (change & ST0_CU1 && | 1168 | if (change & ST0_CU1 && |
1176 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) | 1169 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) |
1177 | change_c0_status(ST0_CU1, val); | 1170 | change_c0_status(ST0_CU1, val); |
1178 | 1171 | ||
1179 | preempt_enable(); | 1172 | preempt_enable(); |
1180 | 1173 | ||
1181 | kvm_write_c0_guest_status(cop0, val); | 1174 | kvm_write_c0_guest_status(cop0, val); |
1182 | 1175 | ||
1183 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1176 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1184 | /* | 1177 | /* |
1185 | * If FPU present, we need CU1/FR bits to take | 1178 | * If FPU present, we need CU1/FR bits to take |
1186 | * effect fairly soon. | 1179 | * effect fairly soon. |
1187 | */ | 1180 | */ |
1188 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | 1181 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) |
1189 | kvm_mips_trans_mtc0(inst, opc, vcpu); | 1182 | kvm_mips_trans_mtc0(inst, opc, vcpu); |
1190 | #endif | 1183 | #endif |
1191 | } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { | 1184 | } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { |
1192 | unsigned int old_val, val, change, wrmask; | 1185 | unsigned int old_val, val, change, wrmask; |
1193 | 1186 | ||
1194 | old_val = kvm_read_c0_guest_config5(cop0); | 1187 | old_val = kvm_read_c0_guest_config5(cop0); |
1195 | val = vcpu->arch.gprs[rt]; | 1188 | val = vcpu->arch.gprs[rt]; |
1196 | 1189 | ||
1197 | /* Only a few bits are writable in Config5 */ | 1190 | /* Only a few bits are writable in Config5 */ |
1198 | wrmask = kvm_mips_config5_wrmask(vcpu); | 1191 | wrmask = kvm_mips_config5_wrmask(vcpu); |
1199 | change = (val ^ old_val) & wrmask; | 1192 | change = (val ^ old_val) & wrmask; |
1200 | val = old_val ^ change; | 1193 | val = old_val ^ change; |
1201 | 1194 | ||
1202 | 1195 | ||
1203 | /* Handle changes in FPU/MSA modes */ | 1196 | /* Handle changes in FPU/MSA modes */ |
1204 | preempt_disable(); | 1197 | preempt_disable(); |
1205 | 1198 | ||
1206 | /* | 1199 | /* |
1207 | * Propagate FRE changes immediately if the FPU | 1200 | * Propagate FRE changes immediately if the FPU |
1208 | * context is already loaded. | 1201 | * context is already loaded. |
1209 | */ | 1202 | */ |
1210 | if (change & MIPS_CONF5_FRE && | 1203 | if (change & MIPS_CONF5_FRE && |
1211 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) | 1204 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) |
1212 | change_c0_config5(MIPS_CONF5_FRE, val); | 1205 | change_c0_config5(MIPS_CONF5_FRE, val); |
1213 | 1206 | ||
1214 | /* | 1207 | /* |
1215 | * Propagate MSAEn changes immediately if the | 1208 | * Propagate MSAEn changes immediately if the |
1216 | * MSA context is already loaded. When disabling | 1209 | * MSA context is already loaded. When disabling |
1217 | * we leave the context loaded so it can be | 1210 | * we leave the context loaded so it can be |
1218 | * quickly enabled again in the near future. | 1211 | * quickly enabled again in the near future. |
1219 | */ | 1212 | */ |
1220 | if (change & MIPS_CONF5_MSAEN && | 1213 | if (change & MIPS_CONF5_MSAEN && |
1221 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) | 1214 | vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) |
1222 | change_c0_config5(MIPS_CONF5_MSAEN, | 1215 | change_c0_config5(MIPS_CONF5_MSAEN, |
1223 | val); | 1216 | val); |
1224 | 1217 | ||
1225 | preempt_enable(); | 1218 | preempt_enable(); |
1226 | 1219 | ||
1227 | kvm_write_c0_guest_config5(cop0, val); | 1220 | kvm_write_c0_guest_config5(cop0, val); |
1228 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { | 1221 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { |
1229 | uint32_t old_cause, new_cause; | 1222 | uint32_t old_cause, new_cause; |
1230 | 1223 | ||
1231 | old_cause = kvm_read_c0_guest_cause(cop0); | 1224 | old_cause = kvm_read_c0_guest_cause(cop0); |
1232 | new_cause = vcpu->arch.gprs[rt]; | 1225 | new_cause = vcpu->arch.gprs[rt]; |
1233 | /* Update R/W bits */ | 1226 | /* Update R/W bits */ |
1234 | kvm_change_c0_guest_cause(cop0, 0x08800300, | 1227 | kvm_change_c0_guest_cause(cop0, 0x08800300, |
1235 | new_cause); | 1228 | new_cause); |
1236 | /* DC bit enabling/disabling timer? */ | 1229 | /* DC bit enabling/disabling timer? */ |
1237 | if ((old_cause ^ new_cause) & CAUSEF_DC) { | 1230 | if ((old_cause ^ new_cause) & CAUSEF_DC) { |
1238 | if (new_cause & CAUSEF_DC) | 1231 | if (new_cause & CAUSEF_DC) |
1239 | kvm_mips_count_disable_cause(vcpu); | 1232 | kvm_mips_count_disable_cause(vcpu); |
1240 | else | 1233 | else |
1241 | kvm_mips_count_enable_cause(vcpu); | 1234 | kvm_mips_count_enable_cause(vcpu); |
1242 | } | 1235 | } |
1243 | } else { | 1236 | } else { |
1244 | cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; | 1237 | cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; |
1245 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1238 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1246 | kvm_mips_trans_mtc0(inst, opc, vcpu); | 1239 | kvm_mips_trans_mtc0(inst, opc, vcpu); |
1247 | #endif | 1240 | #endif |
1248 | } | 1241 | } |
1249 | 1242 | ||
1250 | kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc, | 1243 | kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc, |
1251 | rd, sel, cop0->reg[rd][sel]); | 1244 | rd, sel, cop0->reg[rd][sel]); |
1252 | break; | 1245 | break; |
1253 | 1246 | ||
1254 | case dmtc_op: | 1247 | case dmtc_op: |
1255 | kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", | 1248 | kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", |
1256 | vcpu->arch.pc, rt, rd, sel); | 1249 | vcpu->arch.pc, rt, rd, sel); |
1257 | er = EMULATE_FAIL; | 1250 | er = EMULATE_FAIL; |
1258 | break; | 1251 | break; |
1259 | 1252 | ||
1260 | case mfmcz_op: | 1253 | case mfmcz_op: |
1261 | #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS | 1254 | #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS |
1262 | cop0->stat[MIPS_CP0_STATUS][0]++; | 1255 | cop0->stat[MIPS_CP0_STATUS][0]++; |
1263 | #endif | 1256 | #endif |
1264 | if (rt != 0) { | 1257 | if (rt != 0) { |
1265 | vcpu->arch.gprs[rt] = | 1258 | vcpu->arch.gprs[rt] = |
1266 | kvm_read_c0_guest_status(cop0); | 1259 | kvm_read_c0_guest_status(cop0); |
1267 | } | 1260 | } |
1268 | /* EI */ | 1261 | /* EI */ |
1269 | if (inst & 0x20) { | 1262 | if (inst & 0x20) { |
1270 | kvm_debug("[%#lx] mfmcz_op: EI\n", | 1263 | kvm_debug("[%#lx] mfmcz_op: EI\n", |
1271 | vcpu->arch.pc); | 1264 | vcpu->arch.pc); |
1272 | kvm_set_c0_guest_status(cop0, ST0_IE); | 1265 | kvm_set_c0_guest_status(cop0, ST0_IE); |
1273 | } else { | 1266 | } else { |
1274 | kvm_debug("[%#lx] mfmcz_op: DI\n", | 1267 | kvm_debug("[%#lx] mfmcz_op: DI\n", |
1275 | vcpu->arch.pc); | 1268 | vcpu->arch.pc); |
1276 | kvm_clear_c0_guest_status(cop0, ST0_IE); | 1269 | kvm_clear_c0_guest_status(cop0, ST0_IE); |
1277 | } | 1270 | } |
1278 | 1271 | ||
1279 | break; | 1272 | break; |
1280 | 1273 | ||
1281 | case wrpgpr_op: | 1274 | case wrpgpr_op: |
1282 | { | 1275 | { |
1283 | uint32_t css = | 1276 | uint32_t css = |
1284 | cop0->reg[MIPS_CP0_STATUS][2] & 0xf; | 1277 | cop0->reg[MIPS_CP0_STATUS][2] & 0xf; |
1285 | uint32_t pss = | 1278 | uint32_t pss = |
1286 | (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; | 1279 | (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; |
1287 | /* | 1280 | /* |
1288 | * We don't support any shadow register sets, so | 1281 | * We don't support any shadow register sets, so |
1289 | * SRSCtl[PSS] == SRSCtl[CSS] = 0 | 1282 | * SRSCtl[PSS] == SRSCtl[CSS] = 0 |
1290 | */ | 1283 | */ |
1291 | if (css || pss) { | 1284 | if (css || pss) { |
1292 | er = EMULATE_FAIL; | 1285 | er = EMULATE_FAIL; |
1293 | break; | 1286 | break; |
1294 | } | 1287 | } |
1295 | kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, | 1288 | kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, |
1296 | vcpu->arch.gprs[rt]); | 1289 | vcpu->arch.gprs[rt]); |
1297 | vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; | 1290 | vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; |
1298 | } | 1291 | } |
1299 | break; | 1292 | break; |
1300 | default: | 1293 | default: |
1301 | kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", | 1294 | kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", |
1302 | vcpu->arch.pc, copz); | 1295 | vcpu->arch.pc, copz); |
1303 | er = EMULATE_FAIL; | 1296 | er = EMULATE_FAIL; |
1304 | break; | 1297 | break; |
1305 | } | 1298 | } |
1306 | } | 1299 | } |
1307 | 1300 | ||
1308 | done: | 1301 | done: |
1309 | /* Rollback PC only if emulation was unsuccessful */ | 1302 | /* Rollback PC only if emulation was unsuccessful */ |
1310 | if (er == EMULATE_FAIL) | 1303 | if (er == EMULATE_FAIL) |
1311 | vcpu->arch.pc = curr_pc; | 1304 | vcpu->arch.pc = curr_pc; |
1312 | 1305 | ||
1313 | dont_update_pc: | 1306 | dont_update_pc: |
1314 | /* | 1307 | /* |
1315 | * This is for special instructions whose emulation | 1308 | * This is for special instructions whose emulation |
1316 | * updates the PC, so do not overwrite the PC under | 1309 | * updates the PC, so do not overwrite the PC under |
1317 | * any circumstances | 1310 | * any circumstances |
1318 | */ | 1311 | */ |
1319 | 1312 | ||
1320 | return er; | 1313 | return er; |
1321 | } | 1314 | } |
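
A standalone sketch of the field decode at the top of this function, applied to a concrete encoding; 0x40825800 should be mtc0 $2, $11 (Compare):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t inst = 0x40825800;
        uint32_t copz = (inst >> 21) & 0x1f;  /* 0x04 = mtc_op */
        uint32_t rt   = (inst >> 16) & 0x1f;  /* GPR source: $2 */
        uint32_t rd   = (inst >> 11) & 0x1f;  /* CP0 register: 11 (Compare) */
        uint32_t sel  = inst & 0x7;           /* select: 0 */
        uint32_t co   = (inst >> 25) & 1;     /* 0: not a CO-space op */

        printf("copz=%u rt=%u rd=%u sel=%u co=%u\n",
               (unsigned)copz, (unsigned)rt, (unsigned)rd,
               (unsigned)sel, (unsigned)co);
        return 0;
}
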
1322 | 1315 | ||
1323 | enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause, | 1316 | enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause, |
1324 | struct kvm_run *run, | 1317 | struct kvm_run *run, |
1325 | struct kvm_vcpu *vcpu) | 1318 | struct kvm_vcpu *vcpu) |
1326 | { | 1319 | { |
1327 | enum emulation_result er = EMULATE_DO_MMIO; | 1320 | enum emulation_result er = EMULATE_DO_MMIO; |
1328 | int32_t op, base, rt, offset; | 1321 | int32_t op, base, rt, offset; |
1329 | uint32_t bytes; | 1322 | uint32_t bytes; |
1330 | void *data = run->mmio.data; | 1323 | void *data = run->mmio.data; |
1331 | unsigned long curr_pc; | 1324 | unsigned long curr_pc; |
1332 | 1325 | ||
1333 | /* | 1326 | /* |
1334 | * Update PC and hold onto current PC in case there is | 1327 | * Update PC and hold onto current PC in case there is |
1335 | * an error and we want to rollback the PC | 1328 | * an error and we want to rollback the PC |
1336 | */ | 1329 | */ |
1337 | curr_pc = vcpu->arch.pc; | 1330 | curr_pc = vcpu->arch.pc; |
1338 | er = update_pc(vcpu, cause); | 1331 | er = update_pc(vcpu, cause); |
1339 | if (er == EMULATE_FAIL) | 1332 | if (er == EMULATE_FAIL) |
1340 | return er; | 1333 | return er; |
1341 | 1334 | ||
1342 | rt = (inst >> 16) & 0x1f; | 1335 | rt = (inst >> 16) & 0x1f; |
1343 | base = (inst >> 21) & 0x1f; | 1336 | base = (inst >> 21) & 0x1f; |
1344 | offset = inst & 0xffff; | 1337 | offset = inst & 0xffff; |
1345 | op = (inst >> 26) & 0x3f; | 1338 | op = (inst >> 26) & 0x3f; |
1346 | 1339 | ||
1347 | switch (op) { | 1340 | switch (op) { |
1348 | case sb_op: | 1341 | case sb_op: |
1349 | bytes = 1; | 1342 | bytes = 1; |
1350 | if (bytes > sizeof(run->mmio.data)) { | 1343 | if (bytes > sizeof(run->mmio.data)) { |
1351 | kvm_err("%s: bad MMIO length: %d\n", __func__, | 1344 | kvm_err("%s: bad MMIO length: %d\n", __func__, |
1352 | run->mmio.len); | 1345 | run->mmio.len); |
1353 | } | 1346 | } |
1354 | run->mmio.phys_addr = | 1347 | run->mmio.phys_addr = |
1355 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | 1348 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. |
1356 | host_cp0_badvaddr); | 1349 | host_cp0_badvaddr); |
1357 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | 1350 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { |
1358 | er = EMULATE_FAIL; | 1351 | er = EMULATE_FAIL; |
1359 | break; | 1352 | break; |
1360 | } | 1353 | } |
1361 | run->mmio.len = bytes; | 1354 | run->mmio.len = bytes; |
1362 | run->mmio.is_write = 1; | 1355 | run->mmio.is_write = 1; |
1363 | vcpu->mmio_needed = 1; | 1356 | vcpu->mmio_needed = 1; |
1364 | vcpu->mmio_is_write = 1; | 1357 | vcpu->mmio_is_write = 1; |
1365 | *(u8 *) data = vcpu->arch.gprs[rt]; | 1358 | *(u8 *) data = vcpu->arch.gprs[rt]; |
1366 | kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n", | 1359 | kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n", |
1367 | vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], | 1360 | vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], |
1368 | *(uint8_t *) data); | 1361 | *(uint8_t *) data); |
1369 | 1362 | ||
1370 | break; | 1363 | break; |
1371 | 1364 | ||
1372 | case sw_op: | 1365 | case sw_op: |
1373 | bytes = 4; | 1366 | bytes = 4; |
1374 | if (bytes > sizeof(run->mmio.data)) { | 1367 | if (bytes > sizeof(run->mmio.data)) { |
1375 | kvm_err("%s: bad MMIO length: %d\n", __func__, | 1368 | kvm_err("%s: bad MMIO length: %d\n", __func__, |
1376 | run->mmio.len); | 1369 | run->mmio.len); |
1377 | } | 1370 | } |
1378 | run->mmio.phys_addr = | 1371 | run->mmio.phys_addr = |
1379 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | 1372 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. |
1380 | host_cp0_badvaddr); | 1373 | host_cp0_badvaddr); |
1381 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | 1374 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { |
1382 | er = EMULATE_FAIL; | 1375 | er = EMULATE_FAIL; |
1383 | break; | 1376 | break; |
1384 | } | 1377 | } |
1385 | 1378 | ||
1386 | run->mmio.len = bytes; | 1379 | run->mmio.len = bytes; |
1387 | run->mmio.is_write = 1; | 1380 | run->mmio.is_write = 1; |
1388 | vcpu->mmio_needed = 1; | 1381 | vcpu->mmio_needed = 1; |
1389 | vcpu->mmio_is_write = 1; | 1382 | vcpu->mmio_is_write = 1; |
1390 | *(uint32_t *) data = vcpu->arch.gprs[rt]; | 1383 | *(uint32_t *) data = vcpu->arch.gprs[rt]; |
1391 | 1384 | ||
1392 | kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", | 1385 | kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", |
1393 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | 1386 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1394 | vcpu->arch.gprs[rt], *(uint32_t *) data); | 1387 | vcpu->arch.gprs[rt], *(uint32_t *) data); |
1395 | break; | 1388 | break; |
1396 | 1389 | ||
1397 | case sh_op: | 1390 | case sh_op: |
1398 | bytes = 2; | 1391 | bytes = 2; |
1399 | if (bytes > sizeof(run->mmio.data)) { | 1392 | if (bytes > sizeof(run->mmio.data)) { |
1400 | kvm_err("%s: bad MMIO length: %d\n", __func__, | 1393 | kvm_err("%s: bad MMIO length: %d\n", __func__, |
1401 | run->mmio.len); | 1394 | run->mmio.len); |
1402 | } | 1395 | } |
1403 | run->mmio.phys_addr = | 1396 | run->mmio.phys_addr = |
1404 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | 1397 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. |
1405 | host_cp0_badvaddr); | 1398 | host_cp0_badvaddr); |
1406 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | 1399 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { |
1407 | er = EMULATE_FAIL; | 1400 | er = EMULATE_FAIL; |
1408 | break; | 1401 | break; |
1409 | } | 1402 | } |
1410 | 1403 | ||
1411 | run->mmio.len = bytes; | 1404 | run->mmio.len = bytes; |
1412 | run->mmio.is_write = 1; | 1405 | run->mmio.is_write = 1; |
1413 | vcpu->mmio_needed = 1; | 1406 | vcpu->mmio_needed = 1; |
1414 | vcpu->mmio_is_write = 1; | 1407 | vcpu->mmio_is_write = 1; |
1415 | *(uint16_t *) data = vcpu->arch.gprs[rt]; | 1408 | *(uint16_t *) data = vcpu->arch.gprs[rt]; |
1416 | 1409 | ||
1417 | kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", | 1410 | kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", |
1418 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | 1411 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, |
1419 | vcpu->arch.gprs[rt], *(uint32_t *) data); | 1412 | vcpu->arch.gprs[rt], *(uint32_t *) data); |
1420 | break; | 1413 | break; |
1421 | 1414 | ||
1422 | default: | 1415 | default: |
1423 | kvm_err("Store not yet supported"); | 1416 | kvm_err("Store not yet supported"); |
1424 | er = EMULATE_FAIL; | 1417 | er = EMULATE_FAIL; |
1425 | break; | 1418 | break; |
1426 | } | 1419 | } |
1427 | 1420 | ||
1428 | /* Rollback PC if emulation was unsuccessful */ | 1421 | /* Rollback PC if emulation was unsuccessful */ |
1429 | if (er == EMULATE_FAIL) | 1422 | if (er == EMULATE_FAIL) |
1430 | vcpu->arch.pc = curr_pc; | 1423 | vcpu->arch.pc = curr_pc; |
1431 | 1424 | ||
1432 | return er; | 1425 | return er; |
1433 | } | 1426 | } |
1434 | 1427 | ||
1435 | enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause, | 1428 | enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause, |
1436 | struct kvm_run *run, | 1429 | struct kvm_run *run, |
1437 | struct kvm_vcpu *vcpu) | 1430 | struct kvm_vcpu *vcpu) |
1438 | { | 1431 | { |
1439 | enum emulation_result er = EMULATE_DO_MMIO; | 1432 | enum emulation_result er = EMULATE_DO_MMIO; |
1440 | int32_t op, base, rt, offset; | 1433 | int32_t op, base, rt, offset; |
1441 | uint32_t bytes; | 1434 | uint32_t bytes; |
1442 | 1435 | ||
1443 | rt = (inst >> 16) & 0x1f; | 1436 | rt = (inst >> 16) & 0x1f; |
1444 | base = (inst >> 21) & 0x1f; | 1437 | base = (inst >> 21) & 0x1f; |
1445 | offset = inst & 0xffff; | 1438 | offset = inst & 0xffff; |
1446 | op = (inst >> 26) & 0x3f; | 1439 | op = (inst >> 26) & 0x3f; |
1447 | 1440 | ||
1448 | vcpu->arch.pending_load_cause = cause; | 1441 | vcpu->arch.pending_load_cause = cause; |
1449 | vcpu->arch.io_gpr = rt; | 1442 | vcpu->arch.io_gpr = rt; |
1450 | 1443 | ||
1451 | switch (op) { | 1444 | switch (op) { |
1452 | case lw_op: | 1445 | case lw_op: |
1453 | bytes = 4; | 1446 | bytes = 4; |
1454 | if (bytes > sizeof(run->mmio.data)) { | 1447 | if (bytes > sizeof(run->mmio.data)) { |
1455 | kvm_err("%s: bad MMIO length: %d\n", __func__, | 1448 | kvm_err("%s: bad MMIO length: %d\n", __func__, |
1456 | run->mmio.len); | 1449 | run->mmio.len); |
1457 | er = EMULATE_FAIL; | 1450 | er = EMULATE_FAIL; |
1458 | break; | 1451 | break; |
1459 | } | 1452 | } |
1460 | run->mmio.phys_addr = | 1453 | run->mmio.phys_addr = |
1461 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | 1454 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. |
1462 | host_cp0_badvaddr); | 1455 | host_cp0_badvaddr); |
1463 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | 1456 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { |
1464 | er = EMULATE_FAIL; | 1457 | er = EMULATE_FAIL; |
1465 | break; | 1458 | break; |
1466 | } | 1459 | } |
1467 | 1460 | ||
1468 | run->mmio.len = bytes; | 1461 | run->mmio.len = bytes; |
1469 | run->mmio.is_write = 0; | 1462 | run->mmio.is_write = 0; |
1470 | vcpu->mmio_needed = 1; | 1463 | vcpu->mmio_needed = 1; |
1471 | vcpu->mmio_is_write = 0; | 1464 | vcpu->mmio_is_write = 0; |
1472 | break; | 1465 | break; |
1473 | 1466 | ||
1474 | case lh_op: | 1467 | case lh_op: |
1475 | case lhu_op: | 1468 | case lhu_op: |
1476 | bytes = 2; | 1469 | bytes = 2; |
1477 | if (bytes > sizeof(run->mmio.data)) { | 1470 | if (bytes > sizeof(run->mmio.data)) { |
1478 | kvm_err("%s: bad MMIO length: %d\n", __func__, | 1471 | kvm_err("%s: bad MMIO length: %d\n", __func__, |
1479 | run->mmio.len); | 1472 | run->mmio.len); |
1480 | er = EMULATE_FAIL; | 1473 | er = EMULATE_FAIL; |
1481 | break; | 1474 | break; |
1482 | } | 1475 | } |
1483 | run->mmio.phys_addr = | 1476 | run->mmio.phys_addr = |
1484 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | 1477 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. |
1485 | host_cp0_badvaddr); | 1478 | host_cp0_badvaddr); |
1486 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | 1479 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { |
1487 | er = EMULATE_FAIL; | 1480 | er = EMULATE_FAIL; |
1488 | break; | 1481 | break; |
1489 | } | 1482 | } |
1490 | 1483 | ||
1491 | run->mmio.len = bytes; | 1484 | run->mmio.len = bytes; |
1492 | run->mmio.is_write = 0; | 1485 | run->mmio.is_write = 0; |
1493 | vcpu->mmio_needed = 1; | 1486 | vcpu->mmio_needed = 1; |
1494 | vcpu->mmio_is_write = 0; | 1487 | vcpu->mmio_is_write = 0; |
1495 | 1488 | ||
1496 | if (op == lh_op) | 1489 | if (op == lh_op) |
1497 | vcpu->mmio_needed = 2; | 1490 | vcpu->mmio_needed = 2; |
1498 | else | 1491 | else |
1499 | vcpu->mmio_needed = 1; | 1492 | vcpu->mmio_needed = 1; |
1500 | 1493 | ||
1501 | break; | 1494 | break; |
1502 | 1495 | ||
1503 | case lbu_op: | 1496 | case lbu_op: |
1504 | case lb_op: | 1497 | case lb_op: |
1505 | bytes = 1; | 1498 | bytes = 1; |
1506 | if (bytes > sizeof(run->mmio.data)) { | 1499 | if (bytes > sizeof(run->mmio.data)) { |
1507 | kvm_err("%s: bad MMIO length: %d\n", __func__, | 1500 | kvm_err("%s: bad MMIO length: %d\n", __func__, |
1508 | run->mmio.len); | 1501 | run->mmio.len); |
1509 | er = EMULATE_FAIL; | 1502 | er = EMULATE_FAIL; |
1510 | break; | 1503 | break; |
1511 | } | 1504 | } |
1512 | run->mmio.phys_addr = | 1505 | run->mmio.phys_addr = |
1513 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. | 1506 | kvm_mips_callbacks->gva_to_gpa(vcpu->arch. |
1514 | host_cp0_badvaddr); | 1507 | host_cp0_badvaddr); |
1515 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { | 1508 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) { |
1516 | er = EMULATE_FAIL; | 1509 | er = EMULATE_FAIL; |
1517 | break; | 1510 | break; |
1518 | } | 1511 | } |
1519 | 1512 | ||
1520 | run->mmio.len = bytes; | 1513 | run->mmio.len = bytes; |
1521 | run->mmio.is_write = 0; | 1514 | run->mmio.is_write = 0; |
1522 | vcpu->mmio_is_write = 0; | 1515 | vcpu->mmio_is_write = 0; |
1523 | 1516 | ||
1524 | if (op == lb_op) | 1517 | if (op == lb_op) |
1525 | vcpu->mmio_needed = 2; | 1518 | vcpu->mmio_needed = 2; |
1526 | else | 1519 | else |
1527 | vcpu->mmio_needed = 1; | 1520 | vcpu->mmio_needed = 1; |
1528 | 1521 | ||
1529 | break; | 1522 | break; |
1530 | 1523 | ||
1531 | default: | 1524 | default: |
1532 | kvm_err("Load not yet supported"); | 1525 | kvm_err("Load not yet supported"); |
1533 | er = EMULATE_FAIL; | 1526 | er = EMULATE_FAIL; |
1534 | break; | 1527 | break; |
1535 | } | 1528 | } |
1536 | 1529 | ||
1537 | return er; | 1530 | return er; |
1538 | } | 1531 | } |
1539 | 1532 | ||
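In the load cases above, vcpu->mmio_needed doubles as a sign-extension flag for kvm_mips_complete_mmio_load(): 2 for the signed variants (lb, lh), 1 for everything else. A minimal standalone sketch of how the completion side can apply that convention (toy helper, little-endian host assumed; not the kernel's implementation):

    #include <stdint.h>
    #include <string.h>

    /* mmio_needed convention from the switch above:
     * 2 => sign-extend (lb/lh), 1 => zero-extend (lbu/lhu/lw). */
    static uint64_t apply_mmio_extension(const uint8_t *data, unsigned int len,
                                         int mmio_needed)
    {
        uint64_t v = 0;

        memcpy(&v, data, len);              /* little-endian host assumed */
        if (mmio_needed == 2) {             /* sign-extend from bit len*8-1 */
            uint64_t sign = 1ull << (len * 8 - 1);
            v = (v ^ sign) - sign;
        }
        return v;
    }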
1540 | int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) | 1533 | int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) |
1541 | { | 1534 | { |
1542 | unsigned long offset = (va & ~PAGE_MASK); | 1535 | unsigned long offset = (va & ~PAGE_MASK); |
1543 | struct kvm *kvm = vcpu->kvm; | 1536 | struct kvm *kvm = vcpu->kvm; |
1544 | unsigned long pa; | 1537 | unsigned long pa; |
1545 | gfn_t gfn; | 1538 | gfn_t gfn; |
1546 | pfn_t pfn; | 1539 | pfn_t pfn; |
1547 | 1540 | ||
1548 | gfn = va >> PAGE_SHIFT; | 1541 | gfn = va >> PAGE_SHIFT; |
1549 | 1542 | ||
1550 | if (gfn >= kvm->arch.guest_pmap_npages) { | 1543 | if (gfn >= kvm->arch.guest_pmap_npages) { |
1551 | kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn); | 1544 | kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn); |
1552 | kvm_mips_dump_host_tlbs(); | 1545 | kvm_mips_dump_host_tlbs(); |
1553 | kvm_arch_vcpu_dump_regs(vcpu); | 1546 | kvm_arch_vcpu_dump_regs(vcpu); |
1554 | return -1; | 1547 | return -1; |
1555 | } | 1548 | } |
1556 | pfn = kvm->arch.guest_pmap[gfn]; | 1549 | pfn = kvm->arch.guest_pmap[gfn]; |
1557 | pa = (pfn << PAGE_SHIFT) | offset; | 1550 | pa = (pfn << PAGE_SHIFT) | offset; |
1558 | 1551 | ||
1559 | kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va, | 1552 | kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va, |
1560 | CKSEG0ADDR(pa)); | 1553 | CKSEG0ADDR(pa)); |
1561 | 1554 | ||
1562 | local_flush_icache_range(CKSEG0ADDR(pa), 32); | 1555 | local_flush_icache_range(CKSEG0ADDR(pa), 32); |
1563 | return 0; | 1556 | return 0; |
1564 | } | 1557 | } |
1565 | 1558 | ||
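The address arithmetic in kvm_mips_sync_icache() is plain page math: the guest frame number indexes guest_pmap, and the physical address splices the mapped page frame back together with the original page offset. The same computation in isolation (4K pages assumed; guest_pmap modelled as a flat array):

    #define TOY_PAGE_SHIFT 12                /* assumed 4K pages */

    /* Toy version of the gfn -> pa translation above; returns -1ul for an
     * out-of-range gfn, mirroring the error path in the function. */
    static unsigned long toy_gva_to_pa(unsigned long va,
                                       const unsigned long *guest_pmap,
                                       unsigned long npages)
    {
        unsigned long offset = va & ((1ul << TOY_PAGE_SHIFT) - 1);
        unsigned long gfn = va >> TOY_PAGE_SHIFT;

        if (gfn >= npages)
            return -1ul;
        return (guest_pmap[gfn] << TOY_PAGE_SHIFT) | offset;
    }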
1566 | #define MIPS_CACHE_OP_INDEX_INV 0x0 | 1559 | #define MIPS_CACHE_OP_INDEX_INV 0x0 |
1567 | #define MIPS_CACHE_OP_INDEX_LD_TAG 0x1 | 1560 | #define MIPS_CACHE_OP_INDEX_LD_TAG 0x1 |
1568 | #define MIPS_CACHE_OP_INDEX_ST_TAG 0x2 | 1561 | #define MIPS_CACHE_OP_INDEX_ST_TAG 0x2 |
1569 | #define MIPS_CACHE_OP_IMP 0x3 | 1562 | #define MIPS_CACHE_OP_IMP 0x3 |
1570 | #define MIPS_CACHE_OP_HIT_INV 0x4 | 1563 | #define MIPS_CACHE_OP_HIT_INV 0x4 |
1571 | #define MIPS_CACHE_OP_FILL_WB_INV 0x5 | 1564 | #define MIPS_CACHE_OP_FILL_WB_INV 0x5 |
1572 | #define MIPS_CACHE_OP_HIT_HB 0x6 | 1565 | #define MIPS_CACHE_OP_HIT_HB 0x6 |
1573 | #define MIPS_CACHE_OP_FETCH_LOCK 0x7 | 1566 | #define MIPS_CACHE_OP_FETCH_LOCK 0x7 |
1574 | 1567 | ||
1575 | #define MIPS_CACHE_ICACHE 0x0 | 1568 | #define MIPS_CACHE_ICACHE 0x0 |
1576 | #define MIPS_CACHE_DCACHE 0x1 | 1569 | #define MIPS_CACHE_DCACHE 0x1 |
1577 | #define MIPS_CACHE_SEC 0x3 | 1570 | #define MIPS_CACHE_SEC 0x3 |
1578 | 1571 | ||
1579 | enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, | 1572 | enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, |
1580 | uint32_t cause, | 1573 | uint32_t cause, |
1581 | struct kvm_run *run, | 1574 | struct kvm_run *run, |
1582 | struct kvm_vcpu *vcpu) | 1575 | struct kvm_vcpu *vcpu) |
1583 | { | 1576 | { |
1584 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1577 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1585 | enum emulation_result er = EMULATE_DONE; | 1578 | enum emulation_result er = EMULATE_DONE; |
1586 | int32_t offset, cache, op_inst, op, base; | 1579 | int32_t offset, cache, op_inst, op, base; |
1587 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1580 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1588 | unsigned long va; | 1581 | unsigned long va; |
1589 | unsigned long curr_pc; | 1582 | unsigned long curr_pc; |
1590 | 1583 | ||
1591 | /* | 1584 | /* |
1592 | * Update PC and hold onto current PC in case there is | 1585 | * Update PC and hold onto current PC in case there is |
1593 | * an error and we want to rollback the PC | 1586 | * an error and we want to rollback the PC |
1594 | */ | 1587 | */ |
1595 | curr_pc = vcpu->arch.pc; | 1588 | curr_pc = vcpu->arch.pc; |
1596 | er = update_pc(vcpu, cause); | 1589 | er = update_pc(vcpu, cause); |
1597 | if (er == EMULATE_FAIL) | 1590 | if (er == EMULATE_FAIL) |
1598 | return er; | 1591 | return er; |
1599 | 1592 | ||
1600 | base = (inst >> 21) & 0x1f; | 1593 | base = (inst >> 21) & 0x1f; |
1601 | op_inst = (inst >> 16) & 0x1f; | 1594 | op_inst = (inst >> 16) & 0x1f; |
1602 | offset = (int16_t)inst; | 1595 | offset = (int16_t)inst; |
1603 | cache = (inst >> 16) & 0x3; | 1596 | cache = (inst >> 16) & 0x3; |
1604 | op = (inst >> 18) & 0x7; | 1597 | op = (inst >> 18) & 0x7; |
1605 | 1598 | ||
1606 | va = arch->gprs[base] + offset; | 1599 | va = arch->gprs[base] + offset; |
1607 | 1600 | ||
1608 | kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1601 | kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1609 | cache, op, base, arch->gprs[base], offset); | 1602 | cache, op, base, arch->gprs[base], offset); |
1610 | 1603 | ||
1611 | /* | 1604 | /* |
1612 | * Treat INDEX_INV as a nop, basically issued by Linux on startup to | 1605 | * Treat INDEX_INV as a nop, basically issued by Linux on startup to |
1613 | * invalidate the caches entirely by stepping through all the | 1606 | * invalidate the caches entirely by stepping through all the |
1614 | * ways/indexes | 1607 | * ways/indexes |
1615 | */ | 1608 | */ |
1616 | if (op == MIPS_CACHE_OP_INDEX_INV) { | 1609 | if (op == MIPS_CACHE_OP_INDEX_INV) { |
1617 | kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1610 | kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1618 | vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, | 1611 | vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, |
1619 | arch->gprs[base], offset); | 1612 | arch->gprs[base], offset); |
1620 | 1613 | ||
1621 | if (cache == MIPS_CACHE_DCACHE) | 1614 | if (cache == MIPS_CACHE_DCACHE) |
1622 | r4k_blast_dcache(); | 1615 | r4k_blast_dcache(); |
1623 | else if (cache == MIPS_CACHE_ICACHE) | 1616 | else if (cache == MIPS_CACHE_ICACHE) |
1624 | r4k_blast_icache(); | 1617 | r4k_blast_icache(); |
1625 | else { | 1618 | else { |
1626 | kvm_err("%s: unsupported CACHE INDEX operation\n", | 1619 | kvm_err("%s: unsupported CACHE INDEX operation\n", |
1627 | __func__); | 1620 | __func__); |
1628 | return EMULATE_FAIL; | 1621 | return EMULATE_FAIL; |
1629 | } | 1622 | } |
1630 | 1623 | ||
1631 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1624 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1632 | kvm_mips_trans_cache_index(inst, opc, vcpu); | 1625 | kvm_mips_trans_cache_index(inst, opc, vcpu); |
1633 | #endif | 1626 | #endif |
1634 | goto done; | 1627 | goto done; |
1635 | } | 1628 | } |
1636 | 1629 | ||
1637 | preempt_disable(); | 1630 | preempt_disable(); |
1638 | if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { | 1631 | if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { |
1639 | if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) | 1632 | if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) |
1640 | kvm_mips_handle_kseg0_tlb_fault(va, vcpu); | 1633 | kvm_mips_handle_kseg0_tlb_fault(va, vcpu); |
1641 | } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || | 1634 | } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || |
1642 | KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { | 1635 | KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { |
1643 | int index; | 1636 | int index; |
1644 | 1637 | ||
1645 | /* If an entry already exists then skip */ | 1638 | /* If an entry already exists then skip */ |
1646 | if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) | 1639 | if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) |
1647 | goto skip_fault; | 1640 | goto skip_fault; |
1648 | 1641 | ||
1649 | /* | 1642 | /* |
1650 | * If address not in the guest TLB, then give the guest a fault, | 1643 | * If address not in the guest TLB, then give the guest a fault, |
1651 | * the resulting handler will do the right thing | 1644 | * the resulting handler will do the right thing |
1652 | */ | 1645 | */ |
1653 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | | 1646 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | |
1654 | (kvm_read_c0_guest_entryhi | 1647 | (kvm_read_c0_guest_entryhi |
1655 | (cop0) & ASID_MASK)); | 1648 | (cop0) & ASID_MASK)); |
1656 | 1649 | ||
1657 | if (index < 0) { | 1650 | if (index < 0) { |
1658 | vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); | 1651 | vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); |
1659 | vcpu->arch.host_cp0_badvaddr = va; | 1652 | vcpu->arch.host_cp0_badvaddr = va; |
1660 | er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, | 1653 | er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, |
1661 | vcpu); | 1654 | vcpu); |
1662 | preempt_enable(); | 1655 | preempt_enable(); |
1663 | goto dont_update_pc; | 1656 | goto dont_update_pc; |
1664 | } else { | 1657 | } else { |
1665 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | 1658 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; |
1666 | /* | 1659 | /* |
1667 | * Check if the entry is valid, if not then setup a TLB | 1660 | * Check if the entry is valid, if not then setup a TLB |
1668 | * invalid exception to the guest | 1661 | * invalid exception to the guest |
1669 | */ | 1662 | */ |
1670 | if (!TLB_IS_VALID(*tlb, va)) { | 1663 | if (!TLB_IS_VALID(*tlb, va)) { |
1671 | er = kvm_mips_emulate_tlbinv_ld(cause, NULL, | 1664 | er = kvm_mips_emulate_tlbinv_ld(cause, NULL, |
1672 | run, vcpu); | 1665 | run, vcpu); |
1673 | preempt_enable(); | 1666 | preempt_enable(); |
1674 | goto dont_update_pc; | 1667 | goto dont_update_pc; |
1675 | } else { | 1668 | } else { |
1676 | /* | 1669 | /* |
1677 | * We fault an entry from the guest tlb to the | 1670 | * We fault an entry from the guest tlb to the |
1678 | * shadow host TLB | 1671 | * shadow host TLB |
1679 | */ | 1672 | */ |
1680 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, | 1673 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, |
1681 | NULL, | 1674 | NULL, |
1682 | NULL); | 1675 | NULL); |
1683 | } | 1676 | } |
1684 | } | 1677 | } |
1685 | } else { | 1678 | } else { |
1686 | kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1679 | kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1687 | cache, op, base, arch->gprs[base], offset); | 1680 | cache, op, base, arch->gprs[base], offset); |
1688 | er = EMULATE_FAIL; | 1681 | er = EMULATE_FAIL; |
1689 | preempt_enable(); | 1682 | preempt_enable(); |
1690 | goto dont_update_pc; | 1683 | goto dont_update_pc; |
1691 | 1684 | ||
1692 | } | 1685 | } |
1693 | 1686 | ||
1694 | skip_fault: | 1687 | skip_fault: |
1695 | /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ | 1688 | /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ |
1696 | if (cache == MIPS_CACHE_DCACHE | 1689 | if (cache == MIPS_CACHE_DCACHE |
1697 | && (op == MIPS_CACHE_OP_FILL_WB_INV | 1690 | && (op == MIPS_CACHE_OP_FILL_WB_INV |
1698 | || op == MIPS_CACHE_OP_HIT_INV)) { | 1691 | || op == MIPS_CACHE_OP_HIT_INV)) { |
1699 | flush_dcache_line(va); | 1692 | flush_dcache_line(va); |
1700 | 1693 | ||
1701 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1694 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1702 | /* | 1695 | /* |
1703 | * Replace the CACHE instruction, with a SYNCI, not the same, | 1696 | * Replace the CACHE instruction, with a SYNCI, not the same, |
1704 | * but avoids a trap | 1697 | * but avoids a trap |
1705 | */ | 1698 | */ |
1706 | kvm_mips_trans_cache_va(inst, opc, vcpu); | 1699 | kvm_mips_trans_cache_va(inst, opc, vcpu); |
1707 | #endif | 1700 | #endif |
1708 | } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { | 1701 | } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { |
1709 | flush_dcache_line(va); | 1702 | flush_dcache_line(va); |
1710 | flush_icache_line(va); | 1703 | flush_icache_line(va); |
1711 | 1704 | ||
1712 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1705 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1713 | /* Replace the CACHE instruction, with a SYNCI */ | 1706 | /* Replace the CACHE instruction, with a SYNCI */ |
1714 | kvm_mips_trans_cache_va(inst, opc, vcpu); | 1707 | kvm_mips_trans_cache_va(inst, opc, vcpu); |
1715 | #endif | 1708 | #endif |
1716 | } else { | 1709 | } else { |
1717 | kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1710 | kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1718 | cache, op, base, arch->gprs[base], offset); | 1711 | cache, op, base, arch->gprs[base], offset); |
1719 | er = EMULATE_FAIL; | 1712 | er = EMULATE_FAIL; |
1720 | preempt_enable(); | 1713 | preempt_enable(); |
1721 | goto dont_update_pc; | 1714 | goto dont_update_pc; |
1722 | } | 1715 | } |
1723 | 1716 | ||
1724 | preempt_enable(); | 1717 | preempt_enable(); |
1725 | 1718 | ||
1726 | dont_update_pc: | 1719 | dont_update_pc: |
1727 | /* Rollback PC */ | 1720 | /* Rollback PC */ |
1728 | vcpu->arch.pc = curr_pc; | 1721 | vcpu->arch.pc = curr_pc; |
1729 | done: | 1722 | done: |
1730 | return er; | 1723 | return er; |
1731 | } | 1724 | } |
1732 | 1725 | ||
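The field extraction at the top of kvm_mips_emulate_cache() is worth seeing in isolation: cache and op are simply the low two and high three bits of the 5-bit op_inst field. A standalone decode using the same shifts (the example encoding is an assumption chosen to hit the Hit Writeback Invalidate D path):

    #include <stdint.h>
    #include <stdio.h>

    static void decode_cache(uint32_t inst)
    {
        int base    = (inst >> 21) & 0x1f;  /* GPR supplying the base address */
        int16_t off = (int16_t)inst;        /* sign-extended 16-bit offset */
        int cache   = (inst >> 16) & 0x3;   /* 0 = I-cache, 1 = D-cache, 3 = secondary */
        int op      = (inst >> 18) & 0x7;   /* e.g. 0x5 = MIPS_CACHE_OP_FILL_WB_INV */

        printf("cache=%d op=%d base=r%d offset=%d\n", cache, op, base, off);
    }

    int main(void)
    {
        /* CACHE 0x15, 0(r4): op_inst 0b10101 -> D-cache, op 5 */
        decode_cache((0x2fu << 26) | (4u << 21) | (0x15u << 16));
        return 0;
    }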
1733 | enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, | 1726 | enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, |
1734 | struct kvm_run *run, | 1727 | struct kvm_run *run, |
1735 | struct kvm_vcpu *vcpu) | 1728 | struct kvm_vcpu *vcpu) |
1736 | { | 1729 | { |
1737 | enum emulation_result er = EMULATE_DONE; | 1730 | enum emulation_result er = EMULATE_DONE; |
1738 | uint32_t inst; | 1731 | uint32_t inst; |
1739 | 1732 | ||
1740 | /* Fetch the instruction. */ | 1733 | /* Fetch the instruction. */ |
1741 | if (cause & CAUSEF_BD) | 1734 | if (cause & CAUSEF_BD) |
1742 | opc += 1; | 1735 | opc += 1; |
1743 | 1736 | ||
1744 | inst = kvm_get_inst(opc, vcpu); | 1737 | inst = kvm_get_inst(opc, vcpu); |
1745 | 1738 | ||
1746 | switch (((union mips_instruction)inst).r_format.opcode) { | 1739 | switch (((union mips_instruction)inst).r_format.opcode) { |
1747 | case cop0_op: | 1740 | case cop0_op: |
1748 | er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu); | 1741 | er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu); |
1749 | break; | 1742 | break; |
1750 | case sb_op: | 1743 | case sb_op: |
1751 | case sh_op: | 1744 | case sh_op: |
1752 | case sw_op: | 1745 | case sw_op: |
1753 | er = kvm_mips_emulate_store(inst, cause, run, vcpu); | 1746 | er = kvm_mips_emulate_store(inst, cause, run, vcpu); |
1754 | break; | 1747 | break; |
1755 | case lb_op: | 1748 | case lb_op: |
1756 | case lbu_op: | 1749 | case lbu_op: |
1757 | case lhu_op: | 1750 | case lhu_op: |
1758 | case lh_op: | 1751 | case lh_op: |
1759 | case lw_op: | 1752 | case lw_op: |
1760 | er = kvm_mips_emulate_load(inst, cause, run, vcpu); | 1753 | er = kvm_mips_emulate_load(inst, cause, run, vcpu); |
1761 | break; | 1754 | break; |
1762 | 1755 | ||
1763 | case cache_op: | 1756 | case cache_op: |
1764 | ++vcpu->stat.cache_exits; | 1757 | ++vcpu->stat.cache_exits; |
1765 | trace_kvm_exit(vcpu, CACHE_EXITS); | 1758 | trace_kvm_exit(vcpu, CACHE_EXITS); |
1766 | er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu); | 1759 | er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu); |
1767 | break; | 1760 | break; |
1768 | 1761 | ||
1769 | default: | 1762 | default: |
1770 | kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, | 1763 | kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, |
1771 | inst); | 1764 | inst); |
1772 | kvm_arch_vcpu_dump_regs(vcpu); | 1765 | kvm_arch_vcpu_dump_regs(vcpu); |
1773 | er = EMULATE_FAIL; | 1766 | er = EMULATE_FAIL; |
1774 | break; | 1767 | break; |
1775 | } | 1768 | } |
1776 | 1769 | ||
1777 | return er; | 1770 | return er; |
1778 | } | 1771 | } |
1779 | 1772 | ||
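kvm_mips_emulate_inst() dispatches on the major opcode, i.e. the top six bits that r_format.opcode names. The same dispatch written out by hand (opcode values per the MIPS32 encoding tables, not taken from this file):

    #include <stdint.h>

    enum { OP_COP0 = 0x10, OP_LB = 0x20, OP_LH = 0x21, OP_LW = 0x23,
           OP_LBU = 0x24, OP_LHU = 0x25, OP_SB = 0x28, OP_SH = 0x29,
           OP_SW = 0x2b, OP_CACHE = 0x2f };   /* MIPS32 major opcodes */

    /* Classify an instruction the way the switch above does. */
    static int classify(uint32_t inst)
    {
        switch (inst >> 26) {                 /* r_format.opcode: bits 31..26 */
        case OP_COP0:  return 0;              /* CP0 emulation */
        case OP_SB: case OP_SH: case OP_SW:
                       return 1;              /* store emulation */
        case OP_LB: case OP_LBU: case OP_LH: case OP_LHU: case OP_LW:
                       return 2;              /* load emulation */
        case OP_CACHE: return 3;              /* cache emulation */
        default:       return -1;             /* unsupported */
        }
    }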
1780 | enum emulation_result kvm_mips_emulate_syscall(unsigned long cause, | 1773 | enum emulation_result kvm_mips_emulate_syscall(unsigned long cause, |
1781 | uint32_t *opc, | 1774 | uint32_t *opc, |
1782 | struct kvm_run *run, | 1775 | struct kvm_run *run, |
1783 | struct kvm_vcpu *vcpu) | 1776 | struct kvm_vcpu *vcpu) |
1784 | { | 1777 | { |
1785 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1778 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1786 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1779 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1787 | enum emulation_result er = EMULATE_DONE; | 1780 | enum emulation_result er = EMULATE_DONE; |
1788 | 1781 | ||
1789 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1782 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1790 | /* save old pc */ | 1783 | /* save old pc */ |
1791 | kvm_write_c0_guest_epc(cop0, arch->pc); | 1784 | kvm_write_c0_guest_epc(cop0, arch->pc); |
1792 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 1785 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
1793 | 1786 | ||
1794 | if (cause & CAUSEF_BD) | 1787 | if (cause & CAUSEF_BD) |
1795 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 1788 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
1796 | else | 1789 | else |
1797 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 1790 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
1798 | 1791 | ||
1799 | kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); | 1792 | kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); |
1800 | 1793 | ||
1801 | kvm_change_c0_guest_cause(cop0, (0xff), | 1794 | kvm_change_c0_guest_cause(cop0, (0xff), |
1802 | (T_SYSCALL << CAUSEB_EXCCODE)); | 1795 | (T_SYSCALL << CAUSEB_EXCCODE)); |
1803 | 1796 | ||
1804 | /* Set PC to the exception entry point */ | 1797 | /* Set PC to the exception entry point */ |
1805 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1798 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
1806 | 1799 | ||
1807 | } else { | 1800 | } else { |
1808 | kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); | 1801 | kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); |
1809 | er = EMULATE_FAIL; | 1802 | er = EMULATE_FAIL; |
1810 | } | 1803 | } |
1811 | 1804 | ||
1812 | return er; | 1805 | return er; |
1813 | } | 1806 | } |
1814 | 1807 | ||
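kvm_mips_emulate_syscall() and the deliver-exception helpers that follow all repeat one recipe: if Status.EXL is clear, latch EPC and the branch-delay bit and set EXL; then rewrite the ExcCode field of Cause and point the PC at the general exception vector. Condensed into a single hypothetical helper (toy CP0 struct; GUEST_KSEG0 is an assumed stand-in for KVM_GUEST_KSEG0, and the kernel operates on the guest's shadow CP0 instead):

    #include <stdbool.h>
    #include <stdint.h>

    #define ST0_EXL        0x00000002u
    #define CAUSEF_BD      0x80000000u
    #define CAUSEB_EXCCODE 2
    #define GUEST_KSEG0    0x40000000ul   /* assumed value of KVM_GUEST_KSEG0 */

    struct toy_cp0 { uint32_t status, cause; unsigned long epc; };

    /* Returns the new guest PC after queuing exception `exccode`. */
    static unsigned long deliver(struct toy_cp0 *cp0, unsigned long pc,
                                 bool in_delay_slot, uint32_t exccode)
    {
        if (!(cp0->status & ST0_EXL)) {
            cp0->epc = pc;                          /* save old pc */
            cp0->status |= ST0_EXL;
            if (in_delay_slot)
                cp0->cause |= CAUSEF_BD;
            else
                cp0->cause &= ~CAUSEF_BD;
        }
        /* kvm_change_c0_guest_cause(cop0, 0xff, exccode << CAUSEB_EXCCODE) */
        cp0->cause = (cp0->cause & ~0xffu) |
                     ((exccode << CAUSEB_EXCCODE) & 0xffu);
        return GUEST_KSEG0 + 0x180;                 /* general exception vector */
    }

The TLB miss variants below differ only in vectoring to offset 0x0 when EXL was clear (the refill vector) and in also loading BadVAddr and EntryHi for the guest.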
1815 | enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause, | 1808 | enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause, |
1816 | uint32_t *opc, | 1809 | uint32_t *opc, |
1817 | struct kvm_run *run, | 1810 | struct kvm_run *run, |
1818 | struct kvm_vcpu *vcpu) | 1811 | struct kvm_vcpu *vcpu) |
1819 | { | 1812 | { |
1820 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1813 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1821 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1814 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1822 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1815 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1823 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1816 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1824 | 1817 | ||
1825 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1818 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1826 | /* save old pc */ | 1819 | /* save old pc */ |
1827 | kvm_write_c0_guest_epc(cop0, arch->pc); | 1820 | kvm_write_c0_guest_epc(cop0, arch->pc); |
1828 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 1821 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
1829 | 1822 | ||
1830 | if (cause & CAUSEF_BD) | 1823 | if (cause & CAUSEF_BD) |
1831 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 1824 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
1832 | else | 1825 | else |
1833 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 1826 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
1834 | 1827 | ||
1835 | kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", | 1828 | kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", |
1836 | arch->pc); | 1829 | arch->pc); |
1837 | 1830 | ||
1838 | /* set pc to the exception entry point */ | 1831 | /* set pc to the exception entry point */ |
1839 | arch->pc = KVM_GUEST_KSEG0 + 0x0; | 1832 | arch->pc = KVM_GUEST_KSEG0 + 0x0; |
1840 | 1833 | ||
1841 | } else { | 1834 | } else { |
1842 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", | 1835 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", |
1843 | arch->pc); | 1836 | arch->pc); |
1844 | 1837 | ||
1845 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1838 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
1846 | } | 1839 | } |
1847 | 1840 | ||
1848 | kvm_change_c0_guest_cause(cop0, (0xff), | 1841 | kvm_change_c0_guest_cause(cop0, (0xff), |
1849 | (T_TLB_LD_MISS << CAUSEB_EXCCODE)); | 1842 | (T_TLB_LD_MISS << CAUSEB_EXCCODE)); |
1850 | 1843 | ||
1851 | /* setup badvaddr, context and entryhi registers for the guest */ | 1844 | /* setup badvaddr, context and entryhi registers for the guest */ |
1852 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 1845 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
1853 | /* XXXKYMA: is the context register used by linux??? */ | 1846 | /* XXXKYMA: is the context register used by linux??? */ |
1854 | kvm_write_c0_guest_entryhi(cop0, entryhi); | 1847 | kvm_write_c0_guest_entryhi(cop0, entryhi); |
1855 | /* Blow away the shadow host TLBs */ | 1848 | /* Blow away the shadow host TLBs */ |
1856 | kvm_mips_flush_host_tlb(1); | 1849 | kvm_mips_flush_host_tlb(1); |
1857 | 1850 | ||
1858 | return EMULATE_DONE; | 1851 | return EMULATE_DONE; |
1859 | } | 1852 | } |
1860 | 1853 | ||
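Each TLB exception helper computes entryhi the same way: the VPN2 of the faulting address paired with the guest's current ASID, which is exactly what the guest's own refill handler expects to find in CP0_EntryHi. As a sketch (mask values are assumptions for a 4K-page MIPS32 configuration with 8-bit ASIDs):

    #define TOY_VPN2_MASK 0xffffe000ul   /* bits above the 8K even/odd page pair */
    #define TOY_ASID_MASK 0x000000fful   /* 8-bit ASID */

    static unsigned long fault_entryhi(unsigned long badvaddr,
                                       unsigned long guest_entryhi)
    {
        return (badvaddr & TOY_VPN2_MASK) | (guest_entryhi & TOY_ASID_MASK);
    }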
1861 | enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause, | 1854 | enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause, |
1862 | uint32_t *opc, | 1855 | uint32_t *opc, |
1863 | struct kvm_run *run, | 1856 | struct kvm_run *run, |
1864 | struct kvm_vcpu *vcpu) | 1857 | struct kvm_vcpu *vcpu) |
1865 | { | 1858 | { |
1866 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1859 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1867 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1860 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1868 | unsigned long entryhi = | 1861 | unsigned long entryhi = |
1869 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1862 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1870 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1863 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1871 | 1864 | ||
1872 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1865 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1873 | /* save old pc */ | 1866 | /* save old pc */ |
1874 | kvm_write_c0_guest_epc(cop0, arch->pc); | 1867 | kvm_write_c0_guest_epc(cop0, arch->pc); |
1875 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 1868 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
1876 | 1869 | ||
1877 | if (cause & CAUSEF_BD) | 1870 | if (cause & CAUSEF_BD) |
1878 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 1871 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
1879 | else | 1872 | else |
1880 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 1873 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
1881 | 1874 | ||
1882 | kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", | 1875 | kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", |
1883 | arch->pc); | 1876 | arch->pc); |
1884 | 1877 | ||
1885 | /* set pc to the exception entry point */ | 1878 | /* set pc to the exception entry point */ |
1886 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1879 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
1887 | 1880 | ||
1888 | } else { | 1881 | } else { |
1889 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", | 1882 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", |
1890 | arch->pc); | 1883 | arch->pc); |
1891 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1884 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
1892 | } | 1885 | } |
1893 | 1886 | ||
1894 | kvm_change_c0_guest_cause(cop0, (0xff), | 1887 | kvm_change_c0_guest_cause(cop0, (0xff), |
1895 | (T_TLB_LD_MISS << CAUSEB_EXCCODE)); | 1888 | (T_TLB_LD_MISS << CAUSEB_EXCCODE)); |
1896 | 1889 | ||
1897 | /* setup badvaddr, context and entryhi registers for the guest */ | 1890 | /* setup badvaddr, context and entryhi registers for the guest */ |
1898 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 1891 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
1899 | /* XXXKYMA: is the context register used by linux??? */ | 1892 | /* XXXKYMA: is the context register used by linux??? */ |
1900 | kvm_write_c0_guest_entryhi(cop0, entryhi); | 1893 | kvm_write_c0_guest_entryhi(cop0, entryhi); |
1901 | /* Blow away the shadow host TLBs */ | 1894 | /* Blow away the shadow host TLBs */ |
1902 | kvm_mips_flush_host_tlb(1); | 1895 | kvm_mips_flush_host_tlb(1); |
1903 | 1896 | ||
1904 | return EMULATE_DONE; | 1897 | return EMULATE_DONE; |
1905 | } | 1898 | } |
1906 | 1899 | ||
1907 | enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause, | 1900 | enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause, |
1908 | uint32_t *opc, | 1901 | uint32_t *opc, |
1909 | struct kvm_run *run, | 1902 | struct kvm_run *run, |
1910 | struct kvm_vcpu *vcpu) | 1903 | struct kvm_vcpu *vcpu) |
1911 | { | 1904 | { |
1912 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1905 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1913 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1906 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1914 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1907 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1915 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1908 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1916 | 1909 | ||
1917 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1910 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1918 | /* save old pc */ | 1911 | /* save old pc */ |
1919 | kvm_write_c0_guest_epc(cop0, arch->pc); | 1912 | kvm_write_c0_guest_epc(cop0, arch->pc); |
1920 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 1913 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
1921 | 1914 | ||
1922 | if (cause & CAUSEF_BD) | 1915 | if (cause & CAUSEF_BD) |
1923 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 1916 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
1924 | else | 1917 | else |
1925 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 1918 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
1926 | 1919 | ||
1927 | kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", | 1920 | kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", |
1928 | arch->pc); | 1921 | arch->pc); |
1929 | 1922 | ||
1930 | /* Set PC to the exception entry point */ | 1923 | /* Set PC to the exception entry point */ |
1931 | arch->pc = KVM_GUEST_KSEG0 + 0x0; | 1924 | arch->pc = KVM_GUEST_KSEG0 + 0x0; |
1932 | } else { | 1925 | } else { |
1933 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", | 1926 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", |
1934 | arch->pc); | 1927 | arch->pc); |
1935 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1928 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
1936 | } | 1929 | } |
1937 | 1930 | ||
1938 | kvm_change_c0_guest_cause(cop0, (0xff), | 1931 | kvm_change_c0_guest_cause(cop0, (0xff), |
1939 | (T_TLB_ST_MISS << CAUSEB_EXCCODE)); | 1932 | (T_TLB_ST_MISS << CAUSEB_EXCCODE)); |
1940 | 1933 | ||
1941 | /* setup badvaddr, context and entryhi registers for the guest */ | 1934 | /* setup badvaddr, context and entryhi registers for the guest */ |
1942 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 1935 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
1943 | /* XXXKYMA: is the context register used by linux??? */ | 1936 | /* XXXKYMA: is the context register used by linux??? */ |
1944 | kvm_write_c0_guest_entryhi(cop0, entryhi); | 1937 | kvm_write_c0_guest_entryhi(cop0, entryhi); |
1945 | /* Blow away the shadow host TLBs */ | 1938 | /* Blow away the shadow host TLBs */ |
1946 | kvm_mips_flush_host_tlb(1); | 1939 | kvm_mips_flush_host_tlb(1); |
1947 | 1940 | ||
1948 | return EMULATE_DONE; | 1941 | return EMULATE_DONE; |
1949 | } | 1942 | } |
1950 | 1943 | ||
1951 | enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause, | 1944 | enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause, |
1952 | uint32_t *opc, | 1945 | uint32_t *opc, |
1953 | struct kvm_run *run, | 1946 | struct kvm_run *run, |
1954 | struct kvm_vcpu *vcpu) | 1947 | struct kvm_vcpu *vcpu) |
1955 | { | 1948 | { |
1956 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1949 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1957 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1950 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1958 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1951 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1959 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1952 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1960 | 1953 | ||
1961 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1954 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1962 | /* save old pc */ | 1955 | /* save old pc */ |
1963 | kvm_write_c0_guest_epc(cop0, arch->pc); | 1956 | kvm_write_c0_guest_epc(cop0, arch->pc); |
1964 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 1957 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
1965 | 1958 | ||
1966 | if (cause & CAUSEF_BD) | 1959 | if (cause & CAUSEF_BD) |
1967 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 1960 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
1968 | else | 1961 | else |
1969 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 1962 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
1970 | 1963 | ||
1971 | kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", | 1964 | kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", |
1972 | arch->pc); | 1965 | arch->pc); |
1973 | 1966 | ||
1974 | /* Set PC to the exception entry point */ | 1967 | /* Set PC to the exception entry point */ |
1975 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1968 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
1976 | } else { | 1969 | } else { |
1977 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", | 1970 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", |
1978 | arch->pc); | 1971 | arch->pc); |
1979 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1972 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
1980 | } | 1973 | } |
1981 | 1974 | ||
1982 | kvm_change_c0_guest_cause(cop0, (0xff), | 1975 | kvm_change_c0_guest_cause(cop0, (0xff), |
1983 | (T_TLB_ST_MISS << CAUSEB_EXCCODE)); | 1976 | (T_TLB_ST_MISS << CAUSEB_EXCCODE)); |
1984 | 1977 | ||
1985 | /* setup badvaddr, context and entryhi registers for the guest */ | 1978 | /* setup badvaddr, context and entryhi registers for the guest */ |
1986 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 1979 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
1987 | /* XXXKYMA: is the context register used by linux??? */ | 1980 | /* XXXKYMA: is the context register used by linux??? */ |
1988 | kvm_write_c0_guest_entryhi(cop0, entryhi); | 1981 | kvm_write_c0_guest_entryhi(cop0, entryhi); |
1989 | /* Blow away the shadow host TLBs */ | 1982 | /* Blow away the shadow host TLBs */ |
1990 | kvm_mips_flush_host_tlb(1); | 1983 | kvm_mips_flush_host_tlb(1); |
1991 | 1984 | ||
1992 | return EMULATE_DONE; | 1985 | return EMULATE_DONE; |
1993 | } | 1986 | } |
1994 | 1987 | ||
1995 | /* TLBMOD: store into address matching TLB with Dirty bit off */ | 1988 | /* TLBMOD: store into address matching TLB with Dirty bit off */ |
1996 | enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, | 1989 | enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, |
1997 | struct kvm_run *run, | 1990 | struct kvm_run *run, |
1998 | struct kvm_vcpu *vcpu) | 1991 | struct kvm_vcpu *vcpu) |
1999 | { | 1992 | { |
2000 | enum emulation_result er = EMULATE_DONE; | 1993 | enum emulation_result er = EMULATE_DONE; |
2001 | #ifdef DEBUG | 1994 | #ifdef DEBUG |
2002 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1995 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2003 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1996 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
2004 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1997 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
2005 | int index; | 1998 | int index; |
2006 | 1999 | ||
2007 | /* If address not in the guest TLB, then we are in trouble */ | 2000 | /* If address not in the guest TLB, then we are in trouble */ |
2008 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); | 2001 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); |
2009 | if (index < 0) { | 2002 | if (index < 0) { |
2010 | /* XXXKYMA Invalidate and retry */ | 2003 | /* XXXKYMA Invalidate and retry */ |
2011 | kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr); | 2004 | kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr); |
2012 | kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n", | 2005 | kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n", |
2013 | __func__, entryhi); | 2006 | __func__, entryhi); |
2014 | kvm_mips_dump_guest_tlbs(vcpu); | 2007 | kvm_mips_dump_guest_tlbs(vcpu); |
2015 | kvm_mips_dump_host_tlbs(); | 2008 | kvm_mips_dump_host_tlbs(); |
2016 | return EMULATE_FAIL; | 2009 | return EMULATE_FAIL; |
2017 | } | 2010 | } |
2018 | #endif | 2011 | #endif |
2019 | 2012 | ||
2020 | er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu); | 2013 | er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu); |
2021 | return er; | 2014 | return er; |
2022 | } | 2015 | } |
2023 | 2016 | ||
2024 | enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause, | 2017 | enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause, |
2025 | uint32_t *opc, | 2018 | uint32_t *opc, |
2026 | struct kvm_run *run, | 2019 | struct kvm_run *run, |
2027 | struct kvm_vcpu *vcpu) | 2020 | struct kvm_vcpu *vcpu) |
2028 | { | 2021 | { |
2029 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2022 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2030 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 2023 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
2031 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 2024 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
2032 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 2025 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
2033 | 2026 | ||
2034 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 2027 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
2035 | /* save old pc */ | 2028 | /* save old pc */ |
2036 | kvm_write_c0_guest_epc(cop0, arch->pc); | 2029 | kvm_write_c0_guest_epc(cop0, arch->pc); |
2037 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 2030 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
2038 | 2031 | ||
2039 | if (cause & CAUSEF_BD) | 2032 | if (cause & CAUSEF_BD) |
2040 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 2033 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
2041 | else | 2034 | else |
2042 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 2035 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
2043 | 2036 | ||
2044 | kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", | 2037 | kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", |
2045 | arch->pc); | 2038 | arch->pc); |
2046 | 2039 | ||
2047 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2040 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
2048 | } else { | 2041 | } else { |
2049 | kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", | 2042 | kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", |
2050 | arch->pc); | 2043 | arch->pc); |
2051 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2044 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
2052 | } | 2045 | } |
2053 | 2046 | ||
2054 | kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE)); | 2047 | kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE)); |
2055 | 2048 | ||
2056 | /* setup badvaddr, context and entryhi registers for the guest */ | 2049 | /* setup badvaddr, context and entryhi registers for the guest */ |
2057 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 2050 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
2058 | /* XXXKYMA: is the context register used by linux??? */ | 2051 | /* XXXKYMA: is the context register used by linux??? */ |
2059 | kvm_write_c0_guest_entryhi(cop0, entryhi); | 2052 | kvm_write_c0_guest_entryhi(cop0, entryhi); |
2060 | /* Blow away the shadow host TLBs */ | 2053 | /* Blow away the shadow host TLBs */ |
2061 | kvm_mips_flush_host_tlb(1); | 2054 | kvm_mips_flush_host_tlb(1); |
2062 | 2055 | ||
2063 | return EMULATE_DONE; | 2056 | return EMULATE_DONE; |
2064 | } | 2057 | } |
2065 | 2058 | ||
2066 | enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, | 2059 | enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, |
2067 | uint32_t *opc, | 2060 | uint32_t *opc, |
2068 | struct kvm_run *run, | 2061 | struct kvm_run *run, |
2069 | struct kvm_vcpu *vcpu) | 2062 | struct kvm_vcpu *vcpu) |
2070 | { | 2063 | { |
2071 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2064 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2072 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 2065 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
2073 | 2066 | ||
2074 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 2067 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
2075 | /* save old pc */ | 2068 | /* save old pc */ |
2076 | kvm_write_c0_guest_epc(cop0, arch->pc); | 2069 | kvm_write_c0_guest_epc(cop0, arch->pc); |
2077 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 2070 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
2078 | 2071 | ||
2079 | if (cause & CAUSEF_BD) | 2072 | if (cause & CAUSEF_BD) |
2080 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 2073 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
2081 | else | 2074 | else |
2082 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 2075 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
2083 | 2076 | ||
2084 | } | 2077 | } |
2085 | 2078 | ||
2086 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2079 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
2087 | 2080 | ||
2088 | kvm_change_c0_guest_cause(cop0, (0xff), | 2081 | kvm_change_c0_guest_cause(cop0, (0xff), |
2089 | (T_COP_UNUSABLE << CAUSEB_EXCCODE)); | 2082 | (T_COP_UNUSABLE << CAUSEB_EXCCODE)); |
2090 | kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); | 2083 | kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); |
2091 | 2084 | ||
2092 | return EMULATE_DONE; | 2085 | return EMULATE_DONE; |
2093 | } | 2086 | } |
2094 | 2087 | ||
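Besides the usual delivery steps, kvm_mips_emulate_fpu_exc() records which coprocessor was unusable in the two-bit Cause.CE field; the value 1 names CP1, the FPU. The field update in isolation (bit positions per the MIPS32 Cause register layout):

    #include <stdint.h>

    #define CAUSEB_CE 28
    #define CAUSEF_CE (0x3u << CAUSEB_CE)   /* two-bit coprocessor-error field */

    /* Equivalent of kvm_change_c0_guest_cause(cop0, CAUSEF_CE, cp << CAUSEB_CE). */
    static uint32_t set_cause_ce(uint32_t cause, uint32_t cp)
    {
        return (cause & ~CAUSEF_CE) | ((cp << CAUSEB_CE) & CAUSEF_CE);
    }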
2095 | enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, | 2088 | enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, |
2096 | uint32_t *opc, | 2089 | uint32_t *opc, |
2097 | struct kvm_run *run, | 2090 | struct kvm_run *run, |
2098 | struct kvm_vcpu *vcpu) | 2091 | struct kvm_vcpu *vcpu) |
2099 | { | 2092 | { |
2100 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2093 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2101 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 2094 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
2102 | enum emulation_result er = EMULATE_DONE; | 2095 | enum emulation_result er = EMULATE_DONE; |
2103 | 2096 | ||
2104 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 2097 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
2105 | /* save old pc */ | 2098 | /* save old pc */ |
2106 | kvm_write_c0_guest_epc(cop0, arch->pc); | 2099 | kvm_write_c0_guest_epc(cop0, arch->pc); |
2107 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 2100 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
2108 | 2101 | ||
2109 | if (cause & CAUSEF_BD) | 2102 | if (cause & CAUSEF_BD) |
2110 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 2103 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
2111 | else | 2104 | else |
2112 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 2105 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
2113 | 2106 | ||
2114 | kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); | 2107 | kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); |
2115 | 2108 | ||
2116 | kvm_change_c0_guest_cause(cop0, (0xff), | 2109 | kvm_change_c0_guest_cause(cop0, (0xff), |
2117 | (T_RES_INST << CAUSEB_EXCCODE)); | 2110 | (T_RES_INST << CAUSEB_EXCCODE)); |
2118 | 2111 | ||
2119 | /* Set PC to the exception entry point */ | 2112 | /* Set PC to the exception entry point */ |
2120 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2113 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
2121 | 2114 | ||
2122 | } else { | 2115 | } else { |
2123 | kvm_err("Trying to deliver RI when EXL is already set\n"); | 2116 | kvm_err("Trying to deliver RI when EXL is already set\n"); |
2124 | er = EMULATE_FAIL; | 2117 | er = EMULATE_FAIL; |
2125 | } | 2118 | } |
2126 | 2119 | ||
2127 | return er; | 2120 | return er; |
2128 | } | 2121 | } |
2129 | 2122 | ||
2130 | enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, | 2123 | enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, |
2131 | uint32_t *opc, | 2124 | uint32_t *opc, |
2132 | struct kvm_run *run, | 2125 | struct kvm_run *run, |
2133 | struct kvm_vcpu *vcpu) | 2126 | struct kvm_vcpu *vcpu) |
2134 | { | 2127 | { |
2135 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2128 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2136 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 2129 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
2137 | enum emulation_result er = EMULATE_DONE; | 2130 | enum emulation_result er = EMULATE_DONE; |
2138 | 2131 | ||
2139 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 2132 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
2140 | /* save old pc */ | 2133 | /* save old pc */ |
2141 | kvm_write_c0_guest_epc(cop0, arch->pc); | 2134 | kvm_write_c0_guest_epc(cop0, arch->pc); |
2142 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 2135 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
2143 | 2136 | ||
2144 | if (cause & CAUSEF_BD) | 2137 | if (cause & CAUSEF_BD) |
2145 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 2138 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
2146 | else | 2139 | else |
2147 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 2140 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
2148 | 2141 | ||
2149 | kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); | 2142 | kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); |
2150 | 2143 | ||
2151 | kvm_change_c0_guest_cause(cop0, (0xff), | 2144 | kvm_change_c0_guest_cause(cop0, (0xff), |
2152 | (T_BREAK << CAUSEB_EXCCODE)); | 2145 | (T_BREAK << CAUSEB_EXCCODE)); |
2153 | 2146 | ||
2154 | /* Set PC to the exception entry point */ | 2147 | /* Set PC to the exception entry point */ |
2155 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2148 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
2156 | 2149 | ||
2157 | } else { | 2150 | } else { |
2158 | kvm_err("Trying to deliver BP when EXL is already set\n"); | 2151 | kvm_err("Trying to deliver BP when EXL is already set\n"); |
2159 | er = EMULATE_FAIL; | 2152 | er = EMULATE_FAIL; |
2160 | } | 2153 | } |
2161 | 2154 | ||
2162 | return er; | 2155 | return er; |
2163 | } | 2156 | } |
2164 | 2157 | ||
2165 | enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause, | 2158 | enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause, |
2166 | uint32_t *opc, | 2159 | uint32_t *opc, |
2167 | struct kvm_run *run, | 2160 | struct kvm_run *run, |
2168 | struct kvm_vcpu *vcpu) | 2161 | struct kvm_vcpu *vcpu) |
2169 | { | 2162 | { |
2170 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2163 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2171 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 2164 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
2172 | enum emulation_result er = EMULATE_DONE; | 2165 | enum emulation_result er = EMULATE_DONE; |
2173 | 2166 | ||
2174 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 2167 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
2175 | /* save old pc */ | 2168 | /* save old pc */ |
2176 | kvm_write_c0_guest_epc(cop0, arch->pc); | 2169 | kvm_write_c0_guest_epc(cop0, arch->pc); |
2177 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 2170 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
2178 | 2171 | ||
2179 | if (cause & CAUSEF_BD) | 2172 | if (cause & CAUSEF_BD) |
2180 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 2173 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
2181 | else | 2174 | else |
2182 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 2175 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
2183 | 2176 | ||
2184 | kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); | 2177 | kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); |
2185 | 2178 | ||
2186 | kvm_change_c0_guest_cause(cop0, (0xff), | 2179 | kvm_change_c0_guest_cause(cop0, (0xff), |
2187 | (T_TRAP << CAUSEB_EXCCODE)); | 2180 | (T_TRAP << CAUSEB_EXCCODE)); |
2188 | 2181 | ||
2189 | /* Set PC to the exception entry point */ | 2182 | /* Set PC to the exception entry point */ |
2190 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2183 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
2191 | 2184 | ||
2192 | } else { | 2185 | } else { |
2193 | kvm_err("Trying to deliver TRAP when EXL is already set\n"); | 2186 | kvm_err("Trying to deliver TRAP when EXL is already set\n"); |
2194 | er = EMULATE_FAIL; | 2187 | er = EMULATE_FAIL; |
2195 | } | 2188 | } |
2196 | 2189 | ||
2197 | return er; | 2190 | return er; |
2198 | } | 2191 | } |
2199 | 2192 | ||
2200 | enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause, | 2193 | enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause, |
2201 | uint32_t *opc, | 2194 | uint32_t *opc, |
2202 | struct kvm_run *run, | 2195 | struct kvm_run *run, |
2203 | struct kvm_vcpu *vcpu) | 2196 | struct kvm_vcpu *vcpu) |
2204 | { | 2197 | { |
2205 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2198 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2206 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 2199 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
2207 | enum emulation_result er = EMULATE_DONE; | 2200 | enum emulation_result er = EMULATE_DONE; |
2208 | 2201 | ||
2209 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 2202 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
2210 | /* save old pc */ | 2203 | /* save old pc */ |
2211 | kvm_write_c0_guest_epc(cop0, arch->pc); | 2204 | kvm_write_c0_guest_epc(cop0, arch->pc); |
2212 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 2205 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
2213 | 2206 | ||
2214 | if (cause & CAUSEF_BD) | 2207 | if (cause & CAUSEF_BD) |
2215 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 2208 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
2216 | else | 2209 | else |
2217 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 2210 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
2218 | 2211 | ||
2219 | kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); | 2212 | kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); |
2220 | 2213 | ||
2221 | kvm_change_c0_guest_cause(cop0, (0xff), | 2214 | kvm_change_c0_guest_cause(cop0, (0xff), |
2222 | (T_MSAFPE << CAUSEB_EXCCODE)); | 2215 | (T_MSAFPE << CAUSEB_EXCCODE)); |
2223 | 2216 | ||
2224 | /* Set PC to the exception entry point */ | 2217 | /* Set PC to the exception entry point */ |
2225 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2218 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
2226 | 2219 | ||
2227 | } else { | 2220 | } else { |
2228 | kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); | 2221 | kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); |
2229 | er = EMULATE_FAIL; | 2222 | er = EMULATE_FAIL; |
2230 | } | 2223 | } |
2231 | 2224 | ||
2232 | return er; | 2225 | return er; |
2233 | } | 2226 | } |
2234 | 2227 | ||
2235 | enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause, | 2228 | enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause, |
2236 | uint32_t *opc, | 2229 | uint32_t *opc, |
2237 | struct kvm_run *run, | 2230 | struct kvm_run *run, |
2238 | struct kvm_vcpu *vcpu) | 2231 | struct kvm_vcpu *vcpu) |
2239 | { | 2232 | { |
2240 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2233 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2241 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 2234 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
2242 | enum emulation_result er = EMULATE_DONE; | 2235 | enum emulation_result er = EMULATE_DONE; |
2243 | 2236 | ||
2244 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 2237 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
2245 | /* save old pc */ | 2238 | /* save old pc */ |
2246 | kvm_write_c0_guest_epc(cop0, arch->pc); | 2239 | kvm_write_c0_guest_epc(cop0, arch->pc); |
2247 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 2240 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
2248 | 2241 | ||
2249 | if (cause & CAUSEF_BD) | 2242 | if (cause & CAUSEF_BD) |
2250 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 2243 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
2251 | else | 2244 | else |
2252 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 2245 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
2253 | 2246 | ||
2254 | kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); | 2247 | kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); |
2255 | 2248 | ||
2256 | kvm_change_c0_guest_cause(cop0, (0xff), | 2249 | kvm_change_c0_guest_cause(cop0, (0xff), |
2257 | (T_FPE << CAUSEB_EXCCODE)); | 2250 | (T_FPE << CAUSEB_EXCCODE)); |
2258 | 2251 | ||
2259 | /* Set PC to the exception entry point */ | 2252 | /* Set PC to the exception entry point */ |
2260 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2253 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
2261 | 2254 | ||
2262 | } else { | 2255 | } else { |
2263 | kvm_err("Trying to deliver FPE when EXL is already set\n"); | 2256 | kvm_err("Trying to deliver FPE when EXL is already set\n"); |
2264 | er = EMULATE_FAIL; | 2257 | er = EMULATE_FAIL; |
2265 | } | 2258 | } |
2266 | 2259 | ||
2267 | return er; | 2260 | return er; |
2268 | } | 2261 | } |
2269 | 2262 | ||
2270 | enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause, | 2263 | enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause, |
2271 | uint32_t *opc, | 2264 | uint32_t *opc, |
2272 | struct kvm_run *run, | 2265 | struct kvm_run *run, |
2273 | struct kvm_vcpu *vcpu) | 2266 | struct kvm_vcpu *vcpu) |
2274 | { | 2267 | { |
2275 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2268 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2276 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 2269 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
2277 | enum emulation_result er = EMULATE_DONE; | 2270 | enum emulation_result er = EMULATE_DONE; |
2278 | 2271 | ||
2279 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 2272 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
2280 | /* save old pc */ | 2273 | /* save old pc */ |
2281 | kvm_write_c0_guest_epc(cop0, arch->pc); | 2274 | kvm_write_c0_guest_epc(cop0, arch->pc); |
2282 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 2275 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
2283 | 2276 | ||
2284 | if (cause & CAUSEF_BD) | 2277 | if (cause & CAUSEF_BD) |
2285 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 2278 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
2286 | else | 2279 | else |
2287 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 2280 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
2288 | 2281 | ||
2289 | kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); | 2282 | kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); |
2290 | 2283 | ||
2291 | kvm_change_c0_guest_cause(cop0, (0xff), | 2284 | kvm_change_c0_guest_cause(cop0, (0xff), |
2292 | (T_MSADIS << CAUSEB_EXCCODE)); | 2285 | (T_MSADIS << CAUSEB_EXCCODE)); |
2293 | 2286 | ||
2294 | /* Set PC to the exception entry point */ | 2287 | /* Set PC to the exception entry point */ |
2295 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2288 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
2296 | 2289 | ||
2297 | } else { | 2290 | } else { |
2298 | kvm_err("Trying to deliver MSADIS when EXL is already set\n"); | 2291 | kvm_err("Trying to deliver MSADIS when EXL is already set\n"); |
2299 | er = EMULATE_FAIL; | 2292 | er = EMULATE_FAIL; |
2300 | } | 2293 | } |
2301 | 2294 | ||
2302 | return er; | 2295 | return er; |
2303 | } | 2296 | } |
2304 | 2297 | ||
2305 | /* ll/sc, rdhwr, sync emulation */ | 2298 | /* ll/sc, rdhwr, sync emulation */ |
2306 | 2299 | ||
2307 | #define OPCODE 0xfc000000 | 2300 | #define OPCODE 0xfc000000 |
2308 | #define BASE 0x03e00000 | 2301 | #define BASE 0x03e00000 |
2309 | #define RT 0x001f0000 | 2302 | #define RT 0x001f0000 |
2310 | #define OFFSET 0x0000ffff | 2303 | #define OFFSET 0x0000ffff |
2311 | #define LL 0xc0000000 | 2304 | #define LL 0xc0000000 |
2312 | #define SC 0xe0000000 | 2305 | #define SC 0xe0000000 |
2313 | #define SPEC0 0x00000000 | 2306 | #define SPEC0 0x00000000 |
2314 | #define SPEC3 0x7c000000 | 2307 | #define SPEC3 0x7c000000 |
2315 | #define RD 0x0000f800 | 2308 | #define RD 0x0000f800 |
2316 | #define FUNC 0x0000003f | 2309 | #define FUNC 0x0000003f |
2317 | #define SYNC 0x0000000f | 2310 | #define SYNC 0x0000000f |
2318 | #define RDHWR 0x0000003b | 2311 | #define RDHWR 0x0000003b |
2319 | 2312 | ||
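With these masks, recognising RDHWR and pulling out its register fields is two compares and two shifts. A standalone check (the sample encoding is rdhwr $3, $29, the UserLocal read that Linux userland issues for TLS):

    #include <stdint.h>
    #include <stdio.h>

    #define OPCODE 0xfc000000   /* masks as defined above */
    #define RT     0x001f0000
    #define RD     0x0000f800
    #define FUNC   0x0000003f
    #define SPEC3  0x7c000000
    #define RDHWR  0x0000003b

    static int is_rdhwr(uint32_t inst, int *rt, int *rd)
    {
        if ((inst & OPCODE) != SPEC3 || (inst & FUNC) != RDHWR)
            return 0;
        *rt = (inst & RT) >> 16;    /* destination GPR */
        *rd = (inst & RD) >> 11;    /* hardware register selector */
        return 1;
    }

    int main(void)
    {
        uint32_t inst = SPEC3 | (3u << 16) | (29u << 11) | RDHWR;
        int rt, rd;

        if (is_rdhwr(inst, &rt, &rd))
            printf("RDHWR: rt=r%d, hwr=%d (29 = UserLocal)\n", rt, rd);
        return 0;
    }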
2320 | enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, | 2313 | enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, |
2321 | struct kvm_run *run, | 2314 | struct kvm_run *run, |
2322 | struct kvm_vcpu *vcpu) | 2315 | struct kvm_vcpu *vcpu) |
2323 | { | 2316 | { |
2324 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2317 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2325 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 2318 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
2326 | enum emulation_result er = EMULATE_DONE; | 2319 | enum emulation_result er = EMULATE_DONE; |
2327 | unsigned long curr_pc; | 2320 | unsigned long curr_pc; |
2328 | uint32_t inst; | 2321 | uint32_t inst; |
2329 | 2322 | ||
2330 | /* | 2323 | /* |
2331 | * Update PC and hold onto current PC in case there is | 2324 | * Update PC and hold onto current PC in case there is |
2332 | * an error and we want to rollback the PC | 2325 | * an error and we want to rollback the PC |
2333 | */ | 2326 | */ |
2334 | curr_pc = vcpu->arch.pc; | 2327 | curr_pc = vcpu->arch.pc; |
2335 | er = update_pc(vcpu, cause); | 2328 | er = update_pc(vcpu, cause); |
2336 | if (er == EMULATE_FAIL) | 2329 | if (er == EMULATE_FAIL) |
2337 | return er; | 2330 | return er; |
2338 | 2331 | ||
2339 | /* Fetch the instruction. */ | 2332 | /* Fetch the instruction. */ |
2340 | if (cause & CAUSEF_BD) | 2333 | if (cause & CAUSEF_BD) |
2341 | opc += 1; | 2334 | opc += 1; |
2342 | 2335 | ||
2343 | inst = kvm_get_inst(opc, vcpu); | 2336 | inst = kvm_get_inst(opc, vcpu); |
2344 | 2337 | ||
2345 | if (inst == KVM_INVALID_INST) { | 2338 | if (inst == KVM_INVALID_INST) { |
2346 | kvm_err("%s: Cannot get inst @ %p\n", __func__, opc); | 2339 | kvm_err("%s: Cannot get inst @ %p\n", __func__, opc); |
2347 | return EMULATE_FAIL; | 2340 | return EMULATE_FAIL; |
2348 | } | 2341 | } |
2349 | 2342 | ||
2350 | if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) { | 2343 | if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) { |
2351 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); | 2344 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); |
2352 | int rd = (inst & RD) >> 11; | 2345 | int rd = (inst & RD) >> 11; |
2353 | int rt = (inst & RT) >> 16; | 2346 | int rt = (inst & RT) >> 16; |
2354 | /* If usermode, check RDHWR rd is allowed by guest HWREna */ | 2347 | /* If usermode, check RDHWR rd is allowed by guest HWREna */ |
2355 | if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { | 2348 | if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { |
2356 | kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", | 2349 | kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", |
2357 | rd, opc); | 2350 | rd, opc); |
2358 | goto emulate_ri; | 2351 | goto emulate_ri; |
2359 | } | 2352 | } |
2360 | switch (rd) { | 2353 | switch (rd) { |
2361 | case 0: /* CPU number */ | 2354 | case 0: /* CPU number */ |
2362 | arch->gprs[rt] = 0; | 2355 | arch->gprs[rt] = 0; |
2363 | break; | 2356 | break; |
2364 | case 1: /* SYNCI length */ | 2357 | case 1: /* SYNCI length */ |
2365 | arch->gprs[rt] = min(current_cpu_data.dcache.linesz, | 2358 | arch->gprs[rt] = min(current_cpu_data.dcache.linesz, |
2366 | current_cpu_data.icache.linesz); | 2359 | current_cpu_data.icache.linesz); |
2367 | break; | 2360 | break; |
2368 | case 2: /* Read count register */ | 2361 | case 2: /* Read count register */ |
2369 | arch->gprs[rt] = kvm_mips_read_count(vcpu); | 2362 | arch->gprs[rt] = kvm_mips_read_count(vcpu); |
2370 | break; | 2363 | break; |
2371 | case 3: /* Count register resolution */ | 2364 | case 3: /* Count register resolution */ |
2372 | switch (current_cpu_data.cputype) { | 2365 | switch (current_cpu_data.cputype) { |
2373 | case CPU_20KC: | 2366 | case CPU_20KC: |
2374 | case CPU_25KF: | 2367 | case CPU_25KF: |
2375 | arch->gprs[rt] = 1; | 2368 | arch->gprs[rt] = 1; |
2376 | break; | 2369 | break; |
2377 | default: | 2370 | default: |
2378 | arch->gprs[rt] = 2; | 2371 | arch->gprs[rt] = 2; |
2379 | } | 2372 | } |
2380 | break; | 2373 | break; |
2381 | case 29: | 2374 | case 29: |
2382 | arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); | 2375 | arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); |
2383 | break; | 2376 | break; |
2384 | 2377 | ||
2385 | default: | 2378 | default: |
2386 | kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); | 2379 | kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); |
2387 | goto emulate_ri; | 2380 | goto emulate_ri; |
2388 | } | 2381 | } |
2389 | } else { | 2382 | } else { |
2390 | kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst); | 2383 | kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst); |
2391 | goto emulate_ri; | 2384 | goto emulate_ri; |
2392 | } | 2385 | } |
2393 | 2386 | ||
2394 | return EMULATE_DONE; | 2387 | return EMULATE_DONE; |
2395 | 2388 | ||
2396 | emulate_ri: | 2389 | emulate_ri: |
2397 | /* | 2390 | /* |
2398 | * Roll back the PC (if in a branch delay slot the PC already points to | 2391 | * Roll back the PC (if in a branch delay slot the PC already points to |
2399 | * the branch target), and pass the RI exception to the guest OS. | 2392 | * the branch target), and pass the RI exception to the guest OS. |
2400 | */ | 2393 | */ |
2401 | vcpu->arch.pc = curr_pc; | 2394 | vcpu->arch.pc = curr_pc; |
2402 | return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); | 2395 | return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); |
2403 | } | 2396 | } |
2404 | 2397 | ||
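The opcode masks defined above carve a 32-bit MIPS instruction word into its fields: SPEC3 selects the major opcode, FUNC the function code, and RD/RT the register numbers that kvm_mips_handle_ri() extracts. A minimal user-space sketch of the same RDHWR decode; the OPCODE and RT masks are defined earlier in this file and are assumed here to be 0xfc000000 and 0x001f0000:

	#include <stdint.h>
	#include <stdbool.h>

	/* Sketch only: decode an RDHWR instruction word */
	static bool is_rdhwr(uint32_t inst, int *rd, int *rt)
	{
		if ((inst & 0xfc000000) != 0x7c000000)	/* OPCODE == SPEC3 */
			return false;
		if ((inst & 0x0000003f) != 0x0000003b)	/* FUNC == RDHWR */
			return false;
		*rd = (inst & 0x0000f800) >> 11;	/* hardware register */
		*rt = (inst & 0x001f0000) >> 16;	/* destination GPR */
		return true;
	}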
2405 | enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, | 2398 | enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, |
2406 | struct kvm_run *run) | 2399 | struct kvm_run *run) |
2407 | { | 2400 | { |
2408 | unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; | 2401 | unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; |
2409 | enum emulation_result er = EMULATE_DONE; | 2402 | enum emulation_result er = EMULATE_DONE; |
2410 | 2403 | ||
2411 | if (run->mmio.len > sizeof(*gpr)) { | 2404 | if (run->mmio.len > sizeof(*gpr)) { |
2412 | kvm_err("Bad MMIO length: %d", run->mmio.len); | 2405 | kvm_err("Bad MMIO length: %d", run->mmio.len); |
2413 | er = EMULATE_FAIL; | 2406 | er = EMULATE_FAIL; |
2414 | goto done; | 2407 | goto done; |
2415 | } | 2408 | } |
2416 | 2409 | ||
2417 | er = update_pc(vcpu, vcpu->arch.pending_load_cause); | 2410 | er = update_pc(vcpu, vcpu->arch.pending_load_cause); |
2418 | if (er == EMULATE_FAIL) | 2411 | if (er == EMULATE_FAIL) |
2419 | return er; | 2412 | return er; |
2420 | 2413 | ||
2421 | switch (run->mmio.len) { | 2414 | switch (run->mmio.len) { |
2422 | case 4: | 2415 | case 4: |
2423 | *gpr = *(int32_t *) run->mmio.data; | 2416 | *gpr = *(int32_t *) run->mmio.data; |
2424 | break; | 2417 | break; |
2425 | 2418 | ||
2426 | case 2: | 2419 | case 2: |
2427 | if (vcpu->mmio_needed == 2) | 2420 | if (vcpu->mmio_needed == 2) |
2428 | *gpr = *(int16_t *) run->mmio.data; | 2421 | *gpr = *(int16_t *) run->mmio.data; |
2429 | else | 2422 | else |
2430 | *gpr = *(uint16_t *)run->mmio.data; | 2423 | *gpr = *(uint16_t *)run->mmio.data; |
2431 | 2424 | ||
2432 | break; | 2425 | break; |
2433 | case 1: | 2426 | case 1: |
2434 | if (vcpu->mmio_needed == 2) | 2427 | if (vcpu->mmio_needed == 2) |
2435 | *gpr = *(int8_t *) run->mmio.data; | 2428 | *gpr = *(int8_t *) run->mmio.data; |
2436 | else | 2429 | else |
2437 | *gpr = *(u8 *) run->mmio.data; | 2430 | *gpr = *(u8 *) run->mmio.data; |
2438 | break; | 2431 | break; |
2439 | } | 2432 | } |
2440 | 2433 | ||
2441 | if (vcpu->arch.pending_load_cause & CAUSEF_BD) | 2434 | if (vcpu->arch.pending_load_cause & CAUSEF_BD) |
2442 | kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", | 2435 | kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", |
2443 | vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, | 2436 | vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, |
2444 | vcpu->mmio_needed); | 2437 | vcpu->mmio_needed); |
2445 | 2438 | ||
2446 | done: | 2439 | done: |
2447 | return er; | 2440 | return er; |
2448 | } | 2441 | } |
2449 | 2442 | ||
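The casts in the switch above are what implement sign versus zero extension: vcpu->mmio_needed == 2 marks a signed load (LB/LH), so the MMIO data is read through a signed type before it widens to the unsigned long GPR. A standalone illustration of the difference, assuming an LP64 host:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t data = 0x80;			/* byte returned by MMIO */
		unsigned long lb  = (int8_t)data;	/* LB:  0xffffffffffffff80 */
		unsigned long lbu = (uint8_t)data;	/* LBU: 0x0000000000000080 */

		printf("lb=%#lx lbu=%#lx\n", lb, lbu);
		return 0;
	}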
2450 | static enum emulation_result kvm_mips_emulate_exc(unsigned long cause, | 2443 | static enum emulation_result kvm_mips_emulate_exc(unsigned long cause, |
2451 | uint32_t *opc, | 2444 | uint32_t *opc, |
2452 | struct kvm_run *run, | 2445 | struct kvm_run *run, |
2453 | struct kvm_vcpu *vcpu) | 2446 | struct kvm_vcpu *vcpu) |
2454 | { | 2447 | { |
2455 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | 2448 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
2456 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2449 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2457 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 2450 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
2458 | enum emulation_result er = EMULATE_DONE; | 2451 | enum emulation_result er = EMULATE_DONE; |
2459 | 2452 | ||
2460 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 2453 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
2461 | /* save old pc */ | 2454 | /* save old pc */ |
2462 | kvm_write_c0_guest_epc(cop0, arch->pc); | 2455 | kvm_write_c0_guest_epc(cop0, arch->pc); |
2463 | kvm_set_c0_guest_status(cop0, ST0_EXL); | 2456 | kvm_set_c0_guest_status(cop0, ST0_EXL); |
2464 | 2457 | ||
2465 | if (cause & CAUSEF_BD) | 2458 | if (cause & CAUSEF_BD) |
2466 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | 2459 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); |
2467 | else | 2460 | else |
2468 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | 2461 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); |
2469 | 2462 | ||
2470 | kvm_change_c0_guest_cause(cop0, (0xff), | 2463 | kvm_change_c0_guest_cause(cop0, (0xff), |
2471 | (exccode << CAUSEB_EXCCODE)); | 2464 | (exccode << CAUSEB_EXCCODE)); |
2472 | 2465 | ||
2473 | /* Set PC to the exception entry point */ | 2466 | /* Set PC to the exception entry point */ |
2474 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 2467 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
2475 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | 2468 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); |
2476 | 2469 | ||
2477 | kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", | 2470 | kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", |
2478 | exccode, kvm_read_c0_guest_epc(cop0), | 2471 | exccode, kvm_read_c0_guest_epc(cop0), |
2479 | kvm_read_c0_guest_badvaddr(cop0)); | 2472 | kvm_read_c0_guest_badvaddr(cop0)); |
2480 | } else { | 2473 | } else { |
2481 | kvm_err("Trying to deliver EXC when EXL is already set\n"); | 2474 | kvm_err("Trying to deliver EXC when EXL is already set\n"); |
2482 | er = EMULATE_FAIL; | 2475 | er = EMULATE_FAIL; |
2483 | } | 2476 | } |
2484 | 2477 | ||
2485 | return er; | 2478 | return er; |
2486 | } | 2479 | } |
2487 | 2480 | ||
2488 | enum emulation_result kvm_mips_check_privilege(unsigned long cause, | 2481 | enum emulation_result kvm_mips_check_privilege(unsigned long cause, |
2489 | uint32_t *opc, | 2482 | uint32_t *opc, |
2490 | struct kvm_run *run, | 2483 | struct kvm_run *run, |
2491 | struct kvm_vcpu *vcpu) | 2484 | struct kvm_vcpu *vcpu) |
2492 | { | 2485 | { |
2493 | enum emulation_result er = EMULATE_DONE; | 2486 | enum emulation_result er = EMULATE_DONE; |
2494 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | 2487 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
2495 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | 2488 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
2496 | 2489 | ||
2497 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); | 2490 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); |
2498 | 2491 | ||
2499 | if (usermode) { | 2492 | if (usermode) { |
2500 | switch (exccode) { | 2493 | switch (exccode) { |
2501 | case T_INT: | 2494 | case T_INT: |
2502 | case T_SYSCALL: | 2495 | case T_SYSCALL: |
2503 | case T_BREAK: | 2496 | case T_BREAK: |
2504 | case T_RES_INST: | 2497 | case T_RES_INST: |
2505 | case T_TRAP: | 2498 | case T_TRAP: |
2506 | case T_MSAFPE: | 2499 | case T_MSAFPE: |
2507 | case T_FPE: | 2500 | case T_FPE: |
2508 | case T_MSADIS: | 2501 | case T_MSADIS: |
2509 | break; | 2502 | break; |
2510 | 2503 | ||
2511 | case T_COP_UNUSABLE: | 2504 | case T_COP_UNUSABLE: |
2512 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) | 2505 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) |
2513 | er = EMULATE_PRIV_FAIL; | 2506 | er = EMULATE_PRIV_FAIL; |
2514 | break; | 2507 | break; |
2515 | 2508 | ||
2516 | case T_TLB_MOD: | 2509 | case T_TLB_MOD: |
2517 | break; | 2510 | break; |
2518 | 2511 | ||
2519 | case T_TLB_LD_MISS: | 2512 | case T_TLB_LD_MISS: |
2520 | /* | 2513 | /* |
2521 | * If we are accessing Guest kernel space, then send an | 2514 | * If we are accessing Guest kernel space, then send an |
2522 | * address error exception to the guest | 2515 | * address error exception to the guest |
2523 | */ | 2516 | */ |
2524 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { | 2517 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { |
2525 | kvm_debug("%s: LD MISS @ %#lx\n", __func__, | 2518 | kvm_debug("%s: LD MISS @ %#lx\n", __func__, |
2526 | badvaddr); | 2519 | badvaddr); |
2527 | cause &= ~0xff; | 2520 | cause &= ~0xff; |
2528 | cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); | 2521 | cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); |
2529 | er = EMULATE_PRIV_FAIL; | 2522 | er = EMULATE_PRIV_FAIL; |
2530 | } | 2523 | } |
2531 | break; | 2524 | break; |
2532 | 2525 | ||
2533 | case T_TLB_ST_MISS: | 2526 | case T_TLB_ST_MISS: |
2534 | /* | 2527 | /* |
2535 | * If we are accessing Guest kernel space, then send an | 2528 | * If we are accessing Guest kernel space, then send an |
2536 | * address error exception to the guest | 2529 | * address error exception to the guest |
2537 | */ | 2530 | */ |
2538 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { | 2531 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { |
2539 | kvm_debug("%s: ST MISS @ %#lx\n", __func__, | 2532 | kvm_debug("%s: ST MISS @ %#lx\n", __func__, |
2540 | badvaddr); | 2533 | badvaddr); |
2541 | cause &= ~0xff; | 2534 | cause &= ~0xff; |
2542 | cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); | 2535 | cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); |
2543 | er = EMULATE_PRIV_FAIL; | 2536 | er = EMULATE_PRIV_FAIL; |
2544 | } | 2537 | } |
2545 | break; | 2538 | break; |
2546 | 2539 | ||
2547 | case T_ADDR_ERR_ST: | 2540 | case T_ADDR_ERR_ST: |
2548 | kvm_debug("%s: address error ST @ %#lx\n", __func__, | 2541 | kvm_debug("%s: address error ST @ %#lx\n", __func__, |
2549 | badvaddr); | 2542 | badvaddr); |
2550 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { | 2543 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { |
2551 | cause &= ~0xff; | 2544 | cause &= ~0xff; |
2552 | cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); | 2545 | cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); |
2553 | } | 2546 | } |
2554 | er = EMULATE_PRIV_FAIL; | 2547 | er = EMULATE_PRIV_FAIL; |
2555 | break; | 2548 | break; |
2556 | case T_ADDR_ERR_LD: | 2549 | case T_ADDR_ERR_LD: |
2557 | kvm_debug("%s: address error LD @ %#lx\n", __func__, | 2550 | kvm_debug("%s: address error LD @ %#lx\n", __func__, |
2558 | badvaddr); | 2551 | badvaddr); |
2559 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { | 2552 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { |
2560 | cause &= ~0xff; | 2553 | cause &= ~0xff; |
2561 | cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); | 2554 | cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); |
2562 | } | 2555 | } |
2563 | er = EMULATE_PRIV_FAIL; | 2556 | er = EMULATE_PRIV_FAIL; |
2564 | break; | 2557 | break; |
2565 | default: | 2558 | default: |
2566 | er = EMULATE_PRIV_FAIL; | 2559 | er = EMULATE_PRIV_FAIL; |
2567 | break; | 2560 | break; |
2568 | } | 2561 | } |
2569 | } | 2562 | } |
2570 | 2563 | ||
2571 | if (er == EMULATE_PRIV_FAIL) | 2564 | if (er == EMULATE_PRIV_FAIL) |
2572 | kvm_mips_emulate_exc(cause, opc, run, vcpu); | 2565 | kvm_mips_emulate_exc(cause, opc, run, vcpu); |
2573 | 2566 | ||
2574 | return er; | 2567 | return er; |
2575 | } | 2568 | } |
2576 | 2569 | ||
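When kvm_mips_check_privilege() downgrades a fault, it rewrites the ExcCode field of the saved Cause value in place: the low byte is cleared and the new code shifted in at CAUSEB_EXCCODE. A small demonstration with the architected code numbers (TLBL is 2, AdEL is 4):

	#include <stdio.h>

	#define CAUSEB_EXCCODE	2
	#define T_TLB_LD_MISS	2	/* TLBL */
	#define T_ADDR_ERR_LD	4	/* AdEL */

	int main(void)
	{
		unsigned long cause = T_TLB_LD_MISS << CAUSEB_EXCCODE;

		cause &= ~0xff;					/* clear ExcCode */
		cause |= T_ADDR_ERR_LD << CAUSEB_EXCCODE;	/* install AdEL */

		printf("exccode=%lu\n", (cause >> CAUSEB_EXCCODE) & 0x1f);
		return 0;
	}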
2577 | /* | 2570 | /* |
2578 | * User Address (UA) fault, this could happen if | 2571 | * User Address (UA) fault, this could happen if |
2579 | * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this | 2572 | * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this |
2580 | * case we pass on the fault to the guest kernel and let it handle it. | 2573 | * case we pass on the fault to the guest kernel and let it handle it. |
2581 | * (2) TLB entry is present in the Guest TLB but not in the shadow, in this | 2574 | * (2) TLB entry is present in the Guest TLB but not in the shadow, in this |
2582 | * case we inject the TLB from the Guest TLB into the shadow host TLB | 2575 | * case we inject the TLB from the Guest TLB into the shadow host TLB |
2583 | */ | 2576 | */ |
2584 | enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, | 2577 | enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, |
2585 | uint32_t *opc, | 2578 | uint32_t *opc, |
2586 | struct kvm_run *run, | 2579 | struct kvm_run *run, |
2587 | struct kvm_vcpu *vcpu) | 2580 | struct kvm_vcpu *vcpu) |
2588 | { | 2581 | { |
2589 | enum emulation_result er = EMULATE_DONE; | 2582 | enum emulation_result er = EMULATE_DONE; |
2590 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | 2583 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
2591 | unsigned long va = vcpu->arch.host_cp0_badvaddr; | 2584 | unsigned long va = vcpu->arch.host_cp0_badvaddr; |
2592 | int index; | 2585 | int index; |
2593 | 2586 | ||
2594 | kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", | 2587 | kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", |
2595 | vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); | 2588 | vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); |
2596 | 2589 | ||
2597 | /* | 2590 | /* |
2598 | * KVM would not have got the exception if this entry was valid in the | 2591 | * KVM would not have got the exception if this entry was valid in the |
2599 | * shadow host TLB. Check the Guest TLB; if the entry is not there, then | 2592 | * shadow host TLB. Check the Guest TLB; if the entry is not there, then |
2600 | * send the guest an exception. The guest exc handler should then inject | 2593 | * send the guest an exception. The guest exc handler should then inject |
2601 | * an entry into the guest TLB. | 2594 | * an entry into the guest TLB. |
2602 | */ | 2595 | */ |
2603 | index = kvm_mips_guest_tlb_lookup(vcpu, | 2596 | index = kvm_mips_guest_tlb_lookup(vcpu, |
2604 | (va & VPN2_MASK) | | 2597 | (va & VPN2_MASK) | |
2605 | (kvm_read_c0_guest_entryhi | 2598 | (kvm_read_c0_guest_entryhi |
2606 | (vcpu->arch.cop0) & ASID_MASK)); | 2599 | (vcpu->arch.cop0) & ASID_MASK)); |
2607 | if (index < 0) { | 2600 | if (index < 0) { |
2608 | if (exccode == T_TLB_LD_MISS) { | 2601 | if (exccode == T_TLB_LD_MISS) { |
2609 | er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); | 2602 | er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); |
2610 | } else if (exccode == T_TLB_ST_MISS) { | 2603 | } else if (exccode == T_TLB_ST_MISS) { |
2611 | er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); | 2604 | er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); |
2612 | } else { | 2605 | } else { |
2613 | kvm_err("%s: invalid exc code: %d\n", __func__, | 2606 | kvm_err("%s: invalid exc code: %d\n", __func__, |
2614 | exccode); | 2607 | exccode); |
2615 | er = EMULATE_FAIL; | 2608 | er = EMULATE_FAIL; |
2616 | } | 2609 | } |
2617 | } else { | 2610 | } else { |
2618 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | 2611 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; |
2619 | 2612 | ||
2620 | /* | 2613 | /* |
2621 | * Check if the entry is valid, if not then setup a TLB invalid | 2614 | * Check if the entry is valid, if not then setup a TLB invalid |
2622 | * exception to the guest | 2615 | * exception to the guest |
2623 | */ | 2616 | */ |
2624 | if (!TLB_IS_VALID(*tlb, va)) { | 2617 | if (!TLB_IS_VALID(*tlb, va)) { |
2625 | if (exccode == T_TLB_LD_MISS) { | 2618 | if (exccode == T_TLB_LD_MISS) { |
2626 | er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, | 2619 | er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, |
2627 | vcpu); | 2620 | vcpu); |
2628 | } else if (exccode == T_TLB_ST_MISS) { | 2621 | } else if (exccode == T_TLB_ST_MISS) { |
2629 | er = kvm_mips_emulate_tlbinv_st(cause, opc, run, | 2622 | er = kvm_mips_emulate_tlbinv_st(cause, opc, run, |
2630 | vcpu); | 2623 | vcpu); |
arch/mips/kvm/trap_emul.c
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel | 6 | * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/vmalloc.h> | 15 | #include <linux/vmalloc.h> |
16 | 16 | ||
17 | #include <linux/kvm_host.h> | 17 | #include <linux/kvm_host.h> |
18 | 18 | ||
19 | #include "opcode.h" | 19 | #include "opcode.h" |
20 | #include "interrupt.h" | 20 | #include "interrupt.h" |
21 | 21 | ||
22 | static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | 22 | static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) |
23 | { | 23 | { |
24 | gpa_t gpa; | 24 | gpa_t gpa; |
25 | uint32_t kseg = KSEGX(gva); | 25 | uint32_t kseg = KSEGX(gva); |
26 | 26 | ||
27 | if ((kseg == CKSEG0) || (kseg == CKSEG1)) | 27 | if ((kseg == CKSEG0) || (kseg == CKSEG1)) |
28 | gpa = CPHYSADDR(gva); | 28 | gpa = CPHYSADDR(gva); |
29 | else { | 29 | else { |
30 | kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); | 30 | kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); |
31 | kvm_mips_dump_host_tlbs(); | 31 | kvm_mips_dump_host_tlbs(); |
32 | gpa = KVM_INVALID_ADDR; | 32 | gpa = KVM_INVALID_ADDR; |
33 | } | 33 | } |
34 | 34 | ||
35 | kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); | 35 | kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); |
36 | 36 | ||
37 | return gpa; | 37 | return gpa; |
38 | } | 38 | } |
39 | 39 | ||
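CKSEG0 and CKSEG1 are the unmapped kernel segments, so the GVA-to-GPA translation above is a fixed bit mask rather than a TLB walk: KSEGX() tests the top three address bits and CPHYSADDR() strips them. A user-space sketch with the standard MIPS32 segment constants:

	#include <stdio.h>

	#define CKSEG0		0x80000000ul
	#define CKSEG1		0xa0000000ul
	#define KSEGX(a)	((a) & 0xe0000000ul)	/* segment bits */
	#define CPHYSADDR(a)	((a) & 0x1ffffffful)	/* strip segment */

	int main(void)
	{
		unsigned long gva = 0x80001000ul;	/* a CKSEG0 address */

		if (KSEGX(gva) == CKSEG0 || KSEGX(gva) == CKSEG1)
			printf("gpa=%#lx\n", CPHYSADDR(gva));	/* 0x1000 */
		return 0;
	}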
40 | static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) | 40 | static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) |
41 | { | 41 | { |
42 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 42 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
43 | struct kvm_run *run = vcpu->run; | 43 | struct kvm_run *run = vcpu->run; |
44 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | 44 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
45 | unsigned long cause = vcpu->arch.host_cp0_cause; | 45 | unsigned long cause = vcpu->arch.host_cp0_cause; |
46 | enum emulation_result er = EMULATE_DONE; | 46 | enum emulation_result er = EMULATE_DONE; |
47 | int ret = RESUME_GUEST; | 47 | int ret = RESUME_GUEST; |
48 | 48 | ||
49 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { | 49 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { |
50 | /* FPU Unusable */ | 50 | /* FPU Unusable */ |
51 | if (!kvm_mips_guest_has_fpu(&vcpu->arch) || | 51 | if (!kvm_mips_guest_has_fpu(&vcpu->arch) || |
52 | (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) { | 52 | (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) { |
53 | /* | 53 | /* |
54 | * Unusable/no FPU in guest: | 54 | * Unusable/no FPU in guest: |
55 | * deliver guest COP1 Unusable Exception | 55 | * deliver guest COP1 Unusable Exception |
56 | */ | 56 | */ |
57 | er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); | 57 | er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); |
58 | } else { | 58 | } else { |
59 | /* Restore FPU state */ | 59 | /* Restore FPU state */ |
60 | kvm_own_fpu(vcpu); | 60 | kvm_own_fpu(vcpu); |
61 | er = EMULATE_DONE; | 61 | er = EMULATE_DONE; |
62 | } | 62 | } |
63 | } else { | 63 | } else { |
64 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | 64 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
65 | } | 65 | } |
66 | 66 | ||
67 | switch (er) { | 67 | switch (er) { |
68 | case EMULATE_DONE: | 68 | case EMULATE_DONE: |
69 | ret = RESUME_GUEST; | 69 | ret = RESUME_GUEST; |
70 | break; | 70 | break; |
71 | 71 | ||
72 | case EMULATE_FAIL: | 72 | case EMULATE_FAIL: |
73 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 73 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
74 | ret = RESUME_HOST; | 74 | ret = RESUME_HOST; |
75 | break; | 75 | break; |
76 | 76 | ||
77 | case EMULATE_WAIT: | 77 | case EMULATE_WAIT: |
78 | run->exit_reason = KVM_EXIT_INTR; | 78 | run->exit_reason = KVM_EXIT_INTR; |
79 | ret = RESUME_HOST; | 79 | ret = RESUME_HOST; |
80 | break; | 80 | break; |
81 | 81 | ||
82 | default: | 82 | default: |
83 | BUG(); | 83 | BUG(); |
84 | } | 84 | } |
85 | return ret; | 85 | return ret; |
86 | } | 86 | } |
87 | 87 | ||
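Cause.CE (bits 29:28) records which coprocessor raised the unusable exception; CE == 1 selects COP1, the FPU, which is why the handler above only restores FPU state for that value and defers everything else to the instruction emulator. Extracting the field, assuming the usual mipsregs.h values:

	#include <stdio.h>

	#define CAUSEB_CE	28
	#define CAUSEF_CE	(3ul << CAUSEB_CE)

	int main(void)
	{
		unsigned long cause = 1ul << CAUSEB_CE;	/* COP1 unusable */

		printf("coprocessor=%lu\n", (cause & CAUSEF_CE) >> CAUSEB_CE);
		return 0;
	}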
88 | static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) | 88 | static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) |
89 | { | 89 | { |
90 | struct kvm_run *run = vcpu->run; | 90 | struct kvm_run *run = vcpu->run; |
91 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | 91 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
92 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | 92 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
93 | unsigned long cause = vcpu->arch.host_cp0_cause; | 93 | unsigned long cause = vcpu->arch.host_cp0_cause; |
94 | enum emulation_result er = EMULATE_DONE; | 94 | enum emulation_result er = EMULATE_DONE; |
95 | int ret = RESUME_GUEST; | 95 | int ret = RESUME_GUEST; |
96 | 96 | ||
97 | if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | 97 | if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 |
98 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | 98 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { |
99 | kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", | 99 | kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", |
100 | cause, opc, badvaddr); | 100 | cause, opc, badvaddr); |
101 | er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); | 101 | er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); |
102 | 102 | ||
103 | if (er == EMULATE_DONE) | 103 | if (er == EMULATE_DONE) |
104 | ret = RESUME_GUEST; | 104 | ret = RESUME_GUEST; |
105 | else { | 105 | else { |
106 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 106 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
107 | ret = RESUME_HOST; | 107 | ret = RESUME_HOST; |
108 | } | 108 | } |
109 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | 109 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { |
110 | /* | 110 | /* |
111 | * XXXKYMA: The guest kernel does not expect to get this fault | 111 | * XXXKYMA: The guest kernel does not expect to get this fault |
112 | * when we are not using HIGHMEM. Need to address this in a | 112 | * when we are not using HIGHMEM. Need to address this in a |
113 | * HIGHMEM kernel | 113 | * HIGHMEM kernel |
114 | */ | 114 | */ |
115 | kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", | 115 | kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", |
116 | cause, opc, badvaddr); | 116 | cause, opc, badvaddr); |
117 | kvm_mips_dump_host_tlbs(); | 117 | kvm_mips_dump_host_tlbs(); |
118 | kvm_arch_vcpu_dump_regs(vcpu); | 118 | kvm_arch_vcpu_dump_regs(vcpu); |
119 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 119 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
120 | ret = RESUME_HOST; | 120 | ret = RESUME_HOST; |
121 | } else { | 121 | } else { |
122 | kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", | 122 | kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", |
123 | cause, opc, badvaddr); | 123 | cause, opc, badvaddr); |
124 | kvm_mips_dump_host_tlbs(); | 124 | kvm_mips_dump_host_tlbs(); |
125 | kvm_arch_vcpu_dump_regs(vcpu); | 125 | kvm_arch_vcpu_dump_regs(vcpu); |
126 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 126 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
127 | ret = RESUME_HOST; | 127 | ret = RESUME_HOST; |
128 | } | 128 | } |
129 | return ret; | 129 | return ret; |
130 | } | 130 | } |
131 | 131 | ||
132 | static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) | 132 | static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) |
133 | { | 133 | { |
134 | struct kvm_run *run = vcpu->run; | 134 | struct kvm_run *run = vcpu->run; |
135 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | 135 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
136 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | 136 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
137 | unsigned long cause = vcpu->arch.host_cp0_cause; | 137 | unsigned long cause = vcpu->arch.host_cp0_cause; |
138 | enum emulation_result er = EMULATE_DONE; | 138 | enum emulation_result er = EMULATE_DONE; |
139 | int ret = RESUME_GUEST; | 139 | int ret = RESUME_GUEST; |
140 | 140 | ||
141 | if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) | 141 | if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) |
142 | && KVM_GUEST_KERNEL_MODE(vcpu)) { | 142 | && KVM_GUEST_KERNEL_MODE(vcpu)) { |
143 | if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { | 143 | if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { |
144 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 144 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
145 | ret = RESUME_HOST; | 145 | ret = RESUME_HOST; |
146 | } | 146 | } |
147 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | 147 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 |
148 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | 148 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { |
149 | kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", | 149 | kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", |
150 | cause, opc, badvaddr); | 150 | cause, opc, badvaddr); |
151 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); | 151 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); |
152 | if (er == EMULATE_DONE) | 152 | if (er == EMULATE_DONE) |
153 | ret = RESUME_GUEST; | 153 | ret = RESUME_GUEST; |
154 | else { | 154 | else { |
155 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 155 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
156 | ret = RESUME_HOST; | 156 | ret = RESUME_HOST; |
157 | } | 157 | } |
158 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | 158 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { |
159 | /* | 159 | /* |
160 | * All KSEG0 faults are handled by KVM, as the guest kernel does | 160 | * All KSEG0 faults are handled by KVM, as the guest kernel does |
161 | * not expect to ever get them | 161 | * not expect to ever get them |
162 | */ | 162 | */ |
163 | if (kvm_mips_handle_kseg0_tlb_fault | 163 | if (kvm_mips_handle_kseg0_tlb_fault |
164 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { | 164 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { |
165 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 165 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
166 | ret = RESUME_HOST; | 166 | ret = RESUME_HOST; |
167 | } | 167 | } |
168 | } else { | 168 | } else { |
169 | kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", | 169 | kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", |
170 | cause, opc, badvaddr); | 170 | cause, opc, badvaddr); |
171 | kvm_mips_dump_host_tlbs(); | 171 | kvm_mips_dump_host_tlbs(); |
172 | kvm_arch_vcpu_dump_regs(vcpu); | 172 | kvm_arch_vcpu_dump_regs(vcpu); |
173 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 173 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
174 | ret = RESUME_HOST; | 174 | ret = RESUME_HOST; |
175 | } | 175 | } |
176 | return ret; | 176 | return ret; |
177 | } | 177 | } |
178 | 178 | ||
179 | static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) | 179 | static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) |
180 | { | 180 | { |
181 | struct kvm_run *run = vcpu->run; | 181 | struct kvm_run *run = vcpu->run; |
182 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | 182 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
183 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | 183 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
184 | unsigned long cause = vcpu->arch.host_cp0_cause; | 184 | unsigned long cause = vcpu->arch.host_cp0_cause; |
185 | enum emulation_result er = EMULATE_DONE; | 185 | enum emulation_result er = EMULATE_DONE; |
186 | int ret = RESUME_GUEST; | 186 | int ret = RESUME_GUEST; |
187 | 187 | ||
188 | if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) | 188 | if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) |
189 | && KVM_GUEST_KERNEL_MODE(vcpu)) { | 189 | && KVM_GUEST_KERNEL_MODE(vcpu)) { |
190 | if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { | 190 | if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { |
191 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 191 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
192 | ret = RESUME_HOST; | 192 | ret = RESUME_HOST; |
193 | } | 193 | } |
194 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | 194 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 |
195 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | 195 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { |
196 | kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n", | 196 | kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n", |
197 | vcpu->arch.pc, badvaddr); | 197 | vcpu->arch.pc, badvaddr); |
198 | 198 | ||
199 | /* | 199 | /* |
200 | * User Address (UA) fault, this could happen if | 200 | * User Address (UA) fault, this could happen if |
201 | * (1) TLB entry not present/valid in both Guest and shadow host | 201 | * (1) TLB entry not present/valid in both Guest and shadow host |
202 | * TLBs, in this case we pass on the fault to the guest | 202 | * TLBs, in this case we pass on the fault to the guest |
203 | * kernel and let it handle it. | 203 | * kernel and let it handle it. |
204 | * (2) TLB entry is present in the Guest TLB but not in the | 204 | * (2) TLB entry is present in the Guest TLB but not in the |
205 | * shadow, in this case we inject the TLB from the Guest TLB | 205 | * shadow, in this case we inject the TLB from the Guest TLB |
206 | * into the shadow host TLB | 206 | * into the shadow host TLB |
207 | */ | 207 | */ |
208 | 208 | ||
209 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); | 209 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); |
210 | if (er == EMULATE_DONE) | 210 | if (er == EMULATE_DONE) |
211 | ret = RESUME_GUEST; | 211 | ret = RESUME_GUEST; |
212 | else { | 212 | else { |
213 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 213 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
214 | ret = RESUME_HOST; | 214 | ret = RESUME_HOST; |
215 | } | 215 | } |
216 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | 216 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { |
217 | if (kvm_mips_handle_kseg0_tlb_fault | 217 | if (kvm_mips_handle_kseg0_tlb_fault |
218 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { | 218 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { |
219 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 219 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
220 | ret = RESUME_HOST; | 220 | ret = RESUME_HOST; |
221 | } | 221 | } |
222 | } else { | 222 | } else { |
223 | kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", | 223 | kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", |
224 | cause, opc, badvaddr); | 224 | cause, opc, badvaddr); |
225 | kvm_mips_dump_host_tlbs(); | 225 | kvm_mips_dump_host_tlbs(); |
226 | kvm_arch_vcpu_dump_regs(vcpu); | 226 | kvm_arch_vcpu_dump_regs(vcpu); |
227 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 227 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
228 | ret = RESUME_HOST; | 228 | ret = RESUME_HOST; |
229 | } | 229 | } |
230 | return ret; | 230 | return ret; |
231 | } | 231 | } |
232 | 232 | ||
233 | static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) | 233 | static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) |
234 | { | 234 | { |
235 | struct kvm_run *run = vcpu->run; | 235 | struct kvm_run *run = vcpu->run; |
236 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | 236 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
237 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | 237 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
238 | unsigned long cause = vcpu->arch.host_cp0_cause; | 238 | unsigned long cause = vcpu->arch.host_cp0_cause; |
239 | enum emulation_result er = EMULATE_DONE; | 239 | enum emulation_result er = EMULATE_DONE; |
240 | int ret = RESUME_GUEST; | 240 | int ret = RESUME_GUEST; |
241 | 241 | ||
242 | if (KVM_GUEST_KERNEL_MODE(vcpu) | 242 | if (KVM_GUEST_KERNEL_MODE(vcpu) |
243 | && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { | 243 | && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { |
244 | kvm_debug("Emulate Store to MMIO space\n"); | 244 | kvm_debug("Emulate Store to MMIO space\n"); |
245 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | 245 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
246 | if (er == EMULATE_FAIL) { | 246 | if (er == EMULATE_FAIL) { |
247 | kvm_err("Emulate Store to MMIO space failed\n"); | 247 | kvm_err("Emulate Store to MMIO space failed\n"); |
248 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 248 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
249 | ret = RESUME_HOST; | 249 | ret = RESUME_HOST; |
250 | } else { | 250 | } else { |
251 | run->exit_reason = KVM_EXIT_MMIO; | 251 | run->exit_reason = KVM_EXIT_MMIO; |
252 | ret = RESUME_HOST; | 252 | ret = RESUME_HOST; |
253 | } | 253 | } |
254 | } else { | 254 | } else { |
255 | kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", | 255 | kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", |
256 | cause, opc, badvaddr); | 256 | cause, opc, badvaddr); |
257 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 257 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
258 | ret = RESUME_HOST; | 258 | ret = RESUME_HOST; |
259 | } | 259 | } |
260 | return ret; | 260 | return ret; |
261 | } | 261 | } |
262 | 262 | ||
263 | static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) | 263 | static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) |
264 | { | 264 | { |
265 | struct kvm_run *run = vcpu->run; | 265 | struct kvm_run *run = vcpu->run; |
266 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | 266 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
267 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | 267 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; |
268 | unsigned long cause = vcpu->arch.host_cp0_cause; | 268 | unsigned long cause = vcpu->arch.host_cp0_cause; |
269 | enum emulation_result er = EMULATE_DONE; | 269 | enum emulation_result er = EMULATE_DONE; |
270 | int ret = RESUME_GUEST; | 270 | int ret = RESUME_GUEST; |
271 | 271 | ||
272 | if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { | 272 | if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { |
273 | kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); | 273 | kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); |
274 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | 274 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
275 | if (er == EMULATE_FAIL) { | 275 | if (er == EMULATE_FAIL) { |
276 | kvm_err("Emulate Load from MMIO space failed\n"); | 276 | kvm_err("Emulate Load from MMIO space failed\n"); |
277 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 277 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
278 | ret = RESUME_HOST; | 278 | ret = RESUME_HOST; |
279 | } else { | 279 | } else { |
280 | run->exit_reason = KVM_EXIT_MMIO; | 280 | run->exit_reason = KVM_EXIT_MMIO; |
281 | ret = RESUME_HOST; | 281 | ret = RESUME_HOST; |
282 | } | 282 | } |
283 | } else { | 283 | } else { |
284 | kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", | 284 | kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", |
285 | cause, opc, badvaddr); | 285 | cause, opc, badvaddr); |
286 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 286 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
287 | ret = RESUME_HOST; | 287 | ret = RESUME_HOST; |
288 | er = EMULATE_FAIL; | 288 | er = EMULATE_FAIL; |
289 | } | 289 | } |
290 | return ret; | 290 | return ret; |
291 | } | 291 | } |
292 | 292 | ||
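Both address-error handlers exit to user space with KVM_EXIT_MMIO once the instruction emulator has filled in the mmio fields of the shared kvm_run structure; the VMM services the access and re-enters the guest, at which point kvm_mips_complete_mmio_load() finishes any pending load. A hedged sketch of the user-space side of that loop; device_read() and device_write() are hypothetical device-model hooks:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	void device_read(uint64_t addr, void *data, uint32_t len);
	void device_write(uint64_t addr, const void *data, uint32_t len);

	static void vcpu_loop(int vcpu_fd, struct kvm_run *run)
	{
		for (;;) {
			ioctl(vcpu_fd, KVM_RUN, 0);

			if (run->exit_reason == KVM_EXIT_MMIO) {
				if (run->mmio.is_write)
					device_write(run->mmio.phys_addr,
						     run->mmio.data, run->mmio.len);
				else
					device_read(run->mmio.phys_addr,
						    run->mmio.data, run->mmio.len);
				/* the next KVM_RUN completes the pending load */
			}
		}
	}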
293 | static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) | 293 | static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) |
294 | { | 294 | { |
295 | struct kvm_run *run = vcpu->run; | 295 | struct kvm_run *run = vcpu->run; |
296 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | 296 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
297 | unsigned long cause = vcpu->arch.host_cp0_cause; | 297 | unsigned long cause = vcpu->arch.host_cp0_cause; |
298 | enum emulation_result er = EMULATE_DONE; | 298 | enum emulation_result er = EMULATE_DONE; |
299 | int ret = RESUME_GUEST; | 299 | int ret = RESUME_GUEST; |
300 | 300 | ||
301 | er = kvm_mips_emulate_syscall(cause, opc, run, vcpu); | 301 | er = kvm_mips_emulate_syscall(cause, opc, run, vcpu); |
302 | if (er == EMULATE_DONE) | 302 | if (er == EMULATE_DONE) |
303 | ret = RESUME_GUEST; | 303 | ret = RESUME_GUEST; |
304 | else { | 304 | else { |
305 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 305 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
306 | ret = RESUME_HOST; | 306 | ret = RESUME_HOST; |
307 | } | 307 | } |
308 | return ret; | 308 | return ret; |
309 | } | 309 | } |
310 | 310 | ||
311 | static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) | 311 | static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) |
312 | { | 312 | { |
313 | struct kvm_run *run = vcpu->run; | 313 | struct kvm_run *run = vcpu->run; |
314 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | 314 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
315 | unsigned long cause = vcpu->arch.host_cp0_cause; | 315 | unsigned long cause = vcpu->arch.host_cp0_cause; |
316 | enum emulation_result er = EMULATE_DONE; | 316 | enum emulation_result er = EMULATE_DONE; |
317 | int ret = RESUME_GUEST; | 317 | int ret = RESUME_GUEST; |
318 | 318 | ||
319 | er = kvm_mips_handle_ri(cause, opc, run, vcpu); | 319 | er = kvm_mips_handle_ri(cause, opc, run, vcpu); |
320 | if (er == EMULATE_DONE) | 320 | if (er == EMULATE_DONE) |
321 | ret = RESUME_GUEST; | 321 | ret = RESUME_GUEST; |
322 | else { | 322 | else { |
323 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 323 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
324 | ret = RESUME_HOST; | 324 | ret = RESUME_HOST; |
325 | } | 325 | } |
326 | return ret; | 326 | return ret; |
327 | } | 327 | } |
328 | 328 | ||
329 | static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) | 329 | static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) |
330 | { | 330 | { |
331 | struct kvm_run *run = vcpu->run; | 331 | struct kvm_run *run = vcpu->run; |
332 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | 332 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
333 | unsigned long cause = vcpu->arch.host_cp0_cause; | 333 | unsigned long cause = vcpu->arch.host_cp0_cause; |
334 | enum emulation_result er = EMULATE_DONE; | 334 | enum emulation_result er = EMULATE_DONE; |
335 | int ret = RESUME_GUEST; | 335 | int ret = RESUME_GUEST; |
336 | 336 | ||
337 | er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu); | 337 | er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu); |
338 | if (er == EMULATE_DONE) | 338 | if (er == EMULATE_DONE) |
339 | ret = RESUME_GUEST; | 339 | ret = RESUME_GUEST; |
340 | else { | 340 | else { |
341 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 341 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
342 | ret = RESUME_HOST; | 342 | ret = RESUME_HOST; |
343 | } | 343 | } |
344 | return ret; | 344 | return ret; |
345 | } | 345 | } |
346 | 346 | ||
347 | static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) | 347 | static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) |
348 | { | 348 | { |
349 | struct kvm_run *run = vcpu->run; | 349 | struct kvm_run *run = vcpu->run; |
350 | uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; | 350 | uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; |
351 | unsigned long cause = vcpu->arch.host_cp0_cause; | 351 | unsigned long cause = vcpu->arch.host_cp0_cause; |
352 | enum emulation_result er = EMULATE_DONE; | 352 | enum emulation_result er = EMULATE_DONE; |
353 | int ret = RESUME_GUEST; | 353 | int ret = RESUME_GUEST; |
354 | 354 | ||
355 | er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu); | 355 | er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu); |
356 | if (er == EMULATE_DONE) { | 356 | if (er == EMULATE_DONE) { |
357 | ret = RESUME_GUEST; | 357 | ret = RESUME_GUEST; |
358 | } else { | 358 | } else { |
359 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 359 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
360 | ret = RESUME_HOST; | 360 | ret = RESUME_HOST; |
361 | } | 361 | } |
362 | return ret; | 362 | return ret; |
363 | } | 363 | } |
364 | 364 | ||
365 | static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) | 365 | static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) |
366 | { | 366 | { |
367 | struct kvm_run *run = vcpu->run; | 367 | struct kvm_run *run = vcpu->run; |
368 | uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; | 368 | uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; |
369 | unsigned long cause = vcpu->arch.host_cp0_cause; | 369 | unsigned long cause = vcpu->arch.host_cp0_cause; |
370 | enum emulation_result er = EMULATE_DONE; | 370 | enum emulation_result er = EMULATE_DONE; |
371 | int ret = RESUME_GUEST; | 371 | int ret = RESUME_GUEST; |
372 | 372 | ||
373 | er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu); | 373 | er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu); |
374 | if (er == EMULATE_DONE) { | 374 | if (er == EMULATE_DONE) { |
375 | ret = RESUME_GUEST; | 375 | ret = RESUME_GUEST; |
376 | } else { | 376 | } else { |
377 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 377 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
378 | ret = RESUME_HOST; | 378 | ret = RESUME_HOST; |
379 | } | 379 | } |
380 | return ret; | 380 | return ret; |
381 | } | 381 | } |
382 | 382 | ||
383 | static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) | 383 | static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) |
384 | { | 384 | { |
385 | struct kvm_run *run = vcpu->run; | 385 | struct kvm_run *run = vcpu->run; |
386 | uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; | 386 | uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc; |
387 | unsigned long cause = vcpu->arch.host_cp0_cause; | 387 | unsigned long cause = vcpu->arch.host_cp0_cause; |
388 | enum emulation_result er = EMULATE_DONE; | 388 | enum emulation_result er = EMULATE_DONE; |
389 | int ret = RESUME_GUEST; | 389 | int ret = RESUME_GUEST; |
390 | 390 | ||
391 | er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu); | 391 | er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu); |
392 | if (er == EMULATE_DONE) { | 392 | if (er == EMULATE_DONE) { |
393 | ret = RESUME_GUEST; | 393 | ret = RESUME_GUEST; |
394 | } else { | 394 | } else { |
395 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 395 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
396 | ret = RESUME_HOST; | 396 | ret = RESUME_HOST; |
397 | } | 397 | } |
398 | return ret; | 398 | return ret; |
399 | } | 399 | } |
400 | 400 | ||
401 | /** | 401 | /** |
402 | * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root. | 402 | * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root. |
403 | * @vcpu: Virtual CPU context. | 403 | * @vcpu: Virtual CPU context. |
404 | * | 404 | * |
405 | * Handle when the guest attempts to use MSA when it is disabled. | 405 | * Handle when the guest attempts to use MSA when it is disabled. |
406 | */ | 406 | */ |
407 | static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) | 407 | static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) |
408 | { | 408 | { |
409 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 409 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
410 | struct kvm_run *run = vcpu->run; | 410 | struct kvm_run *run = vcpu->run; |
411 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; | 411 | uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; |
412 | unsigned long cause = vcpu->arch.host_cp0_cause; | 412 | unsigned long cause = vcpu->arch.host_cp0_cause; |
413 | enum emulation_result er = EMULATE_DONE; | 413 | enum emulation_result er = EMULATE_DONE; |
414 | int ret = RESUME_GUEST; | 414 | int ret = RESUME_GUEST; |
415 | 415 | ||
416 | if (!kvm_mips_guest_has_msa(&vcpu->arch) || | 416 | if (!kvm_mips_guest_has_msa(&vcpu->arch) || |
417 | (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) { | 417 | (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) { |
418 | /* | 418 | /* |
419 | * No MSA in guest, or FPU enabled and not in FR=1 mode, | 419 | * No MSA in guest, or FPU enabled and not in FR=1 mode, |
420 | * guest reserved instruction exception | 420 | * guest reserved instruction exception |
421 | */ | 421 | */ |
422 | er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); | 422 | er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); |
423 | } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) { | 423 | } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) { |
424 | /* MSA disabled by guest, guest MSA disabled exception */ | 424 | /* MSA disabled by guest, guest MSA disabled exception */ |
425 | er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu); | 425 | er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu); |
426 | } else { | 426 | } else { |
427 | /* Restore MSA/FPU state */ | 427 | /* Restore MSA/FPU state */ |
428 | kvm_own_msa(vcpu); | 428 | kvm_own_msa(vcpu); |
429 | er = EMULATE_DONE; | 429 | er = EMULATE_DONE; |
430 | } | 430 | } |
431 | 431 | ||
432 | switch (er) { | 432 | switch (er) { |
433 | case EMULATE_DONE: | 433 | case EMULATE_DONE: |
434 | ret = RESUME_GUEST; | 434 | ret = RESUME_GUEST; |
435 | break; | 435 | break; |
436 | 436 | ||
437 | case EMULATE_FAIL: | 437 | case EMULATE_FAIL: |
438 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 438 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
439 | ret = RESUME_HOST; | 439 | ret = RESUME_HOST; |
440 | break; | 440 | break; |
441 | 441 | ||
442 | default: | 442 | default: |
443 | BUG(); | 443 | BUG(); |
444 | } | 444 | } |
445 | return ret; | 445 | return ret; |
446 | } | 446 | } |
447 | 447 | ||
448 | static int kvm_trap_emul_vm_init(struct kvm *kvm) | 448 | static int kvm_trap_emul_vm_init(struct kvm *kvm) |
449 | { | 449 | { |
450 | return 0; | 450 | return 0; |
451 | } | 451 | } |
452 | 452 | ||
453 | static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) | 453 | static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) |
454 | { | 454 | { |
455 | return 0; | 455 | return 0; |
456 | } | 456 | } |
457 | 457 | ||
458 | static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | 458 | static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) |
459 | { | 459 | { |
460 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 460 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
461 | uint32_t config1; | 461 | uint32_t config1; |
462 | int vcpu_id = vcpu->vcpu_id; | 462 | int vcpu_id = vcpu->vcpu_id; |
463 | 463 | ||
464 | /* | 464 | /* |
465 | * Arch specific stuff, set up config registers properly so that the | 465 | * Arch specific stuff, set up config registers properly so that the |
466 | * guest will come up as expected, for now we simulate a MIPS 24kc | 466 | * guest will come up as expected, for now we simulate a MIPS 24kc |
467 | */ | 467 | */ |
468 | kvm_write_c0_guest_prid(cop0, 0x00019300); | 468 | kvm_write_c0_guest_prid(cop0, 0x00019300); |
469 | /* Have config1, Cacheable, noncoherent, write-back, write allocate */ | 469 | /* Have config1, Cacheable, noncoherent, write-back, write allocate */ |
470 | kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) | | 470 | kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) | |
471 | (0x1 << CP0C0_AR) | | 471 | (0x1 << CP0C0_AR) | |
472 | (MMU_TYPE_R4000 << CP0C0_MT)); | 472 | (MMU_TYPE_R4000 << CP0C0_MT)); |
473 | 473 | ||
474 | /* Read the cache characteristics from the host Config1 Register */ | 474 | /* Read the cache characteristics from the host Config1 Register */ |
475 | config1 = (read_c0_config1() & ~0x7f); | 475 | config1 = (read_c0_config1() & ~0x7f); |
476 | 476 | ||
477 | /* Set up MMU size */ | 477 | /* Set up MMU size */ |
478 | config1 &= ~(0x3f << 25); | 478 | config1 &= ~(0x3f << 25); |
479 | config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25); | 479 | config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25); |
480 | 480 | ||
481 | /* We unset some bits that we aren't emulating */ | 481 | /* We unset some bits that we aren't emulating */ |
482 | config1 &= | 482 | config1 &= |
483 | ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) | | 483 | ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) | |
484 | (1 << CP0C1_WR) | (1 << CP0C1_CA)); | 484 | (1 << CP0C1_WR) | (1 << CP0C1_CA)); |
485 | kvm_write_c0_guest_config1(cop0, config1); | 485 | kvm_write_c0_guest_config1(cop0, config1); |
486 | 486 | ||
487 | /* Have config3, no tertiary/secondary caches implemented */ | 487 | /* Have config3, no tertiary/secondary caches implemented */ |
488 | kvm_write_c0_guest_config2(cop0, MIPS_CONF_M); | 488 | kvm_write_c0_guest_config2(cop0, MIPS_CONF_M); |
489 | /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */ | 489 | /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */ |
490 | 490 | ||
491 | /* Have config4, UserLocal */ | 491 | /* Have config4, UserLocal */ |
492 | kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI); | 492 | kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI); |
493 | 493 | ||
494 | /* Have config5 */ | 494 | /* Have config5 */ |
495 | kvm_write_c0_guest_config4(cop0, MIPS_CONF_M); | 495 | kvm_write_c0_guest_config4(cop0, MIPS_CONF_M); |
496 | 496 | ||
497 | /* No config6 */ | 497 | /* No config6 */ |
498 | kvm_write_c0_guest_config5(cop0, 0); | 498 | kvm_write_c0_guest_config5(cop0, 0); |
499 | 499 | ||
500 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ | 500 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ |
501 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); | 501 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); |
502 | 502 | ||
503 | /* | 503 | /* |
504 | * Set up IntCtl defaults, compatibility mode for timer interrupts (HW5) | 504 | * Set up IntCtl defaults, compatibility mode for timer interrupts (HW5) |
505 | */ | 505 | */ |
506 | kvm_write_c0_guest_intctl(cop0, 0xFC000000); | 506 | kvm_write_c0_guest_intctl(cop0, 0xFC000000); |
507 | 507 | ||
508 | /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ | 508 | /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ |
509 | kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF)); | 509 | kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF)); |
510 | 510 | ||
511 | return 0; | 511 | return 0; |
512 | } | 512 | } |
513 | 513 | ||
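Config1 encodes the TLB size in bits 30:25 as the number of entries minus one, which is why the setup code above clears a 6-bit field and shifts KVM_MIPS_GUEST_TLB_SIZE - 1 into place. A quick check of the encoding; the value 64 is assumed from this tree's kvm_host.h:

	#include <stdio.h>

	#define KVM_MIPS_GUEST_TLB_SIZE	64	/* assumed from kvm_host.h */

	int main(void)
	{
		unsigned int config1 = 0;

		config1 &= ~(0x3f << 25);			/* clear MMUSize-1 */
		config1 |= (KVM_MIPS_GUEST_TLB_SIZE - 1) << 25;

		printf("entries=%u\n", ((config1 >> 25) & 0x3f) + 1);
		return 0;
	}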
514 | static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, | 514 | static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, |
515 | const struct kvm_one_reg *reg, | 515 | const struct kvm_one_reg *reg, |
516 | s64 *v) | 516 | s64 *v) |
517 | { | 517 | { |
518 | switch (reg->id) { | 518 | switch (reg->id) { |
519 | case KVM_REG_MIPS_CP0_COUNT: | 519 | case KVM_REG_MIPS_CP0_COUNT: |
520 | *v = kvm_mips_read_count(vcpu); | 520 | *v = kvm_mips_read_count(vcpu); |
521 | break; | 521 | break; |
522 | case KVM_REG_MIPS_COUNT_CTL: | 522 | case KVM_REG_MIPS_COUNT_CTL: |
523 | *v = vcpu->arch.count_ctl; | 523 | *v = vcpu->arch.count_ctl; |
524 | break; | 524 | break; |
525 | case KVM_REG_MIPS_COUNT_RESUME: | 525 | case KVM_REG_MIPS_COUNT_RESUME: |
526 | *v = ktime_to_ns(vcpu->arch.count_resume); | 526 | *v = ktime_to_ns(vcpu->arch.count_resume); |
527 | break; | 527 | break; |
528 | case KVM_REG_MIPS_COUNT_HZ: | 528 | case KVM_REG_MIPS_COUNT_HZ: |
529 | *v = vcpu->arch.count_hz; | 529 | *v = vcpu->arch.count_hz; |
530 | break; | 530 | break; |
531 | default: | 531 | default: |
532 | return -EINVAL; | 532 | return -EINVAL; |
533 | } | 533 | } |
534 | return 0; | 534 | return 0; |
535 | } | 535 | } |
536 | 536 | ||
537 | static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, | 537 | static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, |
538 | const struct kvm_one_reg *reg, | 538 | const struct kvm_one_reg *reg, |
539 | s64 v) | 539 | s64 v) |
540 | { | 540 | { |
541 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 541 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
542 | int ret = 0; | 542 | int ret = 0; |
543 | unsigned int cur, change; | 543 | unsigned int cur, change; |
544 | 544 | ||
545 | switch (reg->id) { | 545 | switch (reg->id) { |
546 | case KVM_REG_MIPS_CP0_COUNT: | 546 | case KVM_REG_MIPS_CP0_COUNT: |
547 | kvm_mips_write_count(vcpu, v); | 547 | kvm_mips_write_count(vcpu, v); |
548 | break; | 548 | break; |
549 | case KVM_REG_MIPS_CP0_COMPARE: | 549 | case KVM_REG_MIPS_CP0_COMPARE: |
550 | kvm_mips_write_compare(vcpu, v); | 550 | kvm_mips_write_compare(vcpu, v, false); |
551 | break; | 551 | break; |
552 | case KVM_REG_MIPS_CP0_CAUSE: | 552 | case KVM_REG_MIPS_CP0_CAUSE: |
553 | /* | 553 | /* |
554 | * If the timer is stopped or started (DC bit) it must look | 554 | * If the timer is stopped or started (DC bit) it must look |
555 | * atomic with changes to the interrupt pending bits (TI, IRQ5). | 555 | * atomic with changes to the interrupt pending bits (TI, IRQ5). |
556 | * A timer interrupt should not happen in between. | 556 | * A timer interrupt should not happen in between. |
557 | */ | 557 | */ |
558 | if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) { | 558 | if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) { |
559 | if (v & CAUSEF_DC) { | 559 | if (v & CAUSEF_DC) { |
560 | /* disable timer first */ | 560 | /* disable timer first */ |
561 | kvm_mips_count_disable_cause(vcpu); | 561 | kvm_mips_count_disable_cause(vcpu); |
562 | kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v); | 562 | kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v); |
563 | } else { | 563 | } else { |
564 | /* enable timer last */ | 564 | /* enable timer last */ |
565 | kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v); | 565 | kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v); |
566 | kvm_mips_count_enable_cause(vcpu); | 566 | kvm_mips_count_enable_cause(vcpu); |
567 | } | 567 | } |
568 | } else { | 568 | } else { |
569 | kvm_write_c0_guest_cause(cop0, v); | 569 | kvm_write_c0_guest_cause(cop0, v); |
570 | } | 570 | } |
571 | break; | 571 | break; |
572 | case KVM_REG_MIPS_CP0_CONFIG: | 572 | case KVM_REG_MIPS_CP0_CONFIG: |
573 | /* read-only for now */ | 573 | /* read-only for now */ |
574 | break; | 574 | break; |
575 | case KVM_REG_MIPS_CP0_CONFIG1: | 575 | case KVM_REG_MIPS_CP0_CONFIG1: |
576 | cur = kvm_read_c0_guest_config1(cop0); | 576 | cur = kvm_read_c0_guest_config1(cop0); |
577 | change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); | 577 | change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); |
578 | if (change) { | 578 | if (change) { |
579 | v = cur ^ change; | 579 | v = cur ^ change; |
580 | kvm_write_c0_guest_config1(cop0, v); | 580 | kvm_write_c0_guest_config1(cop0, v); |
581 | } | 581 | } |
582 | break; | 582 | break; |
583 | case KVM_REG_MIPS_CP0_CONFIG2: | 583 | case KVM_REG_MIPS_CP0_CONFIG2: |
584 | /* read-only for now */ | 584 | /* read-only for now */ |
585 | break; | 585 | break; |
586 | case KVM_REG_MIPS_CP0_CONFIG3: | 586 | case KVM_REG_MIPS_CP0_CONFIG3: |
587 | cur = kvm_read_c0_guest_config3(cop0); | 587 | cur = kvm_read_c0_guest_config3(cop0); |
588 | change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); | 588 | change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); |
589 | if (change) { | 589 | if (change) { |
590 | v = cur ^ change; | 590 | v = cur ^ change; |
591 | kvm_write_c0_guest_config3(cop0, v); | 591 | kvm_write_c0_guest_config3(cop0, v); |
592 | } | 592 | } |
593 | break; | 593 | break; |
594 | case KVM_REG_MIPS_CP0_CONFIG4: | 594 | case KVM_REG_MIPS_CP0_CONFIG4: |
595 | cur = kvm_read_c0_guest_config4(cop0); | 595 | cur = kvm_read_c0_guest_config4(cop0); |
596 | change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu); | 596 | change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu); |
597 | if (change) { | 597 | if (change) { |
598 | v = cur ^ change; | 598 | v = cur ^ change; |
599 | kvm_write_c0_guest_config4(cop0, v); | 599 | kvm_write_c0_guest_config4(cop0, v); |
600 | } | 600 | } |
601 | break; | 601 | break; |
602 | case KVM_REG_MIPS_CP0_CONFIG5: | 602 | case KVM_REG_MIPS_CP0_CONFIG5: |
603 | cur = kvm_read_c0_guest_config5(cop0); | 603 | cur = kvm_read_c0_guest_config5(cop0); |
604 | change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); | 604 | change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); |
605 | if (change) { | 605 | if (change) { |
606 | v = cur ^ change; | 606 | v = cur ^ change; |
607 | kvm_write_c0_guest_config5(cop0, v); | 607 | kvm_write_c0_guest_config5(cop0, v); |
608 | } | 608 | } |
609 | break; | 609 | break; |
610 | case KVM_REG_MIPS_COUNT_CTL: | 610 | case KVM_REG_MIPS_COUNT_CTL: |
611 | ret = kvm_mips_set_count_ctl(vcpu, v); | 611 | ret = kvm_mips_set_count_ctl(vcpu, v); |
612 | break; | 612 | break; |
613 | case KVM_REG_MIPS_COUNT_RESUME: | 613 | case KVM_REG_MIPS_COUNT_RESUME: |
614 | ret = kvm_mips_set_count_resume(vcpu, v); | 614 | ret = kvm_mips_set_count_resume(vcpu, v); |
615 | break; | 615 | break; |
616 | case KVM_REG_MIPS_COUNT_HZ: | 616 | case KVM_REG_MIPS_COUNT_HZ: |
617 | ret = kvm_mips_set_count_hz(vcpu, v); | 617 | ret = kvm_mips_set_count_hz(vcpu, v); |
618 | break; | 618 | break; |
619 | default: | 619 | default: |
620 | return -EINVAL; | 620 | return -EINVAL; |
621 | } | 621 | } |
622 | return ret; | 622 | return ret; |
623 | } | 623 | } |
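Annotation: in the CP0_COMPARE case above sits this file's only functional change; the one_reg write now passes false as a new third argument to kvm_mips_write_compare(). The natural reading, offered here as an assumption since the helper itself lives in another file of this commit, is that the flag selects whether the write should also acknowledge (clear) a pending timer interrupt the way a guest write to CP0_Compare does; userspace register restores pass false so they cannot discard an interrupt that is still due. A minimal sketch under that assumption follows; the freeze/resume helpers are named hypothetically and their real signatures may differ.

    /*
     * Minimal sketch only -- not the kernel's implementation. Assumes
     * hypothetical freeze/resume helpers that park the hrtimer so that
     * Compare and the pending-interrupt state change together.
     */
    void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
    {
            struct mips_coproc *cop0 = vcpu->arch.cop0;

            if (kvm_read_c0_guest_compare(cop0) == compare)
                    return;                         /* nothing to do */

            /* Stop the timer so no interrupt can slip in mid-update. */
            kvm_mips_freeze_hrtimer(vcpu);          /* hypothetical helper */

            if (ack)                                /* guest-style write: ack IRQ */
                    kvm_mips_callbacks->dequeue_timer_int(vcpu);

            kvm_write_c0_guest_compare(cop0, compare);

            /* Re-arm the hrtimer against the new Compare value. */
            kvm_mips_resume_hrtimer(vcpu);          /* hypothetical helper */
    }

Under this reading, the false in the one_reg path above means a timer interrupt that is already pending stays pending across a userspace register restore.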
624 | 624 | ||
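Annotation: the Config1/3/4/5 cases above all use the same masked-update idiom, which is worth spelling out. change = (cur ^ v) & wrmask isolates the bits that both differ and are writable; cur ^ change then flips exactly those bits, so read-only bits silently keep their current value instead of failing the write. A small self-contained demonstration (mask and values are arbitrary examples, not real Config fields):

    /*
     * Demonstration of the masked-update idiom from the Config cases above.
     */
    #include <assert.h>

    int main(void)
    {
            unsigned int cur    = 0xf0f0;   /* current register value */
            unsigned int v      = 0x0f0f;   /* requested new value */
            unsigned int wrmask = 0x00ff;   /* only the low byte is writable */

            unsigned int change = (cur ^ v) & wrmask;
            unsigned int result = cur ^ change;

            /* Writable bits come from v, read-only bits stay from cur. */
            assert(result == ((v & wrmask) | (cur & ~wrmask)));
            assert(result == 0xf00f);
            return 0;
    }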
625 | static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu) | 625 | static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu) |
626 | { | 626 | { |
627 | kvm_lose_fpu(vcpu); | 627 | kvm_lose_fpu(vcpu); |
628 | 628 | ||
629 | return 0; | 629 | return 0; |
630 | } | 630 | } |
631 | 631 | ||
632 | static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu) | 632 | static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu) |
633 | { | 633 | { |
634 | return 0; | 634 | return 0; |
635 | } | 635 | } |
636 | 636 | ||
637 | static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { | 637 | static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { |
638 | /* exit handlers */ | 638 | /* exit handlers */ |
639 | .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, | 639 | .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, |
640 | .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod, | 640 | .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod, |
641 | .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss, | 641 | .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss, |
642 | .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss, | 642 | .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss, |
643 | .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st, | 643 | .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st, |
644 | .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld, | 644 | .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld, |
645 | .handle_syscall = kvm_trap_emul_handle_syscall, | 645 | .handle_syscall = kvm_trap_emul_handle_syscall, |
646 | .handle_res_inst = kvm_trap_emul_handle_res_inst, | 646 | .handle_res_inst = kvm_trap_emul_handle_res_inst, |
647 | .handle_break = kvm_trap_emul_handle_break, | 647 | .handle_break = kvm_trap_emul_handle_break, |
648 | .handle_trap = kvm_trap_emul_handle_trap, | 648 | .handle_trap = kvm_trap_emul_handle_trap, |
649 | .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe, | 649 | .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe, |
650 | .handle_fpe = kvm_trap_emul_handle_fpe, | 650 | .handle_fpe = kvm_trap_emul_handle_fpe, |
651 | .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, | 651 | .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, |
652 | 652 | ||
653 | .vm_init = kvm_trap_emul_vm_init, | 653 | .vm_init = kvm_trap_emul_vm_init, |
654 | .vcpu_init = kvm_trap_emul_vcpu_init, | 654 | .vcpu_init = kvm_trap_emul_vcpu_init, |
655 | .vcpu_setup = kvm_trap_emul_vcpu_setup, | 655 | .vcpu_setup = kvm_trap_emul_vcpu_setup, |
656 | .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, | 656 | .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, |
657 | .queue_timer_int = kvm_mips_queue_timer_int_cb, | 657 | .queue_timer_int = kvm_mips_queue_timer_int_cb, |
658 | .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, | 658 | .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, |
659 | .queue_io_int = kvm_mips_queue_io_int_cb, | 659 | .queue_io_int = kvm_mips_queue_io_int_cb, |
660 | .dequeue_io_int = kvm_mips_dequeue_io_int_cb, | 660 | .dequeue_io_int = kvm_mips_dequeue_io_int_cb, |
661 | .irq_deliver = kvm_mips_irq_deliver_cb, | 661 | .irq_deliver = kvm_mips_irq_deliver_cb, |
662 | .irq_clear = kvm_mips_irq_clear_cb, | 662 | .irq_clear = kvm_mips_irq_clear_cb, |
663 | .get_one_reg = kvm_trap_emul_get_one_reg, | 663 | .get_one_reg = kvm_trap_emul_get_one_reg, |
664 | .set_one_reg = kvm_trap_emul_set_one_reg, | 664 | .set_one_reg = kvm_trap_emul_set_one_reg, |
665 | .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs, | 665 | .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs, |
666 | .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs, | 666 | .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs, |
667 | }; | 667 | }; |
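Annotation: this structure is the trap-and-emulate backend's entire interface to the generic MIPS KVM code. As a rough picture of how the exit handlers get invoked, and hedged since the real dispatcher lives in another file, the generic exit path can be imagined as a switch on the guest exception code. Everything in the sketch except the callback names is a stand-in:

    /*
     * Illustrative dispatcher; shape only. The exccode values shown are
     * the architectural MIPS ExcCode numbers, but the real dispatcher's
     * constants and structure may differ.
     */
    static int dispatch_exit(struct kvm_vcpu *vcpu, unsigned int exccode)
    {
            switch (exccode) {
            case 8:         /* ExcCode 8: syscall */
                    return kvm_mips_callbacks->handle_syscall(vcpu);
            case 2:         /* ExcCode 2: TLB load miss */
                    return kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
            default:        /* ... one case per handler installed above */
                    return -1;
            }
    }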
668 | 668 | ||
669 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) | 669 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) |
670 | { | 670 | { |
671 | *install_callbacks = &kvm_trap_emul_callbacks; | 671 | *install_callbacks = &kvm_trap_emul_callbacks; |
672 | return 0; | 672 | return 0; |
673 | } | 673 | } |
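Annotation: kvm_mips_emulation_init() is the backend's registration hook. Presumably, since the caller is in the generic arch code rather than this file, the architecture init path stores the pointer once and rejects double registration, along these lines (a sketch of the assumed caller, not a verbatim copy):

    /*
     * Assumed shape of the caller in the generic MIPS KVM code.
     */
    struct kvm_mips_callbacks *kvm_mips_callbacks;

    int kvm_arch_init(void *opaque)
    {
            if (kvm_mips_callbacks) {
                    kvm_err("kvm: module already exists\n");
                    return -EEXIST;
            }

            return kvm_mips_emulation_init(&kvm_mips_callbacks);
    }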
674 | 674 |