Commit dfd4d47e9a71c5a35eb67a44cd311efbe1846b7e
Committed by Avi Kivity
1 parent: b59049720d
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
KVM: PPC: booke: Improve timer register emulation
Decrementers are now properly driven by TCR/TSR, and the guest has full
read/write access to these registers.

The decrementer keeps ticking (and setting the TSR bit) regardless of
whether the interrupts are enabled with TCR.

The decrementer stops at zero, rather than going negative.

Decrementers (and FITs, once implemented) are delivered as
level-triggered interrupts -- dequeued when the TSR bit is cleared, not
on delivery.

Signed-off-by: Liu Yu <yu.liu@freescale.com>
[scottwood@freescale.com: significant changes]
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
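A minimal sketch of the level-triggered scheme the message describes (illustrative only, not code from the patch; TSR_DIS and TCR_DIE are the standard BookE bit names, and the queue/dequeue helpers are the ones declared in kvm_ppc.h below):

/*
 * Sketch: the timer latches status into TSR even while TCR[DIE] is
 * off, and the interrupt level follows TSR[DIS] & TCR[DIE], so the
 * interrupt is dequeued only when the guest clears TSR[DIS].
 */
static void kvmppc_booke_update_dec_level(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.dec == 0)		/* stops at zero... */
		vcpu->arch.tsr |= TSR_DIS;	/* ...and latches status */

	if ((vcpu->arch.tsr & TSR_DIS) && (vcpu->arch.tcr & TCR_DIE))
		kvmppc_core_queue_dec(vcpu);	/* level raised */
	else
		kvmppc_core_dequeue_dec(vcpu);	/* level dropped */
}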
Showing 8 changed files with 115 additions and 70 deletions
arch/powerpc/include/asm/kvm_host.h
1 | /* | 1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | 2 | * This program is free software; you can redistribute it and/or modify |
3 | * it under the terms of the GNU General Public License, version 2, as | 3 | * it under the terms of the GNU General Public License, version 2, as |
4 | * published by the Free Software Foundation. | 4 | * published by the Free Software Foundation. |
5 | * | 5 | * |
6 | * This program is distributed in the hope that it will be useful, | 6 | * This program is distributed in the hope that it will be useful, |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
9 | * GNU General Public License for more details. | 9 | * GNU General Public License for more details. |
10 | * | 10 | * |
11 | * You should have received a copy of the GNU General Public License | 11 | * You should have received a copy of the GNU General Public License |
12 | * along with this program; if not, write to the Free Software | 12 | * along with this program; if not, write to the Free Software |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
14 | * | 14 | * |
15 | * Copyright IBM Corp. 2007 | 15 | * Copyright IBM Corp. 2007 |
16 | * | 16 | * |
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | 17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #ifndef __POWERPC_KVM_HOST_H__ | 20 | #ifndef __POWERPC_KVM_HOST_H__ |
21 | #define __POWERPC_KVM_HOST_H__ | 21 | #define __POWERPC_KVM_HOST_H__ |
22 | 22 | ||
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/hrtimer.h> | 24 | #include <linux/hrtimer.h> |
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/kvm_types.h> | 27 | #include <linux/kvm_types.h> |
28 | #include <linux/threads.h> | 28 | #include <linux/threads.h> |
29 | #include <linux/spinlock.h> | 29 | #include <linux/spinlock.h> |
30 | #include <linux/kvm_para.h> | 30 | #include <linux/kvm_para.h> |
31 | #include <linux/list.h> | 31 | #include <linux/list.h> |
32 | #include <linux/atomic.h> | 32 | #include <linux/atomic.h> |
33 | #include <asm/kvm_asm.h> | 33 | #include <asm/kvm_asm.h> |
34 | #include <asm/processor.h> | 34 | #include <asm/processor.h> |
35 | 35 | ||
36 | #define KVM_MAX_VCPUS NR_CPUS | 36 | #define KVM_MAX_VCPUS NR_CPUS |
37 | #define KVM_MAX_VCORES NR_CPUS | 37 | #define KVM_MAX_VCORES NR_CPUS |
38 | #define KVM_MEMORY_SLOTS 32 | 38 | #define KVM_MEMORY_SLOTS 32 |
39 | /* memory slots that are not exposed to userspace */ | 39 | /* memory slots that are not exposed to userspace */ |
40 | #define KVM_PRIVATE_MEM_SLOTS 4 | 40 | #define KVM_PRIVATE_MEM_SLOTS 4 |
41 | 41 | ||
42 | #ifdef CONFIG_KVM_MMIO | 42 | #ifdef CONFIG_KVM_MMIO |
43 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | 43 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 |
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | /* We don't currently support large pages. */ | 46 | /* We don't currently support large pages. */ |
47 | #define KVM_HPAGE_GFN_SHIFT(x) 0 | 47 | #define KVM_HPAGE_GFN_SHIFT(x) 0 |
48 | #define KVM_NR_PAGE_SIZES 1 | 48 | #define KVM_NR_PAGE_SIZES 1 |
49 | #define KVM_PAGES_PER_HPAGE(x) (1UL<<31) | 49 | #define KVM_PAGES_PER_HPAGE(x) (1UL<<31) |
50 | 50 | ||
51 | #define HPTEG_CACHE_NUM (1 << 15) | 51 | #define HPTEG_CACHE_NUM (1 << 15) |
52 | #define HPTEG_HASH_BITS_PTE 13 | 52 | #define HPTEG_HASH_BITS_PTE 13 |
53 | #define HPTEG_HASH_BITS_PTE_LONG 12 | 53 | #define HPTEG_HASH_BITS_PTE_LONG 12 |
54 | #define HPTEG_HASH_BITS_VPTE 13 | 54 | #define HPTEG_HASH_BITS_VPTE 13 |
55 | #define HPTEG_HASH_BITS_VPTE_LONG 5 | 55 | #define HPTEG_HASH_BITS_VPTE_LONG 5 |
56 | #define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE) | 56 | #define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE) |
57 | #define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG) | 57 | #define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG) |
58 | #define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE) | 58 | #define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE) |
59 | #define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG) | 59 | #define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG) |
60 | 60 | ||
61 | /* Physical Address Mask - allowed range of real mode RAM access */ | 61 | /* Physical Address Mask - allowed range of real mode RAM access */ |
62 | #define KVM_PAM 0x0fffffffffffffffULL | 62 | #define KVM_PAM 0x0fffffffffffffffULL |
63 | 63 | ||
64 | struct kvm; | 64 | struct kvm; |
65 | struct kvm_run; | 65 | struct kvm_run; |
66 | struct kvm_vcpu; | 66 | struct kvm_vcpu; |
67 | 67 | ||
68 | struct lppaca; | 68 | struct lppaca; |
69 | struct slb_shadow; | 69 | struct slb_shadow; |
70 | struct dtl; | 70 | struct dtl; |
71 | 71 | ||
72 | struct kvm_vm_stat { | 72 | struct kvm_vm_stat { |
73 | u32 remote_tlb_flush; | 73 | u32 remote_tlb_flush; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | struct kvm_vcpu_stat { | 76 | struct kvm_vcpu_stat { |
77 | u32 sum_exits; | 77 | u32 sum_exits; |
78 | u32 mmio_exits; | 78 | u32 mmio_exits; |
79 | u32 dcr_exits; | 79 | u32 dcr_exits; |
80 | u32 signal_exits; | 80 | u32 signal_exits; |
81 | u32 light_exits; | 81 | u32 light_exits; |
82 | /* Account for special types of light exits: */ | 82 | /* Account for special types of light exits: */ |
83 | u32 itlb_real_miss_exits; | 83 | u32 itlb_real_miss_exits; |
84 | u32 itlb_virt_miss_exits; | 84 | u32 itlb_virt_miss_exits; |
85 | u32 dtlb_real_miss_exits; | 85 | u32 dtlb_real_miss_exits; |
86 | u32 dtlb_virt_miss_exits; | 86 | u32 dtlb_virt_miss_exits; |
87 | u32 syscall_exits; | 87 | u32 syscall_exits; |
88 | u32 isi_exits; | 88 | u32 isi_exits; |
89 | u32 dsi_exits; | 89 | u32 dsi_exits; |
90 | u32 emulated_inst_exits; | 90 | u32 emulated_inst_exits; |
91 | u32 dec_exits; | 91 | u32 dec_exits; |
92 | u32 ext_intr_exits; | 92 | u32 ext_intr_exits; |
93 | u32 halt_wakeup; | 93 | u32 halt_wakeup; |
94 | #ifdef CONFIG_PPC_BOOK3S | 94 | #ifdef CONFIG_PPC_BOOK3S |
95 | u32 pf_storage; | 95 | u32 pf_storage; |
96 | u32 pf_instruc; | 96 | u32 pf_instruc; |
97 | u32 sp_storage; | 97 | u32 sp_storage; |
98 | u32 sp_instruc; | 98 | u32 sp_instruc; |
99 | u32 queue_intr; | 99 | u32 queue_intr; |
100 | u32 ld; | 100 | u32 ld; |
101 | u32 ld_slow; | 101 | u32 ld_slow; |
102 | u32 st; | 102 | u32 st; |
103 | u32 st_slow; | 103 | u32 st_slow; |
104 | #endif | 104 | #endif |
105 | }; | 105 | }; |
106 | 106 | ||
107 | enum kvm_exit_types { | 107 | enum kvm_exit_types { |
108 | MMIO_EXITS, | 108 | MMIO_EXITS, |
109 | DCR_EXITS, | 109 | DCR_EXITS, |
110 | SIGNAL_EXITS, | 110 | SIGNAL_EXITS, |
111 | ITLB_REAL_MISS_EXITS, | 111 | ITLB_REAL_MISS_EXITS, |
112 | ITLB_VIRT_MISS_EXITS, | 112 | ITLB_VIRT_MISS_EXITS, |
113 | DTLB_REAL_MISS_EXITS, | 113 | DTLB_REAL_MISS_EXITS, |
114 | DTLB_VIRT_MISS_EXITS, | 114 | DTLB_VIRT_MISS_EXITS, |
115 | SYSCALL_EXITS, | 115 | SYSCALL_EXITS, |
116 | ISI_EXITS, | 116 | ISI_EXITS, |
117 | DSI_EXITS, | 117 | DSI_EXITS, |
118 | EMULATED_INST_EXITS, | 118 | EMULATED_INST_EXITS, |
119 | EMULATED_MTMSRWE_EXITS, | 119 | EMULATED_MTMSRWE_EXITS, |
120 | EMULATED_WRTEE_EXITS, | 120 | EMULATED_WRTEE_EXITS, |
121 | EMULATED_MTSPR_EXITS, | 121 | EMULATED_MTSPR_EXITS, |
122 | EMULATED_MFSPR_EXITS, | 122 | EMULATED_MFSPR_EXITS, |
123 | EMULATED_MTMSR_EXITS, | 123 | EMULATED_MTMSR_EXITS, |
124 | EMULATED_MFMSR_EXITS, | 124 | EMULATED_MFMSR_EXITS, |
125 | EMULATED_TLBSX_EXITS, | 125 | EMULATED_TLBSX_EXITS, |
126 | EMULATED_TLBWE_EXITS, | 126 | EMULATED_TLBWE_EXITS, |
127 | EMULATED_RFI_EXITS, | 127 | EMULATED_RFI_EXITS, |
128 | DEC_EXITS, | 128 | DEC_EXITS, |
129 | EXT_INTR_EXITS, | 129 | EXT_INTR_EXITS, |
130 | HALT_WAKEUP, | 130 | HALT_WAKEUP, |
131 | USR_PR_INST, | 131 | USR_PR_INST, |
132 | FP_UNAVAIL, | 132 | FP_UNAVAIL, |
133 | DEBUG_EXITS, | 133 | DEBUG_EXITS, |
134 | TIMEINGUEST, | 134 | TIMEINGUEST, |
135 | __NUMBER_OF_KVM_EXIT_TYPES | 135 | __NUMBER_OF_KVM_EXIT_TYPES |
136 | }; | 136 | }; |
137 | 137 | ||
138 | /* allow access to big endian 32bit upper/lower parts and 64bit var */ | 138 | /* allow access to big endian 32bit upper/lower parts and 64bit var */ |
139 | struct kvmppc_exit_timing { | 139 | struct kvmppc_exit_timing { |
140 | union { | 140 | union { |
141 | u64 tv64; | 141 | u64 tv64; |
142 | struct { | 142 | struct { |
143 | u32 tbu, tbl; | 143 | u32 tbu, tbl; |
144 | } tv32; | 144 | } tv32; |
145 | }; | 145 | }; |
146 | }; | 146 | }; |
147 | 147 | ||
148 | struct kvmppc_pginfo { | 148 | struct kvmppc_pginfo { |
149 | unsigned long pfn; | 149 | unsigned long pfn; |
150 | atomic_t refcnt; | 150 | atomic_t refcnt; |
151 | }; | 151 | }; |
152 | 152 | ||
153 | struct kvmppc_spapr_tce_table { | 153 | struct kvmppc_spapr_tce_table { |
154 | struct list_head list; | 154 | struct list_head list; |
155 | struct kvm *kvm; | 155 | struct kvm *kvm; |
156 | u64 liobn; | 156 | u64 liobn; |
157 | u32 window_size; | 157 | u32 window_size; |
158 | struct page *pages[0]; | 158 | struct page *pages[0]; |
159 | }; | 159 | }; |
160 | 160 | ||
161 | struct kvmppc_rma_info { | 161 | struct kvmppc_rma_info { |
162 | void *base_virt; | 162 | void *base_virt; |
163 | unsigned long base_pfn; | 163 | unsigned long base_pfn; |
164 | unsigned long npages; | 164 | unsigned long npages; |
165 | struct list_head list; | 165 | struct list_head list; |
166 | atomic_t use_count; | 166 | atomic_t use_count; |
167 | }; | 167 | }; |
168 | 168 | ||
169 | struct kvm_arch { | 169 | struct kvm_arch { |
170 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 170 | #ifdef CONFIG_KVM_BOOK3S_64_HV |
171 | unsigned long hpt_virt; | 171 | unsigned long hpt_virt; |
172 | unsigned long ram_npages; | 172 | unsigned long ram_npages; |
173 | unsigned long ram_psize; | 173 | unsigned long ram_psize; |
174 | unsigned long ram_porder; | 174 | unsigned long ram_porder; |
175 | struct kvmppc_pginfo *ram_pginfo; | 175 | struct kvmppc_pginfo *ram_pginfo; |
176 | unsigned int lpid; | 176 | unsigned int lpid; |
177 | unsigned int host_lpid; | 177 | unsigned int host_lpid; |
178 | unsigned long host_lpcr; | 178 | unsigned long host_lpcr; |
179 | unsigned long sdr1; | 179 | unsigned long sdr1; |
180 | unsigned long host_sdr1; | 180 | unsigned long host_sdr1; |
181 | int tlbie_lock; | 181 | int tlbie_lock; |
182 | int n_rma_pages; | 182 | int n_rma_pages; |
183 | unsigned long lpcr; | 183 | unsigned long lpcr; |
184 | unsigned long rmor; | 184 | unsigned long rmor; |
185 | struct kvmppc_rma_info *rma; | 185 | struct kvmppc_rma_info *rma; |
186 | struct list_head spapr_tce_tables; | 186 | struct list_head spapr_tce_tables; |
187 | unsigned short last_vcpu[NR_CPUS]; | 187 | unsigned short last_vcpu[NR_CPUS]; |
188 | struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; | 188 | struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; |
189 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ | 189 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ |
190 | }; | 190 | }; |
191 | 191 | ||
192 | /* | 192 | /* |
193 | * Struct for a virtual core. | 193 | * Struct for a virtual core. |
194 | * Note: entry_exit_count combines an entry count in the bottom 8 bits | 194 | * Note: entry_exit_count combines an entry count in the bottom 8 bits |
195 | * and an exit count in the next 8 bits. This is so that we can | 195 | * and an exit count in the next 8 bits. This is so that we can |
196 | * atomically increment the entry count iff the exit count is 0 | 196 | * atomically increment the entry count iff the exit count is 0 |
197 | * without taking the lock. | 197 | * without taking the lock. |
198 | */ | 198 | */ |
199 | struct kvmppc_vcore { | 199 | struct kvmppc_vcore { |
200 | int n_runnable; | 200 | int n_runnable; |
201 | int n_busy; | 201 | int n_busy; |
202 | int num_threads; | 202 | int num_threads; |
203 | int entry_exit_count; | 203 | int entry_exit_count; |
204 | int n_woken; | 204 | int n_woken; |
205 | int nap_count; | 205 | int nap_count; |
206 | int napping_threads; | 206 | int napping_threads; |
207 | u16 pcpu; | 207 | u16 pcpu; |
208 | u8 vcore_state; | 208 | u8 vcore_state; |
209 | u8 in_guest; | 209 | u8 in_guest; |
210 | struct list_head runnable_threads; | 210 | struct list_head runnable_threads; |
211 | spinlock_t lock; | 211 | spinlock_t lock; |
212 | wait_queue_head_t wq; | 212 | wait_queue_head_t wq; |
213 | }; | 213 | }; |
214 | 214 | ||
215 | #define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff) | 215 | #define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff) |
216 | #define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8) | 216 | #define VCORE_EXIT_COUNT(vc) ((vc)->entry_exit_count >> 8) |
217 | 217 | ||
218 | /* Values for vcore_state */ | 218 | /* Values for vcore_state */ |
219 | #define VCORE_INACTIVE 0 | 219 | #define VCORE_INACTIVE 0 |
220 | #define VCORE_RUNNING 1 | 220 | #define VCORE_RUNNING 1 |
221 | #define VCORE_EXITING 2 | 221 | #define VCORE_EXITING 2 |
222 | #define VCORE_SLEEPING 3 | 222 | #define VCORE_SLEEPING 3 |
223 | 223 | ||
224 | struct kvmppc_pte { | 224 | struct kvmppc_pte { |
225 | ulong eaddr; | 225 | ulong eaddr; |
226 | u64 vpage; | 226 | u64 vpage; |
227 | ulong raddr; | 227 | ulong raddr; |
228 | bool may_read : 1; | 228 | bool may_read : 1; |
229 | bool may_write : 1; | 229 | bool may_write : 1; |
230 | bool may_execute : 1; | 230 | bool may_execute : 1; |
231 | }; | 231 | }; |
232 | 232 | ||
233 | struct kvmppc_mmu { | 233 | struct kvmppc_mmu { |
234 | /* book3s_64 only */ | 234 | /* book3s_64 only */ |
235 | void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs); | 235 | void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs); |
236 | u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr); | 236 | u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr); |
237 | u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr); | 237 | u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr); |
238 | void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr); | 238 | void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr); |
239 | void (*slbia)(struct kvm_vcpu *vcpu); | 239 | void (*slbia)(struct kvm_vcpu *vcpu); |
240 | /* book3s */ | 240 | /* book3s */ |
241 | void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value); | 241 | void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value); |
242 | u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum); | 242 | u32 (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum); |
243 | int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data); | 243 | int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data); |
244 | void (*reset_msr)(struct kvm_vcpu *vcpu); | 244 | void (*reset_msr)(struct kvm_vcpu *vcpu); |
245 | void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large); | 245 | void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large); |
246 | int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid); | 246 | int (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid); |
247 | u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data); | 247 | u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data); |
248 | bool (*is_dcbz32)(struct kvm_vcpu *vcpu); | 248 | bool (*is_dcbz32)(struct kvm_vcpu *vcpu); |
249 | }; | 249 | }; |
250 | 250 | ||
251 | struct kvmppc_slb { | 251 | struct kvmppc_slb { |
252 | u64 esid; | 252 | u64 esid; |
253 | u64 vsid; | 253 | u64 vsid; |
254 | u64 orige; | 254 | u64 orige; |
255 | u64 origv; | 255 | u64 origv; |
256 | bool valid : 1; | 256 | bool valid : 1; |
257 | bool Ks : 1; | 257 | bool Ks : 1; |
258 | bool Kp : 1; | 258 | bool Kp : 1; |
259 | bool nx : 1; | 259 | bool nx : 1; |
260 | bool large : 1; /* PTEs are 16MB */ | 260 | bool large : 1; /* PTEs are 16MB */ |
261 | bool tb : 1; /* 1TB segment */ | 261 | bool tb : 1; /* 1TB segment */ |
262 | bool class : 1; | 262 | bool class : 1; |
263 | }; | 263 | }; |
264 | 264 | ||
265 | struct kvm_vcpu_arch { | 265 | struct kvm_vcpu_arch { |
266 | ulong host_stack; | 266 | ulong host_stack; |
267 | u32 host_pid; | 267 | u32 host_pid; |
268 | #ifdef CONFIG_PPC_BOOK3S | 268 | #ifdef CONFIG_PPC_BOOK3S |
269 | struct kvmppc_slb slb[64]; | 269 | struct kvmppc_slb slb[64]; |
270 | int slb_max; /* 1 + index of last valid entry in slb[] */ | 270 | int slb_max; /* 1 + index of last valid entry in slb[] */ |
271 | int slb_nr; /* total number of entries in SLB */ | 271 | int slb_nr; /* total number of entries in SLB */ |
272 | struct kvmppc_mmu mmu; | 272 | struct kvmppc_mmu mmu; |
273 | #endif | 273 | #endif |
274 | 274 | ||
275 | ulong gpr[32]; | 275 | ulong gpr[32]; |
276 | 276 | ||
277 | u64 fpr[32]; | 277 | u64 fpr[32]; |
278 | u64 fpscr; | 278 | u64 fpscr; |
279 | 279 | ||
280 | #ifdef CONFIG_SPE | 280 | #ifdef CONFIG_SPE |
281 | ulong evr[32]; | 281 | ulong evr[32]; |
282 | ulong spefscr; | 282 | ulong spefscr; |
283 | ulong host_spefscr; | 283 | ulong host_spefscr; |
284 | u64 acc; | 284 | u64 acc; |
285 | #endif | 285 | #endif |
286 | #ifdef CONFIG_ALTIVEC | 286 | #ifdef CONFIG_ALTIVEC |
287 | vector128 vr[32]; | 287 | vector128 vr[32]; |
288 | vector128 vscr; | 288 | vector128 vscr; |
289 | #endif | 289 | #endif |
290 | 290 | ||
291 | #ifdef CONFIG_VSX | 291 | #ifdef CONFIG_VSX |
292 | u64 vsr[64]; | 292 | u64 vsr[64]; |
293 | #endif | 293 | #endif |
294 | 294 | ||
295 | #ifdef CONFIG_PPC_BOOK3S | 295 | #ifdef CONFIG_PPC_BOOK3S |
296 | /* For Gekko paired singles */ | 296 | /* For Gekko paired singles */ |
297 | u32 qpr[32]; | 297 | u32 qpr[32]; |
298 | #endif | 298 | #endif |
299 | 299 | ||
300 | ulong pc; | 300 | ulong pc; |
301 | ulong ctr; | 301 | ulong ctr; |
302 | ulong lr; | 302 | ulong lr; |
303 | 303 | ||
304 | ulong xer; | 304 | ulong xer; |
305 | u32 cr; | 305 | u32 cr; |
306 | 306 | ||
307 | #ifdef CONFIG_PPC_BOOK3S | 307 | #ifdef CONFIG_PPC_BOOK3S |
308 | ulong hflags; | 308 | ulong hflags; |
309 | ulong guest_owned_ext; | 309 | ulong guest_owned_ext; |
310 | ulong purr; | 310 | ulong purr; |
311 | ulong spurr; | 311 | ulong spurr; |
312 | ulong dscr; | 312 | ulong dscr; |
313 | ulong amr; | 313 | ulong amr; |
314 | ulong uamor; | 314 | ulong uamor; |
315 | u32 ctrl; | 315 | u32 ctrl; |
316 | ulong dabr; | 316 | ulong dabr; |
317 | #endif | 317 | #endif |
318 | u32 vrsave; /* also USPRG0 */ | 318 | u32 vrsave; /* also USPRG0 */ |
319 | u32 mmucr; | 319 | u32 mmucr; |
320 | ulong shadow_msr; | 320 | ulong shadow_msr; |
321 | ulong csrr0; | 321 | ulong csrr0; |
322 | ulong csrr1; | 322 | ulong csrr1; |
323 | ulong dsrr0; | 323 | ulong dsrr0; |
324 | ulong dsrr1; | 324 | ulong dsrr1; |
325 | ulong mcsrr0; | 325 | ulong mcsrr0; |
326 | ulong mcsrr1; | 326 | ulong mcsrr1; |
327 | ulong mcsr; | 327 | ulong mcsr; |
328 | u32 dec; | 328 | u32 dec; |
329 | u32 decar; | 329 | u32 decar; |
330 | u32 tbl; | 330 | u32 tbl; |
331 | u32 tbu; | 331 | u32 tbu; |
332 | u32 tcr; | 332 | u32 tcr; |
333 | u32 tsr; | 333 | ulong tsr; /* we need to perform set/clr_bits() which requires ulong */ |
334 | u32 ivor[64]; | 334 | u32 ivor[64]; |
335 | ulong ivpr; | 335 | ulong ivpr; |
336 | u32 pvr; | 336 | u32 pvr; |
337 | 337 | ||
338 | u32 shadow_pid; | 338 | u32 shadow_pid; |
339 | u32 shadow_pid1; | 339 | u32 shadow_pid1; |
340 | u32 pid; | 340 | u32 pid; |
341 | u32 swap_pid; | 341 | u32 swap_pid; |
342 | 342 | ||
343 | u32 ccr0; | 343 | u32 ccr0; |
344 | u32 ccr1; | 344 | u32 ccr1; |
345 | u32 dbcr0; | 345 | u32 dbcr0; |
346 | u32 dbcr1; | 346 | u32 dbcr1; |
347 | u32 dbsr; | 347 | u32 dbsr; |
348 | 348 | ||
349 | u64 mmcr[3]; | 349 | u64 mmcr[3]; |
350 | u32 pmc[8]; | 350 | u32 pmc[8]; |
351 | 351 | ||
352 | #ifdef CONFIG_KVM_EXIT_TIMING | 352 | #ifdef CONFIG_KVM_EXIT_TIMING |
353 | struct mutex exit_timing_lock; | 353 | struct mutex exit_timing_lock; |
354 | struct kvmppc_exit_timing timing_exit; | 354 | struct kvmppc_exit_timing timing_exit; |
355 | struct kvmppc_exit_timing timing_last_enter; | 355 | struct kvmppc_exit_timing timing_last_enter; |
356 | u32 last_exit_type; | 356 | u32 last_exit_type; |
357 | u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES]; | 357 | u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES]; |
358 | u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | 358 | u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES]; |
359 | u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | 359 | u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES]; |
360 | u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | 360 | u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES]; |
361 | u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | 361 | u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES]; |
362 | u64 timing_last_exit; | 362 | u64 timing_last_exit; |
363 | struct dentry *debugfs_exit_timing; | 363 | struct dentry *debugfs_exit_timing; |
364 | #endif | 364 | #endif |
365 | 365 | ||
366 | #ifdef CONFIG_PPC_BOOK3S | 366 | #ifdef CONFIG_PPC_BOOK3S |
367 | ulong fault_dar; | 367 | ulong fault_dar; |
368 | u32 fault_dsisr; | 368 | u32 fault_dsisr; |
369 | #endif | 369 | #endif |
370 | 370 | ||
371 | #ifdef CONFIG_BOOKE | 371 | #ifdef CONFIG_BOOKE |
372 | ulong fault_dear; | 372 | ulong fault_dear; |
373 | ulong fault_esr; | 373 | ulong fault_esr; |
374 | ulong queued_dear; | 374 | ulong queued_dear; |
375 | ulong queued_esr; | 375 | ulong queued_esr; |
376 | #endif | 376 | #endif |
377 | gpa_t paddr_accessed; | 377 | gpa_t paddr_accessed; |
378 | 378 | ||
379 | u8 io_gpr; /* GPR used as IO source/target */ | 379 | u8 io_gpr; /* GPR used as IO source/target */ |
380 | u8 mmio_is_bigendian; | 380 | u8 mmio_is_bigendian; |
381 | u8 mmio_sign_extend; | 381 | u8 mmio_sign_extend; |
382 | u8 dcr_needed; | 382 | u8 dcr_needed; |
383 | u8 dcr_is_write; | 383 | u8 dcr_is_write; |
384 | u8 osi_needed; | 384 | u8 osi_needed; |
385 | u8 osi_enabled; | 385 | u8 osi_enabled; |
386 | u8 papr_enabled; | 386 | u8 papr_enabled; |
387 | u8 sane; | 387 | u8 sane; |
388 | u8 cpu_type; | 388 | u8 cpu_type; |
389 | u8 hcall_needed; | 389 | u8 hcall_needed; |
390 | 390 | ||
391 | u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ | 391 | u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */ |
392 | 392 | ||
393 | struct hrtimer dec_timer; | 393 | struct hrtimer dec_timer; |
394 | struct tasklet_struct tasklet; | 394 | struct tasklet_struct tasklet; |
395 | u64 dec_jiffies; | 395 | u64 dec_jiffies; |
396 | u64 dec_expires; | 396 | u64 dec_expires; |
397 | unsigned long pending_exceptions; | 397 | unsigned long pending_exceptions; |
398 | u16 last_cpu; | 398 | u16 last_cpu; |
399 | u8 ceded; | 399 | u8 ceded; |
400 | u8 prodded; | 400 | u8 prodded; |
401 | u32 last_inst; | 401 | u32 last_inst; |
402 | 402 | ||
403 | struct lppaca *vpa; | 403 | struct lppaca *vpa; |
404 | struct slb_shadow *slb_shadow; | 404 | struct slb_shadow *slb_shadow; |
405 | struct dtl *dtl; | 405 | struct dtl *dtl; |
406 | struct dtl *dtl_end; | 406 | struct dtl *dtl_end; |
407 | 407 | ||
408 | wait_queue_head_t *wqp; | 408 | wait_queue_head_t *wqp; |
409 | struct kvmppc_vcore *vcore; | 409 | struct kvmppc_vcore *vcore; |
410 | int ret; | 410 | int ret; |
411 | int trap; | 411 | int trap; |
412 | int state; | 412 | int state; |
413 | int ptid; | 413 | int ptid; |
414 | bool timer_running; | 414 | bool timer_running; |
415 | wait_queue_head_t cpu_run; | 415 | wait_queue_head_t cpu_run; |
416 | 416 | ||
417 | struct kvm_vcpu_arch_shared *shared; | 417 | struct kvm_vcpu_arch_shared *shared; |
418 | unsigned long magic_page_pa; /* phys addr to map the magic page to */ | 418 | unsigned long magic_page_pa; /* phys addr to map the magic page to */ |
419 | unsigned long magic_page_ea; /* effect. addr to map the magic page to */ | 419 | unsigned long magic_page_ea; /* effect. addr to map the magic page to */ |
420 | 420 | ||
421 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 421 | #ifdef CONFIG_KVM_BOOK3S_64_HV |
422 | struct kvm_vcpu_arch_shared shregs; | 422 | struct kvm_vcpu_arch_shared shregs; |
423 | 423 | ||
424 | struct list_head run_list; | 424 | struct list_head run_list; |
425 | struct task_struct *run_task; | 425 | struct task_struct *run_task; |
426 | struct kvm_run *kvm_run; | 426 | struct kvm_run *kvm_run; |
427 | #endif | 427 | #endif |
428 | }; | 428 | }; |
429 | 429 | ||
430 | /* Values for vcpu->arch.state */ | 430 | /* Values for vcpu->arch.state */ |
431 | #define KVMPPC_VCPU_STOPPED 0 | 431 | #define KVMPPC_VCPU_STOPPED 0 |
432 | #define KVMPPC_VCPU_BUSY_IN_HOST 1 | 432 | #define KVMPPC_VCPU_BUSY_IN_HOST 1 |
433 | #define KVMPPC_VCPU_RUNNABLE 2 | 433 | #define KVMPPC_VCPU_RUNNABLE 2 |
434 | 434 | ||
435 | #endif /* __POWERPC_KVM_HOST_H__ */ | 435 | #endif /* __POWERPC_KVM_HOST_H__ */ |
436 | 436 |
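The one functional change in this header is tsr widening from u32 to ulong (line 333), per the new comment about set/clr_bits(). A sketch of the kind of helper this enables, assuming the powerpc set_bits()/clear_bits() bitops, which operate atomically on an unsigned long word; the helper names are assumptions, not taken from this diff:

static void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, ulong tsr_bits)
{
	/* Atomic multi-bit set; requires an unsigned long target. */
	set_bits(tsr_bits, &vcpu->arch.tsr);
}

static void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, ulong tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/* Clearing TSR[DIS] is what dequeues the level-triggered
	 * decrementer described in the commit message. */
	if (tsr_bits & TSR_DIS)
		kvmppc_core_dequeue_dec(vcpu);
}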
arch/powerpc/include/asm/kvm_ppc.h
1 | /* | 1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | 2 | * This program is free software; you can redistribute it and/or modify |
3 | * it under the terms of the GNU General Public License, version 2, as | 3 | * it under the terms of the GNU General Public License, version 2, as |
4 | * published by the Free Software Foundation. | 4 | * published by the Free Software Foundation. |
5 | * | 5 | * |
6 | * This program is distributed in the hope that it will be useful, | 6 | * This program is distributed in the hope that it will be useful, |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
9 | * GNU General Public License for more details. | 9 | * GNU General Public License for more details. |
10 | * | 10 | * |
11 | * You should have received a copy of the GNU General Public License | 11 | * You should have received a copy of the GNU General Public License |
12 | * along with this program; if not, write to the Free Software | 12 | * along with this program; if not, write to the Free Software |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
14 | * | 14 | * |
15 | * Copyright IBM Corp. 2008 | 15 | * Copyright IBM Corp. 2008 |
16 | * | 16 | * |
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | 17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #ifndef __POWERPC_KVM_PPC_H__ | 20 | #ifndef __POWERPC_KVM_PPC_H__ |
21 | #define __POWERPC_KVM_PPC_H__ | 21 | #define __POWERPC_KVM_PPC_H__ |
22 | 22 | ||
23 | /* This file exists just so we can dereference kvm_vcpu, avoiding nested header | 23 | /* This file exists just so we can dereference kvm_vcpu, avoiding nested header |
24 | * dependencies. */ | 24 | * dependencies. */ |
25 | 25 | ||
26 | #include <linux/mutex.h> | 26 | #include <linux/mutex.h> |
27 | #include <linux/timer.h> | 27 | #include <linux/timer.h> |
28 | #include <linux/types.h> | 28 | #include <linux/types.h> |
29 | #include <linux/kvm_types.h> | 29 | #include <linux/kvm_types.h> |
30 | #include <linux/kvm_host.h> | 30 | #include <linux/kvm_host.h> |
31 | #ifdef CONFIG_PPC_BOOK3S | 31 | #ifdef CONFIG_PPC_BOOK3S |
32 | #include <asm/kvm_book3s.h> | 32 | #include <asm/kvm_book3s.h> |
33 | #else | 33 | #else |
34 | #include <asm/kvm_booke.h> | 34 | #include <asm/kvm_booke.h> |
35 | #endif | 35 | #endif |
36 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER | 36 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER |
37 | #include <asm/paca.h> | 37 | #include <asm/paca.h> |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | enum emulation_result { | 40 | enum emulation_result { |
41 | EMULATE_DONE, /* no further processing */ | 41 | EMULATE_DONE, /* no further processing */ |
42 | EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ | 42 | EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ |
43 | EMULATE_DO_DCR, /* kvm_run filled with DCR request */ | 43 | EMULATE_DO_DCR, /* kvm_run filled with DCR request */ |
44 | EMULATE_FAIL, /* can't emulate this instruction */ | 44 | EMULATE_FAIL, /* can't emulate this instruction */ |
45 | EMULATE_AGAIN, /* something went wrong. go again */ | 45 | EMULATE_AGAIN, /* something went wrong. go again */ |
46 | }; | 46 | }; |
47 | 47 | ||
48 | extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | 48 | extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); |
49 | extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | 49 | extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); |
50 | extern char kvmppc_handlers_start[]; | 50 | extern char kvmppc_handlers_start[]; |
51 | extern unsigned long kvmppc_handler_len; | 51 | extern unsigned long kvmppc_handler_len; |
52 | extern void kvmppc_handler_highmem(void); | 52 | extern void kvmppc_handler_highmem(void); |
53 | 53 | ||
54 | extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); | 54 | extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); |
55 | extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | 55 | extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
56 | unsigned int rt, unsigned int bytes, | 56 | unsigned int rt, unsigned int bytes, |
57 | int is_bigendian); | 57 | int is_bigendian); |
58 | extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, | 58 | extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, |
59 | unsigned int rt, unsigned int bytes, | 59 | unsigned int rt, unsigned int bytes, |
60 | int is_bigendian); | 60 | int is_bigendian); |
61 | extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | 61 | extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
62 | u64 val, unsigned int bytes, int is_bigendian); | 62 | u64 val, unsigned int bytes, int is_bigendian); |
63 | 63 | ||
64 | extern int kvmppc_emulate_instruction(struct kvm_run *run, | 64 | extern int kvmppc_emulate_instruction(struct kvm_run *run, |
65 | struct kvm_vcpu *vcpu); | 65 | struct kvm_vcpu *vcpu); |
66 | extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); | 66 | extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); |
67 | extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); | 67 | extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); |
68 | extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb); | 68 | extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb); |
| | 69 | extern void kvmppc_decrementer_func(unsigned long data); |
69 | extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu); | 70 | extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu); |
70 | 71 | ||
71 | /* Core-specific hooks */ | 72 | /* Core-specific hooks */ |
72 | 73 | ||
73 | extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, | 74 | extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, |
74 | unsigned int gtlb_idx); | 75 | unsigned int gtlb_idx); |
75 | extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); | 76 | extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); |
76 | extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); | 77 | extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); |
77 | extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu); | 78 | extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu); |
78 | extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu); | 79 | extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu); |
79 | extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); | 80 | extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); |
80 | extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); | 81 | extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); |
81 | extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, | 82 | extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, |
82 | gva_t eaddr); | 83 | gva_t eaddr); |
83 | extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu); | 84 | extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu); |
84 | extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu); | 85 | extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu); |
85 | 86 | ||
86 | extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, | 87 | extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, |
87 | unsigned int id); | 88 | unsigned int id); |
88 | extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu); | 89 | extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu); |
89 | extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu); | 90 | extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu); |
90 | extern int kvmppc_core_check_processor_compat(void); | 91 | extern int kvmppc_core_check_processor_compat(void); |
91 | extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | 92 | extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, |
92 | struct kvm_translation *tr); | 93 | struct kvm_translation *tr); |
93 | 94 | ||
94 | extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | 95 | extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu); |
95 | extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); | 96 | extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); |
96 | 97 | ||
97 | extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu); | 98 | extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu); |
98 | extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); | 99 | extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); |
99 | extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags); | 100 | extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags); |
100 | extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); | 101 | extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); |
101 | extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu); | 102 | extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu); |
102 | extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, | 103 | extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, |
103 | struct kvm_interrupt *irq); | 104 | struct kvm_interrupt *irq); |
104 | extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, | 105 | extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, |
105 | struct kvm_interrupt *irq); | 106 | struct kvm_interrupt *irq); |
106 | 107 | ||
107 | extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 108 | extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, |
108 | unsigned int op, int *advance); | 109 | unsigned int op, int *advance); |
109 | extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); | 110 | extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); |
110 | extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); | 111 | extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); |
111 | 112 | ||
112 | extern int kvmppc_booke_init(void); | 113 | extern int kvmppc_booke_init(void); |
113 | extern void kvmppc_booke_exit(void); | 114 | extern void kvmppc_booke_exit(void); |
114 | 115 | ||
115 | extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); | 116 | extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); |
116 | extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu); | 117 | extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu); |
117 | extern void kvmppc_map_magic(struct kvm_vcpu *vcpu); | 118 | extern void kvmppc_map_magic(struct kvm_vcpu *vcpu); |
118 | 119 | ||
119 | extern long kvmppc_alloc_hpt(struct kvm *kvm); | 120 | extern long kvmppc_alloc_hpt(struct kvm *kvm); |
120 | extern void kvmppc_free_hpt(struct kvm *kvm); | 121 | extern void kvmppc_free_hpt(struct kvm *kvm); |
121 | extern long kvmppc_prepare_vrma(struct kvm *kvm, | 122 | extern long kvmppc_prepare_vrma(struct kvm *kvm, |
122 | struct kvm_userspace_memory_region *mem); | 123 | struct kvm_userspace_memory_region *mem); |
123 | extern void kvmppc_map_vrma(struct kvm *kvm, | 124 | extern void kvmppc_map_vrma(struct kvm *kvm, |
124 | struct kvm_userspace_memory_region *mem); | 125 | struct kvm_userspace_memory_region *mem); |
125 | extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); | 126 | extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); |
126 | extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, | 127 | extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, |
127 | struct kvm_create_spapr_tce *args); | 128 | struct kvm_create_spapr_tce *args); |
128 | extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, | 129 | extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, |
129 | struct kvm_allocate_rma *rma); | 130 | struct kvm_allocate_rma *rma); |
130 | extern struct kvmppc_rma_info *kvm_alloc_rma(void); | 131 | extern struct kvmppc_rma_info *kvm_alloc_rma(void); |
131 | extern void kvm_release_rma(struct kvmppc_rma_info *ri); | 132 | extern void kvm_release_rma(struct kvmppc_rma_info *ri); |
132 | extern int kvmppc_core_init_vm(struct kvm *kvm); | 133 | extern int kvmppc_core_init_vm(struct kvm *kvm); |
133 | extern void kvmppc_core_destroy_vm(struct kvm *kvm); | 134 | extern void kvmppc_core_destroy_vm(struct kvm *kvm); |
134 | extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, | 135 | extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, |
135 | struct kvm_userspace_memory_region *mem); | 136 | struct kvm_userspace_memory_region *mem); |
136 | extern void kvmppc_core_commit_memory_region(struct kvm *kvm, | 137 | extern void kvmppc_core_commit_memory_region(struct kvm *kvm, |
137 | struct kvm_userspace_memory_region *mem); | 138 | struct kvm_userspace_memory_region *mem); |
138 | 139 | ||
139 | /* | 140 | /* |
140 | * Cuts out inst bits with ordering according to spec. | 141 | * Cuts out inst bits with ordering according to spec. |
141 | * That means the leftmost bit is zero. All given bits are included. | 142 | * That means the leftmost bit is zero. All given bits are included. |
142 | */ | 143 | */ |
143 | static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb) | 144 | static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb) |
144 | { | 145 | { |
145 | u32 r; | 146 | u32 r; |
146 | u32 mask; | 147 | u32 mask; |
147 | 148 | ||
148 | BUG_ON(msb > lsb); | 149 | BUG_ON(msb > lsb); |
149 | 150 | ||
150 | mask = (1 << (lsb - msb + 1)) - 1; | 151 | mask = (1 << (lsb - msb + 1)) - 1; |
151 | r = (inst >> (63 - lsb)) & mask; | 152 | r = (inst >> (63 - lsb)) & mask; |
152 | 153 | ||
153 | return r; | 154 | return r; |
154 | } | 155 | } |
155 | 156 | ||
156 | /* | 157 | /* |
157 | * Replaces inst bits with ordering according to spec. | 158 | * Replaces inst bits with ordering according to spec. |
158 | */ | 159 | */ |
159 | static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value) | 160 | static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value) |
160 | { | 161 | { |
161 | u32 r; | 162 | u32 r; |
162 | u32 mask; | 163 | u32 mask; |
163 | 164 | ||
164 | BUG_ON(msb > lsb); | 165 | BUG_ON(msb > lsb); |
165 | 166 | ||
166 | mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb); | 167 | mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb); |
167 | r = (inst & ~mask) | ((value << (63 - lsb)) & mask); | 168 | r = (inst & ~mask) | ((value << (63 - lsb)) & mask); |
168 | 169 | ||
169 | return r; | 170 | return r; |
170 | } | 171 | } |
171 | 172 | ||
172 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | 173 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); |
173 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | 174 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); |
174 | 175 | ||
175 | void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | 176 | void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); |
176 | int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | 177 | int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); |
177 | 178 | ||
178 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid); | 179 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid); |
179 | 180 | ||
180 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 181 | #ifdef CONFIG_KVM_BOOK3S_64_HV |
181 | static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) | 182 | static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) |
182 | { | 183 | { |
183 | paca[cpu].kvm_hstate.xics_phys = addr; | 184 | paca[cpu].kvm_hstate.xics_phys = addr; |
184 | } | 185 | } |
185 | 186 | ||
186 | extern void kvm_rma_init(void); | 187 | extern void kvm_rma_init(void); |
187 | 188 | ||
188 | #else | 189 | #else |
189 | static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) | 190 | static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) |
190 | {} | 191 | {} |
191 | 192 | ||
192 | static inline void kvm_rma_init(void) | 193 | static inline void kvm_rma_init(void) |
193 | {} | 194 | {} |
194 | #endif | 195 | #endif |
195 | 196 | ||
196 | int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | 197 | int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, |
197 | struct kvm_config_tlb *cfg); | 198 | struct kvm_config_tlb *cfg); |
198 | int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, | 199 | int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, |
199 | struct kvm_dirty_tlb *cfg); | 200 | struct kvm_dirty_tlb *cfg); |
200 | 201 | ||
201 | #endif /* __POWERPC_KVM_PPC_H__ */ | 202 | #endif /* __POWERPC_KVM_PPC_H__ */ |
202 | 203 |
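The only addition to this header is the kvmppc_decrementer_func() declaration (new line 69). Its unsigned long argument matches the tasklet callback convention, and kvm_host.h above already gives each vcpu a dec_timer hrtimer and a tasklet. A sketch of the wiring that suggests; the body is an assumption consistent with these headers, not necessarily what the patch installs:

void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);		/* raise the guest DEC */

	if (waitqueue_active(&vcpu->wq)) {	/* kick a halted vcpu */
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

/* Hooked up at vcpu init time, e.g.:
 *	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func,
 *		     (unsigned long)vcpu);
 */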
arch/powerpc/kvm/book3s.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. | 2 | * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. |
3 | * | 3 | * |
4 | * Authors: | 4 | * Authors: |
5 | * Alexander Graf <agraf@suse.de> | 5 | * Alexander Graf <agraf@suse.de> |
6 | * Kevin Wolf <mail@kevin-wolf.de> | 6 | * Kevin Wolf <mail@kevin-wolf.de> |
7 | * | 7 | * |
8 | * Description: | 8 | * Description: |
9 | * This file is derived from arch/powerpc/kvm/44x.c, | 9 | * This file is derived from arch/powerpc/kvm/44x.c, |
10 | * by Hollis Blanchard <hollisb@us.ibm.com>. | 10 | * by Hollis Blanchard <hollisb@us.ibm.com>. |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License, version 2, as | 13 | * it under the terms of the GNU General Public License, version 2, as |
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/kvm_host.h> | 17 | #include <linux/kvm_host.h> |
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/export.h> | 19 | #include <linux/export.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | 21 | ||
22 | #include <asm/reg.h> | 22 | #include <asm/reg.h> |
23 | #include <asm/cputable.h> | 23 | #include <asm/cputable.h> |
24 | #include <asm/cacheflush.h> | 24 | #include <asm/cacheflush.h> |
25 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
26 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
27 | #include <asm/io.h> | 27 | #include <asm/io.h> |
28 | #include <asm/kvm_ppc.h> | 28 | #include <asm/kvm_ppc.h> |
29 | #include <asm/kvm_book3s.h> | 29 | #include <asm/kvm_book3s.h> |
30 | #include <asm/mmu_context.h> | 30 | #include <asm/mmu_context.h> |
31 | #include <asm/page.h> | 31 | #include <asm/page.h> |
32 | #include <linux/gfp.h> | 32 | #include <linux/gfp.h> |
33 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
34 | #include <linux/vmalloc.h> | 34 | #include <linux/vmalloc.h> |
35 | #include <linux/highmem.h> | 35 | #include <linux/highmem.h> |
36 | 36 | ||
37 | #include "trace.h" | 37 | #include "trace.h" |
38 | 38 | ||
39 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | 39 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU |
40 | 40 | ||
41 | /* #define EXIT_DEBUG */ | 41 | /* #define EXIT_DEBUG */ |
42 | 42 | ||
43 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 43 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
44 | { "exits", VCPU_STAT(sum_exits) }, | 44 | { "exits", VCPU_STAT(sum_exits) }, |
45 | { "mmio", VCPU_STAT(mmio_exits) }, | 45 | { "mmio", VCPU_STAT(mmio_exits) }, |
46 | { "sig", VCPU_STAT(signal_exits) }, | 46 | { "sig", VCPU_STAT(signal_exits) }, |
47 | { "sysc", VCPU_STAT(syscall_exits) }, | 47 | { "sysc", VCPU_STAT(syscall_exits) }, |
48 | { "inst_emu", VCPU_STAT(emulated_inst_exits) }, | 48 | { "inst_emu", VCPU_STAT(emulated_inst_exits) }, |
49 | { "dec", VCPU_STAT(dec_exits) }, | 49 | { "dec", VCPU_STAT(dec_exits) }, |
50 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, | 50 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, |
51 | { "queue_intr", VCPU_STAT(queue_intr) }, | 51 | { "queue_intr", VCPU_STAT(queue_intr) }, |
52 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 52 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
53 | { "pf_storage", VCPU_STAT(pf_storage) }, | 53 | { "pf_storage", VCPU_STAT(pf_storage) }, |
54 | { "sp_storage", VCPU_STAT(sp_storage) }, | 54 | { "sp_storage", VCPU_STAT(sp_storage) }, |
55 | { "pf_instruc", VCPU_STAT(pf_instruc) }, | 55 | { "pf_instruc", VCPU_STAT(pf_instruc) }, |
56 | { "sp_instruc", VCPU_STAT(sp_instruc) }, | 56 | { "sp_instruc", VCPU_STAT(sp_instruc) }, |
57 | { "ld", VCPU_STAT(ld) }, | 57 | { "ld", VCPU_STAT(ld) }, |
58 | { "ld_slow", VCPU_STAT(ld_slow) }, | 58 | { "ld_slow", VCPU_STAT(ld_slow) }, |
59 | { "st", VCPU_STAT(st) }, | 59 | { "st", VCPU_STAT(st) }, |
60 | { "st_slow", VCPU_STAT(st_slow) }, | 60 | { "st_slow", VCPU_STAT(st_slow) }, |
61 | { NULL } | 61 | { NULL } |
62 | }; | 62 | }; |
63 | 63 | ||
64 | void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) | 64 | void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) |
65 | { | 65 | { |
66 | } | 66 | } |
67 | 67 | ||
68 | void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) | 68 | void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) |
69 | { | 69 | { |
70 | } | 70 | } |
71 | 71 | ||
72 | void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) | 72 | void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) |
73 | { | 73 | { |
74 | vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu); | 74 | vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu); |
75 | vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags; | 75 | vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags; |
76 | kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); | 76 | kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec); |
77 | vcpu->arch.mmu.reset_msr(vcpu); | 77 | vcpu->arch.mmu.reset_msr(vcpu); |
78 | } | 78 | } |
79 | 79 | ||
80 | static int kvmppc_book3s_vec2irqprio(unsigned int vec) | 80 | static int kvmppc_book3s_vec2irqprio(unsigned int vec) |
81 | { | 81 | { |
82 | unsigned int prio; | 82 | unsigned int prio; |
83 | 83 | ||
84 | switch (vec) { | 84 | switch (vec) { |
85 | case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break; | 85 | case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break; |
86 | case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break; | 86 | case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break; |
87 | case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break; | 87 | case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break; |
88 | case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break; | 88 | case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break; |
89 | case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break; | 89 | case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break; |
90 | case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break; | 90 | case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break; |
91 | case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break; | 91 | case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break; |
92 | case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break; | 92 | case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break; |
93 | case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break; | 93 | case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break; |
94 | case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break; | 94 | case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break; |
95 | case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break; | 95 | case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break; |
96 | case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break; | 96 | case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break; |
97 | case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break; | 97 | case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break; |
98 | case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break; | 98 | case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break; |
99 | case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break; | 99 | case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break; |
100 | case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break; | 100 | case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break; |
101 | default: prio = BOOK3S_IRQPRIO_MAX; break; | 101 | default: prio = BOOK3S_IRQPRIO_MAX; break; |
102 | } | 102 | } |
103 | 103 | ||
104 | return prio; | 104 | return prio; |
105 | } | 105 | } |
106 | 106 | ||
107 | static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, | 107 | static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, |
108 | unsigned int vec) | 108 | unsigned int vec) |
109 | { | 109 | { |
110 | unsigned long old_pending = vcpu->arch.pending_exceptions; | 110 | unsigned long old_pending = vcpu->arch.pending_exceptions; |
111 | 111 | ||
112 | clear_bit(kvmppc_book3s_vec2irqprio(vec), | 112 | clear_bit(kvmppc_book3s_vec2irqprio(vec), |
113 | &vcpu->arch.pending_exceptions); | 113 | &vcpu->arch.pending_exceptions); |
114 | 114 | ||
115 | kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions, | 115 | kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions, |
116 | old_pending); | 116 | old_pending); |
117 | } | 117 | } |
118 | 118 | ||
119 | void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) | 119 | void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) |
120 | { | 120 | { |
121 | vcpu->stat.queue_intr++; | 121 | vcpu->stat.queue_intr++; |
122 | 122 | ||
123 | set_bit(kvmppc_book3s_vec2irqprio(vec), | 123 | set_bit(kvmppc_book3s_vec2irqprio(vec), |
124 | &vcpu->arch.pending_exceptions); | 124 | &vcpu->arch.pending_exceptions); |
125 | #ifdef EXIT_DEBUG | 125 | #ifdef EXIT_DEBUG |
126 | printk(KERN_INFO "Queueing interrupt %x\n", vec); | 126 | printk(KERN_INFO "Queueing interrupt %x\n", vec); |
127 | #endif | 127 | #endif |
128 | } | 128 | } |
129 | 129 | ||
130 | 130 | ||
131 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) | 131 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) |
132 | { | 132 | { |
133 | /* might as well deliver this straight away */ | 133 | /* might as well deliver this straight away */ |
134 | kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags); | 134 | kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags); |
135 | } | 135 | } |
136 | 136 | ||
137 | void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) | 137 | void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) |
138 | { | 138 | { |
139 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); | 139 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); |
140 | } | 140 | } |
141 | 141 | ||
142 | int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) | 142 | int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) |
143 | { | 143 | { |
144 | return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); | 144 | return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); |
145 | } | 145 | } |
146 | 146 | ||
147 | void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) | 147 | void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) |
148 | { | 148 | { |
149 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); | 149 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); |
150 | } | 150 | } |
151 | 151 | ||
152 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, | 152 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, |
153 | struct kvm_interrupt *irq) | 153 | struct kvm_interrupt *irq) |
154 | { | 154 | { |
155 | unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL; | 155 | unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL; |
156 | 156 | ||
157 | if (irq->irq == KVM_INTERRUPT_SET_LEVEL) | 157 | if (irq->irq == KVM_INTERRUPT_SET_LEVEL) |
158 | vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL; | 158 | vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL; |
159 | 159 | ||
160 | kvmppc_book3s_queue_irqprio(vcpu, vec); | 160 | kvmppc_book3s_queue_irqprio(vcpu, vec); |
161 | } | 161 | } |
162 | 162 | ||
163 | void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, | 163 | void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, |
164 | struct kvm_interrupt *irq) | 164 | struct kvm_interrupt *irq) |
165 | { | 165 | { |
166 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); | 166 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); |
167 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL); | 167 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL); |
168 | } | 168 | } |
169 | 169 | ||
170 | int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) | 170 | int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) |
171 | { | 171 | { |
172 | int deliver = 1; | 172 | int deliver = 1; |
173 | int vec = 0; | 173 | int vec = 0; |
174 | bool crit = kvmppc_critical_section(vcpu); | 174 | bool crit = kvmppc_critical_section(vcpu); |
175 | 175 | ||
176 | switch (priority) { | 176 | switch (priority) { |
177 | case BOOK3S_IRQPRIO_DECREMENTER: | 177 | case BOOK3S_IRQPRIO_DECREMENTER: |
178 | deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; | 178 | deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; |
179 | vec = BOOK3S_INTERRUPT_DECREMENTER; | 179 | vec = BOOK3S_INTERRUPT_DECREMENTER; |
180 | break; | 180 | break; |
181 | case BOOK3S_IRQPRIO_EXTERNAL: | 181 | case BOOK3S_IRQPRIO_EXTERNAL: |
182 | case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: | 182 | case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: |
183 | deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; | 183 | deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; |
184 | vec = BOOK3S_INTERRUPT_EXTERNAL; | 184 | vec = BOOK3S_INTERRUPT_EXTERNAL; |
185 | break; | 185 | break; |
186 | case BOOK3S_IRQPRIO_SYSTEM_RESET: | 186 | case BOOK3S_IRQPRIO_SYSTEM_RESET: |
187 | vec = BOOK3S_INTERRUPT_SYSTEM_RESET; | 187 | vec = BOOK3S_INTERRUPT_SYSTEM_RESET; |
188 | break; | 188 | break; |
189 | case BOOK3S_IRQPRIO_MACHINE_CHECK: | 189 | case BOOK3S_IRQPRIO_MACHINE_CHECK: |
190 | vec = BOOK3S_INTERRUPT_MACHINE_CHECK; | 190 | vec = BOOK3S_INTERRUPT_MACHINE_CHECK; |
191 | break; | 191 | break; |
192 | case BOOK3S_IRQPRIO_DATA_STORAGE: | 192 | case BOOK3S_IRQPRIO_DATA_STORAGE: |
193 | vec = BOOK3S_INTERRUPT_DATA_STORAGE; | 193 | vec = BOOK3S_INTERRUPT_DATA_STORAGE; |
194 | break; | 194 | break; |
195 | case BOOK3S_IRQPRIO_INST_STORAGE: | 195 | case BOOK3S_IRQPRIO_INST_STORAGE: |
196 | vec = BOOK3S_INTERRUPT_INST_STORAGE; | 196 | vec = BOOK3S_INTERRUPT_INST_STORAGE; |
197 | break; | 197 | break; |
198 | case BOOK3S_IRQPRIO_DATA_SEGMENT: | 198 | case BOOK3S_IRQPRIO_DATA_SEGMENT: |
199 | vec = BOOK3S_INTERRUPT_DATA_SEGMENT; | 199 | vec = BOOK3S_INTERRUPT_DATA_SEGMENT; |
200 | break; | 200 | break; |
201 | case BOOK3S_IRQPRIO_INST_SEGMENT: | 201 | case BOOK3S_IRQPRIO_INST_SEGMENT: |
202 | vec = BOOK3S_INTERRUPT_INST_SEGMENT; | 202 | vec = BOOK3S_INTERRUPT_INST_SEGMENT; |
203 | break; | 203 | break; |
204 | case BOOK3S_IRQPRIO_ALIGNMENT: | 204 | case BOOK3S_IRQPRIO_ALIGNMENT: |
205 | vec = BOOK3S_INTERRUPT_ALIGNMENT; | 205 | vec = BOOK3S_INTERRUPT_ALIGNMENT; |
206 | break; | 206 | break; |
207 | case BOOK3S_IRQPRIO_PROGRAM: | 207 | case BOOK3S_IRQPRIO_PROGRAM: |
208 | vec = BOOK3S_INTERRUPT_PROGRAM; | 208 | vec = BOOK3S_INTERRUPT_PROGRAM; |
209 | break; | 209 | break; |
210 | case BOOK3S_IRQPRIO_VSX: | 210 | case BOOK3S_IRQPRIO_VSX: |
211 | vec = BOOK3S_INTERRUPT_VSX; | 211 | vec = BOOK3S_INTERRUPT_VSX; |
212 | break; | 212 | break; |
213 | case BOOK3S_IRQPRIO_ALTIVEC: | 213 | case BOOK3S_IRQPRIO_ALTIVEC: |
214 | vec = BOOK3S_INTERRUPT_ALTIVEC; | 214 | vec = BOOK3S_INTERRUPT_ALTIVEC; |
215 | break; | 215 | break; |
216 | case BOOK3S_IRQPRIO_FP_UNAVAIL: | 216 | case BOOK3S_IRQPRIO_FP_UNAVAIL: |
217 | vec = BOOK3S_INTERRUPT_FP_UNAVAIL; | 217 | vec = BOOK3S_INTERRUPT_FP_UNAVAIL; |
218 | break; | 218 | break; |
219 | case BOOK3S_IRQPRIO_SYSCALL: | 219 | case BOOK3S_IRQPRIO_SYSCALL: |
220 | vec = BOOK3S_INTERRUPT_SYSCALL; | 220 | vec = BOOK3S_INTERRUPT_SYSCALL; |
221 | break; | 221 | break; |
222 | case BOOK3S_IRQPRIO_DEBUG: | 222 | case BOOK3S_IRQPRIO_DEBUG: |
223 | vec = BOOK3S_INTERRUPT_TRACE; | 223 | vec = BOOK3S_INTERRUPT_TRACE; |
224 | break; | 224 | break; |
225 | case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR: | 225 | case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR: |
226 | vec = BOOK3S_INTERRUPT_PERFMON; | 226 | vec = BOOK3S_INTERRUPT_PERFMON; |
227 | break; | 227 | break; |
228 | default: | 228 | default: |
229 | deliver = 0; | 229 | deliver = 0; |
230 | printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority); | 230 | printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority); |
231 | break; | 231 | break; |
232 | } | 232 | } |
233 | 233 | ||
234 | #if 0 | 234 | #if 0 |
235 | printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver); | 235 | printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver); |
236 | #endif | 236 | #endif |
237 | 237 | ||
238 | if (deliver) | 238 | if (deliver) |
239 | kvmppc_inject_interrupt(vcpu, vec, 0); | 239 | kvmppc_inject_interrupt(vcpu, vec, 0); |
240 | 240 | ||
241 | return deliver; | 241 | return deliver; |
242 | } | 242 | } |
243 | 243 | ||
244 | /* | 244 | /* |
245 | * This function determines if an irqprio should be cleared once issued. | 245 | * This function determines if an irqprio should be cleared once issued. |
246 | */ | 246 | */ |
247 | static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) | 247 | static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) |
248 | { | 248 | { |
249 | switch (priority) { | 249 | switch (priority) { |
250 | case BOOK3S_IRQPRIO_DECREMENTER: | 250 | case BOOK3S_IRQPRIO_DECREMENTER: |
251 | /* DEC interrupts get cleared by mtdec */ | 251 | /* DEC interrupts get cleared by mtdec */ |
252 | return false; | 252 | return false; |
253 | case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: | 253 | case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: |
254 | /* External interrupts get cleared by userspace */ | 254 | /* External interrupts get cleared by userspace */ |
255 | return false; | 255 | return false; |
256 | } | 256 | } |
257 | 257 | ||
258 | return true; | 258 | return true; |
259 | } | 259 | } |
260 | 260 | ||
261 | void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) | 261 | void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) |
262 | { | 262 | { |
263 | unsigned long *pending = &vcpu->arch.pending_exceptions; | 263 | unsigned long *pending = &vcpu->arch.pending_exceptions; |
264 | unsigned long old_pending = vcpu->arch.pending_exceptions; | 264 | unsigned long old_pending = vcpu->arch.pending_exceptions; |
265 | unsigned int priority; | 265 | unsigned int priority; |
266 | 266 | ||
267 | #ifdef EXIT_DEBUG | 267 | #ifdef EXIT_DEBUG |
268 | if (vcpu->arch.pending_exceptions) | 268 | if (vcpu->arch.pending_exceptions) |
269 | printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); | 269 | printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); |
270 | #endif | 270 | #endif |
271 | priority = __ffs(*pending); | 271 | priority = __ffs(*pending); |
272 | while (priority < BOOK3S_IRQPRIO_MAX) { | 272 | while (priority < BOOK3S_IRQPRIO_MAX) { |
273 | if (kvmppc_book3s_irqprio_deliver(vcpu, priority) && | 273 | if (kvmppc_book3s_irqprio_deliver(vcpu, priority) && |
274 | clear_irqprio(vcpu, priority)) { | 274 | clear_irqprio(vcpu, priority)) { |
275 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 275 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
276 | break; | 276 | break; |
277 | } | 277 | } |
278 | 278 | ||
279 | priority = find_next_bit(pending, | 279 | priority = find_next_bit(pending, |
280 | BITS_PER_BYTE * sizeof(*pending), | 280 | BITS_PER_BYTE * sizeof(*pending), |
281 | priority + 1); | 281 | priority + 1); |
282 | } | 282 | } |
283 | 283 | ||
284 | /* Tell the guest about our interrupt status */ | 284 | /* Tell the guest about our interrupt status */ |
285 | kvmppc_update_int_pending(vcpu, *pending, old_pending); | 285 | kvmppc_update_int_pending(vcpu, *pending, old_pending); |
286 | } | 286 | } |
287 | 287 | ||
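The scan above is the common delivery pattern: the lowest set bit in pending_exceptions is the highest-priority source, and a bit is consumed only when the interrupt was both delivered and edge-triggered per clear_irqprio(); level sources (DEC, external-level) stay set until their origin is cleared. A standalone model of that loop, with invented priority numbers standing in for the BOOK3S_IRQPRIO_* constants:

#include <stdbool.h>
#include <stdio.h>

#define PRIO_MAX 20UL

/* Illustrative stand-ins for two of the BOOK3S_IRQPRIO_* values. */
#define PRIO_DECREMENTER     3UL
#define PRIO_EXTERNAL_LEVEL  5UL

static bool is_level(unsigned long prio)
{
        /* Mirrors clear_irqprio(): DEC and level-triggered external
         * interrupts stay pending until their source is cleared. */
        return prio == PRIO_DECREMENTER || prio == PRIO_EXTERNAL_LEVEL;
}

static bool try_deliver(unsigned long prio)
{
        printf("deliver priority %lu\n", prio);
        return true;            /* stands in for irqprio_deliver() */
}

int main(void)
{
        unsigned long pending = (1UL << PRIO_DECREMENTER) | (1UL << 7);
        unsigned long prio;

        for (prio = 0; prio < PRIO_MAX; prio++) {
                if (!(pending & (1UL << prio)))
                        continue;
                if (try_deliver(prio) && !is_level(prio)) {
                        pending &= ~(1UL << prio);  /* edge: consume it */
                        break;
                }
                /* level-triggered: leave the bit set, keep scanning */
        }
        printf("still pending: %#lx\n", pending);
        return 0;
}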
288 | pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) | 288 | pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
289 | { | 289 | { |
290 | ulong mp_pa = vcpu->arch.magic_page_pa; | 290 | ulong mp_pa = vcpu->arch.magic_page_pa; |
291 | 291 | ||
292 | /* Magic page override */ | 292 | /* Magic page override */ |
293 | if (unlikely(mp_pa) && | 293 | if (unlikely(mp_pa) && |
294 | unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == | 294 | unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == |
295 | ((mp_pa & PAGE_MASK) & KVM_PAM))) { | 295 | ((mp_pa & PAGE_MASK) & KVM_PAM))) { |
296 | ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; | 296 | ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; |
297 | pfn_t pfn; | 297 | pfn_t pfn; |
298 | 298 | ||
299 | pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT; | 299 | pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT; |
300 | get_page(pfn_to_page(pfn)); | 300 | get_page(pfn_to_page(pfn)); |
301 | return pfn; | 301 | return pfn; |
302 | } | 302 | } |
303 | 303 | ||
304 | return gfn_to_pfn(vcpu->kvm, gfn); | 304 | return gfn_to_pfn(vcpu->kvm, gfn); |
305 | } | 305 | } |
306 | 306 | ||
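The magic-page override short-circuits the memslot lookup: when the requested frame matches the registered magic_page_pa, the host hands back the page backing vcpu->arch.shared instead. The predicate is just a masked compare; restated on its own (the PAGE_SHIFT and KVM_PAM values here are placeholders, not the kernel's definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))
#define KVM_PAM    0x0fffffffffffffffUL  /* placeholder address mask */

/* True when a guest frame number hits the registered magic page. */
static bool gfn_is_magic(uint64_t gfn, uint64_t magic_page_pa)
{
        if (!magic_page_pa)
                return false;            /* no magic page registered */
        return ((gfn << PAGE_SHIFT) & KVM_PAM) ==
               ((magic_page_pa & PAGE_MASK) & KVM_PAM);
}

int main(void)
{
        printf("%d\n", gfn_is_magic(0xf00, 0xf00UL << PAGE_SHIFT));
        return 0;
}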
307 | static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, | 307 | static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, |
308 | struct kvmppc_pte *pte) | 308 | struct kvmppc_pte *pte) |
309 | { | 309 | { |
310 | int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR)); | 310 | int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR)); |
311 | int r; | 311 | int r; |
312 | 312 | ||
313 | if (relocated) { | 313 | if (relocated) { |
314 | r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data); | 314 | r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data); |
315 | } else { | 315 | } else { |
316 | pte->eaddr = eaddr; | 316 | pte->eaddr = eaddr; |
317 | pte->raddr = eaddr & KVM_PAM; | 317 | pte->raddr = eaddr & KVM_PAM; |
318 | pte->vpage = VSID_REAL | eaddr >> 12; | 318 | pte->vpage = VSID_REAL | eaddr >> 12; |
319 | pte->may_read = true; | 319 | pte->may_read = true; |
320 | pte->may_write = true; | 320 | pte->may_write = true; |
321 | pte->may_execute = true; | 321 | pte->may_execute = true; |
322 | r = 0; | 322 | r = 0; |
323 | } | 323 | } |
324 | 324 | ||
325 | return r; | 325 | return r; |
326 | } | 326 | } |
327 | 327 | ||
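kvmppc_xlate() consults the guest MMU only when the relevant relocation bit is set -- MSR[DR] for data accesses, MSR[IR] for instruction fetches. With relocation off the access is in real mode: the effective address maps 1:1, clipped by KVM_PAM, with all permissions granted. A worked example of the real-mode branch (the MSR bit positions and mask are placeholders):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_IR (1UL << 5)   /* instruction relocate (placeholder) */
#define MSR_DR (1UL << 4)   /* data relocate (placeholder) */
#define PAM    0x0fffffffffffffffUL

int main(void)
{
        uint64_t msr = 0;               /* both relocate bits off */
        uint64_t eaddr = 0xc0001234;
        bool data = true;

        if (!(msr & (data ? MSR_DR : MSR_IR)))
                /* real mode: identity map, clipped to the address mask */
                printf("raddr = %#llx, vpage = REAL|%#llx\n",
                       (unsigned long long)(eaddr & PAM),
                       (unsigned long long)(eaddr >> 12));
        return 0;
}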
328 | static hva_t kvmppc_bad_hva(void) | 328 | static hva_t kvmppc_bad_hva(void) |
329 | { | 329 | { |
330 | return PAGE_OFFSET; | 330 | return PAGE_OFFSET; |
331 | } | 331 | } |
332 | 332 | ||
333 | static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte, | 333 | static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte, |
334 | bool read) | 334 | bool read) |
335 | { | 335 | { |
336 | hva_t hpage; | 336 | hva_t hpage; |
337 | 337 | ||
338 | if (read && !pte->may_read) | 338 | if (read && !pte->may_read) |
339 | goto err; | 339 | goto err; |
340 | 340 | ||
341 | if (!read && !pte->may_write) | 341 | if (!read && !pte->may_write) |
342 | goto err; | 342 | goto err; |
343 | 343 | ||
344 | hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT); | 344 | hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT); |
345 | if (kvm_is_error_hva(hpage)) | 345 | if (kvm_is_error_hva(hpage)) |
346 | goto err; | 346 | goto err; |
347 | 347 | ||
348 | return hpage | (pte->raddr & ~PAGE_MASK); | 348 | return hpage | (pte->raddr & ~PAGE_MASK); |
349 | err: | 349 | err: |
350 | return kvmppc_bad_hva(); | 350 | return kvmppc_bad_hva(); |
351 | } | 351 | } |
352 | 352 | ||
353 | int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, | 353 | int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, |
354 | bool data) | 354 | bool data) |
355 | { | 355 | { |
356 | struct kvmppc_pte pte; | 356 | struct kvmppc_pte pte; |
357 | 357 | ||
358 | vcpu->stat.st++; | 358 | vcpu->stat.st++; |
359 | 359 | ||
360 | if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) | 360 | if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) |
361 | return -ENOENT; | 361 | return -ENOENT; |
362 | 362 | ||
363 | *eaddr = pte.raddr; | 363 | *eaddr = pte.raddr; |
364 | 364 | ||
365 | if (!pte.may_write) | 365 | if (!pte.may_write) |
366 | return -EPERM; | 366 | return -EPERM; |
367 | 367 | ||
368 | if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) | 368 | if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) |
369 | return EMULATE_DO_MMIO; | 369 | return EMULATE_DO_MMIO; |
370 | 370 | ||
371 | return EMULATE_DONE; | 371 | return EMULATE_DONE; |
372 | } | 372 | } |
373 | 373 | ||
374 | int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, | 374 | int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, |
375 | bool data) | 375 | bool data) |
376 | { | 376 | { |
377 | struct kvmppc_pte pte; | 377 | struct kvmppc_pte pte; |
378 | hva_t hva = *eaddr; | 378 | hva_t hva = *eaddr; |
379 | 379 | ||
380 | vcpu->stat.ld++; | 380 | vcpu->stat.ld++; |
381 | 381 | ||
382 | if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) | 382 | if (kvmppc_xlate(vcpu, *eaddr, data, &pte)) |
383 | goto nopte; | 383 | goto nopte; |
384 | 384 | ||
385 | *eaddr = pte.raddr; | 385 | *eaddr = pte.raddr; |
386 | 386 | ||
387 | hva = kvmppc_pte_to_hva(vcpu, &pte, true); | 387 | hva = kvmppc_pte_to_hva(vcpu, &pte, true); |
388 | if (kvm_is_error_hva(hva)) | 388 | if (kvm_is_error_hva(hva)) |
389 | goto mmio; | 389 | goto mmio; |
390 | 390 | ||
391 | if (copy_from_user(ptr, (void __user *)hva, size)) { | 391 | if (copy_from_user(ptr, (void __user *)hva, size)) { |
392 | printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva); | 392 | printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva); |
393 | goto mmio; | 393 | goto mmio; |
394 | } | 394 | } |
395 | 395 | ||
396 | return EMULATE_DONE; | 396 | return EMULATE_DONE; |
397 | 397 | ||
398 | nopte: | 398 | nopte: |
399 | return -ENOENT; | 399 | return -ENOENT; |
400 | mmio: | 400 | mmio: |
401 | return EMULATE_DO_MMIO; | 401 | return EMULATE_DO_MMIO; |
402 | } | 402 | } |
403 | 403 | ||
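kvmppc_ld()/kvmppc_st() are the emulator's guest-memory accessors: translate, check PTE permissions, then either touch the backing memory or report EMULATE_DO_MMIO when the target is not RAM. A hypothetical in-tree caller (the helper name and error mapping are illustrative, not from this commit) might wrap a load like this:

#include <linux/kvm_host.h>
#include <asm/kvm_ppc.h>

/* Hypothetical helper: fetch a 32-bit word the guest just touched. */
static int fetch_guest_u32(struct kvm_vcpu *vcpu, ulong eaddr, u32 *val)
{
        ulong addr = eaddr;
        int r;

        r = kvmppc_ld(vcpu, &addr, sizeof(*val), val, true /* data */);
        switch (r) {
        case EMULATE_DONE:
                return 0;       /* *val holds the guest word */
        case EMULATE_DO_MMIO:
                return -EAGAIN; /* punt to MMIO emulation */
        default:
                return -EFAULT; /* -ENOENT: no guest translation */
        }
}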
404 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 404 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
405 | { | 405 | { |
406 | return 0; | 406 | return 0; |
407 | } | 407 | } |
408 | 408 | ||
409 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 409 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
410 | { | 410 | { |
411 | int i; | 411 | int i; |
412 | 412 | ||
413 | regs->pc = kvmppc_get_pc(vcpu); | 413 | regs->pc = kvmppc_get_pc(vcpu); |
414 | regs->cr = kvmppc_get_cr(vcpu); | 414 | regs->cr = kvmppc_get_cr(vcpu); |
415 | regs->ctr = kvmppc_get_ctr(vcpu); | 415 | regs->ctr = kvmppc_get_ctr(vcpu); |
416 | regs->lr = kvmppc_get_lr(vcpu); | 416 | regs->lr = kvmppc_get_lr(vcpu); |
417 | regs->xer = kvmppc_get_xer(vcpu); | 417 | regs->xer = kvmppc_get_xer(vcpu); |
418 | regs->msr = vcpu->arch.shared->msr; | 418 | regs->msr = vcpu->arch.shared->msr; |
419 | regs->srr0 = vcpu->arch.shared->srr0; | 419 | regs->srr0 = vcpu->arch.shared->srr0; |
420 | regs->srr1 = vcpu->arch.shared->srr1; | 420 | regs->srr1 = vcpu->arch.shared->srr1; |
421 | regs->pid = vcpu->arch.pid; | 421 | regs->pid = vcpu->arch.pid; |
422 | regs->sprg0 = vcpu->arch.shared->sprg0; | 422 | regs->sprg0 = vcpu->arch.shared->sprg0; |
423 | regs->sprg1 = vcpu->arch.shared->sprg1; | 423 | regs->sprg1 = vcpu->arch.shared->sprg1; |
424 | regs->sprg2 = vcpu->arch.shared->sprg2; | 424 | regs->sprg2 = vcpu->arch.shared->sprg2; |
425 | regs->sprg3 = vcpu->arch.shared->sprg3; | 425 | regs->sprg3 = vcpu->arch.shared->sprg3; |
426 | regs->sprg4 = vcpu->arch.shared->sprg4; | 426 | regs->sprg4 = vcpu->arch.shared->sprg4; |
427 | regs->sprg5 = vcpu->arch.shared->sprg5; | 427 | regs->sprg5 = vcpu->arch.shared->sprg5; |
428 | regs->sprg6 = vcpu->arch.shared->sprg6; | 428 | regs->sprg6 = vcpu->arch.shared->sprg6; |
429 | regs->sprg7 = vcpu->arch.shared->sprg7; | 429 | regs->sprg7 = vcpu->arch.shared->sprg7; |
430 | 430 | ||
431 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 431 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
432 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); | 432 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
433 | 433 | ||
434 | return 0; | 434 | return 0; |
435 | } | 435 | } |
436 | 436 | ||
437 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 437 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
438 | { | 438 | { |
439 | int i; | 439 | int i; |
440 | 440 | ||
441 | kvmppc_set_pc(vcpu, regs->pc); | 441 | kvmppc_set_pc(vcpu, regs->pc); |
442 | kvmppc_set_cr(vcpu, regs->cr); | 442 | kvmppc_set_cr(vcpu, regs->cr); |
443 | kvmppc_set_ctr(vcpu, regs->ctr); | 443 | kvmppc_set_ctr(vcpu, regs->ctr); |
444 | kvmppc_set_lr(vcpu, regs->lr); | 444 | kvmppc_set_lr(vcpu, regs->lr); |
445 | kvmppc_set_xer(vcpu, regs->xer); | 445 | kvmppc_set_xer(vcpu, regs->xer); |
446 | kvmppc_set_msr(vcpu, regs->msr); | 446 | kvmppc_set_msr(vcpu, regs->msr); |
447 | vcpu->arch.shared->srr0 = regs->srr0; | 447 | vcpu->arch.shared->srr0 = regs->srr0; |
448 | vcpu->arch.shared->srr1 = regs->srr1; | 448 | vcpu->arch.shared->srr1 = regs->srr1; |
449 | vcpu->arch.shared->sprg0 = regs->sprg0; | 449 | vcpu->arch.shared->sprg0 = regs->sprg0; |
450 | vcpu->arch.shared->sprg1 = regs->sprg1; | 450 | vcpu->arch.shared->sprg1 = regs->sprg1; |
451 | vcpu->arch.shared->sprg2 = regs->sprg2; | 451 | vcpu->arch.shared->sprg2 = regs->sprg2; |
452 | vcpu->arch.shared->sprg3 = regs->sprg3; | 452 | vcpu->arch.shared->sprg3 = regs->sprg3; |
453 | vcpu->arch.shared->sprg4 = regs->sprg4; | 453 | vcpu->arch.shared->sprg4 = regs->sprg4; |
454 | vcpu->arch.shared->sprg5 = regs->sprg5; | 454 | vcpu->arch.shared->sprg5 = regs->sprg5; |
455 | vcpu->arch.shared->sprg6 = regs->sprg6; | 455 | vcpu->arch.shared->sprg6 = regs->sprg6; |
456 | vcpu->arch.shared->sprg7 = regs->sprg7; | 456 | vcpu->arch.shared->sprg7 = regs->sprg7; |
457 | 457 | ||
458 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 458 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
459 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); | 459 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); |
460 | 460 | ||
461 | return 0; | 461 | return 0; |
462 | } | 462 | } |
463 | 463 | ||
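These two handlers back the KVM_GET_REGS/KVM_SET_REGS vcpu ioctls, so the full GPR/SPR snapshot above is exactly what userspace sees. A minimal round trip, assuming an existing vcpu fd (advancing pc past one 4-byte instruction is just an example use):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Bump the guest program counter by one instruction. */
static int skip_insn(int vcpu_fd)
{
        struct kvm_regs regs;

        if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
                return -1;
        regs.pc += 4;           /* ppc instructions are 4 bytes */
        return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
}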
464 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 464 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
465 | { | 465 | { |
466 | return -ENOTSUPP; | 466 | return -ENOTSUPP; |
467 | } | 467 | } |
468 | 468 | ||
469 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 469 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
470 | { | 470 | { |
471 | return -ENOTSUPP; | 471 | return -ENOTSUPP; |
472 | } | 472 | } |
473 | 473 | ||
474 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | 474 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
475 | struct kvm_translation *tr) | 475 | struct kvm_translation *tr) |
476 | { | 476 | { |
477 | return 0; | 477 | return 0; |
478 | } | 478 | } |
479 | 479 | ||
480 | /* | 480 | /* |
481 | * Get (and clear) the dirty memory log for a memory slot. | 481 | * Get (and clear) the dirty memory log for a memory slot. |
482 | */ | 482 | */ |
483 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | 483 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
484 | struct kvm_dirty_log *log) | 484 | struct kvm_dirty_log *log) |
485 | { | 485 | { |
486 | struct kvm_memory_slot *memslot; | 486 | struct kvm_memory_slot *memslot; |
487 | struct kvm_vcpu *vcpu; | 487 | struct kvm_vcpu *vcpu; |
488 | ulong ga, ga_end; | 488 | ulong ga, ga_end; |
489 | int is_dirty = 0; | 489 | int is_dirty = 0; |
490 | int r; | 490 | int r; |
491 | unsigned long n; | 491 | unsigned long n; |
492 | 492 | ||
493 | mutex_lock(&kvm->slots_lock); | 493 | mutex_lock(&kvm->slots_lock); |
494 | 494 | ||
495 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | 495 | r = kvm_get_dirty_log(kvm, log, &is_dirty); |
496 | if (r) | 496 | if (r) |
497 | goto out; | 497 | goto out; |
498 | 498 | ||
499 | /* If nothing is dirty, don't bother messing with page tables. */ | 499 | /* If nothing is dirty, don't bother messing with page tables. */ |
500 | if (is_dirty) { | 500 | if (is_dirty) { |
501 | memslot = id_to_memslot(kvm->memslots, log->slot); | 501 | memslot = id_to_memslot(kvm->memslots, log->slot); |
502 | 502 | ||
503 | ga = memslot->base_gfn << PAGE_SHIFT; | 503 | ga = memslot->base_gfn << PAGE_SHIFT; |
504 | ga_end = ga + (memslot->npages << PAGE_SHIFT); | 504 | ga_end = ga + (memslot->npages << PAGE_SHIFT); |
505 | 505 | ||
506 | kvm_for_each_vcpu(n, vcpu, kvm) | 506 | kvm_for_each_vcpu(n, vcpu, kvm) |
507 | kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); | 507 | kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); |
508 | 508 | ||
509 | n = kvm_dirty_bitmap_bytes(memslot); | 509 | n = kvm_dirty_bitmap_bytes(memslot); |
510 | memset(memslot->dirty_bitmap, 0, n); | 510 | memset(memslot->dirty_bitmap, 0, n); |
511 | } | 511 | } |
512 | 512 | ||
513 | r = 0; | 513 | r = 0; |
514 | out: | 514 | out: |
515 | mutex_unlock(&kvm->slots_lock); | 515 | mutex_unlock(&kvm->slots_lock); |
516 | return r; | 516 | return r; |
517 | } | 517 | } |
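This is the KVM_GET_DIRTY_LOG implementation; note that it also flushes the shadow PTEs covering the slot before clearing the bitmap. On the user side, the buffer must be at least the long-aligned bitmap size the kernel copies out. A sketch (slot id and page count are whatever was registered for the memslot):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include <string.h>

/* Fetch (and clear) the dirty bitmap for one memslot; npages must
 * match what was registered for that slot. */
static void *get_dirty_bitmap(int vm_fd, int slot, size_t npages)
{
        /* the kernel copies out a long-aligned bitmap, so round up */
        size_t len = ((npages + 63) / 64) * 8;
        struct kvm_dirty_log log;
        void *bitmap = calloc(1, len);

        if (!bitmap)
                return NULL;

        memset(&log, 0, sizeof(log));
        log.slot = slot;
        log.dirty_bitmap = bitmap;

        if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
                free(bitmap);
                return NULL;
        }
        return bitmap;
}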
518 | |||
519 | void kvmppc_decrementer_func(unsigned long data) | ||
520 | { | ||
521 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; | ||
522 | |||
523 | kvmppc_core_queue_dec(vcpu); | ||
524 | kvm_vcpu_kick(vcpu); | ||
525 | } | ||
518 | 526 |
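kvmppc_decrementer_func() is the common tail of decrementer emulation for both book3s and booke: queue a DEC, then kick the vcpu out of any halt. The (unsigned long data) signature is the classic tasklet callback shape; in this era the guest decrementer is modelled by an hrtimer whose handler defers to such a tasklet, roughly as below (the arch field names are assumptions drawn from the surrounding tree, not part of this diff):

#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kvm_host.h>

/* Sketch: the hrtimer fires in hard-irq context and defers the real
 * work -- queueing the DEC and kicking the vcpu -- to the tasklet. */
static enum hrtimer_restart dec_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu =
                container_of(timer, struct kvm_vcpu, arch.dec_timer);

        tasklet_schedule(&vcpu->arch.tasklet);
        return HRTIMER_NORESTART;
}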
arch/powerpc/kvm/booke.c
1 | /* | 1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | 2 | * This program is free software; you can redistribute it and/or modify |
3 | * it under the terms of the GNU General Public License, version 2, as | 3 | * it under the terms of the GNU General Public License, version 2, as |
4 | * published by the Free Software Foundation. | 4 | * published by the Free Software Foundation. |
5 | * | 5 | * |
6 | * This program is distributed in the hope that it will be useful, | 6 | * This program is distributed in the hope that it will be useful, |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
9 | * GNU General Public License for more details. | 9 | * GNU General Public License for more details. |
10 | * | 10 | * |
11 | * You should have received a copy of the GNU General Public License | 11 | * You should have received a copy of the GNU General Public License |
12 | * along with this program; if not, write to the Free Software | 12 | * along with this program; if not, write to the Free Software |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
14 | * | 14 | * |
15 | * Copyright IBM Corp. 2007 | 15 | * Copyright IBM Corp. 2007 |
16 | * Copyright 2010-2011 Freescale Semiconductor, Inc. | 16 | * Copyright 2010-2011 Freescale Semiconductor, Inc. |
17 | * | 17 | * |
18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | 18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> |
19 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | 19 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/err.h> | 23 | #include <linux/err.h> |
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | #include <linux/gfp.h> | 25 | #include <linux/gfp.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/vmalloc.h> | 27 | #include <linux/vmalloc.h> |
28 | #include <linux/fs.h> | 28 | #include <linux/fs.h> |
29 | 29 | ||
30 | #include <asm/cputable.h> | 30 | #include <asm/cputable.h> |
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
32 | #include <asm/kvm_ppc.h> | 32 | #include <asm/kvm_ppc.h> |
33 | #include "timing.h" | 33 | #include "timing.h" |
34 | #include <asm/cacheflush.h> | 34 | #include <asm/cacheflush.h> |
35 | 35 | ||
36 | #include "booke.h" | 36 | #include "booke.h" |
37 | 37 | ||
38 | unsigned long kvmppc_booke_handlers; | 38 | unsigned long kvmppc_booke_handlers; |
39 | 39 | ||
40 | #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM | 40 | #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM |
41 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | 41 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU |
42 | 42 | ||
43 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 43 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
44 | { "mmio", VCPU_STAT(mmio_exits) }, | 44 | { "mmio", VCPU_STAT(mmio_exits) }, |
45 | { "dcr", VCPU_STAT(dcr_exits) }, | 45 | { "dcr", VCPU_STAT(dcr_exits) }, |
46 | { "sig", VCPU_STAT(signal_exits) }, | 46 | { "sig", VCPU_STAT(signal_exits) }, |
47 | { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, | 47 | { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, |
48 | { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, | 48 | { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, |
49 | { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) }, | 49 | { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) }, |
50 | { "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) }, | 50 | { "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) }, |
51 | { "sysc", VCPU_STAT(syscall_exits) }, | 51 | { "sysc", VCPU_STAT(syscall_exits) }, |
52 | { "isi", VCPU_STAT(isi_exits) }, | 52 | { "isi", VCPU_STAT(isi_exits) }, |
53 | { "dsi", VCPU_STAT(dsi_exits) }, | 53 | { "dsi", VCPU_STAT(dsi_exits) }, |
54 | { "inst_emu", VCPU_STAT(emulated_inst_exits) }, | 54 | { "inst_emu", VCPU_STAT(emulated_inst_exits) }, |
55 | { "dec", VCPU_STAT(dec_exits) }, | 55 | { "dec", VCPU_STAT(dec_exits) }, |
56 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, | 56 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, |
57 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 57 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
58 | { NULL } | 58 | { NULL } |
59 | }; | 59 | }; |
60 | 60 | ||
61 | /* TODO: use vcpu_printf() */ | 61 | /* TODO: use vcpu_printf() */ |
62 | void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | 62 | void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) |
63 | { | 63 | { |
64 | int i; | 64 | int i; |
65 | 65 | ||
66 | printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr); | 66 | printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr); |
67 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); | 67 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); |
68 | printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, | 68 | printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, |
69 | vcpu->arch.shared->srr1); | 69 | vcpu->arch.shared->srr1); |
70 | 70 | ||
71 | printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); | 71 | printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); |
72 | 72 | ||
73 | for (i = 0; i < 32; i += 4) { | 73 | for (i = 0; i < 32; i += 4) { |
74 | printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, | 74 | printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, |
75 | kvmppc_get_gpr(vcpu, i), | 75 | kvmppc_get_gpr(vcpu, i), |
76 | kvmppc_get_gpr(vcpu, i+1), | 76 | kvmppc_get_gpr(vcpu, i+1), |
77 | kvmppc_get_gpr(vcpu, i+2), | 77 | kvmppc_get_gpr(vcpu, i+2), |
78 | kvmppc_get_gpr(vcpu, i+3)); | 78 | kvmppc_get_gpr(vcpu, i+3)); |
79 | } | 79 | } |
80 | } | 80 | } |
81 | 81 | ||
82 | #ifdef CONFIG_SPE | 82 | #ifdef CONFIG_SPE |
83 | void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu) | 83 | void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu) |
84 | { | 84 | { |
85 | preempt_disable(); | 85 | preempt_disable(); |
86 | enable_kernel_spe(); | 86 | enable_kernel_spe(); |
87 | kvmppc_save_guest_spe(vcpu); | 87 | kvmppc_save_guest_spe(vcpu); |
88 | vcpu->arch.shadow_msr &= ~MSR_SPE; | 88 | vcpu->arch.shadow_msr &= ~MSR_SPE; |
89 | preempt_enable(); | 89 | preempt_enable(); |
90 | } | 90 | } |
91 | 91 | ||
92 | static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu) | 92 | static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu) |
93 | { | 93 | { |
94 | preempt_disable(); | 94 | preempt_disable(); |
95 | enable_kernel_spe(); | 95 | enable_kernel_spe(); |
96 | kvmppc_load_guest_spe(vcpu); | 96 | kvmppc_load_guest_spe(vcpu); |
97 | vcpu->arch.shadow_msr |= MSR_SPE; | 97 | vcpu->arch.shadow_msr |= MSR_SPE; |
98 | preempt_enable(); | 98 | preempt_enable(); |
99 | } | 99 | } |
100 | 100 | ||
101 | static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu) | 101 | static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu) |
102 | { | 102 | { |
103 | if (vcpu->arch.shared->msr & MSR_SPE) { | 103 | if (vcpu->arch.shared->msr & MSR_SPE) { |
104 | if (!(vcpu->arch.shadow_msr & MSR_SPE)) | 104 | if (!(vcpu->arch.shadow_msr & MSR_SPE)) |
105 | kvmppc_vcpu_enable_spe(vcpu); | 105 | kvmppc_vcpu_enable_spe(vcpu); |
106 | } else if (vcpu->arch.shadow_msr & MSR_SPE) { | 106 | } else if (vcpu->arch.shadow_msr & MSR_SPE) { |
107 | kvmppc_vcpu_disable_spe(vcpu); | 107 | kvmppc_vcpu_disable_spe(vcpu); |
108 | } | 108 | } |
109 | } | 109 | } |
110 | #else | 110 | #else |
111 | static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu) | 111 | static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu) |
112 | { | 112 | { |
113 | } | 113 | } |
114 | #endif | 114 | #endif |
115 | 115 | ||
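The SPE plumbing above is the usual lazy unit-state pattern: the real MSR[SPE] (tracked in shadow_msr) is turned on only when the guest has it enabled and actually needs the unit, and turned off (state saved back) when the guest clears it, avoiding a full save/restore on every exit. The same idea in miniature (a user-space model; the names and bit value are invented):

#include <stdint.h>

#define MSR_SPE (1u << 25)      /* placeholder bit */

struct cpu {
        uint32_t guest_msr;     /* what the guest thinks it set */
        uint32_t shadow_msr;    /* what is really loaded in hardware */
};

static void sync_spe(struct cpu *c)
{
        int want = c->guest_msr & MSR_SPE;
        int have = c->shadow_msr & MSR_SPE;

        if (want && !have)
                c->shadow_msr |= MSR_SPE;   /* load guest SPE state */
        else if (!want && have)
                c->shadow_msr &= ~MSR_SPE;  /* save it back, disable */
}

int main(void)
{
        struct cpu c = { .guest_msr = MSR_SPE };

        sync_spe(&c);           /* guest enabled SPE: shadow follows */
        c.guest_msr = 0;
        sync_spe(&c);           /* guest dropped it: shadow cleared */
        return 0;
}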
116 | /* | 116 | /* |
117 | * Helper function for "full" MSR writes. No need to call this if only | 117 | * Helper function for "full" MSR writes. No need to call this if only |
118 | * EE/CE/ME/DE/RI are changing. | 118 | * EE/CE/ME/DE/RI are changing. |
119 | */ | 119 | */ |
120 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | 120 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) |
121 | { | 121 | { |
122 | u32 old_msr = vcpu->arch.shared->msr; | 122 | u32 old_msr = vcpu->arch.shared->msr; |
123 | 123 | ||
124 | vcpu->arch.shared->msr = new_msr; | 124 | vcpu->arch.shared->msr = new_msr; |
125 | 125 | ||
126 | kvmppc_mmu_msr_notify(vcpu, old_msr); | 126 | kvmppc_mmu_msr_notify(vcpu, old_msr); |
127 | kvmppc_vcpu_sync_spe(vcpu); | 127 | kvmppc_vcpu_sync_spe(vcpu); |
128 | } | 128 | } |
129 | 129 | ||
130 | static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, | 130 | static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, |
131 | unsigned int priority) | 131 | unsigned int priority) |
132 | { | 132 | { |
133 | set_bit(priority, &vcpu->arch.pending_exceptions); | 133 | set_bit(priority, &vcpu->arch.pending_exceptions); |
134 | } | 134 | } |
135 | 135 | ||
136 | static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, | 136 | static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, |
137 | ulong dear_flags, ulong esr_flags) | 137 | ulong dear_flags, ulong esr_flags) |
138 | { | 138 | { |
139 | vcpu->arch.queued_dear = dear_flags; | 139 | vcpu->arch.queued_dear = dear_flags; |
140 | vcpu->arch.queued_esr = esr_flags; | 140 | vcpu->arch.queued_esr = esr_flags; |
141 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); | 141 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); |
142 | } | 142 | } |
143 | 143 | ||
144 | static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, | 144 | static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, |
145 | ulong dear_flags, ulong esr_flags) | 145 | ulong dear_flags, ulong esr_flags) |
146 | { | 146 | { |
147 | vcpu->arch.queued_dear = dear_flags; | 147 | vcpu->arch.queued_dear = dear_flags; |
148 | vcpu->arch.queued_esr = esr_flags; | 148 | vcpu->arch.queued_esr = esr_flags; |
149 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); | 149 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); |
150 | } | 150 | } |
151 | 151 | ||
152 | static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, | 152 | static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, |
153 | ulong esr_flags) | 153 | ulong esr_flags) |
154 | { | 154 | { |
155 | vcpu->arch.queued_esr = esr_flags; | 155 | vcpu->arch.queued_esr = esr_flags; |
156 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); | 156 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); |
157 | } | 157 | } |
158 | 158 | ||
159 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags) | 159 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags) |
160 | { | 160 | { |
161 | vcpu->arch.queued_esr = esr_flags; | 161 | vcpu->arch.queued_esr = esr_flags; |
162 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); | 162 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); |
163 | } | 163 | } |
164 | 164 | ||
165 | void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) | 165 | void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) |
166 | { | 166 | { |
167 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER); | 167 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER); |
168 | } | 168 | } |
169 | 169 | ||
170 | int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) | 170 | int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) |
171 | { | 171 | { |
172 | return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); | 172 | return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); |
173 | } | 173 | } |
174 | 174 | ||
175 | void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) | 175 | void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) |
176 | { | 176 | { |
177 | clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); | 177 | clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); |
178 | } | 178 | } |
179 | 179 | ||
180 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, | 180 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, |
181 | struct kvm_interrupt *irq) | 181 | struct kvm_interrupt *irq) |
182 | { | 182 | { |
183 | unsigned int prio = BOOKE_IRQPRIO_EXTERNAL; | 183 | unsigned int prio = BOOKE_IRQPRIO_EXTERNAL; |
184 | 184 | ||
185 | if (irq->irq == KVM_INTERRUPT_SET_LEVEL) | 185 | if (irq->irq == KVM_INTERRUPT_SET_LEVEL) |
186 | prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL; | 186 | prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL; |
187 | 187 | ||
188 | kvmppc_booke_queue_irqprio(vcpu, prio); | 188 | kvmppc_booke_queue_irqprio(vcpu, prio); |
189 | } | 189 | } |
190 | 190 | ||
191 | void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, | 191 | void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, |
192 | struct kvm_interrupt *irq) | 192 | struct kvm_interrupt *irq) |
193 | { | 193 | { |
194 | clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); | 194 | clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); |
195 | clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); | 195 | clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); |
196 | } | 196 | } |
197 | 197 | ||
198 | /* Deliver the interrupt of the corresponding priority, if possible. */ | 198 | /* Deliver the interrupt of the corresponding priority, if possible. */ |
199 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | 199 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, |
200 | unsigned int priority) | 200 | unsigned int priority) |
201 | { | 201 | { |
202 | int allowed = 0; | 202 | int allowed = 0; |
203 | ulong uninitialized_var(msr_mask); | 203 | ulong uninitialized_var(msr_mask); |
204 | bool update_esr = false, update_dear = false; | 204 | bool update_esr = false, update_dear = false; |
205 | ulong crit_raw = vcpu->arch.shared->critical; | 205 | ulong crit_raw = vcpu->arch.shared->critical; |
206 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); | 206 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); |
207 | bool crit; | 207 | bool crit; |
208 | bool keep_irq = false; | 208 | bool keep_irq = false; |
209 | 209 | ||
210 | /* Truncate crit indicators in 32 bit mode */ | 210 | /* Truncate crit indicators in 32 bit mode */ |
211 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | 211 | if (!(vcpu->arch.shared->msr & MSR_SF)) { |
212 | crit_raw &= 0xffffffff; | 212 | crit_raw &= 0xffffffff; |
213 | crit_r1 &= 0xffffffff; | 213 | crit_r1 &= 0xffffffff; |
214 | } | 214 | } |
215 | 215 | ||
216 | /* Critical section when crit == r1 */ | 216 | /* Critical section when crit == r1 */ |
217 | crit = (crit_raw == crit_r1); | 217 | crit = (crit_raw == crit_r1); |
218 | /* ... and we're in supervisor mode */ | 218 | /* ... and we're in supervisor mode */ |
219 | crit = crit && !(vcpu->arch.shared->msr & MSR_PR); | 219 | crit = crit && !(vcpu->arch.shared->msr & MSR_PR); |
220 | 220 | ||
221 | if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) { | 221 | if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) { |
222 | priority = BOOKE_IRQPRIO_EXTERNAL; | 222 | priority = BOOKE_IRQPRIO_EXTERNAL; |
223 | keep_irq = true; | 223 | keep_irq = true; |
224 | } | 224 | } |
225 | 225 | ||
226 | switch (priority) { | 226 | switch (priority) { |
227 | case BOOKE_IRQPRIO_DTLB_MISS: | 227 | case BOOKE_IRQPRIO_DTLB_MISS: |
228 | case BOOKE_IRQPRIO_DATA_STORAGE: | 228 | case BOOKE_IRQPRIO_DATA_STORAGE: |
229 | update_dear = true; | 229 | update_dear = true; |
230 | /* fall through */ | 230 | /* fall through */ |
231 | case BOOKE_IRQPRIO_INST_STORAGE: | 231 | case BOOKE_IRQPRIO_INST_STORAGE: |
232 | case BOOKE_IRQPRIO_PROGRAM: | 232 | case BOOKE_IRQPRIO_PROGRAM: |
233 | update_esr = true; | 233 | update_esr = true; |
234 | /* fall through */ | 234 | /* fall through */ |
235 | case BOOKE_IRQPRIO_ITLB_MISS: | 235 | case BOOKE_IRQPRIO_ITLB_MISS: |
236 | case BOOKE_IRQPRIO_SYSCALL: | 236 | case BOOKE_IRQPRIO_SYSCALL: |
237 | case BOOKE_IRQPRIO_FP_UNAVAIL: | 237 | case BOOKE_IRQPRIO_FP_UNAVAIL: |
238 | case BOOKE_IRQPRIO_SPE_UNAVAIL: | 238 | case BOOKE_IRQPRIO_SPE_UNAVAIL: |
239 | case BOOKE_IRQPRIO_SPE_FP_DATA: | 239 | case BOOKE_IRQPRIO_SPE_FP_DATA: |
240 | case BOOKE_IRQPRIO_SPE_FP_ROUND: | 240 | case BOOKE_IRQPRIO_SPE_FP_ROUND: |
241 | case BOOKE_IRQPRIO_AP_UNAVAIL: | 241 | case BOOKE_IRQPRIO_AP_UNAVAIL: |
242 | case BOOKE_IRQPRIO_ALIGNMENT: | 242 | case BOOKE_IRQPRIO_ALIGNMENT: |
243 | allowed = 1; | 243 | allowed = 1; |
244 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | 244 | msr_mask = MSR_CE|MSR_ME|MSR_DE; |
245 | break; | 245 | break; |
246 | case BOOKE_IRQPRIO_CRITICAL: | 246 | case BOOKE_IRQPRIO_CRITICAL: |
247 | case BOOKE_IRQPRIO_WATCHDOG: | 247 | case BOOKE_IRQPRIO_WATCHDOG: |
248 | allowed = vcpu->arch.shared->msr & MSR_CE; | 248 | allowed = vcpu->arch.shared->msr & MSR_CE; |
249 | msr_mask = MSR_ME; | 249 | msr_mask = MSR_ME; |
250 | break; | 250 | break; |
251 | case BOOKE_IRQPRIO_MACHINE_CHECK: | 251 | case BOOKE_IRQPRIO_MACHINE_CHECK: |
252 | allowed = vcpu->arch.shared->msr & MSR_ME; | 252 | allowed = vcpu->arch.shared->msr & MSR_ME; |
253 | msr_mask = 0; | 253 | msr_mask = 0; |
254 | break; | 254 | break; |
255 | case BOOKE_IRQPRIO_EXTERNAL: | ||
256 | case BOOKE_IRQPRIO_DECREMENTER: | 255 | case BOOKE_IRQPRIO_DECREMENTER: |
257 | case BOOKE_IRQPRIO_FIT: | 256 | case BOOKE_IRQPRIO_FIT: |
257 | keep_irq = true; | ||
258 | /* fall through */ | ||
259 | case BOOKE_IRQPRIO_EXTERNAL: | ||
258 | allowed = vcpu->arch.shared->msr & MSR_EE; | 260 | allowed = vcpu->arch.shared->msr & MSR_EE; |
259 | allowed = allowed && !crit; | 261 | allowed = allowed && !crit; |
260 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | 262 | msr_mask = MSR_CE|MSR_ME|MSR_DE; |
261 | break; | 263 | break; |
262 | case BOOKE_IRQPRIO_DEBUG: | 264 | case BOOKE_IRQPRIO_DEBUG: |
263 | allowed = vcpu->arch.shared->msr & MSR_DE; | 265 | allowed = vcpu->arch.shared->msr & MSR_DE; |
264 | msr_mask = MSR_ME; | 266 | msr_mask = MSR_ME; |
265 | break; | 267 | break; |
266 | } | 268 | } |
267 | 269 | ||
268 | if (allowed) { | 270 | if (allowed) { |
269 | vcpu->arch.shared->srr0 = vcpu->arch.pc; | 271 | vcpu->arch.shared->srr0 = vcpu->arch.pc; |
270 | vcpu->arch.shared->srr1 = vcpu->arch.shared->msr; | 272 | vcpu->arch.shared->srr1 = vcpu->arch.shared->msr; |
271 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 273 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; |
272 | if (update_esr == true) | 274 | if (update_esr == true) |
273 | vcpu->arch.shared->esr = vcpu->arch.queued_esr; | 275 | vcpu->arch.shared->esr = vcpu->arch.queued_esr; |
274 | if (update_dear == true) | 276 | if (update_dear == true) |
275 | vcpu->arch.shared->dar = vcpu->arch.queued_dear; | 277 | vcpu->arch.shared->dar = vcpu->arch.queued_dear; |
276 | kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); | 278 | kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); |
277 | 279 | ||
278 | if (!keep_irq) | 280 | if (!keep_irq) |
279 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 281 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
280 | } | 282 | } |
281 | 283 | ||
282 | return allowed; | 284 | return allowed; |
283 | } | 285 | } |
284 | 286 | ||
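Besides the MSR gates, maskable delivery honours the shared-page critical-section handshake: the guest stores its r1 into shared->critical around a critical window, and the host holds off injection while the two match in supervisor mode, truncating both to 32 bits when MSR[SF] is clear. Isolated as a predicate (a user-space model; the MSR bit positions are assumptions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_SF (1ULL << 63)     /* 64-bit mode (assumed position) */
#define MSR_PR (1ULL << 14)     /* problem (user) state (assumed) */

static bool in_critical_section(uint64_t msr, uint64_t shared_critical,
                                uint64_t r1)
{
        /* 32-bit guests only compare the low words */
        if (!(msr & MSR_SF)) {
                shared_critical &= 0xffffffff;
                r1 &= 0xffffffff;
        }
        /* critical only counts in supervisor mode */
        return shared_critical == r1 && !(msr & MSR_PR);
}

int main(void)
{
        /* 32-bit supervisor guest, r1 matching the stored token */
        printf("%d\n",
               in_critical_section(0, 0xffffffffdeadbeefULL, 0xdeadbeef));
        return 0;
}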
287 | static void update_timer_ints(struct kvm_vcpu *vcpu) | ||
288 | { | ||
289 | if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS)) | ||
290 | kvmppc_core_queue_dec(vcpu); | ||
291 | else | ||
292 | kvmppc_core_dequeue_dec(vcpu); | ||
293 | } | ||
294 | |||
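update_timer_ints() is the heart of the new model described in the commit message: the guest's DEC line is simply TCR[DIE] AND TSR[DIS], re-evaluated whenever either register changes, so clearing the TSR bit (write-one-to-clear via the emulated mtspr) is what finally dequeues the interrupt. A standalone model (the bit values mirror reg_booke.h but should be treated as assumptions here):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TCR_DIE 0x04000000      /* decrementer interrupt enable */
#define TSR_DIS 0x08000000      /* decrementer interrupt status (w1c) */

struct timer_state {
        uint32_t tcr, tsr;
        bool dec_pending;       /* stands in for the DEC irqprio bit */
};

static void update_timer_ints(struct timer_state *t)
{
        t->dec_pending = (t->tcr & TCR_DIE) && (t->tsr & TSR_DIS);
}

int main(void)
{
        struct timer_state t = { .tcr = TCR_DIE };

        t.tsr |= TSR_DIS;       /* decrementer hit zero: TSR latches it */
        update_timer_ints(&t);
        printf("pending after expiry: %d\n", t.dec_pending);   /* 1 */

        t.tsr &= ~TSR_DIS;      /* guest acks via write-1-to-clear */
        update_timer_ints(&t);
        printf("pending after ack:   %d\n", t.dec_pending);    /* 0 */
        return 0;
}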
285 | static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu) | 295 | static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu) |
286 | { | 296 | { |
287 | unsigned long *pending = &vcpu->arch.pending_exceptions; | 297 | unsigned long *pending = &vcpu->arch.pending_exceptions; |
288 | unsigned int priority; | 298 | unsigned int priority; |
289 | 299 | ||
300 | if (vcpu->requests) { | ||
301 | if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) { | ||
302 | smp_mb(); | ||
303 | update_timer_ints(vcpu); | ||
304 | } | ||
305 | } | ||
306 | |||
290 | priority = __ffs(*pending); | 307 | priority = __ffs(*pending); |
291 | while (priority <= BOOKE_IRQPRIO_MAX) { | 308 | while (priority <= BOOKE_IRQPRIO_MAX) { |
292 | if (kvmppc_booke_irqprio_deliver(vcpu, priority)) | 309 | if (kvmppc_booke_irqprio_deliver(vcpu, priority)) |
293 | break; | 310 | break; |
294 | 311 | ||
295 | priority = find_next_bit(pending, | 312 | priority = find_next_bit(pending, |
296 | BITS_PER_BYTE * sizeof(*pending), | 313 | BITS_PER_BYTE * sizeof(*pending), |
297 | priority + 1); | 314 | priority + 1); |
298 | } | 315 | } |
299 | 316 | ||
300 | /* Tell the guest about our interrupt status */ | 317 | /* Tell the guest about our interrupt status */ |
301 | vcpu->arch.shared->int_pending = !!*pending; | 318 | vcpu->arch.shared->int_pending = !!*pending; |
302 | } | 319 | } |
303 | 320 | ||
304 | /* Check pending exceptions and deliver one, if possible. */ | 321 | /* Check pending exceptions and deliver one, if possible. */ |
305 | void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) | 322 | void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) |
306 | { | 323 | { |
307 | WARN_ON_ONCE(!irqs_disabled()); | 324 | WARN_ON_ONCE(!irqs_disabled()); |
308 | 325 | ||
309 | kvmppc_core_check_exceptions(vcpu); | 326 | kvmppc_core_check_exceptions(vcpu); |
310 | 327 | ||
311 | if (vcpu->arch.shared->msr & MSR_WE) { | 328 | if (vcpu->arch.shared->msr & MSR_WE) { |
312 | local_irq_enable(); | 329 | local_irq_enable(); |
313 | kvm_vcpu_block(vcpu); | 330 | kvm_vcpu_block(vcpu); |
314 | local_irq_disable(); | 331 | local_irq_disable(); |
315 | 332 | ||
316 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); | 333 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); |
317 | kvmppc_core_check_exceptions(vcpu); | 334 | kvmppc_core_check_exceptions(vcpu); |
318 | } | 335 | }
319 | } | 336 | } |
320 | 337 | ||
321 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 338 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
322 | { | 339 | { |
323 | int ret; | 340 | int ret; |
324 | 341 | ||
325 | if (!vcpu->arch.sane) { | 342 | if (!vcpu->arch.sane) { |
326 | kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 343 | kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
327 | return -EINVAL; | 344 | return -EINVAL; |
328 | } | 345 | } |
329 | 346 | ||
330 | local_irq_disable(); | 347 | local_irq_disable(); |
331 | 348 | ||
332 | kvmppc_core_prepare_to_enter(vcpu); | 349 | kvmppc_core_prepare_to_enter(vcpu); |
333 | 350 | ||
334 | if (signal_pending(current)) { | 351 | if (signal_pending(current)) { |
335 | kvm_run->exit_reason = KVM_EXIT_INTR; | 352 | kvm_run->exit_reason = KVM_EXIT_INTR; |
336 | ret = -EINTR; | 353 | ret = -EINTR; |
337 | goto out; | 354 | goto out; |
338 | } | 355 | } |
339 | 356 | ||
340 | kvm_guest_enter(); | 357 | kvm_guest_enter(); |
341 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); | 358 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); |
342 | kvm_guest_exit(); | 359 | kvm_guest_exit(); |
343 | 360 | ||
344 | out: | 361 | out: |
345 | local_irq_enable(); | 362 | local_irq_enable(); |
346 | return ret; | 363 | return ret; |
347 | } | 364 | } |
348 | 365 | ||
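kvmppc_vcpu_run() is where KVM_RUN lands: interrupts are disabled, pending exceptions are injected, and a pending signal becomes KVM_EXIT_INTR back to userspace instead of entering the guest. The matching user-space run loop is the standard shape (minimal sketch, error handling elided; run is the mmap()ed kvm_run page for the vcpu fd):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <stdio.h>

static void run_loop(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno == EINTR)
                        continue;       /* signal: KVM_EXIT_INTR path */

                switch (run->exit_reason) {
                case KVM_EXIT_DCR:
                case KVM_EXIT_MMIO:
                        /* emulate the device access, then re-enter */
                        break;
                default:
                        printf("exit_reason %u\n", run->exit_reason);
                        return;
                }
        }
}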
349 | /** | 366 | /** |
350 | * kvmppc_handle_exit | 367 | * kvmppc_handle_exit |
351 | * | 368 | * |
352 | * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) | 369 | * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) |
353 | */ | 370 | */ |
354 | int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | 371 | int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, |
355 | unsigned int exit_nr) | 372 | unsigned int exit_nr) |
356 | { | 373 | { |
357 | enum emulation_result er; | 374 | enum emulation_result er; |
358 | int r = RESUME_HOST; | 375 | int r = RESUME_HOST; |
359 | 376 | ||
360 | /* update before a new last_exit_type is rewritten */ | 377 | /* update before a new last_exit_type is rewritten */ |
361 | kvmppc_update_timing_stats(vcpu); | 378 | kvmppc_update_timing_stats(vcpu); |
362 | 379 | ||
363 | local_irq_enable(); | 380 | local_irq_enable(); |
364 | 381 | ||
365 | run->exit_reason = KVM_EXIT_UNKNOWN; | 382 | run->exit_reason = KVM_EXIT_UNKNOWN; |
366 | run->ready_for_interrupt_injection = 1; | 383 | run->ready_for_interrupt_injection = 1; |
367 | 384 | ||
368 | switch (exit_nr) { | 385 | switch (exit_nr) { |
369 | case BOOKE_INTERRUPT_MACHINE_CHECK: | 386 | case BOOKE_INTERRUPT_MACHINE_CHECK: |
370 | printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); | 387 | printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); |
371 | kvmppc_dump_vcpu(vcpu); | 388 | kvmppc_dump_vcpu(vcpu); |
372 | r = RESUME_HOST; | 389 | r = RESUME_HOST; |
373 | break; | 390 | break; |
374 | 391 | ||
375 | case BOOKE_INTERRUPT_EXTERNAL: | 392 | case BOOKE_INTERRUPT_EXTERNAL: |
376 | kvmppc_account_exit(vcpu, EXT_INTR_EXITS); | 393 | kvmppc_account_exit(vcpu, EXT_INTR_EXITS); |
377 | if (need_resched()) | 394 | if (need_resched()) |
378 | cond_resched(); | 395 | cond_resched(); |
379 | r = RESUME_GUEST; | 396 | r = RESUME_GUEST; |
380 | break; | 397 | break; |
381 | 398 | ||
382 | case BOOKE_INTERRUPT_DECREMENTER: | 399 | case BOOKE_INTERRUPT_DECREMENTER: |
383 | /* Since we switched IVPR back to the host's value, the host | 400 | /* Since we switched IVPR back to the host's value, the host |
384 | * handled this interrupt the moment we enabled interrupts. | 401 | * handled this interrupt the moment we enabled interrupts. |
385 | * Now we just offer it a chance to reschedule the guest. */ | 402 | * Now we just offer it a chance to reschedule the guest. */ |
386 | kvmppc_account_exit(vcpu, DEC_EXITS); | 403 | kvmppc_account_exit(vcpu, DEC_EXITS); |
387 | if (need_resched()) | 404 | if (need_resched()) |
388 | cond_resched(); | 405 | cond_resched(); |
389 | r = RESUME_GUEST; | 406 | r = RESUME_GUEST; |
390 | break; | 407 | break; |
391 | 408 | ||
392 | case BOOKE_INTERRUPT_PROGRAM: | 409 | case BOOKE_INTERRUPT_PROGRAM: |
393 | if (vcpu->arch.shared->msr & MSR_PR) { | 410 | if (vcpu->arch.shared->msr & MSR_PR) { |
394 | /* Program traps generated by user-level software must be handled | 411 | /* Program traps generated by user-level software must be handled |
395 | * by the guest kernel. */ | 412 | * by the guest kernel. */ |
396 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); | 413 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); |
397 | r = RESUME_GUEST; | 414 | r = RESUME_GUEST; |
398 | kvmppc_account_exit(vcpu, USR_PR_INST); | 415 | kvmppc_account_exit(vcpu, USR_PR_INST); |
399 | break; | 416 | break; |
400 | } | 417 | } |
401 | 418 | ||
402 | er = kvmppc_emulate_instruction(run, vcpu); | 419 | er = kvmppc_emulate_instruction(run, vcpu); |
403 | switch (er) { | 420 | switch (er) { |
404 | case EMULATE_DONE: | 421 | case EMULATE_DONE: |
405 | /* don't overwrite subtypes, just account kvm_stats */ | 422 | /* don't overwrite subtypes, just account kvm_stats */ |
406 | kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); | 423 | kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); |
407 | /* Future optimization: only reload non-volatiles if | 424 | /* Future optimization: only reload non-volatiles if |
408 | * they were actually modified by emulation. */ | 425 | * they were actually modified by emulation. */ |
409 | r = RESUME_GUEST_NV; | 426 | r = RESUME_GUEST_NV; |
410 | break; | 427 | break; |
411 | case EMULATE_DO_DCR: | 428 | case EMULATE_DO_DCR: |
412 | run->exit_reason = KVM_EXIT_DCR; | 429 | run->exit_reason = KVM_EXIT_DCR; |
413 | r = RESUME_HOST; | 430 | r = RESUME_HOST; |
414 | break; | 431 | break; |
415 | case EMULATE_FAIL: | 432 | case EMULATE_FAIL: |
416 | /* XXX Deliver Program interrupt to guest. */ | 433 | /* XXX Deliver Program interrupt to guest. */ |
417 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | 434 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", |
418 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | 435 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); |
419 | /* For debugging, encode the failing instruction and | 436 | /* For debugging, encode the failing instruction and |
420 | * report it to userspace. */ | 437 | * report it to userspace. */ |
421 | run->hw.hardware_exit_reason = ~0ULL << 32; | 438 | run->hw.hardware_exit_reason = ~0ULL << 32; |
422 | run->hw.hardware_exit_reason |= vcpu->arch.last_inst; | 439 | run->hw.hardware_exit_reason |= vcpu->arch.last_inst; |
423 | r = RESUME_HOST; | 440 | r = RESUME_HOST; |
424 | break; | 441 | break; |
425 | default: | 442 | default: |
426 | BUG(); | 443 | BUG(); |
427 | } | 444 | } |
428 | break; | 445 | break; |
429 | 446 | ||
430 | case BOOKE_INTERRUPT_FP_UNAVAIL: | 447 | case BOOKE_INTERRUPT_FP_UNAVAIL: |
431 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); | 448 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); |
432 | kvmppc_account_exit(vcpu, FP_UNAVAIL); | 449 | kvmppc_account_exit(vcpu, FP_UNAVAIL); |
433 | r = RESUME_GUEST; | 450 | r = RESUME_GUEST; |
434 | break; | 451 | break; |
435 | 452 | ||
436 | #ifdef CONFIG_SPE | 453 | #ifdef CONFIG_SPE |
437 | case BOOKE_INTERRUPT_SPE_UNAVAIL: { | 454 | case BOOKE_INTERRUPT_SPE_UNAVAIL: { |
438 | if (vcpu->arch.shared->msr & MSR_SPE) | 455 | if (vcpu->arch.shared->msr & MSR_SPE) |
439 | kvmppc_vcpu_enable_spe(vcpu); | 456 | kvmppc_vcpu_enable_spe(vcpu); |
440 | else | 457 | else |
441 | kvmppc_booke_queue_irqprio(vcpu, | 458 | kvmppc_booke_queue_irqprio(vcpu, |
442 | BOOKE_IRQPRIO_SPE_UNAVAIL); | 459 | BOOKE_IRQPRIO_SPE_UNAVAIL); |
443 | r = RESUME_GUEST; | 460 | r = RESUME_GUEST; |
444 | break; | 461 | break; |
445 | } | 462 | } |
446 | 463 | ||
447 | case BOOKE_INTERRUPT_SPE_FP_DATA: | 464 | case BOOKE_INTERRUPT_SPE_FP_DATA: |
448 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA); | 465 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA); |
449 | r = RESUME_GUEST; | 466 | r = RESUME_GUEST; |
450 | break; | 467 | break; |
451 | 468 | ||
452 | case BOOKE_INTERRUPT_SPE_FP_ROUND: | 469 | case BOOKE_INTERRUPT_SPE_FP_ROUND: |
453 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND); | 470 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND); |
454 | r = RESUME_GUEST; | 471 | r = RESUME_GUEST; |
455 | break; | 472 | break; |
456 | #else | 473 | #else |
457 | case BOOKE_INTERRUPT_SPE_UNAVAIL: | 474 | case BOOKE_INTERRUPT_SPE_UNAVAIL: |
458 | /* | 475 | /* |
459 | * Guest wants SPE, but host kernel doesn't support it. Send | 476 | * Guest wants SPE, but host kernel doesn't support it. Send |
460 | * an "unimplemented operation" program check to the guest. | 477 | * an "unimplemented operation" program check to the guest. |
461 | */ | 478 | */ |
462 | kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV); | 479 | kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV); |
463 | r = RESUME_GUEST; | 480 | r = RESUME_GUEST; |
464 | break; | 481 | break; |
465 | 482 | ||
466 | /* | 483 | /* |
467 | * These really should never happen without CONFIG_SPE, | 484 | * These really should never happen without CONFIG_SPE, |
468 | * as we should never enable the real MSR[SPE] in the guest. | 485 | * as we should never enable the real MSR[SPE] in the guest. |
469 | */ | 486 | */ |
470 | case BOOKE_INTERRUPT_SPE_FP_DATA: | 487 | case BOOKE_INTERRUPT_SPE_FP_DATA: |
471 | case BOOKE_INTERRUPT_SPE_FP_ROUND: | 488 | case BOOKE_INTERRUPT_SPE_FP_ROUND: |
472 | printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n", | 489 | printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n", |
473 | __func__, exit_nr, vcpu->arch.pc); | 490 | __func__, exit_nr, vcpu->arch.pc); |
474 | run->hw.hardware_exit_reason = exit_nr; | 491 | run->hw.hardware_exit_reason = exit_nr; |
475 | r = RESUME_HOST; | 492 | r = RESUME_HOST; |
476 | break; | 493 | break; |
477 | #endif | 494 | #endif |
478 | 495 | ||
479 | case BOOKE_INTERRUPT_DATA_STORAGE: | 496 | case BOOKE_INTERRUPT_DATA_STORAGE: |
480 | kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear, | 497 | kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear, |
481 | vcpu->arch.fault_esr); | 498 | vcpu->arch.fault_esr); |
482 | kvmppc_account_exit(vcpu, DSI_EXITS); | 499 | kvmppc_account_exit(vcpu, DSI_EXITS); |
483 | r = RESUME_GUEST; | 500 | r = RESUME_GUEST; |
484 | break; | 501 | break; |
485 | 502 | ||
486 | case BOOKE_INTERRUPT_INST_STORAGE: | 503 | case BOOKE_INTERRUPT_INST_STORAGE: |
487 | kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr); | 504 | kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr); |
488 | kvmppc_account_exit(vcpu, ISI_EXITS); | 505 | kvmppc_account_exit(vcpu, ISI_EXITS); |
489 | r = RESUME_GUEST; | 506 | r = RESUME_GUEST; |
490 | break; | 507 | break; |
491 | 508 | ||
492 | case BOOKE_INTERRUPT_SYSCALL: | 509 | case BOOKE_INTERRUPT_SYSCALL: |
493 | if (!(vcpu->arch.shared->msr & MSR_PR) && | 510 | if (!(vcpu->arch.shared->msr & MSR_PR) && |
494 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { | 511 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { |
495 | /* KVM PV hypercalls */ | 512 | /* KVM PV hypercalls */ |
496 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); | 513 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); |
497 | r = RESUME_GUEST; | 514 | r = RESUME_GUEST; |
498 | } else { | 515 | } else { |
499 | /* Guest syscalls */ | 516 | /* Guest syscalls */ |
500 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); | 517 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); |
501 | } | 518 | } |
502 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); | 519 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); |
503 | r = RESUME_GUEST; | 520 | r = RESUME_GUEST; |
504 | break; | 521 | break; |
505 | 522 | ||
506 | case BOOKE_INTERRUPT_DTLB_MISS: { | 523 | case BOOKE_INTERRUPT_DTLB_MISS: { |
507 | unsigned long eaddr = vcpu->arch.fault_dear; | 524 | unsigned long eaddr = vcpu->arch.fault_dear; |
508 | int gtlb_index; | 525 | int gtlb_index; |
509 | gpa_t gpaddr; | 526 | gpa_t gpaddr; |
510 | gfn_t gfn; | 527 | gfn_t gfn; |
511 | 528 | ||
512 | #ifdef CONFIG_KVM_E500 | 529 | #ifdef CONFIG_KVM_E500 |
513 | if (!(vcpu->arch.shared->msr & MSR_PR) && | 530 | if (!(vcpu->arch.shared->msr & MSR_PR) && |
514 | (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { | 531 | (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { |
515 | kvmppc_map_magic(vcpu); | 532 | kvmppc_map_magic(vcpu); |
516 | kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); | 533 | kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); |
517 | r = RESUME_GUEST; | 534 | r = RESUME_GUEST; |
518 | 535 | ||
519 | break; | 536 | break; |
520 | } | 537 | } |
521 | #endif | 538 | #endif |
522 | 539 | ||
523 | /* Check the guest TLB. */ | 540 | /* Check the guest TLB. */ |
524 | gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); | 541 | gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); |
525 | if (gtlb_index < 0) { | 542 | if (gtlb_index < 0) { |
526 | /* The guest didn't have a mapping for it. */ | 543 | /* The guest didn't have a mapping for it. */ |
527 | kvmppc_core_queue_dtlb_miss(vcpu, | 544 | kvmppc_core_queue_dtlb_miss(vcpu, |
528 | vcpu->arch.fault_dear, | 545 | vcpu->arch.fault_dear, |
529 | vcpu->arch.fault_esr); | 546 | vcpu->arch.fault_esr); |
530 | kvmppc_mmu_dtlb_miss(vcpu); | 547 | kvmppc_mmu_dtlb_miss(vcpu); |
531 | kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); | 548 | kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); |
532 | r = RESUME_GUEST; | 549 | r = RESUME_GUEST; |
533 | break; | 550 | break; |
534 | } | 551 | } |
535 | 552 | ||
536 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); | 553 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); |
537 | gfn = gpaddr >> PAGE_SHIFT; | 554 | gfn = gpaddr >> PAGE_SHIFT; |
538 | 555 | ||
539 | if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { | 556 | if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { |
540 | /* The guest TLB had a mapping, but the shadow TLB | 557 | /* The guest TLB had a mapping, but the shadow TLB |
541 | * didn't, and it is RAM. This could be because: | 558 | * didn't, and it is RAM. This could be because: |
542 | * a) the entry is mapping the host kernel, or | 559 | * a) the entry is mapping the host kernel, or |
543 | * b) the guest used a large mapping which we're faking | 560 | * b) the guest used a large mapping which we're faking |
544 | * Either way, we need to satisfy the fault without | 561 | * Either way, we need to satisfy the fault without |
545 | * invoking the guest. */ | 562 | * invoking the guest. */ |
546 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); | 563 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); |
547 | kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); | 564 | kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); |
548 | r = RESUME_GUEST; | 565 | r = RESUME_GUEST; |
549 | } else { | 566 | } else { |
550 | /* Guest has mapped and accessed a page which is not | 567 | /* Guest has mapped and accessed a page which is not |
551 | * actually RAM. */ | 568 | * actually RAM. */ |
552 | vcpu->arch.paddr_accessed = gpaddr; | 569 | vcpu->arch.paddr_accessed = gpaddr; |
553 | r = kvmppc_emulate_mmio(run, vcpu); | 570 | r = kvmppc_emulate_mmio(run, vcpu); |
554 | kvmppc_account_exit(vcpu, MMIO_EXITS); | 571 | kvmppc_account_exit(vcpu, MMIO_EXITS); |
555 | } | 572 | } |
556 | 573 | ||
557 | break; | 574 | break; |
558 | } | 575 | } |
559 | 576 | ||
560 | case BOOKE_INTERRUPT_ITLB_MISS: { | 577 | case BOOKE_INTERRUPT_ITLB_MISS: { |
561 | unsigned long eaddr = vcpu->arch.pc; | 578 | unsigned long eaddr = vcpu->arch.pc; |
562 | gpa_t gpaddr; | 579 | gpa_t gpaddr; |
563 | gfn_t gfn; | 580 | gfn_t gfn; |
564 | int gtlb_index; | 581 | int gtlb_index; |
565 | 582 | ||
566 | r = RESUME_GUEST; | 583 | r = RESUME_GUEST; |
567 | 584 | ||
568 | /* Check the guest TLB. */ | 585 | /* Check the guest TLB. */ |
569 | gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr); | 586 | gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr); |
570 | if (gtlb_index < 0) { | 587 | if (gtlb_index < 0) { |
571 | /* The guest didn't have a mapping for it. */ | 588 | /* The guest didn't have a mapping for it. */ |
572 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); | 589 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); |
573 | kvmppc_mmu_itlb_miss(vcpu); | 590 | kvmppc_mmu_itlb_miss(vcpu); |
574 | kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS); | 591 | kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS); |
575 | break; | 592 | break; |
576 | } | 593 | } |
577 | 594 | ||
578 | kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); | 595 | kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); |
579 | 596 | ||
580 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); | 597 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); |
581 | gfn = gpaddr >> PAGE_SHIFT; | 598 | gfn = gpaddr >> PAGE_SHIFT; |
582 | 599 | ||
583 | if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { | 600 | if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { |
584 | /* The guest TLB had a mapping, but the shadow TLB | 601 | /* The guest TLB had a mapping, but the shadow TLB |
585 | * didn't. This could be because: | 602 | * didn't. This could be because: |
586 | * a) the entry is mapping the host kernel, or | 603 | * a) the entry is mapping the host kernel, or |
587 | * b) the guest used a large mapping which we're faking | 604 | * b) the guest used a large mapping which we're faking |
588 | * Either way, we need to satisfy the fault without | 605 | * Either way, we need to satisfy the fault without |
589 | * invoking the guest. */ | 606 | * invoking the guest. */ |
590 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); | 607 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); |
591 | } else { | 608 | } else { |
592 | /* Guest mapped and leaped at non-RAM! */ | 609 | /* Guest mapped and leaped at non-RAM! */ |
593 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); | 610 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); |
594 | } | 611 | } |
595 | 612 | ||
596 | break; | 613 | break; |
597 | } | 614 | } |
598 | 615 | ||
599 | case BOOKE_INTERRUPT_DEBUG: { | 616 | case BOOKE_INTERRUPT_DEBUG: { |
600 | u32 dbsr; | 617 | u32 dbsr; |
601 | 618 | ||
602 | vcpu->arch.pc = mfspr(SPRN_CSRR0); | 619 | vcpu->arch.pc = mfspr(SPRN_CSRR0); |
603 | 620 | ||
604 | /* clear IAC events in DBSR register */ | 621 | /* clear IAC events in DBSR register */ |
605 | dbsr = mfspr(SPRN_DBSR); | 622 | dbsr = mfspr(SPRN_DBSR); |
606 | dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4; | 623 | dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4; |
607 | mtspr(SPRN_DBSR, dbsr); | 624 | mtspr(SPRN_DBSR, dbsr); |
608 | 625 | ||
609 | run->exit_reason = KVM_EXIT_DEBUG; | 626 | run->exit_reason = KVM_EXIT_DEBUG; |
610 | kvmppc_account_exit(vcpu, DEBUG_EXITS); | 627 | kvmppc_account_exit(vcpu, DEBUG_EXITS); |
611 | r = RESUME_HOST; | 628 | r = RESUME_HOST; |
612 | break; | 629 | break; |
613 | } | 630 | } |
614 | 631 | ||
615 | default: | 632 | default: |
616 | printk(KERN_EMERG "exit_nr %d\n", exit_nr); | 633 | printk(KERN_EMERG "exit_nr %d\n", exit_nr); |
617 | BUG(); | 634 | BUG(); |
618 | } | 635 | } |
619 | 636 | ||
620 | local_irq_disable(); | 637 | local_irq_disable(); |
621 | 638 | ||
622 | kvmppc_core_prepare_to_enter(vcpu); | 639 | kvmppc_core_prepare_to_enter(vcpu); |
623 | 640 | ||
624 | if (!(r & RESUME_HOST)) { | 641 | if (!(r & RESUME_HOST)) { |
625 | /* To avoid clobbering exit_reason, only check for signals if | 642 | /* To avoid clobbering exit_reason, only check for signals if |
626 | * we aren't already exiting to userspace for some other | 643 | * we aren't already exiting to userspace for some other |
627 | * reason. */ | 644 | * reason. */ |
628 | if (signal_pending(current)) { | 645 | if (signal_pending(current)) { |
629 | run->exit_reason = KVM_EXIT_INTR; | 646 | run->exit_reason = KVM_EXIT_INTR; |
630 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); | 647 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); |
631 | kvmppc_account_exit(vcpu, SIGNAL_EXITS); | 648 | kvmppc_account_exit(vcpu, SIGNAL_EXITS); |
632 | } | 649 | } |
633 | } | 650 | } |
634 | 651 | ||
635 | return r; | 652 | return r; |
636 | } | 653 | } |
637 | 654 | ||
638 | /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ | 655 | /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ |
639 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 656 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
640 | { | 657 | { |
641 | int i; | 658 | int i; |
642 | int r; | 659 | int r; |
643 | 660 | ||
644 | vcpu->arch.pc = 0; | 661 | vcpu->arch.pc = 0; |
645 | vcpu->arch.shared->msr = 0; | 662 | vcpu->arch.shared->msr = 0; |
646 | vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; | 663 | vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; |
647 | vcpu->arch.shared->pir = vcpu->vcpu_id; | 664 | vcpu->arch.shared->pir = vcpu->vcpu_id; |
648 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ | 665 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
649 | 666 | ||
650 | vcpu->arch.shadow_pid = 1; | 667 | vcpu->arch.shadow_pid = 1; |
651 | 668 | ||
652 | /* Eye-catching numbers so we know if the guest takes an interrupt | 669 | /* Eye-catching numbers so we know if the guest takes an interrupt |
653 | * before it's programmed its own IVPR/IVORs. */ | 670 | * before it's programmed its own IVPR/IVORs. */ |
654 | vcpu->arch.ivpr = 0x55550000; | 671 | vcpu->arch.ivpr = 0x55550000; |
655 | for (i = 0; i < BOOKE_IRQPRIO_MAX; i++) | 672 | for (i = 0; i < BOOKE_IRQPRIO_MAX; i++) |
656 | vcpu->arch.ivor[i] = 0x7700 | i * 4; | 673 | vcpu->arch.ivor[i] = 0x7700 | i * 4; |
657 | 674 | ||
658 | kvmppc_init_timing_stats(vcpu); | 675 | kvmppc_init_timing_stats(vcpu); |
659 | 676 | ||
660 | r = kvmppc_core_vcpu_setup(vcpu); | 677 | r = kvmppc_core_vcpu_setup(vcpu); |
661 | kvmppc_sanity_check(vcpu); | 678 | kvmppc_sanity_check(vcpu); |
662 | return r; | 679 | return r; |
663 | } | 680 | } |
664 | 681 | ||
665 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 682 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
666 | { | 683 | { |
667 | int i; | 684 | int i; |
668 | 685 | ||
669 | regs->pc = vcpu->arch.pc; | 686 | regs->pc = vcpu->arch.pc; |
670 | regs->cr = kvmppc_get_cr(vcpu); | 687 | regs->cr = kvmppc_get_cr(vcpu); |
671 | regs->ctr = vcpu->arch.ctr; | 688 | regs->ctr = vcpu->arch.ctr; |
672 | regs->lr = vcpu->arch.lr; | 689 | regs->lr = vcpu->arch.lr; |
673 | regs->xer = kvmppc_get_xer(vcpu); | 690 | regs->xer = kvmppc_get_xer(vcpu); |
674 | regs->msr = vcpu->arch.shared->msr; | 691 | regs->msr = vcpu->arch.shared->msr; |
675 | regs->srr0 = vcpu->arch.shared->srr0; | 692 | regs->srr0 = vcpu->arch.shared->srr0; |
676 | regs->srr1 = vcpu->arch.shared->srr1; | 693 | regs->srr1 = vcpu->arch.shared->srr1; |
677 | regs->pid = vcpu->arch.pid; | 694 | regs->pid = vcpu->arch.pid; |
678 | regs->sprg0 = vcpu->arch.shared->sprg0; | 695 | regs->sprg0 = vcpu->arch.shared->sprg0; |
679 | regs->sprg1 = vcpu->arch.shared->sprg1; | 696 | regs->sprg1 = vcpu->arch.shared->sprg1; |
680 | regs->sprg2 = vcpu->arch.shared->sprg2; | 697 | regs->sprg2 = vcpu->arch.shared->sprg2; |
681 | regs->sprg3 = vcpu->arch.shared->sprg3; | 698 | regs->sprg3 = vcpu->arch.shared->sprg3; |
682 | regs->sprg4 = vcpu->arch.shared->sprg4; | 699 | regs->sprg4 = vcpu->arch.shared->sprg4; |
683 | regs->sprg5 = vcpu->arch.shared->sprg5; | 700 | regs->sprg5 = vcpu->arch.shared->sprg5; |
684 | regs->sprg6 = vcpu->arch.shared->sprg6; | 701 | regs->sprg6 = vcpu->arch.shared->sprg6; |
685 | regs->sprg7 = vcpu->arch.shared->sprg7; | 702 | regs->sprg7 = vcpu->arch.shared->sprg7; |
686 | 703 | ||
687 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 704 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
688 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); | 705 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
689 | 706 | ||
690 | return 0; | 707 | return 0; |
691 | } | 708 | } |
692 | 709 | ||
693 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 710 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
694 | { | 711 | { |
695 | int i; | 712 | int i; |
696 | 713 | ||
697 | vcpu->arch.pc = regs->pc; | 714 | vcpu->arch.pc = regs->pc; |
698 | kvmppc_set_cr(vcpu, regs->cr); | 715 | kvmppc_set_cr(vcpu, regs->cr); |
699 | vcpu->arch.ctr = regs->ctr; | 716 | vcpu->arch.ctr = regs->ctr; |
700 | vcpu->arch.lr = regs->lr; | 717 | vcpu->arch.lr = regs->lr; |
701 | kvmppc_set_xer(vcpu, regs->xer); | 718 | kvmppc_set_xer(vcpu, regs->xer); |
702 | kvmppc_set_msr(vcpu, regs->msr); | 719 | kvmppc_set_msr(vcpu, regs->msr); |
703 | vcpu->arch.shared->srr0 = regs->srr0; | 720 | vcpu->arch.shared->srr0 = regs->srr0; |
704 | vcpu->arch.shared->srr1 = regs->srr1; | 721 | vcpu->arch.shared->srr1 = regs->srr1; |
705 | kvmppc_set_pid(vcpu, regs->pid); | 722 | kvmppc_set_pid(vcpu, regs->pid); |
706 | vcpu->arch.shared->sprg0 = regs->sprg0; | 723 | vcpu->arch.shared->sprg0 = regs->sprg0; |
707 | vcpu->arch.shared->sprg1 = regs->sprg1; | 724 | vcpu->arch.shared->sprg1 = regs->sprg1; |
708 | vcpu->arch.shared->sprg2 = regs->sprg2; | 725 | vcpu->arch.shared->sprg2 = regs->sprg2; |
709 | vcpu->arch.shared->sprg3 = regs->sprg3; | 726 | vcpu->arch.shared->sprg3 = regs->sprg3; |
710 | vcpu->arch.shared->sprg4 = regs->sprg4; | 727 | vcpu->arch.shared->sprg4 = regs->sprg4; |
711 | vcpu->arch.shared->sprg5 = regs->sprg5; | 728 | vcpu->arch.shared->sprg5 = regs->sprg5; |
712 | vcpu->arch.shared->sprg6 = regs->sprg6; | 729 | vcpu->arch.shared->sprg6 = regs->sprg6; |
713 | vcpu->arch.shared->sprg7 = regs->sprg7; | 730 | vcpu->arch.shared->sprg7 = regs->sprg7; |
714 | 731 | ||
715 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 732 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
716 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); | 733 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); |
717 | 734 | ||
718 | return 0; | 735 | return 0; |
719 | } | 736 | } |
720 | 737 | ||
721 | static void get_sregs_base(struct kvm_vcpu *vcpu, | 738 | static void get_sregs_base(struct kvm_vcpu *vcpu, |
722 | struct kvm_sregs *sregs) | 739 | struct kvm_sregs *sregs) |
723 | { | 740 | { |
724 | u64 tb = get_tb(); | 741 | u64 tb = get_tb(); |
725 | 742 | ||
726 | sregs->u.e.features |= KVM_SREGS_E_BASE; | 743 | sregs->u.e.features |= KVM_SREGS_E_BASE; |
727 | 744 | ||
728 | sregs->u.e.csrr0 = vcpu->arch.csrr0; | 745 | sregs->u.e.csrr0 = vcpu->arch.csrr0; |
729 | sregs->u.e.csrr1 = vcpu->arch.csrr1; | 746 | sregs->u.e.csrr1 = vcpu->arch.csrr1; |
730 | sregs->u.e.mcsr = vcpu->arch.mcsr; | 747 | sregs->u.e.mcsr = vcpu->arch.mcsr; |
731 | sregs->u.e.esr = vcpu->arch.shared->esr; | 748 | sregs->u.e.esr = vcpu->arch.shared->esr; |
732 | sregs->u.e.dear = vcpu->arch.shared->dar; | 749 | sregs->u.e.dear = vcpu->arch.shared->dar; |
733 | sregs->u.e.tsr = vcpu->arch.tsr; | 750 | sregs->u.e.tsr = vcpu->arch.tsr; |
734 | sregs->u.e.tcr = vcpu->arch.tcr; | 751 | sregs->u.e.tcr = vcpu->arch.tcr; |
735 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); | 752 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); |
736 | sregs->u.e.tb = tb; | 753 | sregs->u.e.tb = tb; |
737 | sregs->u.e.vrsave = vcpu->arch.vrsave; | 754 | sregs->u.e.vrsave = vcpu->arch.vrsave; |
738 | } | 755 | } |
739 | 756 | ||
740 | static int set_sregs_base(struct kvm_vcpu *vcpu, | 757 | static int set_sregs_base(struct kvm_vcpu *vcpu, |
741 | struct kvm_sregs *sregs) | 758 | struct kvm_sregs *sregs) |
742 | { | 759 | { |
743 | if (!(sregs->u.e.features & KVM_SREGS_E_BASE)) | 760 | if (!(sregs->u.e.features & KVM_SREGS_E_BASE)) |
744 | return 0; | 761 | return 0; |
745 | 762 | ||
746 | vcpu->arch.csrr0 = sregs->u.e.csrr0; | 763 | vcpu->arch.csrr0 = sregs->u.e.csrr0; |
747 | vcpu->arch.csrr1 = sregs->u.e.csrr1; | 764 | vcpu->arch.csrr1 = sregs->u.e.csrr1; |
748 | vcpu->arch.mcsr = sregs->u.e.mcsr; | 765 | vcpu->arch.mcsr = sregs->u.e.mcsr; |
749 | vcpu->arch.shared->esr = sregs->u.e.esr; | 766 | vcpu->arch.shared->esr = sregs->u.e.esr; |
750 | vcpu->arch.shared->dar = sregs->u.e.dear; | 767 | vcpu->arch.shared->dar = sregs->u.e.dear; |
751 | vcpu->arch.vrsave = sregs->u.e.vrsave; | 768 | vcpu->arch.vrsave = sregs->u.e.vrsave; |
752 | vcpu->arch.tcr = sregs->u.e.tcr; | 769 | kvmppc_set_tcr(vcpu, sregs->u.e.tcr); |
753 | 770 | ||
754 | if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) | 771 | if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) { |
755 | vcpu->arch.dec = sregs->u.e.dec; | 772 | vcpu->arch.dec = sregs->u.e.dec; |
773 | kvmppc_emulate_dec(vcpu); | ||
774 | } | ||
756 | 775 | ||
757 | kvmppc_emulate_dec(vcpu); | ||
758 | |||
759 | if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) { | 776 | if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) { |
760 | /* | 777 | vcpu->arch.tsr = sregs->u.e.tsr; |
761 | * FIXME: existing KVM timer handling is incomplete. | 778 | update_timer_ints(vcpu); |
762 | * TSR cannot be read by the guest, and its value in | ||
763 | * vcpu->arch is always zero. For now, just handle | ||
764 | * the case where the caller is trying to inject a | ||
765 | * decrementer interrupt. | ||
766 | */ | ||
767 | |||
768 | if ((sregs->u.e.tsr & TSR_DIS) && | ||
769 | (vcpu->arch.tcr & TCR_DIE)) | ||
770 | kvmppc_core_queue_dec(vcpu); | ||
771 | } | 779 | } |
772 | 780 | ||
773 | return 0; | 781 | return 0; |
774 | } | 782 | } |
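
With this change, userspace drives timer state through the same TSR/TCR path as the guest. A hedged sketch of how a QEMU-like caller might inject a decrementer event (vcpu_fd and the surrounding setup are assumed; the field names match what get_sregs_base() above reads back):

	/* Illustrative userspace use of the new TSR plumbing: set TSR[DIS]
	 * via KVM_SET_SREGS and let update_timer_ints() decide delivery
	 * from TSR/TCR, instead of queueing the interrupt directly. */
	struct kvm_sregs sregs;

	ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
	sregs.u.e.tsr |= TSR_DIS;                 /* decrementer event pending */
	sregs.u.e.update_special = KVM_SREGS_E_UPDATE_TSR;
	ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);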
775 | 783 | ||
776 | static void get_sregs_arch206(struct kvm_vcpu *vcpu, | 784 | static void get_sregs_arch206(struct kvm_vcpu *vcpu, |
777 | struct kvm_sregs *sregs) | 785 | struct kvm_sregs *sregs) |
778 | { | 786 | { |
779 | sregs->u.e.features |= KVM_SREGS_E_ARCH206; | 787 | sregs->u.e.features |= KVM_SREGS_E_ARCH206; |
780 | 788 | ||
781 | sregs->u.e.pir = vcpu->vcpu_id; | 789 | sregs->u.e.pir = vcpu->vcpu_id; |
782 | sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0; | 790 | sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0; |
783 | sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1; | 791 | sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1; |
784 | sregs->u.e.decar = vcpu->arch.decar; | 792 | sregs->u.e.decar = vcpu->arch.decar; |
785 | sregs->u.e.ivpr = vcpu->arch.ivpr; | 793 | sregs->u.e.ivpr = vcpu->arch.ivpr; |
786 | } | 794 | } |
787 | 795 | ||
788 | static int set_sregs_arch206(struct kvm_vcpu *vcpu, | 796 | static int set_sregs_arch206(struct kvm_vcpu *vcpu, |
789 | struct kvm_sregs *sregs) | 797 | struct kvm_sregs *sregs) |
790 | { | 798 | { |
791 | if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206)) | 799 | if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206)) |
792 | return 0; | 800 | return 0; |
793 | 801 | ||
794 | if (sregs->u.e.pir != vcpu->vcpu_id) | 802 | if (sregs->u.e.pir != vcpu->vcpu_id) |
795 | return -EINVAL; | 803 | return -EINVAL; |
796 | 804 | ||
797 | vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0; | 805 | vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0; |
798 | vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1; | 806 | vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1; |
799 | vcpu->arch.decar = sregs->u.e.decar; | 807 | vcpu->arch.decar = sregs->u.e.decar; |
800 | vcpu->arch.ivpr = sregs->u.e.ivpr; | 808 | vcpu->arch.ivpr = sregs->u.e.ivpr; |
801 | 809 | ||
802 | return 0; | 810 | return 0; |
803 | } | 811 | } |
804 | 812 | ||
805 | void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 813 | void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
806 | { | 814 | { |
807 | sregs->u.e.features |= KVM_SREGS_E_IVOR; | 815 | sregs->u.e.features |= KVM_SREGS_E_IVOR; |
808 | 816 | ||
809 | sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; | 817 | sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; |
810 | sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; | 818 | sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; |
811 | sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; | 819 | sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; |
812 | sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; | 820 | sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; |
813 | sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; | 821 | sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; |
814 | sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; | 822 | sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; |
815 | sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; | 823 | sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; |
816 | sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; | 824 | sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; |
817 | sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; | 825 | sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; |
818 | sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; | 826 | sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; |
819 | sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; | 827 | sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; |
820 | sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; | 828 | sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; |
821 | sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; | 829 | sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; |
822 | sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; | 830 | sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; |
823 | sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; | 831 | sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; |
824 | sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; | 832 | sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; |
825 | } | 833 | } |
826 | 834 | ||
827 | int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 835 | int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
828 | { | 836 | { |
829 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) | 837 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) |
830 | return 0; | 838 | return 0; |
831 | 839 | ||
832 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0]; | 840 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0]; |
833 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1]; | 841 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1]; |
834 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2]; | 842 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2]; |
835 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3]; | 843 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3]; |
836 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4]; | 844 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4]; |
837 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5]; | 845 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5]; |
838 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6]; | 846 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6]; |
839 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7]; | 847 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7]; |
840 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8]; | 848 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8]; |
841 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9]; | 849 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9]; |
842 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10]; | 850 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10]; |
843 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11]; | 851 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11]; |
844 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12]; | 852 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12]; |
845 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13]; | 853 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13]; |
846 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14]; | 854 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14]; |
847 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15]; | 855 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15]; |
848 | 856 | ||
849 | return 0; | 857 | return 0; |
850 | } | 858 | } |
851 | 859 | ||
852 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | 860 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
853 | struct kvm_sregs *sregs) | 861 | struct kvm_sregs *sregs) |
854 | { | 862 | { |
855 | sregs->pvr = vcpu->arch.pvr; | 863 | sregs->pvr = vcpu->arch.pvr; |
856 | 864 | ||
857 | get_sregs_base(vcpu, sregs); | 865 | get_sregs_base(vcpu, sregs); |
858 | get_sregs_arch206(vcpu, sregs); | 866 | get_sregs_arch206(vcpu, sregs); |
859 | kvmppc_core_get_sregs(vcpu, sregs); | 867 | kvmppc_core_get_sregs(vcpu, sregs); |
860 | return 0; | 868 | return 0; |
861 | } | 869 | } |
862 | 870 | ||
863 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | 871 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
864 | struct kvm_sregs *sregs) | 872 | struct kvm_sregs *sregs) |
865 | { | 873 | { |
866 | int ret; | 874 | int ret; |
867 | 875 | ||
868 | if (vcpu->arch.pvr != sregs->pvr) | 876 | if (vcpu->arch.pvr != sregs->pvr) |
869 | return -EINVAL; | 877 | return -EINVAL; |
870 | 878 | ||
871 | ret = set_sregs_base(vcpu, sregs); | 879 | ret = set_sregs_base(vcpu, sregs); |
872 | if (ret < 0) | 880 | if (ret < 0) |
873 | return ret; | 881 | return ret; |
874 | 882 | ||
875 | ret = set_sregs_arch206(vcpu, sregs); | 883 | ret = set_sregs_arch206(vcpu, sregs); |
876 | if (ret < 0) | 884 | if (ret < 0) |
877 | return ret; | 885 | return ret; |
878 | 886 | ||
879 | return kvmppc_core_set_sregs(vcpu, sregs); | 887 | return kvmppc_core_set_sregs(vcpu, sregs); |
880 | } | 888 | } |
881 | 889 | ||
882 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 890 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
883 | { | 891 | { |
884 | return -ENOTSUPP; | 892 | return -ENOTSUPP; |
885 | } | 893 | } |
886 | 894 | ||
887 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 895 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
888 | { | 896 | { |
889 | return -ENOTSUPP; | 897 | return -ENOTSUPP; |
890 | } | 898 | } |
891 | 899 | ||
892 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | 900 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
893 | struct kvm_translation *tr) | 901 | struct kvm_translation *tr) |
894 | { | 902 | { |
895 | int r; | 903 | int r; |
896 | 904 | ||
897 | r = kvmppc_core_vcpu_translate(vcpu, tr); | 905 | r = kvmppc_core_vcpu_translate(vcpu, tr); |
898 | return r; | 906 | return r; |
899 | } | 907 | } |
900 | 908 | ||
901 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | 909 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) |
902 | { | 910 | { |
903 | return -ENOTSUPP; | 911 | return -ENOTSUPP; |
904 | } | 912 | } |
905 | 913 | ||
906 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, | 914 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, |
907 | struct kvm_userspace_memory_region *mem) | 915 | struct kvm_userspace_memory_region *mem) |
908 | { | 916 | { |
909 | return 0; | 917 | return 0; |
910 | } | 918 | } |
911 | 919 | ||
912 | void kvmppc_core_commit_memory_region(struct kvm *kvm, | 920 | void kvmppc_core_commit_memory_region(struct kvm *kvm, |
913 | struct kvm_userspace_memory_region *mem) | 921 | struct kvm_userspace_memory_region *mem) |
914 | { | 922 | { |
915 | } | 923 | } |
916 | 924 | ||
917 | int kvmppc_core_init_vm(struct kvm *kvm) | 925 | int kvmppc_core_init_vm(struct kvm *kvm) |
918 | { | 926 | { |
919 | return 0; | 927 | return 0; |
920 | } | 928 | } |
921 | 929 | ||
922 | void kvmppc_core_destroy_vm(struct kvm *kvm) | 930 | void kvmppc_core_destroy_vm(struct kvm *kvm) |
923 | { | 931 | { |
932 | } | ||
933 | |||
934 | void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) | ||
935 | { | ||
936 | vcpu->arch.tcr = new_tcr; | ||
937 | update_timer_ints(vcpu); | ||
938 | } | ||
939 | |||
940 | void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) | ||
941 | { | ||
942 | set_bits(tsr_bits, &vcpu->arch.tsr); | ||
943 | smp_wmb(); | ||
944 | kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); | ||
945 | kvm_vcpu_kick(vcpu); | ||
946 | } | ||
947 | |||
948 | void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) | ||
949 | { | ||
950 | clear_bits(tsr_bits, &vcpu->arch.tsr); | ||
951 | update_timer_ints(vcpu); | ||
952 | } | ||
953 | |||
954 | void kvmppc_decrementer_func(unsigned long data) | ||
955 | { | ||
956 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; | ||
957 | |||
958 | kvmppc_set_tsr_bits(vcpu, TSR_DIS); | ||
924 | } | 959 | } |
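
update_timer_ints() itself lives earlier in booke.c and is not shown in this hunk; the level-triggered behaviour it implements can be sketched roughly as follows (a simplification, assuming the kvmppc_core_queue_dec()/kvmppc_core_dequeue_dec() helpers used elsewhere in this file):

	/* Rough sketch of the level-triggered decrementer logic: the
	 * interrupt line follows TSR[DIS] && TCR[DIE], so clearing the TSR
	 * bit dequeues the interrupt rather than it being consumed on
	 * delivery. */
	static void update_timer_ints(struct kvm_vcpu *vcpu)
	{
		if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
			kvmppc_core_queue_dec(vcpu);
		else
			kvmppc_core_dequeue_dec(vcpu);
	}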
925 | 960 | ||
926 | int __init kvmppc_booke_init(void) | 961 | int __init kvmppc_booke_init(void) |
927 | { | 962 | { |
928 | unsigned long ivor[16]; | 963 | unsigned long ivor[16]; |
929 | unsigned long max_ivor = 0; | 964 | unsigned long max_ivor = 0; |
930 | int i; | 965 | int i; |
931 | 966 | ||
932 | /* We install our own exception handlers by hijacking IVPR. IVPR must | 967 | /* We install our own exception handlers by hijacking IVPR. IVPR must |
933 | * be 64KB aligned (its low 16 bits are ignored), so we need a 64KB allocation. */ | 968 | * be 64KB aligned (its low 16 bits are ignored), so we need a 64KB allocation. */ |
934 | kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO, | 969 | kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO, |
935 | VCPU_SIZE_ORDER); | 970 | VCPU_SIZE_ORDER); |
936 | if (!kvmppc_booke_handlers) | 971 | if (!kvmppc_booke_handlers) |
937 | return -ENOMEM; | 972 | return -ENOMEM; |
938 | 973 | ||
939 | /* XXX make sure our handlers are smaller than Linux's */ | 974 | /* XXX make sure our handlers are smaller than Linux's */ |
940 | 975 | ||
941 | /* Copy our interrupt handlers to match host IVORs. That way we don't | 976 | /* Copy our interrupt handlers to match host IVORs. That way we don't |
942 | * have to swap the IVORs on every guest/host transition. */ | 977 | * have to swap the IVORs on every guest/host transition. */ |
943 | ivor[0] = mfspr(SPRN_IVOR0); | 978 | ivor[0] = mfspr(SPRN_IVOR0); |
944 | ivor[1] = mfspr(SPRN_IVOR1); | 979 | ivor[1] = mfspr(SPRN_IVOR1); |
945 | ivor[2] = mfspr(SPRN_IVOR2); | 980 | ivor[2] = mfspr(SPRN_IVOR2); |
946 | ivor[3] = mfspr(SPRN_IVOR3); | 981 | ivor[3] = mfspr(SPRN_IVOR3); |
947 | ivor[4] = mfspr(SPRN_IVOR4); | 982 | ivor[4] = mfspr(SPRN_IVOR4); |
948 | ivor[5] = mfspr(SPRN_IVOR5); | 983 | ivor[5] = mfspr(SPRN_IVOR5); |
949 | ivor[6] = mfspr(SPRN_IVOR6); | 984 | ivor[6] = mfspr(SPRN_IVOR6); |
950 | ivor[7] = mfspr(SPRN_IVOR7); | 985 | ivor[7] = mfspr(SPRN_IVOR7); |
951 | ivor[8] = mfspr(SPRN_IVOR8); | 986 | ivor[8] = mfspr(SPRN_IVOR8); |
952 | ivor[9] = mfspr(SPRN_IVOR9); | 987 | ivor[9] = mfspr(SPRN_IVOR9); |
953 | ivor[10] = mfspr(SPRN_IVOR10); | 988 | ivor[10] = mfspr(SPRN_IVOR10); |
954 | ivor[11] = mfspr(SPRN_IVOR11); | 989 | ivor[11] = mfspr(SPRN_IVOR11); |
955 | ivor[12] = mfspr(SPRN_IVOR12); | 990 | ivor[12] = mfspr(SPRN_IVOR12); |
956 | ivor[13] = mfspr(SPRN_IVOR13); | 991 | ivor[13] = mfspr(SPRN_IVOR13); |
957 | ivor[14] = mfspr(SPRN_IVOR14); | 992 | ivor[14] = mfspr(SPRN_IVOR14); |
958 | ivor[15] = mfspr(SPRN_IVOR15); | 993 | ivor[15] = mfspr(SPRN_IVOR15); |
959 | 994 | ||
960 | for (i = 0; i < 16; i++) { | 995 | for (i = 0; i < 16; i++) { |
961 | if (ivor[i] > max_ivor) | 996 | if (ivor[i] > max_ivor) |
962 | max_ivor = ivor[i]; | 997 | max_ivor = ivor[i]; |
963 | 998 | ||
964 | memcpy((void *)kvmppc_booke_handlers + ivor[i], | 999 | memcpy((void *)kvmppc_booke_handlers + ivor[i], |
965 | kvmppc_handlers_start + i * kvmppc_handler_len, | 1000 | kvmppc_handlers_start + i * kvmppc_handler_len, |
966 | kvmppc_handler_len); | 1001 | kvmppc_handler_len); |
967 | } | 1002 | } |
arch/powerpc/kvm/booke.h
1 | /* | 1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | 2 | * This program is free software; you can redistribute it and/or modify |
3 | * it under the terms of the GNU General Public License, version 2, as | 3 | * it under the terms of the GNU General Public License, version 2, as |
4 | * published by the Free Software Foundation. | 4 | * published by the Free Software Foundation. |
5 | * | 5 | * |
6 | * This program is distributed in the hope that it will be useful, | 6 | * This program is distributed in the hope that it will be useful, |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
9 | * GNU General Public License for more details. | 9 | * GNU General Public License for more details. |
10 | * | 10 | * |
11 | * You should have received a copy of the GNU General Public License | 11 | * You should have received a copy of the GNU General Public License |
12 | * along with this program; if not, write to the Free Software | 12 | * along with this program; if not, write to the Free Software |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
14 | * | 14 | * |
15 | * Copyright IBM Corp. 2008 | 15 | * Copyright IBM Corp. 2008 |
16 | * | 16 | * |
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | 17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #ifndef __KVM_BOOKE_H__ | 20 | #ifndef __KVM_BOOKE_H__ |
21 | #define __KVM_BOOKE_H__ | 21 | #define __KVM_BOOKE_H__ |
22 | 22 | ||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | #include <asm/kvm_ppc.h> | 25 | #include <asm/kvm_ppc.h> |
26 | #include "timing.h" | 26 | #include "timing.h" |
27 | 27 | ||
28 | /* interrupt priority ordering */ | 28 | /* interrupt priority ordering */ |
29 | #define BOOKE_IRQPRIO_DATA_STORAGE 0 | 29 | #define BOOKE_IRQPRIO_DATA_STORAGE 0 |
30 | #define BOOKE_IRQPRIO_INST_STORAGE 1 | 30 | #define BOOKE_IRQPRIO_INST_STORAGE 1 |
31 | #define BOOKE_IRQPRIO_ALIGNMENT 2 | 31 | #define BOOKE_IRQPRIO_ALIGNMENT 2 |
32 | #define BOOKE_IRQPRIO_PROGRAM 3 | 32 | #define BOOKE_IRQPRIO_PROGRAM 3 |
33 | #define BOOKE_IRQPRIO_FP_UNAVAIL 4 | 33 | #define BOOKE_IRQPRIO_FP_UNAVAIL 4 |
34 | #define BOOKE_IRQPRIO_SPE_UNAVAIL 5 | 34 | #define BOOKE_IRQPRIO_SPE_UNAVAIL 5 |
35 | #define BOOKE_IRQPRIO_SPE_FP_DATA 6 | 35 | #define BOOKE_IRQPRIO_SPE_FP_DATA 6 |
36 | #define BOOKE_IRQPRIO_SPE_FP_ROUND 7 | 36 | #define BOOKE_IRQPRIO_SPE_FP_ROUND 7 |
37 | #define BOOKE_IRQPRIO_SYSCALL 8 | 37 | #define BOOKE_IRQPRIO_SYSCALL 8 |
38 | #define BOOKE_IRQPRIO_AP_UNAVAIL 9 | 38 | #define BOOKE_IRQPRIO_AP_UNAVAIL 9 |
39 | #define BOOKE_IRQPRIO_DTLB_MISS 10 | 39 | #define BOOKE_IRQPRIO_DTLB_MISS 10 |
40 | #define BOOKE_IRQPRIO_ITLB_MISS 11 | 40 | #define BOOKE_IRQPRIO_ITLB_MISS 11 |
41 | #define BOOKE_IRQPRIO_MACHINE_CHECK 12 | 41 | #define BOOKE_IRQPRIO_MACHINE_CHECK 12 |
42 | #define BOOKE_IRQPRIO_DEBUG 13 | 42 | #define BOOKE_IRQPRIO_DEBUG 13 |
43 | #define BOOKE_IRQPRIO_CRITICAL 14 | 43 | #define BOOKE_IRQPRIO_CRITICAL 14 |
44 | #define BOOKE_IRQPRIO_WATCHDOG 15 | 44 | #define BOOKE_IRQPRIO_WATCHDOG 15 |
45 | #define BOOKE_IRQPRIO_EXTERNAL 16 | 45 | #define BOOKE_IRQPRIO_EXTERNAL 16 |
46 | #define BOOKE_IRQPRIO_FIT 17 | 46 | #define BOOKE_IRQPRIO_FIT 17 |
47 | #define BOOKE_IRQPRIO_DECREMENTER 18 | 47 | #define BOOKE_IRQPRIO_DECREMENTER 18 |
48 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 | 48 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 |
49 | /* Internal pseudo-irqprio for level triggered externals */ | 49 | /* Internal pseudo-irqprio for level triggered externals */ |
50 | #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 | 50 | #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 |
51 | #define BOOKE_IRQPRIO_MAX 20 | 51 | #define BOOKE_IRQPRIO_MAX 20 |
52 | 52 | ||
53 | extern unsigned long kvmppc_booke_handlers; | 53 | extern unsigned long kvmppc_booke_handlers; |
54 | 54 | ||
55 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr); | 55 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr); |
56 | void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr); | 56 | void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr); |
57 | 57 | ||
58 | void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr); | ||
59 | void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); | ||
60 | void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); | ||
61 | |||
58 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 62 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, |
59 | unsigned int inst, int *advance); | 63 | unsigned int inst, int *advance); |
60 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); | 64 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); |
61 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); | 65 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); |
62 | 66 | ||
63 | /* low-level asm code to transfer guest state */ | 67 | /* low-level asm code to transfer guest state */ |
64 | void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu); | 68 | void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu); |
65 | void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu); | 69 | void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu); |
66 | 70 | ||
67 | /* high-level function, manages flags, host state */ | 71 | /* high-level function, manages flags, host state */ |
68 | void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu); | 72 | void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu); |
69 | 73 | ||
70 | #endif /* __KVM_BOOKE_H__ */ | 74 | #endif /* __KVM_BOOKE_H__ */ |
71 | 75 |
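
These prototypes give the rest of the booke code a single entry point for timer state. As a speculative example, a future FIT implementation could plausibly signal expiry the same way the decrementer does (function name ours; TSR_FIS is the architected FIT status bit from asm/reg_booke.h):

	/* Hypothetical FIT expiry path, mirroring kvmppc_decrementer_func():
	 * set the status bit and let update_timer_ints() handle delivery. */
	static void kvmppc_fit_func(struct kvm_vcpu *vcpu)
	{
		kvmppc_set_tsr_bits(vcpu, TSR_FIS);
	}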
arch/powerpc/kvm/booke_emulate.c
1 | /* | 1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | 2 | * This program is free software; you can redistribute it and/or modify |
3 | * it under the terms of the GNU General Public License, version 2, as | 3 | * it under the terms of the GNU General Public License, version 2, as |
4 | * published by the Free Software Foundation. | 4 | * published by the Free Software Foundation. |
5 | * | 5 | * |
6 | * This program is distributed in the hope that it will be useful, | 6 | * This program is distributed in the hope that it will be useful, |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
9 | * GNU General Public License for more details. | 9 | * GNU General Public License for more details. |
10 | * | 10 | * |
11 | * You should have received a copy of the GNU General Public License | 11 | * You should have received a copy of the GNU General Public License |
12 | * along with this program; if not, write to the Free Software | 12 | * along with this program; if not, write to the Free Software |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
14 | * | 14 | * |
15 | * Copyright IBM Corp. 2008 | 15 | * Copyright IBM Corp. 2008 |
16 | * Copyright 2011 Freescale Semiconductor, Inc. | ||
16 | * | 17 | * |
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | 18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> |
18 | */ | 19 | */ |
19 | 20 | ||
20 | #include <linux/kvm_host.h> | 21 | #include <linux/kvm_host.h> |
21 | #include <asm/disassemble.h> | 22 | #include <asm/disassemble.h> |
22 | 23 | ||
23 | #include "booke.h" | 24 | #include "booke.h" |
24 | 25 | ||
25 | #define OP_19_XOP_RFI 50 | 26 | #define OP_19_XOP_RFI 50 |
26 | 27 | ||
27 | #define OP_31_XOP_MFMSR 83 | 28 | #define OP_31_XOP_MFMSR 83 |
28 | #define OP_31_XOP_WRTEE 131 | 29 | #define OP_31_XOP_WRTEE 131 |
29 | #define OP_31_XOP_MTMSR 146 | 30 | #define OP_31_XOP_MTMSR 146 |
30 | #define OP_31_XOP_WRTEEI 163 | 31 | #define OP_31_XOP_WRTEEI 163 |
31 | 32 | ||
32 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | 33 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) |
33 | { | 34 | { |
34 | vcpu->arch.pc = vcpu->arch.shared->srr0; | 35 | vcpu->arch.pc = vcpu->arch.shared->srr0; |
35 | kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); | 36 | kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); |
36 | } | 37 | } |
37 | 38 | ||
38 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 39 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, |
39 | unsigned int inst, int *advance) | 40 | unsigned int inst, int *advance) |
40 | { | 41 | { |
41 | int emulated = EMULATE_DONE; | 42 | int emulated = EMULATE_DONE; |
42 | int rs; | 43 | int rs; |
43 | int rt; | 44 | int rt; |
44 | 45 | ||
45 | switch (get_op(inst)) { | 46 | switch (get_op(inst)) { |
46 | case 19: | 47 | case 19: |
47 | switch (get_xop(inst)) { | 48 | switch (get_xop(inst)) { |
48 | case OP_19_XOP_RFI: | 49 | case OP_19_XOP_RFI: |
49 | kvmppc_emul_rfi(vcpu); | 50 | kvmppc_emul_rfi(vcpu); |
50 | kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS); | 51 | kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS); |
51 | *advance = 0; | 52 | *advance = 0; |
52 | break; | 53 | break; |
53 | 54 | ||
54 | default: | 55 | default: |
55 | emulated = EMULATE_FAIL; | 56 | emulated = EMULATE_FAIL; |
56 | break; | 57 | break; |
57 | } | 58 | } |
58 | break; | 59 | break; |
59 | 60 | ||
60 | case 31: | 61 | case 31: |
61 | switch (get_xop(inst)) { | 62 | switch (get_xop(inst)) { |
62 | 63 | ||
63 | case OP_31_XOP_MFMSR: | 64 | case OP_31_XOP_MFMSR: |
64 | rt = get_rt(inst); | 65 | rt = get_rt(inst); |
65 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); | 66 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); |
66 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); | 67 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); |
67 | break; | 68 | break; |
68 | 69 | ||
69 | case OP_31_XOP_MTMSR: | 70 | case OP_31_XOP_MTMSR: |
70 | rs = get_rs(inst); | 71 | rs = get_rs(inst); |
71 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); | 72 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); |
72 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); | 73 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); |
73 | break; | 74 | break; |
74 | 75 | ||
75 | case OP_31_XOP_WRTEE: | 76 | case OP_31_XOP_WRTEE: |
76 | rs = get_rs(inst); | 77 | rs = get_rs(inst); |
77 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) | 78 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) |
78 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); | 79 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); |
79 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | 80 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); |
80 | break; | 81 | break; |
81 | 82 | ||
82 | case OP_31_XOP_WRTEEI: | 83 | case OP_31_XOP_WRTEEI: |
83 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) | 84 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) |
84 | | (inst & MSR_EE); | 85 | | (inst & MSR_EE); |
85 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | 86 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); |
86 | break; | 87 | break; |
87 | 88 | ||
88 | default: | 89 | default: |
89 | emulated = EMULATE_FAIL; | 90 | emulated = EMULATE_FAIL; |
90 | } | 91 | } |
91 | 92 | ||
92 | break; | 93 | break; |
93 | 94 | ||
94 | default: | 95 | default: |
95 | emulated = EMULATE_FAIL; | 96 | emulated = EMULATE_FAIL; |
96 | } | 97 | } |
97 | 98 | ||
98 | return emulated; | 99 | return emulated; |
99 | } | 100 | } |
100 | 101 | ||
101 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 102 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) |
102 | { | 103 | { |
103 | int emulated = EMULATE_DONE; | 104 | int emulated = EMULATE_DONE; |
104 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | 105 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); |
105 | 106 | ||
106 | switch (sprn) { | 107 | switch (sprn) { |
107 | case SPRN_DEAR: | 108 | case SPRN_DEAR: |
108 | vcpu->arch.shared->dar = spr_val; break; | 109 | vcpu->arch.shared->dar = spr_val; break; |
109 | case SPRN_ESR: | 110 | case SPRN_ESR: |
110 | vcpu->arch.shared->esr = spr_val; break; | 111 | vcpu->arch.shared->esr = spr_val; break; |
111 | case SPRN_DBCR0: | 112 | case SPRN_DBCR0: |
112 | vcpu->arch.dbcr0 = spr_val; break; | 113 | vcpu->arch.dbcr0 = spr_val; break; |
113 | case SPRN_DBCR1: | 114 | case SPRN_DBCR1: |
114 | vcpu->arch.dbcr1 = spr_val; break; | 115 | vcpu->arch.dbcr1 = spr_val; break; |
115 | case SPRN_DBSR: | 116 | case SPRN_DBSR: |
116 | vcpu->arch.dbsr &= ~spr_val; break; | 117 | vcpu->arch.dbsr &= ~spr_val; break; |
117 | case SPRN_TSR: | 118 | case SPRN_TSR: |
118 | vcpu->arch.tsr &= ~spr_val; break; | 119 | kvmppc_clr_tsr_bits(vcpu, spr_val); |
120 | break; | ||
119 | case SPRN_TCR: | 121 | case SPRN_TCR: |
120 | vcpu->arch.tcr = spr_val; | 122 | kvmppc_set_tcr(vcpu, spr_val); |
121 | kvmppc_emulate_dec(vcpu); | ||
122 | break; | 123 | break; |
123 | 124 | ||
124 | /* Note: SPRG4-7 are user-readable. These values are | 125 | /* Note: SPRG4-7 are user-readable. These values are |
125 | * loaded into the real SPRGs when resuming the | 126 | * loaded into the real SPRGs when resuming the |
126 | * guest. */ | 127 | * guest. */ |
127 | case SPRN_SPRG4: | 128 | case SPRN_SPRG4: |
128 | vcpu->arch.shared->sprg4 = spr_val; break; | 129 | vcpu->arch.shared->sprg4 = spr_val; break; |
129 | case SPRN_SPRG5: | 130 | case SPRN_SPRG5: |
130 | vcpu->arch.shared->sprg5 = spr_val; break; | 131 | vcpu->arch.shared->sprg5 = spr_val; break; |
131 | case SPRN_SPRG6: | 132 | case SPRN_SPRG6: |
132 | vcpu->arch.shared->sprg6 = spr_val; break; | 133 | vcpu->arch.shared->sprg6 = spr_val; break; |
133 | case SPRN_SPRG7: | 134 | case SPRN_SPRG7: |
134 | vcpu->arch.shared->sprg7 = spr_val; break; | 135 | vcpu->arch.shared->sprg7 = spr_val; break; |
135 | 136 | ||
136 | case SPRN_IVPR: | 137 | case SPRN_IVPR: |
137 | vcpu->arch.ivpr = spr_val; | 138 | vcpu->arch.ivpr = spr_val; |
138 | break; | 139 | break; |
139 | case SPRN_IVOR0: | 140 | case SPRN_IVOR0: |
140 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; | 141 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; |
141 | break; | 142 | break; |
142 | case SPRN_IVOR1: | 143 | case SPRN_IVOR1: |
143 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val; | 144 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val; |
144 | break; | 145 | break; |
145 | case SPRN_IVOR2: | 146 | case SPRN_IVOR2: |
146 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; | 147 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; |
147 | break; | 148 | break; |
148 | case SPRN_IVOR3: | 149 | case SPRN_IVOR3: |
149 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; | 150 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; |
150 | break; | 151 | break; |
151 | case SPRN_IVOR4: | 152 | case SPRN_IVOR4: |
152 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val; | 153 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val; |
153 | break; | 154 | break; |
154 | case SPRN_IVOR5: | 155 | case SPRN_IVOR5: |
155 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val; | 156 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val; |
156 | break; | 157 | break; |
157 | case SPRN_IVOR6: | 158 | case SPRN_IVOR6: |
158 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val; | 159 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val; |
159 | break; | 160 | break; |
160 | case SPRN_IVOR7: | 161 | case SPRN_IVOR7: |
161 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val; | 162 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val; |
162 | break; | 163 | break; |
163 | case SPRN_IVOR8: | 164 | case SPRN_IVOR8: |
164 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; | 165 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; |
165 | break; | 166 | break; |
166 | case SPRN_IVOR9: | 167 | case SPRN_IVOR9: |
167 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; | 168 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; |
168 | break; | 169 | break; |
169 | case SPRN_IVOR10: | 170 | case SPRN_IVOR10: |
170 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val; | 171 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val; |
171 | break; | 172 | break; |
172 | case SPRN_IVOR11: | 173 | case SPRN_IVOR11: |
173 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val; | 174 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val; |
174 | break; | 175 | break; |
175 | case SPRN_IVOR12: | 176 | case SPRN_IVOR12: |
176 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val; | 177 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val; |
177 | break; | 178 | break; |
178 | case SPRN_IVOR13: | 179 | case SPRN_IVOR13: |
179 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val; | 180 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val; |
180 | break; | 181 | break; |
181 | case SPRN_IVOR14: | 182 | case SPRN_IVOR14: |
182 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val; | 183 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val; |
183 | break; | 184 | break; |
184 | case SPRN_IVOR15: | 185 | case SPRN_IVOR15: |
185 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val; | 186 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val; |
186 | break; | 187 | break; |
187 | 188 | ||
188 | default: | 189 | default: |
189 | emulated = EMULATE_FAIL; | 190 | emulated = EMULATE_FAIL; |
190 | } | 191 | } |
191 | 192 | ||
192 | return emulated; | 193 | return emulated; |
193 | } | 194 | } |
194 | 195 | ||
195 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 196 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) |
196 | { | 197 | { |
197 | int emulated = EMULATE_DONE; | 198 | int emulated = EMULATE_DONE; |
198 | 199 | ||
199 | switch (sprn) { | 200 | switch (sprn) { |
200 | case SPRN_IVPR: | 201 | case SPRN_IVPR: |
201 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; | 202 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; |
202 | case SPRN_DEAR: | 203 | case SPRN_DEAR: |
203 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break; | 204 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break; |
204 | case SPRN_ESR: | 205 | case SPRN_ESR: |
205 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break; | 206 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break; |
206 | case SPRN_DBCR0: | 207 | case SPRN_DBCR0: |
207 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break; | 208 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break; |
208 | case SPRN_DBCR1: | 209 | case SPRN_DBCR1: |
209 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break; | 210 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break; |
210 | case SPRN_DBSR: | 211 | case SPRN_DBSR: |
211 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break; | 212 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break; |
213 | case SPRN_TSR: | ||
214 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break; | ||
215 | case SPRN_TCR: | ||
216 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break; | ||
212 | 217 | ||
213 | case SPRN_IVOR0: | 218 | case SPRN_IVOR0: |
214 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]); | 219 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]); |
215 | break; | 220 | break; |
216 | case SPRN_IVOR1: | 221 | case SPRN_IVOR1: |
217 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]); | 222 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]); |
218 | break; | 223 | break; |
219 | case SPRN_IVOR2: | 224 | case SPRN_IVOR2: |
220 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); | 225 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); |
221 | break; | 226 | break; |
222 | case SPRN_IVOR3: | 227 | case SPRN_IVOR3: |
223 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]); | 228 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]); |
224 | break; | 229 | break; |
225 | case SPRN_IVOR4: | 230 | case SPRN_IVOR4: |
226 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]); | 231 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]); |
227 | break; | 232 | break; |
228 | case SPRN_IVOR5: | 233 | case SPRN_IVOR5: |
229 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]); | 234 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]); |
230 | break; | 235 | break; |
231 | case SPRN_IVOR6: | 236 | case SPRN_IVOR6: |
232 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]); | 237 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]); |
233 | break; | 238 | break; |
234 | case SPRN_IVOR7: | 239 | case SPRN_IVOR7: |
235 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]); | 240 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]); |
236 | break; | 241 | break; |
237 | case SPRN_IVOR8: | 242 | case SPRN_IVOR8: |
238 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); | 243 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); |
239 | break; | 244 | break; |
240 | case SPRN_IVOR9: | 245 | case SPRN_IVOR9: |
241 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]); | 246 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]); |
242 | break; | 247 | break; |
243 | case SPRN_IVOR10: | 248 | case SPRN_IVOR10: |
244 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]); | 249 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]); |
245 | break; | 250 | break; |
246 | case SPRN_IVOR11: | 251 | case SPRN_IVOR11: |
247 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]); | 252 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]); |
248 | break; | 253 | break; |
249 | case SPRN_IVOR12: | 254 | case SPRN_IVOR12: |
250 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]); | 255 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]); |
251 | break; | 256 | break; |
252 | case SPRN_IVOR13: | 257 | case SPRN_IVOR13: |
253 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]); | 258 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]); |
254 | break; | 259 | break; |
255 | case SPRN_IVOR14: | 260 | case SPRN_IVOR14: |
256 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]); | 261 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]); |
257 | break; | 262 | break; |
258 | case SPRN_IVOR15: | 263 | case SPRN_IVOR15: |
259 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]); | 264 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]); |
260 | break; | 265 | break; |
261 | 266 | ||
262 | default: | 267 | default: |
263 | emulated = EMULATE_FAIL; | 268 | emulated = EMULATE_FAIL; |
264 | } | 269 | } |
265 | 270 | ||
266 | return emulated; | 271 | return emulated; |
267 | } | 272 | } |
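
From the guest's perspective the programming model is now the architected one: TSR reads back its real value and is write-one-to-clear, and TCR writes take effect immediately through kvmppc_set_tcr(). A small guest-code sketch (SPR and bit names from asm/reg_booke.h; the functions themselves are illustrative, not from this commit):

	/* Guest-side sketch: acknowledge the level-triggered decrementer by
	 * writing 1 to TSR[DIS], which the mtspr emulation above turns into
	 * kvmppc_clr_tsr_bits(). */
	void guest_dec_handler(void)
	{
		mtspr(SPRN_TSR, TSR_DIS);	/* w1c: clear the pending bit */
	}

	void guest_timer_setup(u32 ticks)
	{
		mtspr(SPRN_DEC, ticks);		/* stops at zero, sets TSR[DIS] */
		mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_DIE); /* enable delivery */
	}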
arch/powerpc/kvm/emulate.c
1 | /* | 1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | 2 | * This program is free software; you can redistribute it and/or modify |
3 | * it under the terms of the GNU General Public License, version 2, as | 3 | * it under the terms of the GNU General Public License, version 2, as |
4 | * published by the Free Software Foundation. | 4 | * published by the Free Software Foundation. |
5 | * | 5 | * |
6 | * This program is distributed in the hope that it will be useful, | 6 | * This program is distributed in the hope that it will be useful, |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
9 | * GNU General Public License for more details. | 9 | * GNU General Public License for more details. |
10 | * | 10 | * |
11 | * You should have received a copy of the GNU General Public License | 11 | * You should have received a copy of the GNU General Public License |
12 | * along with this program; if not, write to the Free Software | 12 | * along with this program; if not, write to the Free Software |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
14 | * | 14 | * |
15 | * Copyright IBM Corp. 2007 | 15 | * Copyright IBM Corp. 2007 |
16 | * Copyright 2011 Freescale Semiconductor, Inc. | ||
16 | * | 17 | * |
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | 18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> |
18 | */ | 19 | */ |
19 | 20 | ||
20 | #include <linux/jiffies.h> | 21 | #include <linux/jiffies.h> |
21 | #include <linux/hrtimer.h> | 22 | #include <linux/hrtimer.h> |
22 | #include <linux/types.h> | 23 | #include <linux/types.h> |
23 | #include <linux/string.h> | 24 | #include <linux/string.h> |
24 | #include <linux/kvm_host.h> | 25 | #include <linux/kvm_host.h> |
25 | 26 | ||
26 | #include <asm/reg.h> | 27 | #include <asm/reg.h> |
27 | #include <asm/time.h> | 28 | #include <asm/time.h> |
28 | #include <asm/byteorder.h> | 29 | #include <asm/byteorder.h> |
29 | #include <asm/kvm_ppc.h> | 30 | #include <asm/kvm_ppc.h> |
30 | #include <asm/disassemble.h> | 31 | #include <asm/disassemble.h> |
31 | #include "timing.h" | 32 | #include "timing.h" |
32 | #include "trace.h" | 33 | #include "trace.h" |
33 | 34 | ||
34 | #define OP_TRAP 3 | 35 | #define OP_TRAP 3 |
35 | #define OP_TRAP_64 2 | 36 | #define OP_TRAP_64 2 |
36 | 37 | ||
37 | #define OP_31_XOP_LWZX 23 | 38 | #define OP_31_XOP_LWZX 23 |
38 | #define OP_31_XOP_LBZX 87 | 39 | #define OP_31_XOP_LBZX 87 |
39 | #define OP_31_XOP_STWX 151 | 40 | #define OP_31_XOP_STWX 151 |
40 | #define OP_31_XOP_STBX 215 | 41 | #define OP_31_XOP_STBX 215 |
41 | #define OP_31_XOP_LBZUX 119 | 42 | #define OP_31_XOP_LBZUX 119 |
42 | #define OP_31_XOP_STBUX 247 | 43 | #define OP_31_XOP_STBUX 247 |
43 | #define OP_31_XOP_LHZX 279 | 44 | #define OP_31_XOP_LHZX 279 |
44 | #define OP_31_XOP_LHZUX 311 | 45 | #define OP_31_XOP_LHZUX 311 |
45 | #define OP_31_XOP_MFSPR 339 | 46 | #define OP_31_XOP_MFSPR 339 |
46 | #define OP_31_XOP_LHAX 343 | 47 | #define OP_31_XOP_LHAX 343 |
47 | #define OP_31_XOP_STHX 407 | 48 | #define OP_31_XOP_STHX 407 |
48 | #define OP_31_XOP_STHUX 439 | 49 | #define OP_31_XOP_STHUX 439 |
49 | #define OP_31_XOP_MTSPR 467 | 50 | #define OP_31_XOP_MTSPR 467 |
50 | #define OP_31_XOP_DCBI 470 | 51 | #define OP_31_XOP_DCBI 470 |
51 | #define OP_31_XOP_LWBRX 534 | 52 | #define OP_31_XOP_LWBRX 534 |
52 | #define OP_31_XOP_TLBSYNC 566 | 53 | #define OP_31_XOP_TLBSYNC 566 |
53 | #define OP_31_XOP_STWBRX 662 | 54 | #define OP_31_XOP_STWBRX 662 |
54 | #define OP_31_XOP_LHBRX 790 | 55 | #define OP_31_XOP_LHBRX 790 |
55 | #define OP_31_XOP_STHBRX 918 | 56 | #define OP_31_XOP_STHBRX 918 |
56 | 57 | ||
57 | #define OP_LWZ 32 | 58 | #define OP_LWZ 32 |
58 | #define OP_LWZU 33 | 59 | #define OP_LWZU 33 |
59 | #define OP_LBZ 34 | 60 | #define OP_LBZ 34 |
60 | #define OP_LBZU 35 | 61 | #define OP_LBZU 35 |
61 | #define OP_STW 36 | 62 | #define OP_STW 36 |
62 | #define OP_STWU 37 | 63 | #define OP_STWU 37 |
63 | #define OP_STB 38 | 64 | #define OP_STB 38 |
64 | #define OP_STBU 39 | 65 | #define OP_STBU 39 |
65 | #define OP_LHZ 40 | 66 | #define OP_LHZ 40 |
66 | #define OP_LHZU 41 | 67 | #define OP_LHZU 41 |
67 | #define OP_LHA 42 | 68 | #define OP_LHA 42 |
68 | #define OP_LHAU 43 | 69 | #define OP_LHAU 43 |
69 | #define OP_STH 44 | 70 | #define OP_STH 44 |
70 | #define OP_STHU 45 | 71 | #define OP_STHU 45 |
71 | 72 | ||
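The opcode values above index two fields of the 32-bit instruction word: the primary opcode in the top six bits, and, for the opcode-31 family, the extended opcode in bits 21:30. A standalone sketch of the field extraction these tables assume, mirroring the asm/disassemble.h helpers (get_op, get_xop, get_rt, ...) used in the emulation code below; compiled out of tree, for illustration only:

    /* Field extraction assumed by the opcode tables above; these mirror
     * the asm/disassemble.h helpers used by kvmppc_emulate_instruction(). */
    #include <stdint.h>
    #include <stdio.h>

    static unsigned int get_op(uint32_t inst)  { return inst >> 26; }
    static unsigned int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }
    static unsigned int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }
    static unsigned int get_ra(uint32_t inst)  { return (inst >> 16) & 0x1f; }
    static unsigned int get_rb(uint32_t inst)  { return (inst >> 11) & 0x1f; }

    int main(void)
    {
        uint32_t inst = 0x7ca4182e;  /* lwzx r5,r4,r3 */
        /* prints op=31 xop=23 (OP_31_XOP_LWZX) rt=5 ra=4 rb=3 */
        printf("op=%u xop=%u rt=%u ra=%u rb=%u\n",
               get_op(inst), get_xop(inst),
               get_rt(inst), get_ra(inst), get_rb(inst));
        return 0;
    }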
72 | #ifdef CONFIG_PPC_BOOK3S | ||
73 | static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu) | ||
74 | { | ||
75 | return 1; | ||
76 | } | ||
77 | #else | ||
78 | static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu) | ||
79 | { | ||
80 | /* On BOOKE, DEC = 0 is as good as decrementer not enabled */ | ||
81 | return (vcpu->arch.tcr & TCR_DIE) && vcpu->arch.dec; | ||
82 | } | ||
83 | #endif | ||
84 | |||
85 | void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | 73 | void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) |
86 | { | 74 | { |
87 | unsigned long dec_nsec; | 75 | unsigned long dec_nsec; |
88 | unsigned long long dec_time; | 76 | unsigned long long dec_time; |
89 | 77 | ||
90 | pr_debug("mtDEC: %x\n", vcpu->arch.dec); | 78 | pr_debug("mtDEC: %x\n", vcpu->arch.dec); |
79 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); | ||
80 | |||
91 | #ifdef CONFIG_PPC_BOOK3S | 81 | #ifdef CONFIG_PPC_BOOK3S |
92 | /* mtdec lowers the interrupt line when positive. */ | 82 | /* mtdec lowers the interrupt line when positive. */ |
93 | kvmppc_core_dequeue_dec(vcpu); | 83 | kvmppc_core_dequeue_dec(vcpu); |
94 | 84 | ||
95 | /* POWER4+ triggers a dec interrupt if the value is < 0 */ | 85 | /* POWER4+ triggers a dec interrupt if the value is < 0 */ |
96 | if (vcpu->arch.dec & 0x80000000) { | 86 | if (vcpu->arch.dec & 0x80000000) { |
97 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); | ||
98 | kvmppc_core_queue_dec(vcpu); | 87 | kvmppc_core_queue_dec(vcpu); |
99 | return; | 88 | return; |
100 | } | 89 | } |
101 | #endif | 90 | #endif |
102 | if (kvmppc_dec_enabled(vcpu)) { | ||
103 | /* The decrementer ticks at the same rate as the timebase, so | ||
104 | * that's how we convert the guest DEC value to the number of | ||
105 | * host ticks. */ | ||
106 | 91 | ||
107 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); | 92 | #ifdef CONFIG_BOOKE |
108 | dec_time = vcpu->arch.dec; | 93 | /* On BOOKE, DEC = 0 is as good as decrementer not enabled */ |
109 | dec_time *= 1000; | 94 | if (vcpu->arch.dec == 0) |
110 | do_div(dec_time, tb_ticks_per_usec); | 95 | return; |
111 | dec_nsec = do_div(dec_time, NSEC_PER_SEC); | 96 | #endif |
112 | hrtimer_start(&vcpu->arch.dec_timer, | 97 | |
113 | ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); | 98 | /* |
114 | vcpu->arch.dec_jiffies = get_tb(); | 99 | * The decrementer ticks at the same rate as the timebase, so |
115 | } else { | 100 | * that's how we convert the guest DEC value to the number of |
116 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); | 101 | * host ticks. |
117 | } | 102 | */ |
103 | |||
104 | dec_time = vcpu->arch.dec; | ||
105 | dec_time *= 1000; | ||
106 | do_div(dec_time, tb_ticks_per_usec); | ||
107 | dec_nsec = do_div(dec_time, NSEC_PER_SEC); | ||
108 | hrtimer_start(&vcpu->arch.dec_timer, | ||
109 | ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); | ||
110 | vcpu->arch.dec_jiffies = get_tb(); | ||
118 | } | 111 | } |
119 | 112 | ||
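The conversion in kvmppc_emulate_dec() scales the guest DEC count (which ticks at the timebase frequency) into nanoseconds, ns = dec * 1000 / tb_ticks_per_usec, then splits that into the (seconds, nanoseconds) pair ktime_set() wants; each do_div() leaves the quotient in place and returns the remainder. A standalone model of the arithmetic, assuming an illustrative 500 MHz timebase:

    /* Standalone model of the DEC -> hrtimer conversion above; the
     * timebase frequency is an assumed example value. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t tb_ticks_per_usec = 500;   /* assume a 500 MHz timebase */
        uint64_t dec = 2000000000ULL;       /* guest DEC value, in tb ticks */

        uint64_t ns = dec * 1000 / tb_ticks_per_usec;  /* ticks -> ns */
        uint64_t dec_time = ns / 1000000000ULL;        /* whole seconds */
        uint64_t dec_nsec = ns % 1000000000ULL;        /* ns remainder */

        /* The kernel performs the same split with two do_div() calls
         * and hands (dec_time, dec_nsec) to ktime_set().  Here:
         * 2e9 ticks / 500 ticks-per-us = 4 s + 0 ns. */
        printf("DEC of %llu ticks fires in %llus + %lluns\n",
               (unsigned long long)dec, (unsigned long long)dec_time,
               (unsigned long long)dec_nsec);
        return 0;
    }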
120 | u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) | 113 | u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) |
121 | { | 114 | { |
122 | u64 jd = tb - vcpu->arch.dec_jiffies; | 115 | u64 jd = tb - vcpu->arch.dec_jiffies; |
116 | |||
117 | #ifdef CONFIG_BOOKE | ||
118 | if (vcpu->arch.dec < jd) | ||
119 | return 0; | ||
120 | #endif | ||
121 | |||
123 | return vcpu->arch.dec - jd; | 122 | return vcpu->arch.dec - jd; |
124 | } | 123 | } |
125 | 124 | ||
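kvmppc_get_dec() reconstructs the current DEC on read: dec_jiffies holds the timebase captured at the last write, so the elapsed ticks are simply subtracted. On Book E the result is clamped at zero, matching the commit's "decrementer stops at zero" rule, while Book3S is allowed to run negative. A standalone model with illustrative names:

    /* Model of the DEC read-back above; parameter names are
     * illustrative, not the kernel's. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t model_get_dec(uint32_t dec_written, uint64_t tb_at_write,
                                  uint64_t tb_now, int is_booke)
    {
        uint64_t elapsed = tb_now - tb_at_write;

        if (is_booke && elapsed > dec_written)
            return 0;                  /* Book E: decrementer stops at zero */
        return dec_written - elapsed;  /* Book3S: allowed to wrap negative */
    }

    int main(void)
    {
        /* 1000 ticks written, 1500 elapsed: Book E clamps, Book3S wraps. */
        printf("booke=%u book3s=0x%x\n",
               model_get_dec(1000, 0, 1500, 1),
               model_get_dec(1000, 0, 1500, 0));
        return 0;
    }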
126 | /* XXX to do: | 125 | /* XXX to do: |
127 | * lhax | 126 | * lhax |
128 | * lhaux | 127 | * lhaux |
129 | * lswx | 128 | * lswx |
130 | * lswi | 129 | * lswi |
131 | * stswx | 130 | * stswx |
132 | * stswi | 131 | * stswi |
133 | * lha | 132 | * lha |
134 | * lhau | 133 | * lhau |
135 | * lmw | 134 | * lmw |
136 | * stmw | 135 | * stmw |
137 | * | 136 | * |
138 | * XXX is_bigendian should depend on MMU mapping or MSR[LE] | 137 | * XXX is_bigendian should depend on MMU mapping or MSR[LE] |
139 | */ | 138 | */ |
140 | /* XXX Should probably auto-generate instruction decoding for a particular core | 139 | /* XXX Should probably auto-generate instruction decoding for a particular core |
141 | * from opcode tables in the future. */ | 140 | * from opcode tables in the future. */ |
142 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | 141 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) |
143 | { | 142 | { |
144 | u32 inst = kvmppc_get_last_inst(vcpu); | 143 | u32 inst = kvmppc_get_last_inst(vcpu); |
145 | u32 ea; | 144 | u32 ea; |
146 | int ra; | 145 | int ra; |
147 | int rb; | 146 | int rb; |
148 | int rs; | 147 | int rs; |
149 | int rt; | 148 | int rt; |
150 | int sprn; | 149 | int sprn; |
151 | enum emulation_result emulated = EMULATE_DONE; | 150 | enum emulation_result emulated = EMULATE_DONE; |
152 | int advance = 1; | 151 | int advance = 1; |
153 | 152 | ||
154 | /* this default type might be overwritten by subcategories */ | 153 | /* this default type might be overwritten by subcategories */ |
155 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 154 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
156 | 155 | ||
157 | pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); | 156 | pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); |
158 | 157 | ||
159 | switch (get_op(inst)) { | 158 | switch (get_op(inst)) { |
160 | case OP_TRAP: | 159 | case OP_TRAP: |
161 | #ifdef CONFIG_PPC_BOOK3S | 160 | #ifdef CONFIG_PPC_BOOK3S |
162 | case OP_TRAP_64: | 161 | case OP_TRAP_64: |
163 | kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); | 162 | kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); |
164 | #else | 163 | #else |
165 | kvmppc_core_queue_program(vcpu, | 164 | kvmppc_core_queue_program(vcpu, |
166 | vcpu->arch.shared->esr | ESR_PTR); | 165 | vcpu->arch.shared->esr | ESR_PTR); |
167 | #endif | 166 | #endif |
168 | advance = 0; | 167 | advance = 0; |
169 | break; | 168 | break; |
170 | 169 | ||
171 | case 31: | 170 | case 31: |
172 | switch (get_xop(inst)) { | 171 | switch (get_xop(inst)) { |
173 | 172 | ||
174 | case OP_31_XOP_LWZX: | 173 | case OP_31_XOP_LWZX: |
175 | rt = get_rt(inst); | 174 | rt = get_rt(inst); |
176 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 175 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
177 | break; | 176 | break; |
178 | 177 | ||
179 | case OP_31_XOP_LBZX: | 178 | case OP_31_XOP_LBZX: |
180 | rt = get_rt(inst); | 179 | rt = get_rt(inst); |
181 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 180 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
182 | break; | 181 | break; |
183 | 182 | ||
184 | case OP_31_XOP_LBZUX: | 183 | case OP_31_XOP_LBZUX: |
185 | rt = get_rt(inst); | 184 | rt = get_rt(inst); |
186 | ra = get_ra(inst); | 185 | ra = get_ra(inst); |
187 | rb = get_rb(inst); | 186 | rb = get_rb(inst); |
188 | 187 | ||
189 | ea = kvmppc_get_gpr(vcpu, rb); | 188 | ea = kvmppc_get_gpr(vcpu, rb); |
190 | if (ra) | 189 | if (ra) |
191 | ea += kvmppc_get_gpr(vcpu, ra); | 190 | ea += kvmppc_get_gpr(vcpu, ra); |
192 | 191 | ||
193 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 192 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
194 | kvmppc_set_gpr(vcpu, ra, ea); | 193 | kvmppc_set_gpr(vcpu, ra, ea); |
195 | break; | 194 | break; |
196 | 195 | ||
197 | case OP_31_XOP_STWX: | 196 | case OP_31_XOP_STWX: |
198 | rs = get_rs(inst); | 197 | rs = get_rs(inst); |
199 | emulated = kvmppc_handle_store(run, vcpu, | 198 | emulated = kvmppc_handle_store(run, vcpu, |
200 | kvmppc_get_gpr(vcpu, rs), | 199 | kvmppc_get_gpr(vcpu, rs), |
201 | 4, 1); | 200 | 4, 1); |
202 | break; | 201 | break; |
203 | 202 | ||
204 | case OP_31_XOP_STBX: | 203 | case OP_31_XOP_STBX: |
205 | rs = get_rs(inst); | 204 | rs = get_rs(inst); |
206 | emulated = kvmppc_handle_store(run, vcpu, | 205 | emulated = kvmppc_handle_store(run, vcpu, |
207 | kvmppc_get_gpr(vcpu, rs), | 206 | kvmppc_get_gpr(vcpu, rs), |
208 | 1, 1); | 207 | 1, 1); |
209 | break; | 208 | break; |
210 | 209 | ||
211 | case OP_31_XOP_STBUX: | 210 | case OP_31_XOP_STBUX: |
212 | rs = get_rs(inst); | 211 | rs = get_rs(inst); |
213 | ra = get_ra(inst); | 212 | ra = get_ra(inst); |
214 | rb = get_rb(inst); | 213 | rb = get_rb(inst); |
215 | 214 | ||
216 | ea = kvmppc_get_gpr(vcpu, rb); | 215 | ea = kvmppc_get_gpr(vcpu, rb); |
217 | if (ra) | 216 | if (ra) |
218 | ea += kvmppc_get_gpr(vcpu, ra); | 217 | ea += kvmppc_get_gpr(vcpu, ra); |
219 | 218 | ||
220 | emulated = kvmppc_handle_store(run, vcpu, | 219 | emulated = kvmppc_handle_store(run, vcpu, |
221 | kvmppc_get_gpr(vcpu, rs), | 220 | kvmppc_get_gpr(vcpu, rs), |
222 | 1, 1); | 221 | 1, 1); |
223 | kvmppc_set_gpr(vcpu, rs, ea); | 222 | kvmppc_set_gpr(vcpu, rs, ea); |
224 | break; | 223 | break; |
225 | 224 | ||
226 | case OP_31_XOP_LHAX: | 225 | case OP_31_XOP_LHAX: |
227 | rt = get_rt(inst); | 226 | rt = get_rt(inst); |
228 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 227 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
229 | break; | 228 | break; |
230 | 229 | ||
231 | case OP_31_XOP_LHZX: | 230 | case OP_31_XOP_LHZX: |
232 | rt = get_rt(inst); | 231 | rt = get_rt(inst); |
233 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 232 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
234 | break; | 233 | break; |
235 | 234 | ||
236 | case OP_31_XOP_LHZUX: | 235 | case OP_31_XOP_LHZUX: |
237 | rt = get_rt(inst); | 236 | rt = get_rt(inst); |
238 | ra = get_ra(inst); | 237 | ra = get_ra(inst); |
239 | rb = get_rb(inst); | 238 | rb = get_rb(inst); |
240 | 239 | ||
241 | ea = kvmppc_get_gpr(vcpu, rb); | 240 | ea = kvmppc_get_gpr(vcpu, rb); |
242 | if (ra) | 241 | if (ra) |
243 | ea += kvmppc_get_gpr(vcpu, ra); | 242 | ea += kvmppc_get_gpr(vcpu, ra); |
244 | 243 | ||
245 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 244 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
246 | kvmppc_set_gpr(vcpu, ra, ea); | 245 | kvmppc_set_gpr(vcpu, ra, ea); |
247 | break; | 246 | break; |
248 | 247 | ||
249 | case OP_31_XOP_MFSPR: | 248 | case OP_31_XOP_MFSPR: |
250 | sprn = get_sprn(inst); | 249 | sprn = get_sprn(inst); |
251 | rt = get_rt(inst); | 250 | rt = get_rt(inst); |
252 | 251 | ||
253 | switch (sprn) { | 252 | switch (sprn) { |
254 | case SPRN_SRR0: | 253 | case SPRN_SRR0: |
255 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0); | 254 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0); |
256 | break; | 255 | break; |
257 | case SPRN_SRR1: | 256 | case SPRN_SRR1: |
258 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1); | 257 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1); |
259 | break; | 258 | break; |
260 | case SPRN_PVR: | 259 | case SPRN_PVR: |
261 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; | 260 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; |
262 | case SPRN_PIR: | 261 | case SPRN_PIR: |
263 | kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; | 262 | kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; |
264 | case SPRN_MSSSR0: | 263 | case SPRN_MSSSR0: |
265 | kvmppc_set_gpr(vcpu, rt, 0); break; | 264 | kvmppc_set_gpr(vcpu, rt, 0); break; |
266 | 265 | ||
267 | /* Note: mftb and TBRL/TBWL are user-accessible, so | 266 | /* Note: mftb and TBRL/TBWL are user-accessible, so |
268 | * the guest can always access the real TB anyway. | 267 | * the guest can always access the real TB anyway. |
269 | * In fact, we probably will never see these traps. */ | 268 | * In fact, we probably will never see these traps. */ |
270 | case SPRN_TBWL: | 269 | case SPRN_TBWL: |
271 | kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; | 270 | kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; |
272 | case SPRN_TBWU: | 271 | case SPRN_TBWU: |
273 | kvmppc_set_gpr(vcpu, rt, get_tb()); break; | 272 | kvmppc_set_gpr(vcpu, rt, get_tb()); break; |
274 | 273 | ||
275 | case SPRN_SPRG0: | 274 | case SPRN_SPRG0: |
276 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0); | 275 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0); |
277 | break; | 276 | break; |
278 | case SPRN_SPRG1: | 277 | case SPRN_SPRG1: |
279 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1); | 278 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1); |
280 | break; | 279 | break; |
281 | case SPRN_SPRG2: | 280 | case SPRN_SPRG2: |
282 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2); | 281 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2); |
283 | break; | 282 | break; |
284 | case SPRN_SPRG3: | 283 | case SPRN_SPRG3: |
285 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3); | 284 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3); |
286 | break; | 285 | break; |
287 | /* Note: SPRG4-7 are user-readable, so we don't get | 286 | /* Note: SPRG4-7 are user-readable, so we don't get |
288 | * a trap. */ | 287 | * a trap. */ |
289 | 288 | ||
290 | case SPRN_DEC: | 289 | case SPRN_DEC: |
291 | { | 290 | { |
292 | kvmppc_set_gpr(vcpu, rt, | 291 | kvmppc_set_gpr(vcpu, rt, |
293 | kvmppc_get_dec(vcpu, get_tb())); | 292 | kvmppc_get_dec(vcpu, get_tb())); |
294 | break; | 293 | break; |
295 | } | 294 | } |
296 | default: | 295 | default: |
297 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); | 296 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); |
298 | if (emulated == EMULATE_FAIL) { | 297 | if (emulated == EMULATE_FAIL) { |
299 | printk("mfspr: unknown spr %x\n", sprn); | 298 | printk("mfspr: unknown spr %x\n", sprn); |
300 | kvmppc_set_gpr(vcpu, rt, 0); | 299 | kvmppc_set_gpr(vcpu, rt, 0); |
301 | } | 300 | } |
302 | break; | 301 | break; |
303 | } | 302 | } |
304 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); | 303 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); |
305 | break; | 304 | break; |
306 | 305 | ||
307 | case OP_31_XOP_STHX: | 306 | case OP_31_XOP_STHX: |
308 | rs = get_rs(inst); | 307 | rs = get_rs(inst); |
309 | ra = get_ra(inst); | 308 | ra = get_ra(inst); |
310 | rb = get_rb(inst); | 309 | rb = get_rb(inst); |
311 | 310 | ||
312 | emulated = kvmppc_handle_store(run, vcpu, | 311 | emulated = kvmppc_handle_store(run, vcpu, |
313 | kvmppc_get_gpr(vcpu, rs), | 312 | kvmppc_get_gpr(vcpu, rs), |
314 | 2, 1); | 313 | 2, 1); |
315 | break; | 314 | break; |
316 | 315 | ||
317 | case OP_31_XOP_STHUX: | 316 | case OP_31_XOP_STHUX: |
318 | rs = get_rs(inst); | 317 | rs = get_rs(inst); |
319 | ra = get_ra(inst); | 318 | ra = get_ra(inst); |
320 | rb = get_rb(inst); | 319 | rb = get_rb(inst); |
321 | 320 | ||
322 | ea = kvmppc_get_gpr(vcpu, rb); | 321 | ea = kvmppc_get_gpr(vcpu, rb); |
323 | if (ra) | 322 | if (ra) |
324 | ea += kvmppc_get_gpr(vcpu, ra); | 323 | ea += kvmppc_get_gpr(vcpu, ra); |
325 | 324 | ||
326 | emulated = kvmppc_handle_store(run, vcpu, | 325 | emulated = kvmppc_handle_store(run, vcpu, |
327 | kvmppc_get_gpr(vcpu, rs), | 326 | kvmppc_get_gpr(vcpu, rs), |
328 | 2, 1); | 327 | 2, 1); |
329 | kvmppc_set_gpr(vcpu, ra, ea); | 328 | kvmppc_set_gpr(vcpu, ra, ea); |
330 | break; | 329 | break; |
331 | 330 | ||
332 | case OP_31_XOP_MTSPR: | 331 | case OP_31_XOP_MTSPR: |
333 | sprn = get_sprn(inst); | 332 | sprn = get_sprn(inst); |
334 | rs = get_rs(inst); | 333 | rs = get_rs(inst); |
335 | switch (sprn) { | 334 | switch (sprn) { |
336 | case SPRN_SRR0: | 335 | case SPRN_SRR0: |
337 | vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs); | 336 | vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs); |
338 | break; | 337 | break; |
339 | case SPRN_SRR1: | 338 | case SPRN_SRR1: |
340 | vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs); | 339 | vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs); |
341 | break; | 340 | break; |
342 | 341 | ||
343 | /* XXX We need to context-switch the timebase for | 342 | /* XXX We need to context-switch the timebase for |
344 | * watchdog and FIT. */ | 343 | * watchdog and FIT. */ |
345 | case SPRN_TBWL: break; | 344 | case SPRN_TBWL: break; |
346 | case SPRN_TBWU: break; | 345 | case SPRN_TBWU: break; |
347 | 346 | ||
348 | case SPRN_MSSSR0: break; | 347 | case SPRN_MSSSR0: break; |
349 | 348 | ||
350 | case SPRN_DEC: | 349 | case SPRN_DEC: |
351 | vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); | 350 | vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); |
352 | kvmppc_emulate_dec(vcpu); | 351 | kvmppc_emulate_dec(vcpu); |
353 | break; | 352 | break; |
354 | 353 | ||
355 | case SPRN_SPRG0: | 354 | case SPRN_SPRG0: |
356 | vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs); | 355 | vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs); |
357 | break; | 356 | break; |
358 | case SPRN_SPRG1: | 357 | case SPRN_SPRG1: |
359 | vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs); | 358 | vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs); |
360 | break; | 359 | break; |
361 | case SPRN_SPRG2: | 360 | case SPRN_SPRG2: |
362 | vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs); | 361 | vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs); |
363 | break; | 362 | break; |
364 | case SPRN_SPRG3: | 363 | case SPRN_SPRG3: |
365 | vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs); | 364 | vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs); |
366 | break; | 365 | break; |
367 | 366 | ||
368 | default: | 367 | default: |
369 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); | 368 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); |
370 | if (emulated == EMULATE_FAIL) | 369 | if (emulated == EMULATE_FAIL) |
371 | printk("mtspr: unknown spr %x\n", sprn); | 370 | printk("mtspr: unknown spr %x\n", sprn); |
372 | break; | 371 | break; |
373 | } | 372 | } |
374 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); | 373 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); |
375 | break; | 374 | break; |
376 | 375 | ||
377 | case OP_31_XOP_DCBI: | 376 | case OP_31_XOP_DCBI: |
378 | /* Do nothing. The guest is performing dcbi because | 377 | /* Do nothing. The guest is performing dcbi because |
379 | * hardware DMA is not snooped by the dcache, but | 378 | * hardware DMA is not snooped by the dcache, but |
380 | * emulated DMA either goes through the dcache as | 379 | * emulated DMA either goes through the dcache as |
381 | * normal writes, or the host kernel has handled dcache | 380 | * normal writes, or the host kernel has handled dcache |
382 | * coherence. */ | 381 | * coherence. */ |
383 | break; | 382 | break; |
384 | 383 | ||
385 | case OP_31_XOP_LWBRX: | 384 | case OP_31_XOP_LWBRX: |
386 | rt = get_rt(inst); | 385 | rt = get_rt(inst); |
387 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); | 386 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); |
388 | break; | 387 | break; |
389 | 388 | ||
390 | case OP_31_XOP_TLBSYNC: | 389 | case OP_31_XOP_TLBSYNC: |
391 | break; | 390 | break; |
392 | 391 | ||
393 | case OP_31_XOP_STWBRX: | 392 | case OP_31_XOP_STWBRX: |
394 | rs = get_rs(inst); | 393 | rs = get_rs(inst); |
395 | ra = get_ra(inst); | 394 | ra = get_ra(inst); |
396 | rb = get_rb(inst); | 395 | rb = get_rb(inst); |
397 | 396 | ||
398 | emulated = kvmppc_handle_store(run, vcpu, | 397 | emulated = kvmppc_handle_store(run, vcpu, |
399 | kvmppc_get_gpr(vcpu, rs), | 398 | kvmppc_get_gpr(vcpu, rs), |
400 | 4, 0); | 399 | 4, 0); |
401 | break; | 400 | break; |
402 | 401 | ||
403 | case OP_31_XOP_LHBRX: | 402 | case OP_31_XOP_LHBRX: |
404 | rt = get_rt(inst); | 403 | rt = get_rt(inst); |
405 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | 404 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); |
406 | break; | 405 | break; |
407 | 406 | ||
408 | case OP_31_XOP_STHBRX: | 407 | case OP_31_XOP_STHBRX: |
409 | rs = get_rs(inst); | 408 | rs = get_rs(inst); |
410 | ra = get_ra(inst); | 409 | ra = get_ra(inst); |
411 | rb = get_rb(inst); | 410 | rb = get_rb(inst); |
412 | 411 | ||
413 | emulated = kvmppc_handle_store(run, vcpu, | 412 | emulated = kvmppc_handle_store(run, vcpu, |
414 | kvmppc_get_gpr(vcpu, rs), | 413 | kvmppc_get_gpr(vcpu, rs), |
415 | 2, 0); | 414 | 2, 0); |
416 | break; | 415 | break; |
417 | 416 | ||
418 | default: | 417 | default: |
419 | /* Attempt core-specific emulation below. */ | 418 | /* Attempt core-specific emulation below. */ |
420 | emulated = EMULATE_FAIL; | 419 | emulated = EMULATE_FAIL; |
421 | } | 420 | } |
422 | break; | 421 | break; |
423 | 422 | ||
424 | case OP_LWZ: | 423 | case OP_LWZ: |
425 | rt = get_rt(inst); | 424 | rt = get_rt(inst); |
426 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 425 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
427 | break; | 426 | break; |
428 | 427 | ||
429 | case OP_LWZU: | 428 | case OP_LWZU: |
430 | ra = get_ra(inst); | 429 | ra = get_ra(inst); |
431 | rt = get_rt(inst); | 430 | rt = get_rt(inst); |
432 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 431 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
433 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 432 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
434 | break; | 433 | break; |
435 | 434 | ||
436 | case OP_LBZ: | 435 | case OP_LBZ: |
437 | rt = get_rt(inst); | 436 | rt = get_rt(inst); |
438 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 437 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
439 | break; | 438 | break; |
440 | 439 | ||
441 | case OP_LBZU: | 440 | case OP_LBZU: |
442 | ra = get_ra(inst); | 441 | ra = get_ra(inst); |
443 | rt = get_rt(inst); | 442 | rt = get_rt(inst); |
444 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 443 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
445 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 444 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
446 | break; | 445 | break; |
447 | 446 | ||
448 | case OP_STW: | 447 | case OP_STW: |
449 | rs = get_rs(inst); | 448 | rs = get_rs(inst); |
450 | emulated = kvmppc_handle_store(run, vcpu, | 449 | emulated = kvmppc_handle_store(run, vcpu, |
451 | kvmppc_get_gpr(vcpu, rs), | 450 | kvmppc_get_gpr(vcpu, rs), |
452 | 4, 1); | 451 | 4, 1); |
453 | break; | 452 | break; |
454 | 453 | ||
455 | case OP_STWU: | 454 | case OP_STWU: |
456 | ra = get_ra(inst); | 455 | ra = get_ra(inst); |
457 | rs = get_rs(inst); | 456 | rs = get_rs(inst); |
458 | emulated = kvmppc_handle_store(run, vcpu, | 457 | emulated = kvmppc_handle_store(run, vcpu, |
459 | kvmppc_get_gpr(vcpu, rs), | 458 | kvmppc_get_gpr(vcpu, rs), |
460 | 4, 1); | 459 | 4, 1); |
461 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 460 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
462 | break; | 461 | break; |
463 | 462 | ||
464 | case OP_STB: | 463 | case OP_STB: |
465 | rs = get_rs(inst); | 464 | rs = get_rs(inst); |
466 | emulated = kvmppc_handle_store(run, vcpu, | 465 | emulated = kvmppc_handle_store(run, vcpu, |
467 | kvmppc_get_gpr(vcpu, rs), | 466 | kvmppc_get_gpr(vcpu, rs), |
468 | 1, 1); | 467 | 1, 1); |
469 | break; | 468 | break; |
470 | 469 | ||
471 | case OP_STBU: | 470 | case OP_STBU: |
472 | ra = get_ra(inst); | 471 | ra = get_ra(inst); |
473 | rs = get_rs(inst); | 472 | rs = get_rs(inst); |
474 | emulated = kvmppc_handle_store(run, vcpu, | 473 | emulated = kvmppc_handle_store(run, vcpu, |
475 | kvmppc_get_gpr(vcpu, rs), | 474 | kvmppc_get_gpr(vcpu, rs), |
476 | 1, 1); | 475 | 1, 1); |
477 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 476 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
478 | break; | 477 | break; |
479 | 478 | ||
480 | case OP_LHZ: | 479 | case OP_LHZ: |
481 | rt = get_rt(inst); | 480 | rt = get_rt(inst); |
482 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 481 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
483 | break; | 482 | break; |
484 | 483 | ||
485 | case OP_LHZU: | 484 | case OP_LHZU: |
486 | ra = get_ra(inst); | 485 | ra = get_ra(inst); |
487 | rt = get_rt(inst); | 486 | rt = get_rt(inst); |
488 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 487 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
489 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 488 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
490 | break; | 489 | break; |
491 | 490 | ||
492 | case OP_LHA: | 491 | case OP_LHA: |
493 | rt = get_rt(inst); | 492 | rt = get_rt(inst); |
494 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 493 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
495 | break; | 494 | break; |
496 | 495 | ||
497 | case OP_LHAU: | 496 | case OP_LHAU: |
498 | ra = get_ra(inst); | 497 | ra = get_ra(inst); |
499 | rt = get_rt(inst); | 498 | rt = get_rt(inst); |
500 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 499 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
501 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 500 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
502 | break; | 501 | break; |
503 | 502 | ||
504 | case OP_STH: | 503 | case OP_STH: |
505 | rs = get_rs(inst); | 504 | rs = get_rs(inst); |
506 | emulated = kvmppc_handle_store(run, vcpu, | 505 | emulated = kvmppc_handle_store(run, vcpu, |
507 | kvmppc_get_gpr(vcpu, rs), | 506 | kvmppc_get_gpr(vcpu, rs), |
508 | 2, 1); | 507 | 2, 1); |
509 | break; | 508 | break; |
510 | 509 | ||
511 | case OP_STHU: | 510 | case OP_STHU: |
512 | ra = get_ra(inst); | 511 | ra = get_ra(inst); |
513 | rs = get_rs(inst); | 512 | rs = get_rs(inst); |
514 | emulated = kvmppc_handle_store(run, vcpu, | 513 | emulated = kvmppc_handle_store(run, vcpu, |
515 | kvmppc_get_gpr(vcpu, rs), | 514 | kvmppc_get_gpr(vcpu, rs), |
516 | 2, 1); | 515 | 2, 1); |
517 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 516 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
518 | break; | 517 | break; |
519 | 518 | ||
520 | default: | 519 | default: |
521 | emulated = EMULATE_FAIL; | 520 | emulated = EMULATE_FAIL; |
522 | } | 521 | } |
523 | 522 | ||
524 | if (emulated == EMULATE_FAIL) { | 523 | if (emulated == EMULATE_FAIL) { |
525 | emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); | 524 | emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); |
526 | if (emulated == EMULATE_AGAIN) { | 525 | if (emulated == EMULATE_AGAIN) { |
527 | advance = 0; | 526 | advance = 0; |
arch/powerpc/kvm/powerpc.c
1 | /* | 1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | 2 | * This program is free software; you can redistribute it and/or modify |
3 | * it under the terms of the GNU General Public License, version 2, as | 3 | * it under the terms of the GNU General Public License, version 2, as |
4 | * published by the Free Software Foundation. | 4 | * published by the Free Software Foundation. |
5 | * | 5 | * |
6 | * This program is distributed in the hope that it will be useful, | 6 | * This program is distributed in the hope that it will be useful, |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
9 | * GNU General Public License for more details. | 9 | * GNU General Public License for more details. |
10 | * | 10 | * |
11 | * You should have received a copy of the GNU General Public License | 11 | * You should have received a copy of the GNU General Public License |
12 | * along with this program; if not, write to the Free Software | 12 | * along with this program; if not, write to the Free Software |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
14 | * | 14 | * |
15 | * Copyright IBM Corp. 2007 | 15 | * Copyright IBM Corp. 2007 |
16 | * | 16 | * |
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | 17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> |
18 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | 18 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/kvm_host.h> | 23 | #include <linux/kvm_host.h> |
24 | #include <linux/vmalloc.h> | 24 | #include <linux/vmalloc.h> |
25 | #include <linux/hrtimer.h> | 25 | #include <linux/hrtimer.h> |
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <asm/cputable.h> | 28 | #include <asm/cputable.h> |
29 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
30 | #include <asm/kvm_ppc.h> | 30 | #include <asm/kvm_ppc.h> |
31 | #include <asm/tlbflush.h> | 31 | #include <asm/tlbflush.h> |
32 | #include <asm/cputhreads.h> | 32 | #include <asm/cputhreads.h> |
33 | #include "timing.h" | 33 | #include "timing.h" |
34 | #include "../mm/mmu_decl.h" | 34 | #include "../mm/mmu_decl.h" |
35 | 35 | ||
36 | #define CREATE_TRACE_POINTS | 36 | #define CREATE_TRACE_POINTS |
37 | #include "trace.h" | 37 | #include "trace.h" |
38 | 38 | ||
39 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | 39 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) |
40 | { | 40 | { |
41 | return !(v->arch.shared->msr & MSR_WE) || | 41 | return !(v->arch.shared->msr & MSR_WE) || |
42 | !!(v->arch.pending_exceptions); | 42 | !!(v->arch.pending_exceptions) || |
43 | v->requests; | ||
43 | } | 44 | } |
44 | 45 | ||
45 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | 46 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) |
46 | { | 47 | { |
47 | int nr = kvmppc_get_gpr(vcpu, 11); | 48 | int nr = kvmppc_get_gpr(vcpu, 11); |
48 | int r; | 49 | int r; |
49 | unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); | 50 | unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); |
50 | unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); | 51 | unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); |
51 | unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); | 52 | unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); |
52 | unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); | 53 | unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); |
53 | unsigned long r2 = 0; | 54 | unsigned long r2 = 0; |
54 | 55 | ||
55 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | 56 | if (!(vcpu->arch.shared->msr & MSR_SF)) { |
56 | /* 32 bit mode */ | 57 | /* 32 bit mode */ |
57 | param1 &= 0xffffffff; | 58 | param1 &= 0xffffffff; |
58 | param2 &= 0xffffffff; | 59 | param2 &= 0xffffffff; |
59 | param3 &= 0xffffffff; | 60 | param3 &= 0xffffffff; |
60 | param4 &= 0xffffffff; | 61 | param4 &= 0xffffffff; |
61 | } | 62 | } |
62 | 63 | ||
63 | switch (nr) { | 64 | switch (nr) { |
64 | case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE: | 65 | case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE: |
65 | { | 66 | { |
66 | vcpu->arch.magic_page_pa = param1; | 67 | vcpu->arch.magic_page_pa = param1; |
67 | vcpu->arch.magic_page_ea = param2; | 68 | vcpu->arch.magic_page_ea = param2; |
68 | 69 | ||
69 | r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; | 70 | r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; |
70 | 71 | ||
71 | r = HC_EV_SUCCESS; | 72 | r = HC_EV_SUCCESS; |
72 | break; | 73 | break; |
73 | } | 74 | } |
74 | case HC_VENDOR_KVM | KVM_HC_FEATURES: | 75 | case HC_VENDOR_KVM | KVM_HC_FEATURES: |
75 | r = HC_EV_SUCCESS; | 76 | r = HC_EV_SUCCESS; |
76 | #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500) | 77 | #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500) |
77 | /* XXX Missing magic page on 44x */ | 78 | /* XXX Missing magic page on 44x */ |
78 | r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); | 79 | r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); |
79 | #endif | 80 | #endif |
80 | 81 | ||
81 | /* Second return value is in r4 */ | 82 | /* Second return value is in r4 */ |
82 | break; | 83 | break; |
83 | default: | 84 | default: |
84 | r = HC_EV_UNIMPLEMENTED; | 85 | r = HC_EV_UNIMPLEMENTED; |
85 | break; | 86 | break; |
86 | } | 87 | } |
87 | 88 | ||
88 | kvmppc_set_gpr(vcpu, 4, r2); | 89 | kvmppc_set_gpr(vcpu, 4, r2); |
89 | 90 | ||
90 | return r; | 91 | return r; |
91 | } | 92 | } |
92 | 93 | ||
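The register convention visible in kvmppc_kvm_pv(): the hypercall number arrives in guest r11, up to four parameters in r3-r6 (masked to 32 bits when the guest runs with MSR[SF] clear), the primary return code is passed back through the function's return value, and a second return value is written to r4. A compact standalone model of that dispatch, with hypothetical types and return codes (the real code reads registers through kvmppc_get_gpr()):

    /* Hypothetical model of the hypercall register ABI above. */
    #include <stdint.h>
    #include <stdio.h>

    #define MODEL_EV_SUCCESS        0  /* stands in for HC_EV_SUCCESS */
    #define MODEL_EV_UNIMPLEMENTED  1  /* stands in for HC_EV_UNIMPLEMENTED */

    struct model_regs { uint64_t gpr[32]; int msr_sf; };

    static int model_kvm_pv(struct model_regs *r)
    {
        uint64_t nr = r->gpr[11];     /* hypercall number */
        uint64_t param1 = r->gpr[3];  /* first of up to four args, r3..r6 */
        uint64_t r2 = 0;              /* second return value */
        int ret;

        if (!r->msr_sf)               /* 32-bit guest: truncate arguments */
            param1 &= 0xffffffff;

        switch (nr) {
        /* ...per-call handling, mirroring the switch above... */
        default:
            ret = MODEL_EV_UNIMPLEMENTED;
        }

        (void)param1;
        r->gpr[4] = r2;               /* second return value goes in r4 */
        return ret;                   /* primary return value */
    }

    int main(void)
    {
        struct model_regs r = { .gpr = { [11] = 42 }, .msr_sf = 0 };
        printf("ret=%d r4=%llu\n", model_kvm_pv(&r),
               (unsigned long long)r.gpr[4]);
        return 0;
    }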
93 | int kvmppc_sanity_check(struct kvm_vcpu *vcpu) | 94 | int kvmppc_sanity_check(struct kvm_vcpu *vcpu) |
94 | { | 95 | { |
95 | int r = false; | 96 | int r = false; |
96 | 97 | ||
97 | /* We have to know what CPU to virtualize */ | 98 | /* We have to know what CPU to virtualize */ |
98 | if (!vcpu->arch.pvr) | 99 | if (!vcpu->arch.pvr) |
99 | goto out; | 100 | goto out; |
100 | 101 | ||
101 | /* PAPR only works with book3s_64 */ | 102 | /* PAPR only works with book3s_64 */ |
102 | if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) | 103 | if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) |
103 | goto out; | 104 | goto out; |
104 | 105 | ||
105 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 106 | #ifdef CONFIG_KVM_BOOK3S_64_HV |
106 | /* HV KVM can only do PAPR mode for now */ | 107 | /* HV KVM can only do PAPR mode for now */ |
107 | if (!vcpu->arch.papr_enabled) | 108 | if (!vcpu->arch.papr_enabled) |
108 | goto out; | 109 | goto out; |
109 | #endif | 110 | #endif |
110 | 111 | ||
111 | r = true; | 112 | r = true; |
112 | 113 | ||
113 | out: | 114 | out: |
114 | vcpu->arch.sane = r; | 115 | vcpu->arch.sane = r; |
115 | return r ? 0 : -EINVAL; | 116 | return r ? 0 : -EINVAL; |
116 | } | 117 | } |
117 | 118 | ||
118 | int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | 119 | int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) |
119 | { | 120 | { |
120 | enum emulation_result er; | 121 | enum emulation_result er; |
121 | int r; | 122 | int r; |
122 | 123 | ||
123 | er = kvmppc_emulate_instruction(run, vcpu); | 124 | er = kvmppc_emulate_instruction(run, vcpu); |
124 | switch (er) { | 125 | switch (er) { |
125 | case EMULATE_DONE: | 126 | case EMULATE_DONE: |
126 | /* Future optimization: only reload non-volatiles if they were | 127 | /* Future optimization: only reload non-volatiles if they were |
127 | * actually modified. */ | 128 | * actually modified. */ |
128 | r = RESUME_GUEST_NV; | 129 | r = RESUME_GUEST_NV; |
129 | break; | 130 | break; |
130 | case EMULATE_DO_MMIO: | 131 | case EMULATE_DO_MMIO: |
131 | run->exit_reason = KVM_EXIT_MMIO; | 132 | run->exit_reason = KVM_EXIT_MMIO; |
132 | /* We must reload nonvolatiles because "update" load/store | 133 | /* We must reload nonvolatiles because "update" load/store |
133 | * instructions modify register state. */ | 134 | * instructions modify register state. */ |
134 | /* Future optimization: only reload non-volatiles if they were | 135 | /* Future optimization: only reload non-volatiles if they were |
135 | * actually modified. */ | 136 | * actually modified. */ |
136 | r = RESUME_HOST_NV; | 137 | r = RESUME_HOST_NV; |
137 | break; | 138 | break; |
138 | case EMULATE_FAIL: | 139 | case EMULATE_FAIL: |
139 | /* XXX Deliver Program interrupt to guest. */ | 140 | /* XXX Deliver Program interrupt to guest. */ |
140 | printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, | 141 | printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__, |
141 | kvmppc_get_last_inst(vcpu)); | 142 | kvmppc_get_last_inst(vcpu)); |
142 | r = RESUME_HOST; | 143 | r = RESUME_HOST; |
143 | break; | 144 | break; |
144 | default: | 145 | default: |
145 | BUG(); | 146 | BUG(); |
146 | } | 147 | } |
147 | 148 | ||
148 | return r; | 149 | return r; |
149 | } | 150 | } |
150 | 151 | ||
151 | int kvm_arch_hardware_enable(void *garbage) | 152 | int kvm_arch_hardware_enable(void *garbage) |
152 | { | 153 | { |
153 | return 0; | 154 | return 0; |
154 | } | 155 | } |
155 | 156 | ||
156 | void kvm_arch_hardware_disable(void *garbage) | 157 | void kvm_arch_hardware_disable(void *garbage) |
157 | { | 158 | { |
158 | } | 159 | } |
159 | 160 | ||
160 | int kvm_arch_hardware_setup(void) | 161 | int kvm_arch_hardware_setup(void) |
161 | { | 162 | { |
162 | return 0; | 163 | return 0; |
163 | } | 164 | } |
164 | 165 | ||
165 | void kvm_arch_hardware_unsetup(void) | 166 | void kvm_arch_hardware_unsetup(void) |
166 | { | 167 | { |
167 | } | 168 | } |
168 | 169 | ||
169 | void kvm_arch_check_processor_compat(void *rtn) | 170 | void kvm_arch_check_processor_compat(void *rtn) |
170 | { | 171 | { |
171 | *(int *)rtn = kvmppc_core_check_processor_compat(); | 172 | *(int *)rtn = kvmppc_core_check_processor_compat(); |
172 | } | 173 | } |
173 | 174 | ||
174 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | 175 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) |
175 | { | 176 | { |
176 | if (type) | 177 | if (type) |
177 | return -EINVAL; | 178 | return -EINVAL; |
178 | 179 | ||
179 | return kvmppc_core_init_vm(kvm); | 180 | return kvmppc_core_init_vm(kvm); |
180 | } | 181 | } |
181 | 182 | ||
182 | void kvm_arch_destroy_vm(struct kvm *kvm) | 183 | void kvm_arch_destroy_vm(struct kvm *kvm) |
183 | { | 184 | { |
184 | unsigned int i; | 185 | unsigned int i; |
185 | struct kvm_vcpu *vcpu; | 186 | struct kvm_vcpu *vcpu; |
186 | 187 | ||
187 | kvm_for_each_vcpu(i, vcpu, kvm) | 188 | kvm_for_each_vcpu(i, vcpu, kvm) |
188 | kvm_arch_vcpu_free(vcpu); | 189 | kvm_arch_vcpu_free(vcpu); |
189 | 190 | ||
190 | mutex_lock(&kvm->lock); | 191 | mutex_lock(&kvm->lock); |
191 | for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) | 192 | for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) |
192 | kvm->vcpus[i] = NULL; | 193 | kvm->vcpus[i] = NULL; |
193 | 194 | ||
194 | atomic_set(&kvm->online_vcpus, 0); | 195 | atomic_set(&kvm->online_vcpus, 0); |
195 | 196 | ||
196 | kvmppc_core_destroy_vm(kvm); | 197 | kvmppc_core_destroy_vm(kvm); |
197 | 198 | ||
198 | mutex_unlock(&kvm->lock); | 199 | mutex_unlock(&kvm->lock); |
199 | } | 200 | } |
200 | 201 | ||
201 | void kvm_arch_sync_events(struct kvm *kvm) | 202 | void kvm_arch_sync_events(struct kvm *kvm) |
202 | { | 203 | { |
203 | } | 204 | } |
204 | 205 | ||
205 | int kvm_dev_ioctl_check_extension(long ext) | 206 | int kvm_dev_ioctl_check_extension(long ext) |
206 | { | 207 | { |
207 | int r; | 208 | int r; |
208 | 209 | ||
209 | switch (ext) { | 210 | switch (ext) { |
210 | #ifdef CONFIG_BOOKE | 211 | #ifdef CONFIG_BOOKE |
211 | case KVM_CAP_PPC_BOOKE_SREGS: | 212 | case KVM_CAP_PPC_BOOKE_SREGS: |
212 | #else | 213 | #else |
213 | case KVM_CAP_PPC_SEGSTATE: | 214 | case KVM_CAP_PPC_SEGSTATE: |
214 | case KVM_CAP_PPC_PAPR: | 215 | case KVM_CAP_PPC_PAPR: |
215 | #endif | 216 | #endif |
216 | case KVM_CAP_PPC_UNSET_IRQ: | 217 | case KVM_CAP_PPC_UNSET_IRQ: |
217 | case KVM_CAP_PPC_IRQ_LEVEL: | 218 | case KVM_CAP_PPC_IRQ_LEVEL: |
218 | case KVM_CAP_ENABLE_CAP: | 219 | case KVM_CAP_ENABLE_CAP: |
219 | r = 1; | 220 | r = 1; |
220 | break; | 221 | break; |
221 | #ifndef CONFIG_KVM_BOOK3S_64_HV | 222 | #ifndef CONFIG_KVM_BOOK3S_64_HV |
222 | case KVM_CAP_PPC_PAIRED_SINGLES: | 223 | case KVM_CAP_PPC_PAIRED_SINGLES: |
223 | case KVM_CAP_PPC_OSI: | 224 | case KVM_CAP_PPC_OSI: |
224 | case KVM_CAP_PPC_GET_PVINFO: | 225 | case KVM_CAP_PPC_GET_PVINFO: |
225 | #ifdef CONFIG_KVM_E500 | 226 | #ifdef CONFIG_KVM_E500 |
226 | case KVM_CAP_SW_TLB: | 227 | case KVM_CAP_SW_TLB: |
227 | #endif | 228 | #endif |
228 | r = 1; | 229 | r = 1; |
229 | break; | 230 | break; |
230 | case KVM_CAP_COALESCED_MMIO: | 231 | case KVM_CAP_COALESCED_MMIO: |
231 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 232 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
232 | break; | 233 | break; |
233 | #endif | 234 | #endif |
234 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 235 | #ifdef CONFIG_KVM_BOOK3S_64_HV |
235 | case KVM_CAP_SPAPR_TCE: | 236 | case KVM_CAP_SPAPR_TCE: |
236 | r = 1; | 237 | r = 1; |
237 | break; | 238 | break; |
238 | case KVM_CAP_PPC_SMT: | 239 | case KVM_CAP_PPC_SMT: |
239 | r = threads_per_core; | 240 | r = threads_per_core; |
240 | break; | 241 | break; |
241 | case KVM_CAP_PPC_RMA: | 242 | case KVM_CAP_PPC_RMA: |
242 | r = 1; | 243 | r = 1; |
243 | /* PPC970 requires an RMA */ | 244 | /* PPC970 requires an RMA */ |
244 | if (cpu_has_feature(CPU_FTR_ARCH_201)) | 245 | if (cpu_has_feature(CPU_FTR_ARCH_201)) |
245 | r = 2; | 246 | r = 2; |
246 | break; | 247 | break; |
247 | #endif | 248 | #endif |
248 | default: | 249 | default: |
249 | r = 0; | 250 | r = 0; |
250 | break; | 251 | break; |
251 | } | 252 | } |
252 | return r; | 253 | return r; |
253 | 254 | ||
254 | } | 255 | } |
255 | 256 | ||
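kvm_dev_ioctl_check_extension() backs the standard KVM_CHECK_EXTENSION ioctl, so each r value above is exactly what userspace sees when probing a capability: 0 for unsupported, 1 for supported, or a count (threads_per_core for KVM_CAP_PPC_SMT, the page offset for KVM_CAP_COALESCED_MMIO). A minimal userspace probe, assuming a PPC host whose linux/kvm.h carries these capability constants:

    /* Userspace counterpart of the capability switch above: probe a
     * capability through /dev/kvm (standard KVM API; error handling
     * kept minimal). */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0) {
            perror("open /dev/kvm");
            return 1;
        }

        /* Returns the r computed in kvm_dev_ioctl_check_extension(). */
        int r = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_IRQ_LEVEL);
        printf("KVM_CAP_PPC_IRQ_LEVEL -> %d\n", r);
        return 0;
    }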
256 | long kvm_arch_dev_ioctl(struct file *filp, | 257 | long kvm_arch_dev_ioctl(struct file *filp, |
257 | unsigned int ioctl, unsigned long arg) | 258 | unsigned int ioctl, unsigned long arg) |
258 | { | 259 | { |
259 | return -EINVAL; | 260 | return -EINVAL; |
260 | } | 261 | } |
261 | 262 | ||
262 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | 263 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
263 | struct kvm_memory_slot *memslot, | 264 | struct kvm_memory_slot *memslot, |
264 | struct kvm_memory_slot old, | 265 | struct kvm_memory_slot old, |
265 | struct kvm_userspace_memory_region *mem, | 266 | struct kvm_userspace_memory_region *mem, |
266 | int user_alloc) | 267 | int user_alloc) |
267 | { | 268 | { |
268 | return kvmppc_core_prepare_memory_region(kvm, mem); | 269 | return kvmppc_core_prepare_memory_region(kvm, mem); |
269 | } | 270 | } |
270 | 271 | ||
271 | void kvm_arch_commit_memory_region(struct kvm *kvm, | 272 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
272 | struct kvm_userspace_memory_region *mem, | 273 | struct kvm_userspace_memory_region *mem, |
273 | struct kvm_memory_slot old, | 274 | struct kvm_memory_slot old, |
274 | int user_alloc) | 275 | int user_alloc) |
275 | { | 276 | { |
276 | kvmppc_core_commit_memory_region(kvm, mem); | 277 | kvmppc_core_commit_memory_region(kvm, mem); |
277 | } | 278 | } |
278 | 279 | ||
279 | 280 | ||
280 | void kvm_arch_flush_shadow(struct kvm *kvm) | 281 | void kvm_arch_flush_shadow(struct kvm *kvm) |
281 | { | 282 | { |
282 | } | 283 | } |
283 | 284 | ||
284 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | 285 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) |
285 | { | 286 | { |
286 | struct kvm_vcpu *vcpu; | 287 | struct kvm_vcpu *vcpu; |
287 | vcpu = kvmppc_core_vcpu_create(kvm, id); | 288 | vcpu = kvmppc_core_vcpu_create(kvm, id); |
288 | vcpu->arch.wqp = &vcpu->wq; | 289 | vcpu->arch.wqp = &vcpu->wq; |
289 | if (!IS_ERR(vcpu)) | 290 | if (!IS_ERR(vcpu)) |
290 | kvmppc_create_vcpu_debugfs(vcpu, id); | 291 | kvmppc_create_vcpu_debugfs(vcpu, id); |
291 | return vcpu; | 292 | return vcpu; |
292 | } | 293 | } |
293 | 294 | ||
294 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | 295 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) |
295 | { | 296 | { |
296 | /* Make sure we're not using the vcpu anymore */ | 297 | /* Make sure we're not using the vcpu anymore */ |
297 | hrtimer_cancel(&vcpu->arch.dec_timer); | 298 | hrtimer_cancel(&vcpu->arch.dec_timer); |
298 | tasklet_kill(&vcpu->arch.tasklet); | 299 | tasklet_kill(&vcpu->arch.tasklet); |
299 | 300 | ||
300 | kvmppc_remove_vcpu_debugfs(vcpu); | 301 | kvmppc_remove_vcpu_debugfs(vcpu); |
301 | kvmppc_core_vcpu_free(vcpu); | 302 | kvmppc_core_vcpu_free(vcpu); |
302 | } | 303 | } |
303 | 304 | ||
304 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | 305 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) |
305 | { | 306 | { |
306 | kvm_arch_vcpu_free(vcpu); | 307 | kvm_arch_vcpu_free(vcpu); |
307 | } | 308 | } |
308 | 309 | ||
309 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 310 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
310 | { | 311 | { |
311 | return kvmppc_core_pending_dec(vcpu); | 312 | return kvmppc_core_pending_dec(vcpu); |
312 | } | 313 | } |
313 | 314 | ||
314 | static void kvmppc_decrementer_func(unsigned long data) | ||
315 | { | ||
316 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; | ||
317 | |||
318 | kvmppc_core_queue_dec(vcpu); | ||
319 | |||
320 | if (waitqueue_active(vcpu->arch.wqp)) { | ||
321 | wake_up_interruptible(vcpu->arch.wqp); | ||
322 | vcpu->stat.halt_wakeup++; | ||
323 | } | ||
324 | } | ||
325 | |||
326 | /* | 315 | /* |
327 | * low level hrtimer wake routine. Because this runs in hardirq context | 316 | * low level hrtimer wake routine. Because this runs in hardirq context |
328 | * we schedule a tasklet to do the real work. | 317 | * we schedule a tasklet to do the real work. |
329 | */ | 318 | */ |
330 | enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) | 319 | enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) |
331 | { | 320 | { |
332 | struct kvm_vcpu *vcpu; | 321 | struct kvm_vcpu *vcpu; |
333 | 322 | ||
334 | vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); | 323 | vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); |
335 | tasklet_schedule(&vcpu->arch.tasklet); | 324 | tasklet_schedule(&vcpu->arch.tasklet); |
336 | 325 | ||
337 | return HRTIMER_NORESTART; | 326 | return HRTIMER_NORESTART; |
338 | } | 327 | } |
339 | 328 | ||
340 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | 329 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) |
341 | { | 330 | { |
342 | hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); | 331 | hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); |
343 | tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu); | 332 | tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu); |
344 | vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; | 333 | vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; |
345 | vcpu->arch.dec_expires = ~(u64)0; | 334 | vcpu->arch.dec_expires = ~(u64)0; |
346 | 335 | ||
347 | #ifdef CONFIG_KVM_EXIT_TIMING | 336 | #ifdef CONFIG_KVM_EXIT_TIMING |
348 | mutex_init(&vcpu->arch.exit_timing_lock); | 337 | mutex_init(&vcpu->arch.exit_timing_lock); |
349 | #endif | 338 | #endif |
350 | 339 | ||
351 | return 0; | 340 | return 0; |
352 | } | 341 | } |
353 | 342 | ||
354 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | 343 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) |
355 | { | 344 | { |
356 | kvmppc_mmu_destroy(vcpu); | 345 | kvmppc_mmu_destroy(vcpu); |
357 | } | 346 | } |
358 | 347 | ||
359 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 348 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
360 | { | 349 | { |
361 | #ifdef CONFIG_BOOKE | 350 | #ifdef CONFIG_BOOKE |
362 | /* | 351 | /* |
363 | * vrsave (formerly usprg0) isn't used by Linux, but may | 352 | * vrsave (formerly usprg0) isn't used by Linux, but may |
364 | * be used by the guest. | 353 | * be used by the guest. |
365 | * | 354 | * |
366 | * On non-booke this is associated with Altivec and | 355 | * On non-booke this is associated with Altivec and |
367 | * is handled by code in book3s.c. | 356 | * is handled by code in book3s.c. |
368 | */ | 357 | */ |
369 | mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); | 358 | mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); |
370 | #endif | 359 | #endif |
371 | kvmppc_core_vcpu_load(vcpu, cpu); | 360 | kvmppc_core_vcpu_load(vcpu, cpu); |
372 | vcpu->cpu = smp_processor_id(); | 361 | vcpu->cpu = smp_processor_id(); |
373 | } | 362 | } |
374 | 363 | ||
375 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 364 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
376 | { | 365 | { |
377 | kvmppc_core_vcpu_put(vcpu); | 366 | kvmppc_core_vcpu_put(vcpu); |
378 | #ifdef CONFIG_BOOKE | 367 | #ifdef CONFIG_BOOKE |
379 | vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); | 368 | vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); |
380 | #endif | 369 | #endif |
381 | vcpu->cpu = -1; | 370 | vcpu->cpu = -1; |
382 | } | 371 | } |
383 | 372 | ||
384 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 373 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
385 | struct kvm_guest_debug *dbg) | 374 | struct kvm_guest_debug *dbg) |
386 | { | 375 | { |
387 | return -EINVAL; | 376 | return -EINVAL; |
388 | } | 377 | } |
389 | 378 | ||
390 | static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, | 379 | static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, |
391 | struct kvm_run *run) | 380 | struct kvm_run *run) |
392 | { | 381 | { |
393 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data); | 382 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data); |
394 | } | 383 | } |
395 | 384 | ||
396 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, | 385 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, |
397 | struct kvm_run *run) | 386 | struct kvm_run *run) |
398 | { | 387 | { |
399 | u64 uninitialized_var(gpr); | 388 | u64 uninitialized_var(gpr); |
400 | 389 | ||
401 | if (run->mmio.len > sizeof(gpr)) { | 390 | if (run->mmio.len > sizeof(gpr)) { |
402 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); | 391 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); |
403 | return; | 392 | return; |
404 | } | 393 | } |
405 | 394 | ||
406 | if (vcpu->arch.mmio_is_bigendian) { | 395 | if (vcpu->arch.mmio_is_bigendian) { |
407 | switch (run->mmio.len) { | 396 | switch (run->mmio.len) { |
408 | case 8: gpr = *(u64 *)run->mmio.data; break; | 397 | case 8: gpr = *(u64 *)run->mmio.data; break; |
409 | case 4: gpr = *(u32 *)run->mmio.data; break; | 398 | case 4: gpr = *(u32 *)run->mmio.data; break; |
410 | case 2: gpr = *(u16 *)run->mmio.data; break; | 399 | case 2: gpr = *(u16 *)run->mmio.data; break; |
411 | case 1: gpr = *(u8 *)run->mmio.data; break; | 400 | case 1: gpr = *(u8 *)run->mmio.data; break; |
412 | } | 401 | } |
413 | } else { | 402 | } else { |
414 | /* Convert BE data from userland back to LE. */ | 403 | /* Convert BE data from userland back to LE. */ |
415 | switch (run->mmio.len) { | 404 | switch (run->mmio.len) { |
416 | case 4: gpr = ld_le32((u32 *)run->mmio.data); break; | 405 | case 4: gpr = ld_le32((u32 *)run->mmio.data); break; |
417 | case 2: gpr = ld_le16((u16 *)run->mmio.data); break; | 406 | case 2: gpr = ld_le16((u16 *)run->mmio.data); break; |
418 | case 1: gpr = *(u8 *)run->mmio.data; break; | 407 | case 1: gpr = *(u8 *)run->mmio.data; break; |
419 | } | 408 | } |
420 | } | 409 | } |
421 | 410 | ||
422 | if (vcpu->arch.mmio_sign_extend) { | 411 | if (vcpu->arch.mmio_sign_extend) { |
423 | switch (run->mmio.len) { | 412 | switch (run->mmio.len) { |
424 | #ifdef CONFIG_PPC64 | 413 | #ifdef CONFIG_PPC64 |
425 | case 4: | 414 | case 4: |
426 | gpr = (s64)(s32)gpr; | 415 | gpr = (s64)(s32)gpr; |
427 | break; | 416 | break; |
428 | #endif | 417 | #endif |
429 | case 2: | 418 | case 2: |
430 | gpr = (s64)(s16)gpr; | 419 | gpr = (s64)(s16)gpr; |
431 | break; | 420 | break; |
432 | case 1: | 421 | case 1: |
433 | gpr = (s64)(s8)gpr; | 422 | gpr = (s64)(s8)gpr; |
434 | break; | 423 | break; |
435 | } | 424 | } |
436 | } | 425 | } |
437 | 426 | ||
438 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); | 427 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); |
439 | 428 | ||
440 | switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) { | 429 | switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) { |
441 | case KVM_REG_GPR: | 430 | case KVM_REG_GPR: |
442 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); | 431 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); |
443 | break; | 432 | break; |
444 | case KVM_REG_FPR: | 433 | case KVM_REG_FPR: |
445 | vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; | 434 | vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; |
446 | break; | 435 | break; |
447 | #ifdef CONFIG_PPC_BOOK3S | 436 | #ifdef CONFIG_PPC_BOOK3S |
448 | case KVM_REG_QPR: | 437 | case KVM_REG_QPR: |
449 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; | 438 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; |
450 | break; | 439 | break; |
451 | case KVM_REG_FQPR: | 440 | case KVM_REG_FQPR: |
452 | vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; | 441 | vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; |
453 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; | 442 | vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; |
454 | break; | 443 | break; |
455 | #endif | 444 | #endif |
456 | default: | 445 | default: |
457 | BUG(); | 446 | BUG(); |
458 | } | 447 | } |
459 | } | 448 | } |
460 | 449 | ||
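The sign-extension step in kvmppc_complete_mmio_load() is what lets the lha/lhax emulation paths (kvmppc_handle_loads() with mmio_sign_extend set) return negative values correctly. A standalone check of the cast chain:

    /* Standalone model of the sign-extension above: a 2-byte MMIO load
     * of 0xff80 must come back as -128 when emulating lha/lhax. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t gpr = 0xff80;          /* raw 16-bit data from the bus */
        gpr = (int64_t)(int16_t)gpr;    /* the case-2 cast chain above */
        /* prints 0xffffffffffffff80, i.e. -128 as a signed value */
        printf("0x%016llx\n", (unsigned long long)gpr);
        return 0;
    }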
461 | int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | 450 | int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
462 | unsigned int rt, unsigned int bytes, int is_bigendian) | 451 | unsigned int rt, unsigned int bytes, int is_bigendian) |
463 | { | 452 | { |
464 | if (bytes > sizeof(run->mmio.data)) { | 453 | if (bytes > sizeof(run->mmio.data)) { |
465 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, | 454 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, |
466 | run->mmio.len); | 455 | run->mmio.len); |
467 | } | 456 | } |
468 | 457 | ||
469 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; | 458 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; |
470 | run->mmio.len = bytes; | 459 | run->mmio.len = bytes; |
471 | run->mmio.is_write = 0; | 460 | run->mmio.is_write = 0; |
472 | 461 | ||
473 | vcpu->arch.io_gpr = rt; | 462 | vcpu->arch.io_gpr = rt; |
474 | vcpu->arch.mmio_is_bigendian = is_bigendian; | 463 | vcpu->arch.mmio_is_bigendian = is_bigendian; |
475 | vcpu->mmio_needed = 1; | 464 | vcpu->mmio_needed = 1; |
476 | vcpu->mmio_is_write = 0; | 465 | vcpu->mmio_is_write = 0; |
477 | vcpu->arch.mmio_sign_extend = 0; | 466 | vcpu->arch.mmio_sign_extend = 0; |
478 | 467 | ||
479 | return EMULATE_DO_MMIO; | 468 | return EMULATE_DO_MMIO; |
480 | } | 469 | } |
481 | 470 | ||
482 | /* Same as above, but sign extends */ | 471 | /* Same as above, but sign extends */ |
483 | int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, | 472 | int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, |
484 | unsigned int rt, unsigned int bytes, int is_bigendian) | 473 | unsigned int rt, unsigned int bytes, int is_bigendian) |
485 | { | 474 | { |
486 | int r; | 475 | int r; |
487 | 476 | ||
488 | r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); | 477 | r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); |
489 | vcpu->arch.mmio_sign_extend = 1; | 478 | vcpu->arch.mmio_sign_extend = 1; |
490 | 479 | ||
491 | return r; | 480 | return r; |
492 | } | 481 | } |
493 | 482 | ||
494 | int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | 483 | int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
495 | u64 val, unsigned int bytes, int is_bigendian) | 484 | u64 val, unsigned int bytes, int is_bigendian) |
496 | { | 485 | { |
497 | void *data = run->mmio.data; | 486 | void *data = run->mmio.data; |
498 | 487 | ||
499 | if (bytes > sizeof(run->mmio.data)) { | 488 | if (bytes > sizeof(run->mmio.data)) { |
500 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, | 489 | printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, |
501 | run->mmio.len); | 490 | run->mmio.len); |
502 | } | 491 | } |
503 | 492 | ||
504 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; | 493 | run->mmio.phys_addr = vcpu->arch.paddr_accessed; |
505 | run->mmio.len = bytes; | 494 | run->mmio.len = bytes; |
506 | run->mmio.is_write = 1; | 495 | run->mmio.is_write = 1; |
507 | vcpu->mmio_needed = 1; | 496 | vcpu->mmio_needed = 1; |
508 | vcpu->mmio_is_write = 1; | 497 | vcpu->mmio_is_write = 1; |
509 | 498 | ||
510 | /* Store the value at the lowest bytes in 'data'. */ | 499 | /* Store the value at the lowest bytes in 'data'. */ |
511 | if (is_bigendian) { | 500 | if (is_bigendian) { |
512 | switch (bytes) { | 501 | switch (bytes) { |
513 | case 8: *(u64 *)data = val; break; | 502 | case 8: *(u64 *)data = val; break; |
514 | case 4: *(u32 *)data = val; break; | 503 | case 4: *(u32 *)data = val; break; |
515 | case 2: *(u16 *)data = val; break; | 504 | case 2: *(u16 *)data = val; break; |
516 | case 1: *(u8 *)data = val; break; | 505 | case 1: *(u8 *)data = val; break; |
517 | } | 506 | } |
518 | } else { | 507 | } else { |
519 | /* Store LE value into 'data'. */ | 508 | /* Store LE value into 'data'. */ |
520 | switch (bytes) { | 509 | switch (bytes) { |
521 | case 4: st_le32(data, val); break; | 510 | case 4: st_le32(data, val); break; |
522 | case 2: st_le16(data, val); break; | 511 | case 2: st_le16(data, val); break; |
523 | case 1: *(u8 *)data = val; break; | 512 | case 1: *(u8 *)data = val; break; |
524 | } | 513 | } |
525 | } | 514 | } |
526 | 515 | ||
527 | return EMULATE_DO_MMIO; | 516 | return EMULATE_DO_MMIO; |
528 | } | 517 | } |
529 | 518 | ||
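kvmppc_handle_store() serializes the value into run->mmio.data "at the lowest bytes" in the order the guest put it on the bus: plain stores on the big-endian host produce big-endian bytes, while st_le32()/st_le16() byte-swap for the byte-reversed forms (stwbrx, sthbrx). A standalone illustration of the two layouts, assuming a big-endian host as on these PPC targets:

    /* Byte layouts produced by the store path above for a 4-byte store
     * of 0x11223344 (the "BE" line assumes a big-endian host). */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        uint32_t val = 0x11223344;
        uint8_t be[4];
        memcpy(be, &val, 4);             /* native store: 11 22 33 44 on BE */

        uint8_t le[4] = {                /* st_le32() equivalent */
            val & 0xff, (val >> 8) & 0xff,
            (val >> 16) & 0xff, (val >> 24) & 0xff
        };

        printf("BE: %02x %02x %02x %02x\n", be[0], be[1], be[2], be[3]);
        printf("LE: %02x %02x %02x %02x\n", le[0], le[1], le[2], le[3]);
        return 0;
    }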
530 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | 519 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
531 | { | 520 | { |
532 | int r; | 521 | int r; |
533 | sigset_t sigsaved; | 522 | sigset_t sigsaved; |
534 | 523 | ||
535 | if (vcpu->sigset_active) | 524 | if (vcpu->sigset_active) |
536 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | 525 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); |
537 | 526 | ||
538 | if (vcpu->mmio_needed) { | 527 | if (vcpu->mmio_needed) { |
539 | if (!vcpu->mmio_is_write) | 528 | if (!vcpu->mmio_is_write) |
540 | kvmppc_complete_mmio_load(vcpu, run); | 529 | kvmppc_complete_mmio_load(vcpu, run); |
541 | vcpu->mmio_needed = 0; | 530 | vcpu->mmio_needed = 0; |
542 | } else if (vcpu->arch.dcr_needed) { | 531 | } else if (vcpu->arch.dcr_needed) { |
543 | if (!vcpu->arch.dcr_is_write) | 532 | if (!vcpu->arch.dcr_is_write) |
544 | kvmppc_complete_dcr_load(vcpu, run); | 533 | kvmppc_complete_dcr_load(vcpu, run); |
545 | vcpu->arch.dcr_needed = 0; | 534 | vcpu->arch.dcr_needed = 0; |
546 | } else if (vcpu->arch.osi_needed) { | 535 | } else if (vcpu->arch.osi_needed) { |
547 | u64 *gprs = run->osi.gprs; | 536 | u64 *gprs = run->osi.gprs; |
548 | int i; | 537 | int i; |
549 | 538 | ||
550 | for (i = 0; i < 32; i++) | 539 | for (i = 0; i < 32; i++) |
551 | kvmppc_set_gpr(vcpu, i, gprs[i]); | 540 | kvmppc_set_gpr(vcpu, i, gprs[i]); |
552 | vcpu->arch.osi_needed = 0; | 541 | vcpu->arch.osi_needed = 0; |
553 | } else if (vcpu->arch.hcall_needed) { | 542 | } else if (vcpu->arch.hcall_needed) { |
554 | int i; | 543 | int i; |
555 | 544 | ||
556 | kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); | 545 | kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); |
557 | for (i = 0; i < 9; ++i) | 546 | for (i = 0; i < 9; ++i) |
558 | kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); | 547 | kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); |
559 | vcpu->arch.hcall_needed = 0; | 548 | vcpu->arch.hcall_needed = 0; |
560 | } | 549 | } |
561 | 550 | ||
562 | r = kvmppc_vcpu_run(run, vcpu); | 551 | r = kvmppc_vcpu_run(run, vcpu); |
563 | 552 | ||
564 | if (vcpu->sigset_active) | 553 | if (vcpu->sigset_active) |
565 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | 554 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); |
566 | 555 | ||
567 | return r; | 556 | return r; |
568 | } | 557 | } |
569 | 558 | ||
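[Editor's note] kvm_arch_vcpu_ioctl_run() assumes userspace services the previous exit before calling KVM_RUN again. A hedged userspace sketch of that loop follows, where device_emulate() is a hypothetical stand-in for the VMM's device bus:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical VMM hook: emulate one device access. */
extern void device_emulate(__u64 addr, void *data, __u32 len, int is_write);

static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			break;
		if (run->exit_reason != KVM_EXIT_MMIO)
			break;
		/* For reads, this fills run->mmio.data; the next KVM_RUN
		 * lets kvmppc_complete_mmio_load() finish the instruction. */
		device_emulate(run->mmio.phys_addr, run->mmio.data,
			       run->mmio.len, run->mmio.is_write);
	}
}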
559 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu) | ||
560 | { | ||
561 | if (waitqueue_active(vcpu->arch.wqp)) { | ||
562 | wake_up_interruptible(vcpu->arch.wqp); | ||
563 | vcpu->stat.halt_wakeup++; | ||
564 | } else if (vcpu->cpu != -1) { | ||
565 | smp_send_reschedule(vcpu->cpu); | ||
566 | } | ||
567 | } | ||
568 | |||
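[Editor's note] The new kvm_vcpu_kick() covers both wakeup paths: a halted vcpu sleeping on its wait queue, and a running vcpu that needs an IPI to re-check pending exceptions. Below is a hedged sketch of a caller from this series' decrementer path, condensing logic the patch actually splits across the TSR update helpers; TSR_DIS, TCR_DIE, and the arch.tcr/arch.tsr fields are assumptions drawn from the commit description.

/* Sketch only: on decrementer expiry, latch the status bit
 * unconditionally, deliver only if enabled, then kick the vcpu. */
static void sketch_decrementer_expired(struct kvm_vcpu *vcpu)
{
	vcpu->arch.tsr |= TSR_DIS;		/* ticks regardless of TCR */
	if (vcpu->arch.tcr & TCR_DIE)
		kvmppc_core_queue_dec(vcpu);	/* level-triggered delivery */
	kvm_vcpu_kick(vcpu);
}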
570 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) | 569 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) |
571 | { | 570 | { |
572 | if (irq->irq == KVM_INTERRUPT_UNSET) { | 571 | if (irq->irq == KVM_INTERRUPT_UNSET) { |
573 | kvmppc_core_dequeue_external(vcpu, irq); | 572 | kvmppc_core_dequeue_external(vcpu, irq); |
574 | return 0; | 573 | return 0; |
575 | } | 574 | } |
576 | 575 | ||
577 | kvmppc_core_queue_external(vcpu, irq); | 576 | kvmppc_core_queue_external(vcpu, irq); |
578 | 577 | kvm_vcpu_kick(vcpu); | |
579 | if (waitqueue_active(vcpu->arch.wqp)) { | ||
580 | wake_up_interruptible(vcpu->arch.wqp); | ||
581 | vcpu->stat.halt_wakeup++; | ||
582 | } else if (vcpu->cpu != -1) { | ||
583 | smp_send_reschedule(vcpu->cpu); | ||
584 | } | ||
585 | 578 | ||
586 | return 0; | 579 | return 0; |
587 | } | 580 | } |
588 | 581 | ||
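[Editor's note] From userspace, this one handler implements both edges of a level-triggered external interrupt line; KVM_INTERRUPT_UNSET takes the dequeue path above. A minimal sketch:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Raise or lower the external interrupt line of one vcpu. */
static int set_irq_line(int vcpu_fd, int active)
{
	struct kvm_interrupt irq = {
		.irq = active ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET,
	};

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}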
589 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, | 582 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
590 | struct kvm_enable_cap *cap) | 583 | struct kvm_enable_cap *cap) |
591 | { | 584 | { |
592 | int r; | 585 | int r; |
593 | 586 | ||
594 | if (cap->flags) | 587 | if (cap->flags) |
595 | return -EINVAL; | 588 | return -EINVAL; |
596 | 589 | ||
597 | switch (cap->cap) { | 590 | switch (cap->cap) { |
598 | case KVM_CAP_PPC_OSI: | 591 | case KVM_CAP_PPC_OSI: |
599 | r = 0; | 592 | r = 0; |
600 | vcpu->arch.osi_enabled = true; | 593 | vcpu->arch.osi_enabled = true; |
601 | break; | 594 | break; |
602 | case KVM_CAP_PPC_PAPR: | 595 | case KVM_CAP_PPC_PAPR: |
603 | r = 0; | 596 | r = 0; |
604 | vcpu->arch.papr_enabled = true; | 597 | vcpu->arch.papr_enabled = true; |
605 | break; | 598 | break; |
606 | #ifdef CONFIG_KVM_E500 | 599 | #ifdef CONFIG_KVM_E500 |
607 | case KVM_CAP_SW_TLB: { | 600 | case KVM_CAP_SW_TLB: { |
608 | struct kvm_config_tlb cfg; | 601 | struct kvm_config_tlb cfg; |
609 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; | 602 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; |
610 | 603 | ||
611 | r = -EFAULT; | 604 | r = -EFAULT; |
612 | if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) | 605 | if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) |
613 | break; | 606 | break; |
614 | 607 | ||
615 | r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); | 608 | r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); |
616 | break; | 609 | break; |
617 | } | 610 | } |
618 | #endif | 611 | #endif |
619 | default: | 612 | default: |
620 | r = -EINVAL; | 613 | r = -EINVAL; |
621 | break; | 614 | break; |
622 | } | 615 | } |
623 | 616 | ||
624 | if (!r) | 617 | if (!r) |
625 | r = kvmppc_sanity_check(vcpu); | 618 | r = kvmppc_sanity_check(vcpu); |
626 | 619 | ||
627 | return r; | 620 | return r; |
628 | } | 621 | } |
629 | 622 | ||
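[Editor's note] Capabilities that default off, such as KVM_CAP_PPC_OSI, are switched on per vcpu through this handler. A minimal userspace sketch; cap.flags must be zero, and args are only meaningful for capabilities that define them (KVM_CAP_SW_TLB passes a struct kvm_config_tlb pointer in args[0]):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_osi(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_PPC_OSI;
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
}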
630 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 623 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
631 | struct kvm_mp_state *mp_state) | 624 | struct kvm_mp_state *mp_state) |
632 | { | 625 | { |
633 | return -EINVAL; | 626 | return -EINVAL; |
634 | } | 627 | } |
635 | 628 | ||
636 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | 629 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
637 | struct kvm_mp_state *mp_state) | 630 | struct kvm_mp_state *mp_state) |
638 | { | 631 | { |
639 | return -EINVAL; | 632 | return -EINVAL; |
640 | } | 633 | } |
641 | 634 | ||
642 | long kvm_arch_vcpu_ioctl(struct file *filp, | 635 | long kvm_arch_vcpu_ioctl(struct file *filp, |
643 | unsigned int ioctl, unsigned long arg) | 636 | unsigned int ioctl, unsigned long arg) |
644 | { | 637 | { |
645 | struct kvm_vcpu *vcpu = filp->private_data; | 638 | struct kvm_vcpu *vcpu = filp->private_data; |
646 | void __user *argp = (void __user *)arg; | 639 | void __user *argp = (void __user *)arg; |
647 | long r; | 640 | long r; |
648 | 641 | ||
649 | switch (ioctl) { | 642 | switch (ioctl) { |
650 | case KVM_INTERRUPT: { | 643 | case KVM_INTERRUPT: { |
651 | struct kvm_interrupt irq; | 644 | struct kvm_interrupt irq; |
652 | r = -EFAULT; | 645 | r = -EFAULT; |
653 | if (copy_from_user(&irq, argp, sizeof(irq))) | 646 | if (copy_from_user(&irq, argp, sizeof(irq))) |
654 | goto out; | 647 | goto out; |
655 | r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); | 648 | r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); |
656 | goto out; | 649 | goto out; |
657 | } | 650 | } |
658 | 651 | ||
659 | case KVM_ENABLE_CAP: | 652 | case KVM_ENABLE_CAP: |
660 | { | 653 | { |
661 | struct kvm_enable_cap cap; | 654 | struct kvm_enable_cap cap; |
662 | r = -EFAULT; | 655 | r = -EFAULT; |
663 | if (copy_from_user(&cap, argp, sizeof(cap))) | 656 | if (copy_from_user(&cap, argp, sizeof(cap))) |
664 | goto out; | 657 | goto out; |
665 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | 658 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); |
666 | break; | 659 | break; |
667 | } | 660 | } |
668 | 661 | ||
669 | #ifdef CONFIG_KVM_E500 | 662 | #ifdef CONFIG_KVM_E500 |
670 | case KVM_DIRTY_TLB: { | 663 | case KVM_DIRTY_TLB: { |
671 | struct kvm_dirty_tlb dirty; | 664 | struct kvm_dirty_tlb dirty; |
672 | r = -EFAULT; | 665 | r = -EFAULT; |
673 | if (copy_from_user(&dirty, argp, sizeof(dirty))) | 666 | if (copy_from_user(&dirty, argp, sizeof(dirty))) |
674 | goto out; | 667 | goto out; |
675 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); | 668 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); |
676 | break; | 669 | break; |
677 | } | 670 | } |
678 | #endif | 671 | #endif |
679 | 672 | ||
680 | default: | 673 | default: |
681 | r = -EINVAL; | 674 | r = -EINVAL; |
682 | } | 675 | } |
683 | 676 | ||
684 | out: | 677 | out: |
685 | return r; | 678 | return r; |
686 | } | 679 | } |
687 | 680 | ||
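[Editor's note] On e500, KVM_DIRTY_TLB pairs with the KVM_CAP_SW_TLB configuration above: after userspace rewrites entries in the shared TLB array, it passes a bitmap of the modified entries so KVM can resynchronize. A hedged sketch of the caller:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int flush_dirty_tlb(int vcpu_fd, __u64 *bitmap, __u32 num_dirty)
{
	struct kvm_dirty_tlb dirty = {
		.bitmap = (__u64)(uintptr_t)bitmap,	/* userspace pointer */
		.num_dirty = num_dirty,
	};

	return ioctl(vcpu_fd, KVM_DIRTY_TLB, &dirty);
}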
688 | int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) | 681 | int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
689 | { | 682 | { |
690 | return VM_FAULT_SIGBUS; | 683 | return VM_FAULT_SIGBUS; |
691 | } | 684 | } |
692 | 685 | ||
693 | static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) | 686 | static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) |
694 | { | 687 | { |
695 | u32 inst_lis = 0x3c000000; | 688 | u32 inst_lis = 0x3c000000; |
696 | u32 inst_ori = 0x60000000; | 689 | u32 inst_ori = 0x60000000; |
697 | u32 inst_nop = 0x60000000; | 690 | u32 inst_nop = 0x60000000; |
698 | u32 inst_sc = 0x44000002; | 691 | u32 inst_sc = 0x44000002; |
699 | u32 inst_imm_mask = 0xffff; | 692 | u32 inst_imm_mask = 0xffff; |
700 | 693 | ||
701 | /* | 694 | /* |
702 | * The hypercall to get into KVM from within guest context is as | 695 | * The hypercall to get into KVM from within guest context is as |
703 | * follows: | 696 | * follows: |
704 | * | 697 | * |
705 | * lis r0, KVM_SC_MAGIC_R0@h | 698 | * lis r0, KVM_SC_MAGIC_R0@h |
706 | * ori r0, r0, KVM_SC_MAGIC_R0@l | 699 | * ori r0, r0, KVM_SC_MAGIC_R0@l |
707 | * sc | 700 | * sc |
708 | * nop | 701 | * nop |
709 | */ | 702 | */ |
710 | pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask); | 703 | pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask); |
711 | pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask); | 704 | pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask); |
712 | pvinfo->hcall[2] = inst_sc; | 705 | pvinfo->hcall[2] = inst_sc; |
713 | pvinfo->hcall[3] = inst_nop; | 706 | pvinfo->hcall[3] = inst_nop; |
714 | 707 | ||
715 | return 0; | 708 | return 0; |
716 | } | 709 | } |
717 | 710 | ||
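[Editor's note] Userspace retrieves this sequence with the KVM_PPC_GET_PVINFO vm ioctl handled below and patches the four instructions into a guest page of its choosing. A minimal sketch; where the sequence lands is the VMM's business:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int install_hypercall(int vm_fd, void *guest_page)
{
	struct kvm_ppc_pvinfo pvinfo;

	if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) < 0)
		return -1;
	/* hcall[] holds lis/ori/sc/nop as encoded above. */
	memcpy(guest_page, pvinfo.hcall, sizeof(pvinfo.hcall));
	return 0;
}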
718 | long kvm_arch_vm_ioctl(struct file *filp, | 711 | long kvm_arch_vm_ioctl(struct file *filp, |
719 | unsigned int ioctl, unsigned long arg) | 712 | unsigned int ioctl, unsigned long arg) |
720 | { | 713 | { |
721 | void __user *argp = (void __user *)arg; | 714 | void __user *argp = (void __user *)arg; |
722 | long r; | 715 | long r; |
723 | 716 | ||
724 | switch (ioctl) { | 717 | switch (ioctl) { |
725 | case KVM_PPC_GET_PVINFO: { | 718 | case KVM_PPC_GET_PVINFO: { |
726 | struct kvm_ppc_pvinfo pvinfo; | 719 | struct kvm_ppc_pvinfo pvinfo; |
727 | memset(&pvinfo, 0, sizeof(pvinfo)); | 720 | memset(&pvinfo, 0, sizeof(pvinfo)); |
728 | r = kvm_vm_ioctl_get_pvinfo(&pvinfo); | 721 | r = kvm_vm_ioctl_get_pvinfo(&pvinfo); |
729 | if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { | 722 | if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { |
730 | r = -EFAULT; | 723 | r = -EFAULT; |
731 | goto out; | 724 | goto out; |
732 | } | 725 | } |
733 | 726 | ||
734 | break; | 727 | break; |
735 | } | 728 | } |
736 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 729 | #ifdef CONFIG_KVM_BOOK3S_64_HV |
737 | case KVM_CREATE_SPAPR_TCE: { | 730 | case KVM_CREATE_SPAPR_TCE: { |
738 | struct kvm_create_spapr_tce create_tce; | 731 | struct kvm_create_spapr_tce create_tce; |
739 | struct kvm *kvm = filp->private_data; | 732 | struct kvm *kvm = filp->private_data; |
740 | 733 | ||
741 | r = -EFAULT; | 734 | r = -EFAULT; |
742 | if (copy_from_user(&create_tce, argp, sizeof(create_tce))) | 735 | if (copy_from_user(&create_tce, argp, sizeof(create_tce))) |
743 | goto out; | 736 | goto out; |
744 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); | 737 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); |
745 | goto out; | 738 | goto out; |
746 | } | 739 | } |
747 | 740 | ||
748 | case KVM_ALLOCATE_RMA: { | 741 | case KVM_ALLOCATE_RMA: { |
749 | struct kvm *kvm = filp->private_data; | 742 | struct kvm *kvm = filp->private_data; |
750 | struct kvm_allocate_rma rma; | 743 | struct kvm_allocate_rma rma; |
751 | 744 | ||
752 | r = kvm_vm_ioctl_allocate_rma(kvm, &rma); | 745 | r = kvm_vm_ioctl_allocate_rma(kvm, &rma); |
753 | if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma))) | 746 | if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma))) |
754 | r = -EFAULT; | 747 | r = -EFAULT; |
755 | break; | 748 | break; |
756 | } | 749 | } |
757 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ | 750 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ |
758 | 751 | ||
759 | default: | 752 | default: |
760 | r = -ENOTTY; | 753 | r = -ENOTTY; |
761 | } | 754 | } |
762 | 755 | ||
763 | out: | 756 | out: |
764 | return r; | 757 | return r; |