Commit 02512b2bd63385d1f34f6956860dedbfc9ac20d7
Exists in ti-lsk-linux-4.1.y and in 10 other branches.
Merge tag 'kvm-arm-fixes-3.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into kvm-master

Second round of fixes for KVM/ARM for 3.19. Fixes memory corruption issues on APM platforms and swapping issues on DMA-coherent systems.
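The memory-corruption half of the fix replaces per-CPU dcache bookkeeping with trapping: a guest set/way cache operation now triggers one flush of the whole stage-2 range and sets HCR.TVM so KVM notices when the guest turns its caches back on. Only the header plumbing is visible in this excerpt (the new vcpu_get_hcr()/vcpu_set_hcr() accessors in kvm_emulate.h, the removal of the last_pcpu/require_dcache_flush fields in kvm_host.h, and the kvm_set_way_flush()/kvm_toggle_cache() declarations in kvm_mmu.h). A simplified sketch of how those pieces fit together, pieced together from the declarations below -- not the literal arch/arm/kvm/mmu.c code, whose diff is not reproduced in this excerpt:

/* Sketch only: the set/way trapping state machine, simplified. */
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
        unsigned long hcr = vcpu_get_hcr(vcpu);

        /*
         * On the first trapped set/way op, clean the whole stage-2
         * range once, then keep trapping VM register writes (HCR_TVM)
         * so we notice when the caches get switched back on.
         */
        if (!(hcr & HCR_TVM)) {
                stage2_flush_vm(vcpu->kvm);  /* becomes internal to mmu.c */
                vcpu_set_hcr(vcpu, hcr | HCR_TVM);
        }
}

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
        bool now_enabled = vcpu_has_cache_enabled(vcpu);

        /* Flush again when the cache state actually flips... */
        if (now_enabled != was_enabled)
                stage2_flush_vm(vcpu->kvm);

        /* ...and stop trapping once the caches are back on. */
        if (now_enabled)
                vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
}

The swapping half is the switch from flushing guest pages through their user-space mapping (hva) to flushing through a transient kernel mapping of each pfn; see __coherent_cache_guest_page() and the new __kvm_flush_dcache_*() helpers in kvm_mmu.h below.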
Showing 14 changed files (inline diff: added lines prefixed with +, removed lines with -; unmarked lines are context):
- arch/arm/include/asm/kvm_emulate.h
- arch/arm/include/asm/kvm_host.h
- arch/arm/include/asm/kvm_mmu.h
- arch/arm/kvm/arm.c
- arch/arm/kvm/coproc.c
- arch/arm/kvm/coproc.h
- arch/arm/kvm/coproc_a15.c
- arch/arm/kvm/coproc_a7.c
- arch/arm/kvm/mmu.c
- arch/arm/kvm/trace.h
- arch/arm64/include/asm/kvm_emulate.h
- arch/arm64/include/asm/kvm_host.h
- arch/arm64/include/asm/kvm_mmu.h
- arch/arm64/kvm/sys_regs.c
arch/arm/include/asm/kvm_emulate.h
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_EMULATE_H__
#define __ARM_KVM_EMULATE_H__

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>

unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);

bool kvm_condition_valid(struct kvm_vcpu *vcpu);
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr = HCR_GUEST_MASK;
}

+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+        return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+        vcpu->arch.hcr = hcr;
+}
+
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
        return 1;
}

static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
        return &vcpu->arch.regs.usr_regs.ARM_pc;
}

static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
{
        return &vcpu->arch.regs.usr_regs.ARM_cpsr;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_T_BIT;
}

static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
{
        unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
        return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
}

static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
        unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
        return cpsr_mode > USR_MODE;
}

static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.hsr;
}

static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.hxfar;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
        return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}

static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.hyp_pc;
}

static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}

static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
}

static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}

static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
}

static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}

/* Get Access Size from a data abort */
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
        switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
        case 0:
                return 1;
        case 1:
                return 2;
        case 2:
                return 4;
        default:
                kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
                return -EFAULT;
        }
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}

static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}

static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}

static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}

static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cp15[c0_MPIDR];
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_E_BIT;
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return be16_to_cpu(data & 0xffff);
                default:
                        return be32_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return le16_to_cpu(data & 0xffff);
                default:
                        return le32_to_cpu(data);
                }
        }
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_be16(data & 0xffff);
                default:
                        return cpu_to_be32(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_le16(data & 0xffff);
                default:
                        return cpu_to_le32(data);
                }
        }
}

#endif /* __ARM_KVM_EMULATE_H__ */
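Aside (not part of this change): the kvm_vcpu_dabt_*() and vcpu_data_*() accessors above are the building blocks of the MMIO emulation path. A hypothetical decode of a trapped guest store, just to show how they compose; decode_mmio_store() is an invented name, not a function in this tree:

/* Illustration only: decode a trapped guest store using the accessors above. */
static int decode_mmio_store(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
        unsigned long data;
        int rd, len;

        if (!kvm_vcpu_dabt_isvalid(vcpu))       /* no usable syndrome */
                return -EFAULT;
        if (!kvm_vcpu_dabt_iswrite(vcpu))       /* only stores handled here */
                return -EINVAL;

        len = kvm_vcpu_dabt_get_as(vcpu);       /* access size: 1, 2 or 4 */
        if (len < 0)
                return len;

        rd = kvm_vcpu_dabt_get_rd(vcpu);
        /* Fix up endianness so the host sees the value the guest stored. */
        data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rd), len);

        /* ... hand (fault_ipa, len, data) to the MMIO write emulation ... */
        return 0;
}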
arch/arm/include/asm/kvm_host.h
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__

#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>
#include <kvm/arm_arch_timer.h>

#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
#else
#define KVM_MAX_VCPUS 0
#endif

#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG

#define KVM_VCPU_MAX_FEATURES 2

#include <kvm/arm_vgic.h>

u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);

struct kvm_arch {
        /* VTTBR value associated with below pgd and vmid */
        u64 vttbr;

        /* Timer */
        struct arch_timer_kvm timer;

        /*
         * Anything that is not used directly from assembly code goes
         * here.
         */

        /* The VMID generation used for the virt. memory system */
        u64 vmid_gen;
        u32 vmid;

        /* Stage-2 page table */
        pgd_t *pgd;

        /* Interrupt controller */
        struct vgic_dist vgic;
};

#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};

struct kvm_vcpu_fault_info {
        u32 hsr;        /* Hyp Syndrome Register */
        u32 hxfar;      /* Hyp Data/Inst. Fault Address Register */
        u32 hpfar;      /* Hyp IPA Fault Address Register */
        u32 hyp_pc;     /* PC when exception was taken from Hyp mode */
};

typedef struct vfp_hard_struct kvm_cpu_context_t;

struct kvm_vcpu_arch {
        struct kvm_regs regs;

        int target;     /* Processor target */
        DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

        /* System control coprocessor (cp15) */
        u32 cp15[NR_CP15_REGS];

        /* The CPU type we expose to the VM */
        u32 midr;

        /* HYP trapping configuration */
        u32 hcr;

        /* Interrupt related fields */
        u32 irq_lines;  /* IRQ and FIQ levels */

        /* Exception Information */
        struct kvm_vcpu_fault_info fault;

        /* Floating point registers (VFP and Advanced SIMD/NEON) */
        struct vfp_hard_struct vfp_guest;

        /* Host FP context */
        kvm_cpu_context_t *host_cpu_context;

        /* VGIC state */
        struct vgic_cpu vgic_cpu;
        struct arch_timer_cpu timer_cpu;

        /*
         * Anything that is not used directly from assembly code goes
         * here.
         */
-        /* dcache set/way operation pending */
-        int last_pcpu;
-        cpumask_t require_dcache_flush;

        /* Don't run the guest on this vcpu */
        bool pause;

        /* IO related fields */
        struct kvm_decode mmio_decode;

        /* Cache some mmu pages needed inside spinlock regions */
        struct kvm_mmu_memory_cache mmu_page_cache;

        /* Detect first run of a vcpu */
        bool has_run_once;
};

struct kvm_vm_stat {
        u32 remote_tlb_flush;
};

struct kvm_vcpu_stat {
        u32 halt_wakeup;
};

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
u64 kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);

/* We do not have shadow page tables, hence the empty hooks */
static inline int kvm_age_hva(struct kvm *kvm, unsigned long start,
                              unsigned long end)
{
        return 0;
}

static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return 0;
}

static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
                                                         unsigned long address)
{
}

struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                int exception_index);

static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
                                       phys_addr_t pgd_ptr,
                                       unsigned long hyp_stack_ptr,
                                       unsigned long vector_ptr)
{
        /*
         * Call initialization code, and switch to the full blown HYP
         * code. The init code doesn't need to preserve these
         * registers as r0-r3 are already callee saved according to
         * the AAPCS.
         * Note that we slightly misuse the prototype by casting the
         * stack pointer to a void *.
         *
         * We don't have enough registers to perform the full init in
         * one go. Install the boot PGD first, and then install the
         * runtime PGD, stack pointer and vectors. The PGDs are always
         * passed as the third argument, in order to be passed into
         * r2-r3 to the init code (yes, this is compliant with the
         * PCS!).
         */

        kvm_call_hyp(NULL, 0, boot_pgd_ptr);

        kvm_call_hyp((void *)hyp_stack_ptr, vector_ptr, pgd_ptr);
}

static inline int kvm_arch_dev_ioctl_check_extension(long ext)
{
        return 0;
}

static inline void vgic_arch_setup(const struct vgic_params *vgic)
{
        BUG_ON(vgic->type != VGIC_V2);
}

int kvm_perf_init(void);
int kvm_perf_teardown(void);

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

#endif /* __ARM_KVM_HOST_H__ */
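For contrast, the scheme the removed last_pcpu/require_dcache_flush fields supported worked roughly as follows: each trapped set/way op marked the other physical CPUs as needing a flush, and the next vcpu_load() on such a CPU drained the whole data cache. A rough reconstruction of the dropped approach, from memory and for illustration only (this code is no longer in the tree, and this handling proved insufficient, hence the corruption mentioned in the merge message):

/* Reconstruction of the removed migration-time flush, for contrast only. */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        vcpu->cpu = cpu;

        /*
         * A set/way operation on another physical CPU left a flush
         * pending for this one; drain the data cache before the
         * guest runs here.
         */
        if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
                flush_cache_all();
}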
arch/arm/include/asm/kvm_mmu.h
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define HYP_PAGE_OFFSET_MASK    UL(~0)
#define HYP_PAGE_OFFSET         PAGE_OFFSET
#define KERN_TO_HYP(kva)        (kva)

/*
 * Our virtual mapping for the boot-time MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the vectors
 * page, where no kernel data will ever be shared with HYP.
 */
#define TRAMPOLINE_VA           UL(CONFIG_VECTORS_BASE)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
 */
#define KVM_MMU_CACHE_MIN_PAGES 2

#ifndef __ASSEMBLY__

+#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
        *pmd = new_pmd;
        flush_pmd_entry(pmd);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
        *pte = new_pte;
        /*
         * flush_pmd_entry just takes a void pointer and cleans the necessary
         * cache entries, so we can reuse the function for ptes.
         */
        flush_pmd_entry(pte);
}

static inline void kvm_clean_pgd(pgd_t *pgd)
{
        clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}

static inline void kvm_clean_pmd(pmd_t *pmd)
{
        clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
}

static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
        clean_pmd_entry(pmd);
}

static inline void kvm_clean_pte(pte_t *pte)
{
        clean_pte_table(pte);
}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
        pte_val(*pte) |= L_PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
        pmd_val(*pmd) |= L_PMD_S2_RDWR;
}

/* Open coded p*d_addr_end that can deal with 64bit addresses */
#define kvm_pgd_addr_end(addr, end)                                     \
({      u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;            \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);              \
})

#define kvm_pud_addr_end(addr, end)     (end)

#define kvm_pmd_addr_end(addr, end)                                     \
({      u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;                \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);              \
})

static inline bool kvm_page_empty(void *ptr)
{
        struct page *ptr_page = virt_to_page(ptr);
        return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) (0)

#define KVM_PREALLOC_LEVEL      0

static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
{
        return 0;
}

static inline void kvm_free_hwpgd(struct kvm *kvm) { }

static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
        return kvm->arch.pgd;
}

struct kvm;

#define kvm_flush_dcache_to_poc(a, l)   __cpuc_flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
        return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}

-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
                                               unsigned long size,
                                               bool ipa_uncached)
{
-        if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-                kvm_flush_dcache_to_poc((void *)hva, size);
-
        /*
         * If we are going to insert an instruction page and the icache is
         * either VIPT or PIPT, there is a potential problem where the host
         * (or another VM) may have used the same page as this guest, and we
         * read incorrect data from the icache. If we're using a PIPT cache,
         * we can invalidate just that page, but if we are using a VIPT cache
         * we need to invalidate the entire icache - damn shame - as written
         * in the ARM ARM (DDI 0406C.b - Page B3-1393).
         *
         * VIVT caches are tagged using both the ASID and the VMID and doesn't
         * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+         *
+         * We need to do this through a kernel mapping (using the
+         * user-space mapping has proved to be the wrong
+         * solution). For that, we need to kmap one page at a time,
+         * and iterate over the range.
         */
-        if (icache_is_pipt()) {
-                __cpuc_coherent_user_range(hva, hva + size);
-        } else if (!icache_is_vivt_asid_tagged()) {
+
+        bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+        VM_BUG_ON(size & PAGE_MASK);
+
+        if (!need_flush && !icache_is_pipt())
+                goto vipt_cache;
+
+        while (size) {
+                void *va = kmap_atomic_pfn(pfn);
+
+                if (need_flush)
+                        kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+                if (icache_is_pipt())
+                        __cpuc_coherent_user_range((unsigned long)va,
+                                                   (unsigned long)va + PAGE_SIZE);
+
+                size -= PAGE_SIZE;
+                pfn++;
+
+                kunmap_atomic(va);
+        }
+
+vipt_cache:
+        if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
                /* any kind of VIPT cache */
                __flush_icache_all();
        }
}

+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+        void *va = kmap_atomic(pte_page(pte));
+
+        kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+        kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+        unsigned long size = PMD_SIZE;
+        pfn_t pfn = pmd_pfn(pmd);
+
+        while (size) {
+                void *va = kmap_atomic_pfn(pfn);
+
+                kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+                pfn++;
+                size -= PAGE_SIZE;
+
+                kunmap_atomic(va);
+        }
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
#define kvm_virt_to_phys(x)             virt_to_idmap((unsigned long)(x))

-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

#endif /* !__ASSEMBLY__ */
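The new __kvm_flush_dcache_*() helpers are intended for the stage-2 unmap path in arch/arm/kvm/mmu.c (whose diff is not reproduced in this excerpt): a guest page must be clean in RAM before it is swapped out, even if the guest ran with its caches off, and the cleaning has to go through a kernel mapping rather than the user-space one. A hypothetical unmap step, assuming a kvm_tlb_flush_vmid_ipa() helper on the mmu.c side; unmap_pte_sketch() is an invented name:

/* Hypothetical stage-2 PTE teardown, modelled on the unmap walk in mmu.c. */
static void unmap_pte_sketch(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
        pte_t old_pte = *pte;

        kvm_set_pte(pte, __pte(0));             /* clear the mapping first */
        kvm_tlb_flush_vmid_ipa(kvm, addr);      /* then invalidate the TLB */

        /*
         * Then clean the page through its kernel mapping so the data
         * lands in RAM before the page is handed back (device
         * mappings would be skipped in the real code).
         */
        __kvm_flush_dcache_pte(old_pte);
}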
arch/arm/kvm/arm.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | 2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | 3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License, version 2, as | 6 | * it under the terms of the GNU General Public License, version 2, as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/cpu.h> | 19 | #include <linux/cpu.h> |
20 | #include <linux/cpu_pm.h> | 20 | #include <linux/cpu_pm.h> |
21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/kvm_host.h> | 23 | #include <linux/kvm_host.h> |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/vmalloc.h> | 25 | #include <linux/vmalloc.h> |
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | #include <linux/mman.h> | 27 | #include <linux/mman.h> |
28 | #include <linux/sched.h> | 28 | #include <linux/sched.h> |
29 | #include <linux/kvm.h> | 29 | #include <linux/kvm.h> |
30 | #include <trace/events/kvm.h> | 30 | #include <trace/events/kvm.h> |
31 | 31 | ||
32 | #define CREATE_TRACE_POINTS | 32 | #define CREATE_TRACE_POINTS |
33 | #include "trace.h" | 33 | #include "trace.h" |
34 | 34 | ||
35 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
36 | #include <asm/ptrace.h> | 36 | #include <asm/ptrace.h> |
37 | #include <asm/mman.h> | 37 | #include <asm/mman.h> |
38 | #include <asm/tlbflush.h> | 38 | #include <asm/tlbflush.h> |
39 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
40 | #include <asm/virt.h> | 40 | #include <asm/virt.h> |
41 | #include <asm/kvm_arm.h> | 41 | #include <asm/kvm_arm.h> |
42 | #include <asm/kvm_asm.h> | 42 | #include <asm/kvm_asm.h> |
43 | #include <asm/kvm_mmu.h> | 43 | #include <asm/kvm_mmu.h> |
44 | #include <asm/kvm_emulate.h> | 44 | #include <asm/kvm_emulate.h> |
45 | #include <asm/kvm_coproc.h> | 45 | #include <asm/kvm_coproc.h> |
46 | #include <asm/kvm_psci.h> | 46 | #include <asm/kvm_psci.h> |
47 | 47 | ||
48 | #ifdef REQUIRES_VIRT | 48 | #ifdef REQUIRES_VIRT |
49 | __asm__(".arch_extension virt"); | 49 | __asm__(".arch_extension virt"); |
50 | #endif | 50 | #endif |
51 | 51 | ||
52 | static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); | 52 | static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); |
53 | static kvm_cpu_context_t __percpu *kvm_host_cpu_state; | 53 | static kvm_cpu_context_t __percpu *kvm_host_cpu_state; |
54 | static unsigned long hyp_default_vectors; | 54 | static unsigned long hyp_default_vectors; |
55 | 55 | ||
56 | /* Per-CPU variable containing the currently running vcpu. */ | 56 | /* Per-CPU variable containing the currently running vcpu. */ |
57 | static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); | 57 | static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu); |
58 | 58 | ||
59 | /* The VMID used in the VTTBR */ | 59 | /* The VMID used in the VTTBR */ |
60 | static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); | 60 | static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); |
61 | static u8 kvm_next_vmid; | 61 | static u8 kvm_next_vmid; |
62 | static DEFINE_SPINLOCK(kvm_vmid_lock); | 62 | static DEFINE_SPINLOCK(kvm_vmid_lock); |
63 | 63 | ||
64 | static bool vgic_present; | 64 | static bool vgic_present; |
65 | 65 | ||
66 | static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) | 66 | static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu) |
67 | { | 67 | { |
68 | BUG_ON(preemptible()); | 68 | BUG_ON(preemptible()); |
69 | __this_cpu_write(kvm_arm_running_vcpu, vcpu); | 69 | __this_cpu_write(kvm_arm_running_vcpu, vcpu); |
70 | } | 70 | } |
71 | 71 | ||
72 | /** | 72 | /** |
73 | * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU. | 73 | * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU. |
74 | * Must be called from non-preemptible context | 74 | * Must be called from non-preemptible context |
75 | */ | 75 | */ |
76 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void) | 76 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void) |
77 | { | 77 | { |
78 | BUG_ON(preemptible()); | 78 | BUG_ON(preemptible()); |
79 | return __this_cpu_read(kvm_arm_running_vcpu); | 79 | return __this_cpu_read(kvm_arm_running_vcpu); |
80 | } | 80 | } |
81 | 81 | ||
82 | /** | 82 | /** |
83 | * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus. | 83 | * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus. |
84 | */ | 84 | */ |
85 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) | 85 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) |
86 | { | 86 | { |
87 | return &kvm_arm_running_vcpu; | 87 | return &kvm_arm_running_vcpu; |
88 | } | 88 | } |
89 | 89 | ||
90 | int kvm_arch_hardware_enable(void) | 90 | int kvm_arch_hardware_enable(void) |
91 | { | 91 | { |
92 | return 0; | 92 | return 0; |
93 | } | 93 | } |
94 | 94 | ||
95 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) | 95 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) |
96 | { | 96 | { |
97 | return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; | 97 | return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; |
98 | } | 98 | } |
99 | 99 | ||
100 | int kvm_arch_hardware_setup(void) | 100 | int kvm_arch_hardware_setup(void) |
101 | { | 101 | { |
102 | return 0; | 102 | return 0; |
103 | } | 103 | } |
104 | 104 | ||
105 | void kvm_arch_check_processor_compat(void *rtn) | 105 | void kvm_arch_check_processor_compat(void *rtn) |
106 | { | 106 | { |
107 | *(int *)rtn = 0; | 107 | *(int *)rtn = 0; |
108 | } | 108 | } |
109 | 109 | ||
110 | 110 | ||
111 | /** | 111 | /** |
112 | * kvm_arch_init_vm - initializes a VM data structure | 112 | * kvm_arch_init_vm - initializes a VM data structure |
113 | * @kvm: pointer to the KVM struct | 113 | * @kvm: pointer to the KVM struct |
114 | */ | 114 | */ |
115 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | 115 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) |
116 | { | 116 | { |
117 | int ret = 0; | 117 | int ret = 0; |
118 | 118 | ||
119 | if (type) | 119 | if (type) |
120 | return -EINVAL; | 120 | return -EINVAL; |
121 | 121 | ||
122 | ret = kvm_alloc_stage2_pgd(kvm); | 122 | ret = kvm_alloc_stage2_pgd(kvm); |
123 | if (ret) | 123 | if (ret) |
124 | goto out_fail_alloc; | 124 | goto out_fail_alloc; |
125 | 125 | ||
126 | ret = create_hyp_mappings(kvm, kvm + 1); | 126 | ret = create_hyp_mappings(kvm, kvm + 1); |
127 | if (ret) | 127 | if (ret) |
128 | goto out_free_stage2_pgd; | 128 | goto out_free_stage2_pgd; |
129 | 129 | ||
130 | kvm_timer_init(kvm); | 130 | kvm_timer_init(kvm); |
131 | 131 | ||
132 | /* Mark the initial VMID generation invalid */ | 132 | /* Mark the initial VMID generation invalid */ |
133 | kvm->arch.vmid_gen = 0; | 133 | kvm->arch.vmid_gen = 0; |
134 | 134 | ||
135 | return ret; | 135 | return ret; |
136 | out_free_stage2_pgd: | 136 | out_free_stage2_pgd: |
137 | kvm_free_stage2_pgd(kvm); | 137 | kvm_free_stage2_pgd(kvm); |
138 | out_fail_alloc: | 138 | out_fail_alloc: |
139 | return ret; | 139 | return ret; |
140 | } | 140 | } |
141 | 141 | ||
142 | int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) | 142 | int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
143 | { | 143 | { |
144 | return VM_FAULT_SIGBUS; | 144 | return VM_FAULT_SIGBUS; |
145 | } | 145 | } |
146 | 146 | ||
147 | 147 | ||
148 | /** | 148 | /** |
149 | * kvm_arch_destroy_vm - destroy the VM data structure | 149 | * kvm_arch_destroy_vm - destroy the VM data structure |
150 | * @kvm: pointer to the KVM struct | 150 | * @kvm: pointer to the KVM struct |
151 | */ | 151 | */ |
152 | void kvm_arch_destroy_vm(struct kvm *kvm) | 152 | void kvm_arch_destroy_vm(struct kvm *kvm) |
153 | { | 153 | { |
154 | int i; | 154 | int i; |
155 | 155 | ||
156 | kvm_free_stage2_pgd(kvm); | 156 | kvm_free_stage2_pgd(kvm); |
157 | 157 | ||
158 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { | 158 | for (i = 0; i < KVM_MAX_VCPUS; ++i) { |
159 | if (kvm->vcpus[i]) { | 159 | if (kvm->vcpus[i]) { |
160 | kvm_arch_vcpu_free(kvm->vcpus[i]); | 160 | kvm_arch_vcpu_free(kvm->vcpus[i]); |
161 | kvm->vcpus[i] = NULL; | 161 | kvm->vcpus[i] = NULL; |
162 | } | 162 | } |
163 | } | 163 | } |
164 | 164 | ||
165 | kvm_vgic_destroy(kvm); | 165 | kvm_vgic_destroy(kvm); |
166 | } | 166 | } |
167 | 167 | ||
168 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | 168 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) |
169 | { | 169 | { |
170 | int r; | 170 | int r; |
171 | switch (ext) { | 171 | switch (ext) { |
172 | case KVM_CAP_IRQCHIP: | 172 | case KVM_CAP_IRQCHIP: |
173 | r = vgic_present; | 173 | r = vgic_present; |
174 | break; | 174 | break; |
175 | case KVM_CAP_DEVICE_CTRL: | 175 | case KVM_CAP_DEVICE_CTRL: |
176 | case KVM_CAP_USER_MEMORY: | 176 | case KVM_CAP_USER_MEMORY: |
177 | case KVM_CAP_SYNC_MMU: | 177 | case KVM_CAP_SYNC_MMU: |
178 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: | 178 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: |
179 | case KVM_CAP_ONE_REG: | 179 | case KVM_CAP_ONE_REG: |
180 | case KVM_CAP_ARM_PSCI: | 180 | case KVM_CAP_ARM_PSCI: |
181 | case KVM_CAP_ARM_PSCI_0_2: | 181 | case KVM_CAP_ARM_PSCI_0_2: |
182 | case KVM_CAP_READONLY_MEM: | 182 | case KVM_CAP_READONLY_MEM: |
183 | r = 1; | 183 | r = 1; |
184 | break; | 184 | break; |
185 | case KVM_CAP_COALESCED_MMIO: | 185 | case KVM_CAP_COALESCED_MMIO: |
186 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 186 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
187 | break; | 187 | break; |
188 | case KVM_CAP_ARM_SET_DEVICE_ADDR: | 188 | case KVM_CAP_ARM_SET_DEVICE_ADDR: |
189 | r = 1; | 189 | r = 1; |
190 | break; | 190 | break; |
191 | case KVM_CAP_NR_VCPUS: | 191 | case KVM_CAP_NR_VCPUS: |
192 | r = num_online_cpus(); | 192 | r = num_online_cpus(); |
193 | break; | 193 | break; |
194 | case KVM_CAP_MAX_VCPUS: | 194 | case KVM_CAP_MAX_VCPUS: |
195 | r = KVM_MAX_VCPUS; | 195 | r = KVM_MAX_VCPUS; |
196 | break; | 196 | break; |
197 | default: | 197 | default: |
198 | r = kvm_arch_dev_ioctl_check_extension(ext); | 198 | r = kvm_arch_dev_ioctl_check_extension(ext); |
199 | break; | 199 | break; |
200 | } | 200 | } |
201 | return r; | 201 | return r; |
202 | } | 202 | } |
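
The capability switch above pairs with KVM_CHECK_EXTENSION on the user-space side. A hedged fragment (assumes a `kvm` fd opened from /dev/kvm as in the sketch earlier):

    /* values <= 0 mean "not supported"; positive values carry meaning
     * per capability (e.g. KVM_CAP_MAX_VCPUS returns the limit itself) */
    int has_irqchip = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);
    int max_vcpus   = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
    printf("in-kernel GIC: %s, max vcpus: %d\n",
           has_irqchip > 0 ? "yes" : "no", max_vcpus);
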
203 | 203 | ||
204 | long kvm_arch_dev_ioctl(struct file *filp, | 204 | long kvm_arch_dev_ioctl(struct file *filp, |
205 | unsigned int ioctl, unsigned long arg) | 205 | unsigned int ioctl, unsigned long arg) |
206 | { | 206 | { |
207 | return -EINVAL; | 207 | return -EINVAL; |
208 | } | 208 | } |
209 | 209 | ||
210 | 210 | ||
211 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | 211 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) |
212 | { | 212 | { |
213 | int err; | 213 | int err; |
214 | struct kvm_vcpu *vcpu; | 214 | struct kvm_vcpu *vcpu; |
215 | 215 | ||
216 | if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) { | 216 | if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) { |
217 | err = -EBUSY; | 217 | err = -EBUSY; |
218 | goto out; | 218 | goto out; |
219 | } | 219 | } |
220 | 220 | ||
221 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | 221 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); |
222 | if (!vcpu) { | 222 | if (!vcpu) { |
223 | err = -ENOMEM; | 223 | err = -ENOMEM; |
224 | goto out; | 224 | goto out; |
225 | } | 225 | } |
226 | 226 | ||
227 | err = kvm_vcpu_init(vcpu, kvm, id); | 227 | err = kvm_vcpu_init(vcpu, kvm, id); |
228 | if (err) | 228 | if (err) |
229 | goto free_vcpu; | 229 | goto free_vcpu; |
230 | 230 | ||
231 | err = create_hyp_mappings(vcpu, vcpu + 1); | 231 | err = create_hyp_mappings(vcpu, vcpu + 1); |
232 | if (err) | 232 | if (err) |
233 | goto vcpu_uninit; | 233 | goto vcpu_uninit; |
234 | 234 | ||
235 | return vcpu; | 235 | return vcpu; |
236 | vcpu_uninit: | 236 | vcpu_uninit: |
237 | kvm_vcpu_uninit(vcpu); | 237 | kvm_vcpu_uninit(vcpu); |
238 | free_vcpu: | 238 | free_vcpu: |
239 | kmem_cache_free(kvm_vcpu_cache, vcpu); | 239 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
240 | out: | 240 | out: |
241 | return ERR_PTR(err); | 241 | return ERR_PTR(err); |
242 | } | 242 | } |
243 | 243 | ||
244 | int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | 244 | int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) |
245 | { | 245 | { |
246 | return 0; | 246 | return 0; |
247 | } | 247 | } |
248 | 248 | ||
249 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | 249 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) |
250 | { | 250 | { |
251 | kvm_mmu_free_memory_caches(vcpu); | 251 | kvm_mmu_free_memory_caches(vcpu); |
252 | kvm_timer_vcpu_terminate(vcpu); | 252 | kvm_timer_vcpu_terminate(vcpu); |
253 | kvm_vgic_vcpu_destroy(vcpu); | 253 | kvm_vgic_vcpu_destroy(vcpu); |
254 | kmem_cache_free(kvm_vcpu_cache, vcpu); | 254 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
255 | } | 255 | } |
256 | 256 | ||
257 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | 257 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) |
258 | { | 258 | { |
259 | kvm_arch_vcpu_free(vcpu); | 259 | kvm_arch_vcpu_free(vcpu); |
260 | } | 260 | } |
261 | 261 | ||
262 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 262 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
263 | { | 263 | { |
264 | return 0; | 264 | return 0; |
265 | } | 265 | } |
266 | 266 | ||
267 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | 267 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) |
268 | { | 268 | { |
269 | /* Force users to call KVM_ARM_VCPU_INIT */ | 269 | /* Force users to call KVM_ARM_VCPU_INIT */ |
270 | vcpu->arch.target = -1; | 270 | vcpu->arch.target = -1; |
271 | bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); | 271 | bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); |
272 | 272 | ||
273 | /* Set up the timer */ | 273 | /* Set up the timer */ |
274 | kvm_timer_vcpu_init(vcpu); | 274 | kvm_timer_vcpu_init(vcpu); |
275 | 275 | ||
276 | return 0; | 276 | return 0; |
277 | } | 277 | } |
278 | 278 | ||
279 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 279 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
280 | { | 280 | { |
281 | vcpu->cpu = cpu; | 281 | vcpu->cpu = cpu; |
282 | vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); | 282 | vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); |
283 | 283 | ||
284 | /* | ||
285 | * Check whether this vcpu requires the cache to be flushed on | ||
286 | * this physical CPU. This is a consequence of doing dcache | ||
287 | * operations by set/way on this vcpu. We do it here to be in | ||
288 | * a non-preemptible section. | ||
289 | */ | ||
290 | if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush)) | ||
291 | flush_cache_all(); /* We'd really want v7_flush_dcache_all() */ | ||
292 | |||
293 | kvm_arm_set_running_vcpu(vcpu); | 284 | kvm_arm_set_running_vcpu(vcpu); |
294 | } | 285 | } |
295 | 286 | ||
296 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 287 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
297 | { | 288 | { |
298 | /* | 289 | /* |
299 | * The arch-generic KVM code expects the cpu field of a vcpu to be -1 | 290 | * The arch-generic KVM code expects the cpu field of a vcpu to be -1 |
300 | * if the vcpu is no longer assigned to a cpu. This is used for the | 291 | * if the vcpu is no longer assigned to a cpu. This is used for the |
301 | * optimized make_all_cpus_request path. | 292 | * optimized make_all_cpus_request path. |
302 | */ | 293 | */ |
303 | vcpu->cpu = -1; | 294 | vcpu->cpu = -1; |
304 | 295 | ||
305 | kvm_arm_set_running_vcpu(NULL); | 296 | kvm_arm_set_running_vcpu(NULL); |
306 | } | 297 | } |
307 | 298 | ||
308 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 299 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
309 | struct kvm_guest_debug *dbg) | 300 | struct kvm_guest_debug *dbg) |
310 | { | 301 | { |
311 | return -EINVAL; | 302 | return -EINVAL; |
312 | } | 303 | } |
313 | 304 | ||
314 | 305 | ||
315 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 306 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
316 | struct kvm_mp_state *mp_state) | 307 | struct kvm_mp_state *mp_state) |
317 | { | 308 | { |
318 | return -EINVAL; | 309 | return -EINVAL; |
319 | } | 310 | } |
320 | 311 | ||
321 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | 312 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
322 | struct kvm_mp_state *mp_state) | 313 | struct kvm_mp_state *mp_state) |
323 | { | 314 | { |
324 | return -EINVAL; | 315 | return -EINVAL; |
325 | } | 316 | } |
326 | 317 | ||
327 | /** | 318 | /** |
328 | * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled | 319 | * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled |
329 | * @v: The VCPU pointer | 320 | * @v: The VCPU pointer |
330 | * | 321 | * |
331 | * If the guest CPU is not waiting for interrupts or an interrupt line is | 322 | * If the guest CPU is not waiting for interrupts or an interrupt line is |
332 | * asserted, the CPU is by definition runnable. | 323 | * asserted, the CPU is by definition runnable. |
333 | */ | 324 | */ |
334 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | 325 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) |
335 | { | 326 | { |
336 | return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v); | 327 | return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v); |
337 | } | 328 | } |
338 | 329 | ||
339 | /* Just ensure a guest exit from a particular CPU */ | 330 | /* Just ensure a guest exit from a particular CPU */ |
340 | static void exit_vm_noop(void *info) | 331 | static void exit_vm_noop(void *info) |
341 | { | 332 | { |
342 | } | 333 | } |
343 | 334 | ||
344 | void force_vm_exit(const cpumask_t *mask) | 335 | void force_vm_exit(const cpumask_t *mask) |
345 | { | 336 | { |
346 | smp_call_function_many(mask, exit_vm_noop, NULL, true); | 337 | smp_call_function_many(mask, exit_vm_noop, NULL, true); |
347 | } | 338 | } |
348 | 339 | ||
349 | /** | 340 | /** |
350 | * need_new_vmid_gen - check that the VMID is still valid | 341 | * need_new_vmid_gen - check that the VMID is still valid |
351 | * @kvm: The VM's VMID to check | 342 | * @kvm: The VM's VMID to check |
352 | * | 343 | * |
353 | * Return true if there is a new generation of VMIDs being used. | 344 | * Return true if there is a new generation of VMIDs being used. |
354 | * | 345 | * |
355 | * The hardware supports only 256 values with the value zero reserved for the | 346 | * The hardware supports only 256 values with the value zero reserved for the |
356 | * host, so we check if an assigned value belongs to a previous generation, | 347 | * host, so we check if an assigned value belongs to a previous generation, |
357 | * which requires us to assign a new value. If we're the first to use a | 348 | * which requires us to assign a new value. If we're the first to use a |
358 | * VMID for the new generation, we must flush necessary caches and TLBs on all | 349 | * VMID for the new generation, we must flush necessary caches and TLBs on all |
359 | * CPUs. | 350 | * CPUs. |
360 | */ | 351 | */ |
361 | static bool need_new_vmid_gen(struct kvm *kvm) | 352 | static bool need_new_vmid_gen(struct kvm *kvm) |
362 | { | 353 | { |
363 | return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); | 354 | return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); |
364 | } | 355 | } |
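
A toy model of the generation scheme the comment describes, to make the rollover concrete (standalone C, not kernel code; the real allocator also takes kvm_vmid_lock and flushes TLBs):

    #include <stdint.h>

    static uint64_t vmid_gen = 1;   /* bumped on every 8-bit wrap */
    static uint8_t  next_vmid = 1;  /* value 0 is reserved for the host */

    static uint8_t alloc_vmid(void)
    {
        if (next_vmid == 0) {       /* all 255 VMIDs used: new generation */
            vmid_gen++;
            next_vmid = 1;          /* the real code flushes TLBs here */
        }
        return next_vmid++;
    }
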
365 | 356 | ||
366 | /** | 357 | /** |
367 | * update_vttbr - Update the VTTBR with a valid VMID before the guest runs | 358 | * update_vttbr - Update the VTTBR with a valid VMID before the guest runs |
368 | * @kvm: The guest that we are about to run | 359 | * @kvm: The guest that we are about to run |
369 | * | 360 | * |
370 | * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the | 361 | * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the |
371 | * VM has a valid VMID, otherwise assigns a new one and flushes corresponding | 362 | * VM has a valid VMID, otherwise assigns a new one and flushes corresponding |
372 | * caches and TLBs. | 363 | * caches and TLBs. |
373 | */ | 364 | */ |
374 | static void update_vttbr(struct kvm *kvm) | 365 | static void update_vttbr(struct kvm *kvm) |
375 | { | 366 | { |
376 | phys_addr_t pgd_phys; | 367 | phys_addr_t pgd_phys; |
377 | u64 vmid; | 368 | u64 vmid; |
378 | 369 | ||
379 | if (!need_new_vmid_gen(kvm)) | 370 | if (!need_new_vmid_gen(kvm)) |
380 | return; | 371 | return; |
381 | 372 | ||
382 | spin_lock(&kvm_vmid_lock); | 373 | spin_lock(&kvm_vmid_lock); |
383 | 374 | ||
384 | /* | 375 | /* |
385 | * We need to re-check the vmid_gen here to ensure that if another vcpu | 376 | * We need to re-check the vmid_gen here to ensure that if another vcpu |
386 | * already allocated a valid vmid for this vm, then this vcpu should | 377 | * already allocated a valid vmid for this vm, then this vcpu should |
387 | * use the same vmid. | 378 | * use the same vmid. |
388 | */ | 379 | */ |
389 | if (!need_new_vmid_gen(kvm)) { | 380 | if (!need_new_vmid_gen(kvm)) { |
390 | spin_unlock(&kvm_vmid_lock); | 381 | spin_unlock(&kvm_vmid_lock); |
391 | return; | 382 | return; |
392 | } | 383 | } |
393 | 384 | ||
394 | /* First user of a new VMID generation? */ | 385 | /* First user of a new VMID generation? */ |
395 | if (unlikely(kvm_next_vmid == 0)) { | 386 | if (unlikely(kvm_next_vmid == 0)) { |
396 | atomic64_inc(&kvm_vmid_gen); | 387 | atomic64_inc(&kvm_vmid_gen); |
397 | kvm_next_vmid = 1; | 388 | kvm_next_vmid = 1; |
398 | 389 | ||
399 | /* | 390 | /* |
400 | * On SMP we know no other CPUs can use this CPU's or each | 391 | * On SMP we know no other CPUs can use this CPU's or each |
401 | * other's VMID after force_vm_exit returns since the | 392 | * other's VMID after force_vm_exit returns since the |
402 | * kvm_vmid_lock blocks them from reentry to the guest. | 393 | * kvm_vmid_lock blocks them from reentry to the guest. |
403 | */ | 394 | */ |
404 | force_vm_exit(cpu_all_mask); | 395 | force_vm_exit(cpu_all_mask); |
405 | /* | 396 | /* |
406 | * Now broadcast TLB + ICACHE invalidation over the inner | 397 | * Now broadcast TLB + ICACHE invalidation over the inner |
407 | * shareable domain to make sure all data structures are | 398 | * shareable domain to make sure all data structures are |
408 | * clean. | 399 | * clean. |
409 | */ | 400 | */ |
410 | kvm_call_hyp(__kvm_flush_vm_context); | 401 | kvm_call_hyp(__kvm_flush_vm_context); |
411 | } | 402 | } |
412 | 403 | ||
413 | kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); | 404 | kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); |
414 | kvm->arch.vmid = kvm_next_vmid; | 405 | kvm->arch.vmid = kvm_next_vmid; |
415 | kvm_next_vmid++; | 406 | kvm_next_vmid++; |
416 | 407 | ||
417 | /* update vttbr to be used with the new vmid */ | 408 | /* update vttbr to be used with the new vmid */ |
418 | pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm)); | 409 | pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm)); |
419 | BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK); | 410 | BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK); |
420 | vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; | 411 | vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; |
421 | kvm->arch.vttbr = pgd_phys | vmid; | 412 | kvm->arch.vttbr = pgd_phys | vmid; |
422 | 413 | ||
423 | spin_unlock(&kvm_vmid_lock); | 414 | spin_unlock(&kvm_vmid_lock); |
424 | } | 415 | } |
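
The VTTBR packing at the end of update_vttbr() is plain bit arithmetic. An illustrative standalone version (the real VTTBR_VMID_SHIFT/VTTBR_VMID_MASK come from asm/kvm_arm.h; an 8-bit VMID at bit 48 is assumed here):

    #include <stdint.h>
    #include <stdio.h>

    #define VMID_SHIFT 48
    #define VMID_MASK  (0xffULL << VMID_SHIFT)

    int main(void)
    {
        uint64_t pgd_phys = 0x8f20c000ULL;  /* example stage-2 pgd address */
        uint64_t vmid     = 0x2a;           /* example allocated VMID */
        uint64_t vttbr    = pgd_phys | ((vmid << VMID_SHIFT) & VMID_MASK);

        printf("vttbr = %#llx\n", (unsigned long long)vttbr);
        return 0;
    }
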
425 | 416 | ||
426 | static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) | 417 | static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) |
427 | { | 418 | { |
428 | struct kvm *kvm = vcpu->kvm; | 419 | struct kvm *kvm = vcpu->kvm; |
429 | int ret; | 420 | int ret; |
430 | 421 | ||
431 | if (likely(vcpu->arch.has_run_once)) | 422 | if (likely(vcpu->arch.has_run_once)) |
432 | return 0; | 423 | return 0; |
433 | 424 | ||
434 | vcpu->arch.has_run_once = true; | 425 | vcpu->arch.has_run_once = true; |
435 | 426 | ||
436 | /* | 427 | /* |
437 | * Map the VGIC hardware resources before running a vcpu the first | 428 | * Map the VGIC hardware resources before running a vcpu the first |
438 | * time on this VM. | 429 | * time on this VM. |
439 | */ | 430 | */ |
440 | if (unlikely(!vgic_ready(kvm))) { | 431 | if (unlikely(!vgic_ready(kvm))) { |
441 | ret = kvm_vgic_map_resources(kvm); | 432 | ret = kvm_vgic_map_resources(kvm); |
442 | if (ret) | 433 | if (ret) |
443 | return ret; | 434 | return ret; |
444 | } | 435 | } |
445 | 436 | ||
446 | /* | 437 | /* |
447 | * Enable the arch timers only if we have an in-kernel VGIC | 438 | * Enable the arch timers only if we have an in-kernel VGIC |
448 | * and it has been properly initialized, since we cannot handle | 439 | * and it has been properly initialized, since we cannot handle |
449 | * interrupts from the virtual timer with a userspace gic. | 440 | * interrupts from the virtual timer with a userspace gic. |
450 | */ | 441 | */ |
451 | if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) | 442 | if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) |
452 | kvm_timer_enable(kvm); | 443 | kvm_timer_enable(kvm); |
453 | 444 | ||
454 | return 0; | 445 | return 0; |
455 | } | 446 | } |
456 | 447 | ||
457 | static void vcpu_pause(struct kvm_vcpu *vcpu) | 448 | static void vcpu_pause(struct kvm_vcpu *vcpu) |
458 | { | 449 | { |
459 | wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); | 450 | wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); |
460 | 451 | ||
461 | wait_event_interruptible(*wq, !vcpu->arch.pause); | 452 | wait_event_interruptible(*wq, !vcpu->arch.pause); |
462 | } | 453 | } |
463 | 454 | ||
464 | static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) | 455 | static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) |
465 | { | 456 | { |
466 | return vcpu->arch.target >= 0; | 457 | return vcpu->arch.target >= 0; |
467 | } | 458 | } |
468 | 459 | ||
469 | /** | 460 | /** |
470 | * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code | 461 | * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code |
471 | * @vcpu: The VCPU pointer | 462 | * @vcpu: The VCPU pointer |
472 | * @run: The kvm_run structure pointer used for userspace state exchange | 463 | * @run: The kvm_run structure pointer used for userspace state exchange |
473 | * | 464 | * |
474 | * This function is called through the KVM_RUN ioctl from user space. It | 465 | * This function is called through the KVM_RUN ioctl from user space. It |
475 | * will execute VM code in a loop until the time slice for the process is | 466 | * will execute VM code in a loop until the time slice for the process is |
476 | * used up or some emulation is needed from user space, in which case the | 467 | * used up or some emulation is needed from user space, in which case the |
477 | * function returns 0 with the kvm_run structure filled in with the | 468 | * function returns 0 with the kvm_run structure filled in with the |
478 | * required data for the requested emulation. | 469 | * required data for the requested emulation. |
479 | */ | 470 | */ |
480 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | 471 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
481 | { | 472 | { |
482 | int ret; | 473 | int ret; |
483 | sigset_t sigsaved; | 474 | sigset_t sigsaved; |
484 | 475 | ||
485 | if (unlikely(!kvm_vcpu_initialized(vcpu))) | 476 | if (unlikely(!kvm_vcpu_initialized(vcpu))) |
486 | return -ENOEXEC; | 477 | return -ENOEXEC; |
487 | 478 | ||
488 | ret = kvm_vcpu_first_run_init(vcpu); | 479 | ret = kvm_vcpu_first_run_init(vcpu); |
489 | if (ret) | 480 | if (ret) |
490 | return ret; | 481 | return ret; |
491 | 482 | ||
492 | if (run->exit_reason == KVM_EXIT_MMIO) { | 483 | if (run->exit_reason == KVM_EXIT_MMIO) { |
493 | ret = kvm_handle_mmio_return(vcpu, vcpu->run); | 484 | ret = kvm_handle_mmio_return(vcpu, vcpu->run); |
494 | if (ret) | 485 | if (ret) |
495 | return ret; | 486 | return ret; |
496 | } | 487 | } |
497 | 488 | ||
498 | if (vcpu->sigset_active) | 489 | if (vcpu->sigset_active) |
499 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | 490 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); |
500 | 491 | ||
501 | ret = 1; | 492 | ret = 1; |
502 | run->exit_reason = KVM_EXIT_UNKNOWN; | 493 | run->exit_reason = KVM_EXIT_UNKNOWN; |
503 | while (ret > 0) { | 494 | while (ret > 0) { |
504 | /* | 495 | /* |
505 | * Check conditions before entering the guest | 496 | * Check conditions before entering the guest |
506 | */ | 497 | */ |
507 | cond_resched(); | 498 | cond_resched(); |
508 | 499 | ||
509 | update_vttbr(vcpu->kvm); | 500 | update_vttbr(vcpu->kvm); |
510 | 501 | ||
511 | if (vcpu->arch.pause) | 502 | if (vcpu->arch.pause) |
512 | vcpu_pause(vcpu); | 503 | vcpu_pause(vcpu); |
513 | 504 | ||
514 | kvm_vgic_flush_hwstate(vcpu); | 505 | kvm_vgic_flush_hwstate(vcpu); |
515 | kvm_timer_flush_hwstate(vcpu); | 506 | kvm_timer_flush_hwstate(vcpu); |
516 | 507 | ||
517 | local_irq_disable(); | 508 | local_irq_disable(); |
518 | 509 | ||
519 | /* | 510 | /* |
520 | * Re-check atomic conditions | 511 | * Re-check atomic conditions |
521 | */ | 512 | */ |
522 | if (signal_pending(current)) { | 513 | if (signal_pending(current)) { |
523 | ret = -EINTR; | 514 | ret = -EINTR; |
524 | run->exit_reason = KVM_EXIT_INTR; | 515 | run->exit_reason = KVM_EXIT_INTR; |
525 | } | 516 | } |
526 | 517 | ||
527 | if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { | 518 | if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { |
528 | local_irq_enable(); | 519 | local_irq_enable(); |
529 | kvm_timer_sync_hwstate(vcpu); | 520 | kvm_timer_sync_hwstate(vcpu); |
530 | kvm_vgic_sync_hwstate(vcpu); | 521 | kvm_vgic_sync_hwstate(vcpu); |
531 | continue; | 522 | continue; |
532 | } | 523 | } |
533 | 524 | ||
534 | /************************************************************** | 525 | /************************************************************** |
535 | * Enter the guest | 526 | * Enter the guest |
536 | */ | 527 | */ |
537 | trace_kvm_entry(*vcpu_pc(vcpu)); | 528 | trace_kvm_entry(*vcpu_pc(vcpu)); |
538 | kvm_guest_enter(); | 529 | kvm_guest_enter(); |
539 | vcpu->mode = IN_GUEST_MODE; | 530 | vcpu->mode = IN_GUEST_MODE; |
540 | 531 | ||
541 | ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); | 532 | ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); |
542 | 533 | ||
543 | vcpu->mode = OUTSIDE_GUEST_MODE; | 534 | vcpu->mode = OUTSIDE_GUEST_MODE; |
544 | vcpu->arch.last_pcpu = smp_processor_id(); | ||
545 | kvm_guest_exit(); | 535 | kvm_guest_exit(); |
546 | trace_kvm_exit(*vcpu_pc(vcpu)); | 536 | trace_kvm_exit(*vcpu_pc(vcpu)); |
547 | /* | 537 | /* |
548 | * We may have taken a host interrupt in HYP mode (ie | 538 | * We may have taken a host interrupt in HYP mode (ie |
549 | * while executing the guest). This interrupt is still | 539 | * while executing the guest). This interrupt is still |
550 | * pending, as we haven't serviced it yet! | 540 | * pending, as we haven't serviced it yet! |
551 | * | 541 | * |
552 | * We're now back in SVC mode, with interrupts | 542 | * We're now back in SVC mode, with interrupts |
553 | * disabled. Enabling the interrupts now will have | 543 | * disabled. Enabling the interrupts now will have |
554 | * the effect of taking the interrupt again, in SVC | 544 | * the effect of taking the interrupt again, in SVC |
555 | * mode this time. | 545 | * mode this time. |
556 | */ | 546 | */ |
557 | local_irq_enable(); | 547 | local_irq_enable(); |
558 | 548 | ||
559 | /* | 549 | /* |
560 | * Back from guest | 550 | * Back from guest |
561 | *************************************************************/ | 551 | *************************************************************/ |
562 | 552 | ||
563 | kvm_timer_sync_hwstate(vcpu); | 553 | kvm_timer_sync_hwstate(vcpu); |
564 | kvm_vgic_sync_hwstate(vcpu); | 554 | kvm_vgic_sync_hwstate(vcpu); |
565 | 555 | ||
566 | ret = handle_exit(vcpu, run, ret); | 556 | ret = handle_exit(vcpu, run, ret); |
567 | } | 557 | } |
568 | 558 | ||
569 | if (vcpu->sigset_active) | 559 | if (vcpu->sigset_active) |
570 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | 560 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); |
571 | return ret; | 561 | return ret; |
572 | } | 562 | } |
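
From user space, the contract documented above looks like the usual KVM run loop. A hedged fragment (assumes `vcpu_fd` and a `struct kvm_run *run` mmap'ed from it, plus <errno.h>):

    for (;;) {
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
            if (errno == EINTR)
                continue;               /* the KVM_EXIT_INTR path above */
            perror("KVM_RUN");
            break;
        }
        if (run->exit_reason == KVM_EXIT_MMIO) {
            /* emulate the access; kvm_handle_mmio_return() picks the
             * result up at the start of the next KVM_RUN */
            continue;
        }
        fprintf(stderr, "unhandled exit %d\n", run->exit_reason);
        break;
    }
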
573 | 563 | ||
574 | static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) | 564 | static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) |
575 | { | 565 | { |
576 | int bit_index; | 566 | int bit_index; |
577 | bool set; | 567 | bool set; |
578 | unsigned long *ptr; | 568 | unsigned long *ptr; |
579 | 569 | ||
580 | if (number == KVM_ARM_IRQ_CPU_IRQ) | 570 | if (number == KVM_ARM_IRQ_CPU_IRQ) |
581 | bit_index = __ffs(HCR_VI); | 571 | bit_index = __ffs(HCR_VI); |
582 | else /* KVM_ARM_IRQ_CPU_FIQ */ | 572 | else /* KVM_ARM_IRQ_CPU_FIQ */ |
583 | bit_index = __ffs(HCR_VF); | 573 | bit_index = __ffs(HCR_VF); |
584 | 574 | ||
585 | ptr = (unsigned long *)&vcpu->arch.irq_lines; | 575 | ptr = (unsigned long *)&vcpu->arch.irq_lines; |
586 | if (level) | 576 | if (level) |
587 | set = test_and_set_bit(bit_index, ptr); | 577 | set = test_and_set_bit(bit_index, ptr); |
588 | else | 578 | else |
589 | set = test_and_clear_bit(bit_index, ptr); | 579 | set = test_and_clear_bit(bit_index, ptr); |
590 | 580 | ||
591 | /* | 581 | /* |
592 | * If we didn't change anything, no need to wake up or kick other CPUs | 582 | * If we didn't change anything, no need to wake up or kick other CPUs |
593 | */ | 583 | */ |
594 | if (set == level) | 584 | if (set == level) |
595 | return 0; | 585 | return 0; |
596 | 586 | ||
597 | /* | 587 | /* |
598 | * The vcpu irq_lines field was updated, wake up sleeping VCPUs and | 588 | * The vcpu irq_lines field was updated, wake up sleeping VCPUs and |
599 | * trigger a world-switch round on the running physical CPU to set the | 589 | * trigger a world-switch round on the running physical CPU to set the |
600 | * virtual IRQ/FIQ fields in the HCR appropriately. | 590 | * virtual IRQ/FIQ fields in the HCR appropriately. |
601 | */ | 591 | */ |
602 | kvm_vcpu_kick(vcpu); | 592 | kvm_vcpu_kick(vcpu); |
603 | 593 | ||
604 | return 0; | 594 | return 0; |
605 | } | 595 | } |
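
The __ffs() call above just converts a single-bit mask into a bit index so that HCR_VI and HCR_VF can share one bitmap. A standalone sketch of the same bookkeeping (bit positions follow the ARM HCR layout but are illustrative here; __builtin_ctzl stands in for the kernel's __ffs):

    #define HCR_VF (1UL << 6)   /* assumed positions */
    #define HCR_VI (1UL << 7)

    static unsigned long irq_lines;

    /* returns nonzero when the line actually changed, i.e. a kick is due */
    static int set_line(unsigned long mask, int level)
    {
        int bit = __builtin_ctzl(mask);
        unsigned long old = irq_lines;

        if (level)
            irq_lines |= 1UL << bit;
        else
            irq_lines &= ~(1UL << bit);
        return irq_lines != old;
    }
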
606 | 596 | ||
607 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, | 597 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, |
608 | bool line_status) | 598 | bool line_status) |
609 | { | 599 | { |
610 | u32 irq = irq_level->irq; | 600 | u32 irq = irq_level->irq; |
611 | unsigned int irq_type, vcpu_idx, irq_num; | 601 | unsigned int irq_type, vcpu_idx, irq_num; |
612 | int nrcpus = atomic_read(&kvm->online_vcpus); | 602 | int nrcpus = atomic_read(&kvm->online_vcpus); |
613 | struct kvm_vcpu *vcpu = NULL; | 603 | struct kvm_vcpu *vcpu = NULL; |
614 | bool level = irq_level->level; | 604 | bool level = irq_level->level; |
615 | 605 | ||
616 | irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK; | 606 | irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK; |
617 | vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; | 607 | vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; |
618 | irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK; | 608 | irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK; |
619 | 609 | ||
620 | trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); | 610 | trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); |
621 | 611 | ||
622 | switch (irq_type) { | 612 | switch (irq_type) { |
623 | case KVM_ARM_IRQ_TYPE_CPU: | 613 | case KVM_ARM_IRQ_TYPE_CPU: |
624 | if (irqchip_in_kernel(kvm)) | 614 | if (irqchip_in_kernel(kvm)) |
625 | return -ENXIO; | 615 | return -ENXIO; |
626 | 616 | ||
627 | if (vcpu_idx >= nrcpus) | 617 | if (vcpu_idx >= nrcpus) |
628 | return -EINVAL; | 618 | return -EINVAL; |
629 | 619 | ||
630 | vcpu = kvm_get_vcpu(kvm, vcpu_idx); | 620 | vcpu = kvm_get_vcpu(kvm, vcpu_idx); |
631 | if (!vcpu) | 621 | if (!vcpu) |
632 | return -EINVAL; | 622 | return -EINVAL; |
633 | 623 | ||
634 | if (irq_num > KVM_ARM_IRQ_CPU_FIQ) | 624 | if (irq_num > KVM_ARM_IRQ_CPU_FIQ) |
635 | return -EINVAL; | 625 | return -EINVAL; |
636 | 626 | ||
637 | return vcpu_interrupt_line(vcpu, irq_num, level); | 627 | return vcpu_interrupt_line(vcpu, irq_num, level); |
638 | case KVM_ARM_IRQ_TYPE_PPI: | 628 | case KVM_ARM_IRQ_TYPE_PPI: |
639 | if (!irqchip_in_kernel(kvm)) | 629 | if (!irqchip_in_kernel(kvm)) |
640 | return -ENXIO; | 630 | return -ENXIO; |
641 | 631 | ||
642 | if (vcpu_idx >= nrcpus) | 632 | if (vcpu_idx >= nrcpus) |
643 | return -EINVAL; | 633 | return -EINVAL; |
644 | 634 | ||
645 | vcpu = kvm_get_vcpu(kvm, vcpu_idx); | 635 | vcpu = kvm_get_vcpu(kvm, vcpu_idx); |
646 | if (!vcpu) | 636 | if (!vcpu) |
647 | return -EINVAL; | 637 | return -EINVAL; |
648 | 638 | ||
649 | if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS) | 639 | if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS) |
650 | return -EINVAL; | 640 | return -EINVAL; |
651 | 641 | ||
652 | return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level); | 642 | return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level); |
653 | case KVM_ARM_IRQ_TYPE_SPI: | 643 | case KVM_ARM_IRQ_TYPE_SPI: |
654 | if (!irqchip_in_kernel(kvm)) | 644 | if (!irqchip_in_kernel(kvm)) |
655 | return -ENXIO; | 645 | return -ENXIO; |
656 | 646 | ||
657 | if (irq_num < VGIC_NR_PRIVATE_IRQS || | 647 | if (irq_num < VGIC_NR_PRIVATE_IRQS || |
658 | irq_num > KVM_ARM_IRQ_GIC_MAX) | 648 | irq_num > KVM_ARM_IRQ_GIC_MAX) |
659 | return -EINVAL; | 649 | return -EINVAL; |
660 | 650 | ||
661 | return kvm_vgic_inject_irq(kvm, 0, irq_num, level); | 651 | return kvm_vgic_inject_irq(kvm, 0, irq_num, level); |
662 | } | 652 | } |
663 | 653 | ||
664 | return -EINVAL; | 654 | return -EINVAL; |
665 | } | 655 | } |
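
The shift-and-mask decoding above mirrors how user space builds the irq word for KVM_IRQ_LINE. A fragment using the ARM UAPI constants from <linux/kvm.h> (assumes a `vm` fd; SPI 32 is the first shared interrupt):

    struct kvm_irq_level trigger = {
        .irq = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) |
               (32 << KVM_ARM_IRQ_NUM_SHIFT),
        .level = 1,                     /* assert; 0 deasserts */
    };

    if (ioctl(vm, KVM_IRQ_LINE, &trigger) < 0)
        perror("KVM_IRQ_LINE");
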
666 | 656 | ||
667 | static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, | 657 | static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, |
668 | const struct kvm_vcpu_init *init) | 658 | const struct kvm_vcpu_init *init) |
669 | { | 659 | { |
670 | unsigned int i; | 660 | unsigned int i; |
671 | int phys_target = kvm_target_cpu(); | 661 | int phys_target = kvm_target_cpu(); |
672 | 662 | ||
673 | if (init->target != phys_target) | 663 | if (init->target != phys_target) |
674 | return -EINVAL; | 664 | return -EINVAL; |
675 | 665 | ||
676 | /* | 666 | /* |
677 | * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must | 667 | * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must |
678 | * use the same target. | 668 | * use the same target. |
679 | */ | 669 | */ |
680 | if (vcpu->arch.target != -1 && vcpu->arch.target != init->target) | 670 | if (vcpu->arch.target != -1 && vcpu->arch.target != init->target) |
681 | return -EINVAL; | 671 | return -EINVAL; |
682 | 672 | ||
683 | /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ | 673 | /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ |
684 | for (i = 0; i < sizeof(init->features) * 8; i++) { | 674 | for (i = 0; i < sizeof(init->features) * 8; i++) { |
685 | bool set = (init->features[i / 32] & (1 << (i % 32))); | 675 | bool set = (init->features[i / 32] & (1 << (i % 32))); |
686 | 676 | ||
687 | if (set && i >= KVM_VCPU_MAX_FEATURES) | 677 | if (set && i >= KVM_VCPU_MAX_FEATURES) |
688 | return -ENOENT; | 678 | return -ENOENT; |
689 | 679 | ||
690 | /* | 680 | /* |
691 | * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must | 681 | * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must |
692 | * use the same feature set. | 682 | * use the same feature set. |
693 | */ | 683 | */ |
694 | if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES && | 684 | if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES && |
695 | test_bit(i, vcpu->arch.features) != set) | 685 | test_bit(i, vcpu->arch.features) != set) |
696 | return -EINVAL; | 686 | return -EINVAL; |
697 | 687 | ||
698 | if (set) | 688 | if (set) |
699 | set_bit(i, vcpu->arch.features); | 689 | set_bit(i, vcpu->arch.features); |
700 | } | 690 | } |
701 | 691 | ||
702 | vcpu->arch.target = phys_target; | 692 | vcpu->arch.target = phys_target; |
703 | 693 | ||
704 | /* Now we know what it is, we can reset it. */ | 694 | /* Now we know what it is, we can reset it. */ |
705 | return kvm_reset_vcpu(vcpu); | 695 | return kvm_reset_vcpu(vcpu); |
706 | } | 696 | } |
707 | 697 | ||
708 | 698 | ||
709 | static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, | 699 | static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, |
710 | struct kvm_vcpu_init *init) | 700 | struct kvm_vcpu_init *init) |
711 | { | 701 | { |
712 | int ret; | 702 | int ret; |
713 | 703 | ||
714 | ret = kvm_vcpu_set_target(vcpu, init); | 704 | ret = kvm_vcpu_set_target(vcpu, init); |
715 | if (ret) | 705 | if (ret) |
716 | return ret; | 706 | return ret; |
717 | 707 | ||
718 | /* | 708 | /* |
719 | * Ensure a rebooted VM will fault in RAM pages and detect if the | 709 | * Ensure a rebooted VM will fault in RAM pages and detect if the |
720 | * guest MMU is turned off and flush the caches as needed. | 710 | * guest MMU is turned off and flush the caches as needed. |
721 | */ | 711 | */ |
722 | if (vcpu->arch.has_run_once) | 712 | if (vcpu->arch.has_run_once) |
723 | stage2_unmap_vm(vcpu->kvm); | 713 | stage2_unmap_vm(vcpu->kvm); |
724 | 714 | ||
725 | vcpu_reset_hcr(vcpu); | 715 | vcpu_reset_hcr(vcpu); |
726 | 716 | ||
727 | /* | 717 | /* |
728 | * Handle the "start in power-off" case by marking the VCPU as paused. | 718 | * Handle the "start in power-off" case by marking the VCPU as paused. |
729 | */ | 719 | */ |
730 | if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) | 720 | if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) |
731 | vcpu->arch.pause = true; | 721 | vcpu->arch.pause = true; |
732 | else | 722 | else |
733 | vcpu->arch.pause = false; | 723 | vcpu->arch.pause = false; |
734 | 724 | ||
735 | return 0; | 725 | return 0; |
736 | } | 726 | } |
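
User space typically drives the target/feature negotiation above with KVM_ARM_PREFERRED_TARGET followed by KVM_ARM_VCPU_INIT. A hedged fragment (assumes `vm` and `vcpu_fd`; it requests the "start in power-off" behaviour handled at the end of the function):

    struct kvm_vcpu_init init;

    if (ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init) < 0)
        perror("KVM_ARM_PREFERRED_TARGET");

    /* set the power-off feature bit in the packed features array */
    init.features[KVM_ARM_VCPU_POWER_OFF / 32] |=
            1 << (KVM_ARM_VCPU_POWER_OFF % 32);

    if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
        perror("KVM_ARM_VCPU_INIT");
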
737 | 727 | ||
738 | long kvm_arch_vcpu_ioctl(struct file *filp, | 728 | long kvm_arch_vcpu_ioctl(struct file *filp, |
739 | unsigned int ioctl, unsigned long arg) | 729 | unsigned int ioctl, unsigned long arg) |
740 | { | 730 | { |
741 | struct kvm_vcpu *vcpu = filp->private_data; | 731 | struct kvm_vcpu *vcpu = filp->private_data; |
742 | void __user *argp = (void __user *)arg; | 732 | void __user *argp = (void __user *)arg; |
743 | 733 | ||
744 | switch (ioctl) { | 734 | switch (ioctl) { |
745 | case KVM_ARM_VCPU_INIT: { | 735 | case KVM_ARM_VCPU_INIT: { |
746 | struct kvm_vcpu_init init; | 736 | struct kvm_vcpu_init init; |
747 | 737 | ||
748 | if (copy_from_user(&init, argp, sizeof(init))) | 738 | if (copy_from_user(&init, argp, sizeof(init))) |
749 | return -EFAULT; | 739 | return -EFAULT; |
750 | 740 | ||
751 | return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init); | 741 | return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init); |
752 | } | 742 | } |
753 | case KVM_SET_ONE_REG: | 743 | case KVM_SET_ONE_REG: |
754 | case KVM_GET_ONE_REG: { | 744 | case KVM_GET_ONE_REG: { |
755 | struct kvm_one_reg reg; | 745 | struct kvm_one_reg reg; |
756 | 746 | ||
757 | if (unlikely(!kvm_vcpu_initialized(vcpu))) | 747 | if (unlikely(!kvm_vcpu_initialized(vcpu))) |
758 | return -ENOEXEC; | 748 | return -ENOEXEC; |
759 | 749 | ||
760 | if (copy_from_user(®, argp, sizeof(reg))) | 750 | if (copy_from_user(®, argp, sizeof(reg))) |
761 | return -EFAULT; | 751 | return -EFAULT; |
762 | if (ioctl == KVM_SET_ONE_REG) | 752 | if (ioctl == KVM_SET_ONE_REG) |
763 | return kvm_arm_set_reg(vcpu, ®); | 753 | return kvm_arm_set_reg(vcpu, ®); |
764 | else | 754 | else |
765 | return kvm_arm_get_reg(vcpu, ®); | 755 | return kvm_arm_get_reg(vcpu, ®); |
766 | } | 756 | } |
767 | case KVM_GET_REG_LIST: { | 757 | case KVM_GET_REG_LIST: { |
768 | struct kvm_reg_list __user *user_list = argp; | 758 | struct kvm_reg_list __user *user_list = argp; |
769 | struct kvm_reg_list reg_list; | 759 | struct kvm_reg_list reg_list; |
770 | unsigned n; | 760 | unsigned n; |
771 | 761 | ||
772 | if (unlikely(!kvm_vcpu_initialized(vcpu))) | 762 | if (unlikely(!kvm_vcpu_initialized(vcpu))) |
773 | return -ENOEXEC; | 763 | return -ENOEXEC; |
774 | 764 | ||
775 | if (copy_from_user(®_list, user_list, sizeof(reg_list))) | 765 | if (copy_from_user(®_list, user_list, sizeof(reg_list))) |
776 | return -EFAULT; | 766 | return -EFAULT; |
777 | n = reg_list.n; | 767 | n = reg_list.n; |
778 | reg_list.n = kvm_arm_num_regs(vcpu); | 768 | reg_list.n = kvm_arm_num_regs(vcpu); |
779 | if (copy_to_user(user_list, ®_list, sizeof(reg_list))) | 769 | if (copy_to_user(user_list, ®_list, sizeof(reg_list))) |
780 | return -EFAULT; | 770 | return -EFAULT; |
781 | if (n < reg_list.n) | 771 | if (n < reg_list.n) |
782 | return -E2BIG; | 772 | return -E2BIG; |
783 | return kvm_arm_copy_reg_indices(vcpu, user_list->reg); | 773 | return kvm_arm_copy_reg_indices(vcpu, user_list->reg); |
784 | } | 774 | } |
785 | default: | 775 | default: |
786 | return -EINVAL; | 776 | return -EINVAL; |
787 | } | 777 | } |
788 | } | 778 | } |
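
The KVM_GET_REG_LIST case above implements a two-call protocol: a first call with n too small fails with E2BIG but still reports the real count. A hedged fragment (assumes `vcpu_fd` and <stdlib.h> for malloc):

    struct kvm_reg_list probe = { .n = 0 };
    struct kvm_reg_list *list;

    ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);   /* fails with E2BIG,
                                                   but fills probe.n */

    list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
    list->n = probe.n;
    if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0)
        printf("%llu registers\n", (unsigned long long)list->n);
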
789 | 779 | ||
790 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | 780 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) |
791 | { | 781 | { |
792 | return -EINVAL; | 782 | return -EINVAL; |
793 | } | 783 | } |
794 | 784 | ||
795 | static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, | 785 | static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, |
796 | struct kvm_arm_device_addr *dev_addr) | 786 | struct kvm_arm_device_addr *dev_addr) |
797 | { | 787 | { |
798 | unsigned long dev_id, type; | 788 | unsigned long dev_id, type; |
799 | 789 | ||
800 | dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >> | 790 | dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >> |
801 | KVM_ARM_DEVICE_ID_SHIFT; | 791 | KVM_ARM_DEVICE_ID_SHIFT; |
802 | type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >> | 792 | type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >> |
803 | KVM_ARM_DEVICE_TYPE_SHIFT; | 793 | KVM_ARM_DEVICE_TYPE_SHIFT; |
804 | 794 | ||
805 | switch (dev_id) { | 795 | switch (dev_id) { |
806 | case KVM_ARM_DEVICE_VGIC_V2: | 796 | case KVM_ARM_DEVICE_VGIC_V2: |
807 | if (!vgic_present) | 797 | if (!vgic_present) |
808 | return -ENXIO; | 798 | return -ENXIO; |
809 | return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); | 799 | return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); |
810 | default: | 800 | default: |
811 | return -ENODEV; | 801 | return -ENODEV; |
812 | } | 802 | } |
813 | } | 803 | } |
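
The id unpacking above is the inverse of what user space does when placing the VGIC. An illustrative fragment (UAPI constants from the ARM <linux/kvm.h>; the guest-physical address is an example value, not a required one):

    struct kvm_arm_device_addr dist = {
        .id   = (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
                KVM_VGIC_V2_ADDR_TYPE_DIST,
        .addr = 0x2c001000,             /* example distributor base */
    };

    if (ioctl(vm, KVM_ARM_SET_DEVICE_ADDR, &dist) < 0)
        perror("KVM_ARM_SET_DEVICE_ADDR");
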
814 | 804 | ||
815 | long kvm_arch_vm_ioctl(struct file *filp, | 805 | long kvm_arch_vm_ioctl(struct file *filp, |
816 | unsigned int ioctl, unsigned long arg) | 806 | unsigned int ioctl, unsigned long arg) |
817 | { | 807 | { |
818 | struct kvm *kvm = filp->private_data; | 808 | struct kvm *kvm = filp->private_data; |
819 | void __user *argp = (void __user *)arg; | 809 | void __user *argp = (void __user *)arg; |
820 | 810 | ||
821 | switch (ioctl) { | 811 | switch (ioctl) { |
822 | case KVM_CREATE_IRQCHIP: { | 812 | case KVM_CREATE_IRQCHIP: { |
823 | if (vgic_present) | 813 | if (vgic_present) |
824 | return kvm_vgic_create(kvm); | 814 | return kvm_vgic_create(kvm); |
825 | else | 815 | else |
826 | return -ENXIO; | 816 | return -ENXIO; |
827 | } | 817 | } |
828 | case KVM_ARM_SET_DEVICE_ADDR: { | 818 | case KVM_ARM_SET_DEVICE_ADDR: { |
829 | struct kvm_arm_device_addr dev_addr; | 819 | struct kvm_arm_device_addr dev_addr; |
830 | 820 | ||
831 | if (copy_from_user(&dev_addr, argp, sizeof(dev_addr))) | 821 | if (copy_from_user(&dev_addr, argp, sizeof(dev_addr))) |
832 | return -EFAULT; | 822 | return -EFAULT; |
833 | return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr); | 823 | return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr); |
834 | } | 824 | } |
835 | case KVM_ARM_PREFERRED_TARGET: { | 825 | case KVM_ARM_PREFERRED_TARGET: { |
836 | int err; | 826 | int err; |
837 | struct kvm_vcpu_init init; | 827 | struct kvm_vcpu_init init; |
838 | 828 | ||
839 | err = kvm_vcpu_preferred_target(&init); | 829 | err = kvm_vcpu_preferred_target(&init); |
840 | if (err) | 830 | if (err) |
841 | return err; | 831 | return err; |
842 | 832 | ||
843 | if (copy_to_user(argp, &init, sizeof(init))) | 833 | if (copy_to_user(argp, &init, sizeof(init))) |
844 | return -EFAULT; | 834 | return -EFAULT; |
845 | 835 | ||
846 | return 0; | 836 | return 0; |
847 | } | 837 | } |
848 | default: | 838 | default: |
849 | return -EINVAL; | 839 | return -EINVAL; |
850 | } | 840 | } |
851 | } | 841 | } |
852 | 842 | ||
853 | static void cpu_init_hyp_mode(void *dummy) | 843 | static void cpu_init_hyp_mode(void *dummy) |
854 | { | 844 | { |
855 | phys_addr_t boot_pgd_ptr; | 845 | phys_addr_t boot_pgd_ptr; |
856 | phys_addr_t pgd_ptr; | 846 | phys_addr_t pgd_ptr; |
857 | unsigned long hyp_stack_ptr; | 847 | unsigned long hyp_stack_ptr; |
858 | unsigned long stack_page; | 848 | unsigned long stack_page; |
859 | unsigned long vector_ptr; | 849 | unsigned long vector_ptr; |
860 | 850 | ||
861 | /* Switch from the HYP stub to our own HYP init vector */ | 851 | /* Switch from the HYP stub to our own HYP init vector */ |
862 | __hyp_set_vectors(kvm_get_idmap_vector()); | 852 | __hyp_set_vectors(kvm_get_idmap_vector()); |
863 | 853 | ||
864 | boot_pgd_ptr = kvm_mmu_get_boot_httbr(); | 854 | boot_pgd_ptr = kvm_mmu_get_boot_httbr(); |
865 | pgd_ptr = kvm_mmu_get_httbr(); | 855 | pgd_ptr = kvm_mmu_get_httbr(); |
866 | stack_page = __this_cpu_read(kvm_arm_hyp_stack_page); | 856 | stack_page = __this_cpu_read(kvm_arm_hyp_stack_page); |
867 | hyp_stack_ptr = stack_page + PAGE_SIZE; | 857 | hyp_stack_ptr = stack_page + PAGE_SIZE; |
868 | vector_ptr = (unsigned long)__kvm_hyp_vector; | 858 | vector_ptr = (unsigned long)__kvm_hyp_vector; |
869 | 859 | ||
870 | __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); | 860 | __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); |
871 | } | 861 | } |
872 | 862 | ||
873 | static int hyp_init_cpu_notify(struct notifier_block *self, | 863 | static int hyp_init_cpu_notify(struct notifier_block *self, |
874 | unsigned long action, void *cpu) | 864 | unsigned long action, void *cpu) |
875 | { | 865 | { |
876 | switch (action) { | 866 | switch (action) { |
877 | case CPU_STARTING: | 867 | case CPU_STARTING: |
878 | case CPU_STARTING_FROZEN: | 868 | case CPU_STARTING_FROZEN: |
879 | if (__hyp_get_vectors() == hyp_default_vectors) | 869 | if (__hyp_get_vectors() == hyp_default_vectors) |
880 | cpu_init_hyp_mode(NULL); | 870 | cpu_init_hyp_mode(NULL); |
881 | break; | 871 | break; |
882 | } | 872 | } |
883 | 873 | ||
884 | return NOTIFY_OK; | 874 | return NOTIFY_OK; |
885 | } | 875 | } |
886 | 876 | ||
887 | static struct notifier_block hyp_init_cpu_nb = { | 877 | static struct notifier_block hyp_init_cpu_nb = { |
888 | .notifier_call = hyp_init_cpu_notify, | 878 | .notifier_call = hyp_init_cpu_notify, |
889 | }; | 879 | }; |
890 | 880 | ||
891 | #ifdef CONFIG_CPU_PM | 881 | #ifdef CONFIG_CPU_PM |
892 | static int hyp_init_cpu_pm_notifier(struct notifier_block *self, | 882 | static int hyp_init_cpu_pm_notifier(struct notifier_block *self, |
893 | unsigned long cmd, | 883 | unsigned long cmd, |
894 | void *v) | 884 | void *v) |
895 | { | 885 | { |
896 | if (cmd == CPU_PM_EXIT && | 886 | if (cmd == CPU_PM_EXIT && |
897 | __hyp_get_vectors() == hyp_default_vectors) { | 887 | __hyp_get_vectors() == hyp_default_vectors) { |
898 | cpu_init_hyp_mode(NULL); | 888 | cpu_init_hyp_mode(NULL); |
899 | return NOTIFY_OK; | 889 | return NOTIFY_OK; |
900 | } | 890 | } |
901 | 891 | ||
902 | return NOTIFY_DONE; | 892 | return NOTIFY_DONE; |
903 | } | 893 | } |
904 | 894 | ||
905 | static struct notifier_block hyp_init_cpu_pm_nb = { | 895 | static struct notifier_block hyp_init_cpu_pm_nb = { |
906 | .notifier_call = hyp_init_cpu_pm_notifier, | 896 | .notifier_call = hyp_init_cpu_pm_notifier, |
907 | }; | 897 | }; |
908 | 898 | ||
909 | static void __init hyp_cpu_pm_init(void) | 899 | static void __init hyp_cpu_pm_init(void) |
910 | { | 900 | { |
911 | cpu_pm_register_notifier(&hyp_init_cpu_pm_nb); | 901 | cpu_pm_register_notifier(&hyp_init_cpu_pm_nb); |
912 | } | 902 | } |
913 | #else | 903 | #else |
914 | static inline void hyp_cpu_pm_init(void) | 904 | static inline void hyp_cpu_pm_init(void) |
915 | { | 905 | { |
916 | } | 906 | } |
917 | #endif | 907 | #endif |
918 | 908 | ||
919 | /** | 909 | /** |
920 | * Initializes Hyp-mode on all online CPUs | 910 | * Initializes Hyp-mode on all online CPUs |
921 | */ | 911 | */ |
922 | static int init_hyp_mode(void) | 912 | static int init_hyp_mode(void) |
923 | { | 913 | { |
924 | int cpu; | 914 | int cpu; |
925 | int err = 0; | 915 | int err = 0; |
926 | 916 | ||
927 | /* | 917 | /* |
928 | * Allocate Hyp PGD and setup Hyp identity mapping | 918 | * Allocate Hyp PGD and setup Hyp identity mapping |
929 | */ | 919 | */ |
930 | err = kvm_mmu_init(); | 920 | err = kvm_mmu_init(); |
931 | if (err) | 921 | if (err) |
932 | goto out_err; | 922 | goto out_err; |
933 | 923 | ||
934 | /* | 924 | /* |
935 | * It is probably enough to obtain the default on one | 925 | * It is probably enough to obtain the default on one |
936 | * CPU. It's unlikely to be different on the others. | 926 | * CPU. It's unlikely to be different on the others. |
937 | */ | 927 | */ |
938 | hyp_default_vectors = __hyp_get_vectors(); | 928 | hyp_default_vectors = __hyp_get_vectors(); |
939 | 929 | ||
940 | /* | 930 | /* |
941 | * Allocate stack pages for Hypervisor-mode | 931 | * Allocate stack pages for Hypervisor-mode |
942 | */ | 932 | */ |
943 | for_each_possible_cpu(cpu) { | 933 | for_each_possible_cpu(cpu) { |
944 | unsigned long stack_page; | 934 | unsigned long stack_page; |
945 | 935 | ||
946 | stack_page = __get_free_page(GFP_KERNEL); | 936 | stack_page = __get_free_page(GFP_KERNEL); |
947 | if (!stack_page) { | 937 | if (!stack_page) { |
948 | err = -ENOMEM; | 938 | err = -ENOMEM; |
949 | goto out_free_stack_pages; | 939 | goto out_free_stack_pages; |
950 | } | 940 | } |
951 | 941 | ||
952 | per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; | 942 | per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; |
953 | } | 943 | } |
954 | 944 | ||
955 | /* | 945 | /* |
956 | * Map the Hyp-code called directly from the host | 946 | * Map the Hyp-code called directly from the host |
957 | */ | 947 | */ |
958 | err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); | 948 | err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); |
959 | if (err) { | 949 | if (err) { |
960 | kvm_err("Cannot map world-switch code\n"); | 950 | kvm_err("Cannot map world-switch code\n"); |
961 | goto out_free_mappings; | 951 | goto out_free_mappings; |
962 | } | 952 | } |
963 | 953 | ||
964 | /* | 954 | /* |
965 | * Map the Hyp stack pages | 955 | * Map the Hyp stack pages |
966 | */ | 956 | */ |
967 | for_each_possible_cpu(cpu) { | 957 | for_each_possible_cpu(cpu) { |
968 | char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); | 958 | char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); |
969 | err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE); | 959 | err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE); |
970 | 960 | ||
971 | if (err) { | 961 | if (err) { |
972 | kvm_err("Cannot map hyp stack\n"); | 962 | kvm_err("Cannot map hyp stack\n"); |
973 | goto out_free_mappings; | 963 | goto out_free_mappings; |
974 | } | 964 | } |
975 | } | 965 | } |
976 | 966 | ||
977 | /* | 967 | /* |
978 | * Map the host CPU structures | 968 | * Map the host CPU structures |
979 | */ | 969 | */ |
980 | kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t); | 970 | kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t); |
981 | if (!kvm_host_cpu_state) { | 971 | if (!kvm_host_cpu_state) { |
982 | err = -ENOMEM; | 972 | err = -ENOMEM; |
983 | kvm_err("Cannot allocate host CPU state\n"); | 973 | kvm_err("Cannot allocate host CPU state\n"); |
984 | goto out_free_mappings; | 974 | goto out_free_mappings; |
985 | } | 975 | } |
986 | 976 | ||
987 | for_each_possible_cpu(cpu) { | 977 | for_each_possible_cpu(cpu) { |
988 | kvm_cpu_context_t *cpu_ctxt; | 978 | kvm_cpu_context_t *cpu_ctxt; |
989 | 979 | ||
990 | cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu); | 980 | cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu); |
991 | err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1); | 981 | err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1); |
992 | 982 | ||
993 | if (err) { | 983 | if (err) { |
994 | kvm_err("Cannot map host CPU state: %d\n", err); | 984 | kvm_err("Cannot map host CPU state: %d\n", err); |
995 | goto out_free_context; | 985 | goto out_free_context; |
996 | } | 986 | } |
997 | } | 987 | } |
998 | 988 | ||
999 | /* | 989 | /* |
1000 | * Execute the init code on each CPU. | 990 | * Execute the init code on each CPU. |
1001 | */ | 991 | */ |
1002 | on_each_cpu(cpu_init_hyp_mode, NULL, 1); | 992 | on_each_cpu(cpu_init_hyp_mode, NULL, 1); |
1003 | 993 | ||
1004 | /* | 994 | /* |
1005 | * Init HYP view of VGIC | 995 | * Init HYP view of VGIC |
1006 | */ | 996 | */ |
1007 | err = kvm_vgic_hyp_init(); | 997 | err = kvm_vgic_hyp_init(); |
1008 | if (err) | 998 | if (err) |
1009 | goto out_free_context; | 999 | goto out_free_context; |
1010 | 1000 | ||
1011 | #ifdef CONFIG_KVM_ARM_VGIC | 1001 | #ifdef CONFIG_KVM_ARM_VGIC |
1012 | vgic_present = true; | 1002 | vgic_present = true; |
1013 | #endif | 1003 | #endif |
1014 | 1004 | ||
1015 | /* | 1005 | /* |
1016 | * Init HYP architected timer support | 1006 | * Init HYP architected timer support |
1017 | */ | 1007 | */ |
1018 | err = kvm_timer_hyp_init(); | 1008 | err = kvm_timer_hyp_init(); |
1019 | if (err) | 1009 | if (err) |
1020 | goto out_free_mappings; | 1010 | goto out_free_mappings; |
1021 | 1011 | ||
1022 | #ifndef CONFIG_HOTPLUG_CPU | 1012 | #ifndef CONFIG_HOTPLUG_CPU |
1023 | free_boot_hyp_pgd(); | 1013 | free_boot_hyp_pgd(); |
1024 | #endif | 1014 | #endif |
1025 | 1015 | ||
1026 | kvm_perf_init(); | 1016 | kvm_perf_init(); |
1027 | 1017 | ||
1028 | kvm_info("Hyp mode initialized successfully\n"); | 1018 | kvm_info("Hyp mode initialized successfully\n"); |
1029 | 1019 | ||
1030 | return 0; | 1020 | return 0; |
1031 | out_free_context: | 1021 | out_free_context: |
1032 | free_percpu(kvm_host_cpu_state); | 1022 | free_percpu(kvm_host_cpu_state); |
1033 | out_free_mappings: | 1023 | out_free_mappings: |
1034 | free_hyp_pgds(); | 1024 | free_hyp_pgds(); |
1035 | out_free_stack_pages: | 1025 | out_free_stack_pages: |
1036 | for_each_possible_cpu(cpu) | 1026 | for_each_possible_cpu(cpu) |
1037 | free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); | 1027 | free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); |
1038 | out_err: | 1028 | out_err: |
1039 | kvm_err("error initializing Hyp mode: %d\n", err); | 1029 | kvm_err("error initializing Hyp mode: %d\n", err); |
1040 | return err; | 1030 | return err; |
1041 | } | 1031 | } |
1042 | 1032 | ||
1043 | static void check_kvm_target_cpu(void *ret) | 1033 | static void check_kvm_target_cpu(void *ret) |
1044 | { | 1034 | { |
1045 | *(int *)ret = kvm_target_cpu(); | 1035 | *(int *)ret = kvm_target_cpu(); |
1046 | } | 1036 | } |
1047 | 1037 | ||
1048 | /** | 1038 | /** |
1049 | * Initialize Hyp-mode and memory mappings on all CPUs. | 1039 | * Initialize Hyp-mode and memory mappings on all CPUs. |
1050 | */ | 1040 | */ |
1051 | int kvm_arch_init(void *opaque) | 1041 | int kvm_arch_init(void *opaque) |
1052 | { | 1042 | { |
1053 | int err; | 1043 | int err; |
1054 | int ret, cpu; | 1044 | int ret, cpu; |
1055 | 1045 | ||
1056 | if (!is_hyp_mode_available()) { | 1046 | if (!is_hyp_mode_available()) { |
1057 | kvm_err("HYP mode not available\n"); | 1047 | kvm_err("HYP mode not available\n"); |
1058 | return -ENODEV; | 1048 | return -ENODEV; |
1059 | } | 1049 | } |
1060 | 1050 | ||
1061 | for_each_online_cpu(cpu) { | 1051 | for_each_online_cpu(cpu) { |
1062 | smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1); | 1052 | smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1); |
1063 | if (ret < 0) { | 1053 | if (ret < 0) { |
1064 | kvm_err("Error, CPU %d not supported!\n", cpu); | 1054 | kvm_err("Error, CPU %d not supported!\n", cpu); |
1065 | return -ENODEV; | 1055 | return -ENODEV; |
1066 | } | 1056 | } |
1067 | } | 1057 | } |
1068 | 1058 | ||
1069 | cpu_notifier_register_begin(); | 1059 | cpu_notifier_register_begin(); |
1070 | 1060 | ||
1071 | err = init_hyp_mode(); | 1061 | err = init_hyp_mode(); |
1072 | if (err) | 1062 | if (err) |
1073 | goto out_err; | 1063 | goto out_err; |
1074 | 1064 | ||
1075 | err = __register_cpu_notifier(&hyp_init_cpu_nb); | 1065 | err = __register_cpu_notifier(&hyp_init_cpu_nb); |
1076 | if (err) { | 1066 | if (err) { |
1077 | kvm_err("Cannot register HYP init CPU notifier (%d)\n", err); | 1067 | kvm_err("Cannot register HYP init CPU notifier (%d)\n", err); |
1078 | goto out_err; | 1068 | goto out_err; |
1079 | } | 1069 | } |
1080 | 1070 | ||
1081 | cpu_notifier_register_done(); | 1071 | cpu_notifier_register_done(); |
1082 | 1072 | ||
1083 | hyp_cpu_pm_init(); | 1073 | hyp_cpu_pm_init(); |
1084 | 1074 | ||
1085 | kvm_coproc_table_init(); | 1075 | kvm_coproc_table_init(); |
1086 | return 0; | 1076 | return 0; |
1087 | out_err: | 1077 | out_err: |
1088 | cpu_notifier_register_done(); | 1078 | cpu_notifier_register_done(); |
1089 | return err; | 1079 | return err; |
1090 | } | 1080 | } |
1091 | 1081 | ||
1092 | /* NOP: Compiling as a module not supported */ | 1082 | /* NOP: Compiling as a module not supported */ |
1093 | void kvm_arch_exit(void) | 1083 | void kvm_arch_exit(void) |
1094 | { | 1084 | { |
1095 | kvm_perf_teardown(); | 1085 | kvm_perf_teardown(); |
1096 | } | 1086 | } |
1097 | 1087 | ||
1098 | static int arm_init(void) | 1088 | static int arm_init(void) |
1099 | { | 1089 | { |
1100 | int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); | 1090 | int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); |
1101 | return rc; | 1091 | return rc; |
1102 | } | 1092 | } |
1103 | 1093 | ||
1104 | module_init(arm_init); | 1094 | module_init(arm_init); |
1105 | 1095 |
arch/arm/kvm/coproc.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | 2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
3 | * Authors: Rusty Russell <rusty@rustcorp.com.au> | 3 | * Authors: Rusty Russell <rusty@rustcorp.com.au> |
4 | * Christoffer Dall <c.dall@virtualopensystems.com> | 4 | * Christoffer Dall <c.dall@virtualopensystems.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License, version 2, as | 7 | * it under the terms of the GNU General Public License, version 2, as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 17 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
18 | */ | 18 | */ |
19 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
20 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
21 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
22 | #include <asm/kvm_arm.h> | 22 | #include <asm/kvm_arm.h> |
23 | #include <asm/kvm_host.h> | 23 | #include <asm/kvm_host.h> |
24 | #include <asm/kvm_emulate.h> | 24 | #include <asm/kvm_emulate.h> |
25 | #include <asm/kvm_coproc.h> | 25 | #include <asm/kvm_coproc.h> |
26 | #include <asm/kvm_mmu.h> | 26 | #include <asm/kvm_mmu.h> |
27 | #include <asm/cacheflush.h> | 27 | #include <asm/cacheflush.h> |
28 | #include <asm/cputype.h> | 28 | #include <asm/cputype.h> |
29 | #include <trace/events/kvm.h> | 29 | #include <trace/events/kvm.h> |
30 | #include <asm/vfp.h> | 30 | #include <asm/vfp.h> |
31 | #include "../vfp/vfpinstr.h" | 31 | #include "../vfp/vfpinstr.h" |
32 | 32 | ||
33 | #include "trace.h" | 33 | #include "trace.h" |
34 | #include "coproc.h" | 34 | #include "coproc.h" |
35 | 35 | ||
36 | 36 | ||
37 | /****************************************************************************** | 37 | /****************************************************************************** |
38 | * Co-processor emulation | 38 | * Co-processor emulation |
39 | *****************************************************************************/ | 39 | *****************************************************************************/ |
40 | 40 | ||
41 | /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ | 41 | /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ |
42 | static u32 cache_levels; | 42 | static u32 cache_levels; |
43 | 43 | ||
44 | /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ | 44 | /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ |
45 | #define CSSELR_MAX 12 | 45 | #define CSSELR_MAX 12 |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some | 48 | * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some |
49 | * of cp15 registers can be viewed either as a pair of u32 registers | 49 | * of cp15 registers can be viewed either as a pair of u32 registers |
50 | * or as one u64 register. The current u64 encoding stores the least | 50 | * or as one u64 register. The current u64 encoding stores the least |
51 | * significant u32 word first, followed by the most significant word. | 51 | * significant u32 word first, followed by the most significant word. |
52 | */ | 52 | */ |
53 | static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu, | 53 | static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu, |
54 | const struct coproc_reg *r, | 54 | const struct coproc_reg *r, |
55 | u64 val) | 55 | u64 val) |
56 | { | 56 | { |
57 | vcpu->arch.cp15[r->reg] = val & 0xffffffff; | 57 | vcpu->arch.cp15[r->reg] = val & 0xffffffff; |
58 | vcpu->arch.cp15[r->reg + 1] = val >> 32; | 58 | vcpu->arch.cp15[r->reg + 1] = val >> 32; |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, | 61 | static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, |
62 | const struct coproc_reg *r) | 62 | const struct coproc_reg *r) |
63 | { | 63 | { |
64 | u64 val; | 64 | u64 val; |
65 | 65 | ||
66 | val = vcpu->arch.cp15[r->reg + 1]; | 66 | val = vcpu->arch.cp15[r->reg + 1]; |
67 | val = val << 32; | 67 | val = val << 32; |
68 | val = val | vcpu->arch.cp15[r->reg]; | 68 | val = val | vcpu->arch.cp15[r->reg]; |
69 | return val; | 69 | return val; |
70 | } | 70 | } |
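
As a concrete illustration of the split encoding described above: a 64-bit
value such as TTBR0 occupies two consecutive u32 slots, low word first. A
minimal sketch (the array and value here are illustrative, not the real
c2_TTBR0 layout):

    /* Illustrative only: split and reassemble a u64 across two u32 slots */
    u32 cp15[2];
    u64 ttbr = 0x0000000480000000ULL;

    cp15[0] = ttbr & 0xffffffff;               /* least significant word */
    cp15[1] = ttbr >> 32;                      /* most significant word  */

    u64 back = ((u64)cp15[1] << 32) | cp15[0]; /* back == ttbr           */
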
71 | 71 | ||
72 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) | 72 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) |
73 | { | 73 | { |
74 | kvm_inject_undefined(vcpu); | 74 | kvm_inject_undefined(vcpu); |
75 | return 1; | 75 | return 1; |
76 | } | 76 | } |
77 | 77 | ||
78 | int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) | 78 | int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) |
79 | { | 79 | { |
80 | /* | 80 | /* |
81 | * We can get here, if the host has been built without VFPv3 support, | 81 | * We can get here, if the host has been built without VFPv3 support, |
82 | * but the guest attempted a floating point operation. | 82 | * but the guest attempted a floating point operation. |
83 | */ | 83 | */ |
84 | kvm_inject_undefined(vcpu); | 84 | kvm_inject_undefined(vcpu); |
85 | return 1; | 85 | return 1; |
86 | } | 86 | } |
87 | 87 | ||
88 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) | 88 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) |
89 | { | 89 | { |
90 | kvm_inject_undefined(vcpu); | 90 | kvm_inject_undefined(vcpu); |
91 | return 1; | 91 | return 1; |
92 | } | 92 | } |
93 | 93 | ||
94 | int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) | 94 | int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) |
95 | { | 95 | { |
96 | kvm_inject_undefined(vcpu); | 96 | kvm_inject_undefined(vcpu); |
97 | return 1; | 97 | return 1; |
98 | } | 98 | } |
99 | 99 | ||
100 | static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | 100 | static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) |
101 | { | 101 | { |
102 | /* | 102 | /* |
103 | * Compute guest MPIDR. We build a virtual cluster out of the | 103 | * Compute guest MPIDR. We build a virtual cluster out of the |
104 | * vcpu_id, but we read the 'U' bit from the underlying | 104 | * vcpu_id, but we read the 'U' bit from the underlying |
105 | * hardware directly. | 105 | * hardware directly. |
106 | */ | 106 | */ |
107 | vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | | 107 | vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | |
108 | ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) | | 108 | ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) | |
109 | (vcpu->vcpu_id & 3)); | 109 | (vcpu->vcpu_id & 3)); |
110 | } | 110 | } |
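
To make the virtual-cluster arithmetic concrete, assuming the 8-bit affinity
fields of ARMv7 (MPIDR_LEVEL_BITS == 8): vcpu_id 5 lands in cluster 1 as
core 1, so the guest sees affinity 0x101 (plus the U bit read from hardware):

    /* Worked example, U bit omitted: vcpu_id = 5 */
    u32 aff1 = (5 >> 2) << MPIDR_LEVEL_BITS;   /* cluster number -> 0x100  */
    u32 aff0 = 5 & 3;                          /* core within cluster -> 1 */
    /* guest MPIDR affinity = aff1 | aff0 = 0x101 */
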
111 | 111 | ||
112 | /* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */ | 112 | /* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */ |
113 | static bool access_actlr(struct kvm_vcpu *vcpu, | 113 | static bool access_actlr(struct kvm_vcpu *vcpu, |
114 | const struct coproc_params *p, | 114 | const struct coproc_params *p, |
115 | const struct coproc_reg *r) | 115 | const struct coproc_reg *r) |
116 | { | 116 | { |
117 | if (p->is_write) | 117 | if (p->is_write) |
118 | return ignore_write(vcpu, p); | 118 | return ignore_write(vcpu, p); |
119 | 119 | ||
120 | *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; | 120 | *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; |
121 | return true; | 121 | return true; |
122 | } | 122 | } |
123 | 123 | ||
124 | /* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */ | 124 | /* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */ |
125 | static bool access_cbar(struct kvm_vcpu *vcpu, | 125 | static bool access_cbar(struct kvm_vcpu *vcpu, |
126 | const struct coproc_params *p, | 126 | const struct coproc_params *p, |
127 | const struct coproc_reg *r) | 127 | const struct coproc_reg *r) |
128 | { | 128 | { |
129 | if (p->is_write) | 129 | if (p->is_write) |
130 | return write_to_read_only(vcpu, p); | 130 | return write_to_read_only(vcpu, p); |
131 | return read_zero(vcpu, p); | 131 | return read_zero(vcpu, p); |
132 | } | 132 | } |
133 | 133 | ||
134 | /* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */ | 134 | /* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */ |
135 | static bool access_l2ctlr(struct kvm_vcpu *vcpu, | 135 | static bool access_l2ctlr(struct kvm_vcpu *vcpu, |
136 | const struct coproc_params *p, | 136 | const struct coproc_params *p, |
137 | const struct coproc_reg *r) | 137 | const struct coproc_reg *r) |
138 | { | 138 | { |
139 | if (p->is_write) | 139 | if (p->is_write) |
140 | return ignore_write(vcpu, p); | 140 | return ignore_write(vcpu, p); |
141 | 141 | ||
142 | *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; | 142 | *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; |
143 | return true; | 143 | return true; |
144 | } | 144 | } |
145 | 145 | ||
146 | static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | 146 | static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) |
147 | { | 147 | { |
148 | u32 l2ctlr, ncores; | 148 | u32 l2ctlr, ncores; |
149 | 149 | ||
150 | asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); | 150 | asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); |
151 | l2ctlr &= ~(3 << 24); | 151 | l2ctlr &= ~(3 << 24); |
152 | ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1; | 152 | ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1; |
153 | /* How many cores in the current cluster and the next ones */ | 153 | /* How many cores in the current cluster and the next ones */ |
154 | ncores -= (vcpu->vcpu_id & ~3); | 154 | ncores -= (vcpu->vcpu_id & ~3); |
155 | /* Cap it to the maximum number of cores in a single cluster */ | 155 | /* Cap it to the maximum number of cores in a single cluster */ |
156 | ncores = min(ncores, 3U); | 156 | ncores = min(ncores, 3U); |
157 | l2ctlr |= (ncores & 3) << 24; | 157 | l2ctlr |= (ncores & 3) << 24; |
158 | 158 | ||
159 | vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; | 159 | vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; |
160 | } | 160 | } |
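
A worked example of the field computation above, for a six-vCPU guest and
vcpu_id 4 (the first core of the second cluster):

    ncores  = 6 - 1;           /* 5: highest vcpu index                 */
    ncores -= (4 & ~3);        /* 1: last core relative to this         */
                               /*    cluster's base vcpu                */
    ncores  = min(ncores, 3U); /* still 1                               */
    /* L2CTLR[25:24] = 1, i.e. two cores visible in this cluster */
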
161 | 161 | ||
162 | static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | 162 | static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) |
163 | { | 163 | { |
164 | u32 actlr; | 164 | u32 actlr; |
165 | 165 | ||
166 | /* ACTLR contains SMP bit: make sure you create all cpus first! */ | 166 | /* ACTLR contains SMP bit: make sure you create all cpus first! */ |
167 | asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); | 167 | asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); |
168 | /* Make the SMP bit consistent with the guest configuration */ | 168 | /* Make the SMP bit consistent with the guest configuration */ |
169 | if (atomic_read(&vcpu->kvm->online_vcpus) > 1) | 169 | if (atomic_read(&vcpu->kvm->online_vcpus) > 1) |
170 | actlr |= 1U << 6; | 170 | actlr |= 1U << 6; |
171 | else | 171 | else |
172 | actlr &= ~(1U << 6); | 172 | actlr &= ~(1U << 6); |
173 | 173 | ||
174 | vcpu->arch.cp15[c1_ACTLR] = actlr; | 174 | vcpu->arch.cp15[c1_ACTLR] = actlr; |
175 | } | 175 | } |
176 | 176 | ||
177 | /* | 177 | /* |
178 | * TRM entries: A7:4.3.50, A15:4.3.49 | 178 | * TRM entries: A7:4.3.50, A15:4.3.49 |
179 | * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). | 179 | * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). |
180 | */ | 180 | */ |
181 | static bool access_l2ectlr(struct kvm_vcpu *vcpu, | 181 | static bool access_l2ectlr(struct kvm_vcpu *vcpu, |
182 | const struct coproc_params *p, | 182 | const struct coproc_params *p, |
183 | const struct coproc_reg *r) | 183 | const struct coproc_reg *r) |
184 | { | 184 | { |
185 | if (p->is_write) | 185 | if (p->is_write) |
186 | return ignore_write(vcpu, p); | 186 | return ignore_write(vcpu, p); |
187 | 187 | ||
188 | *vcpu_reg(vcpu, p->Rt1) = 0; | 188 | *vcpu_reg(vcpu, p->Rt1) = 0; |
189 | return true; | 189 | return true; |
190 | } | 190 | } |
191 | 191 | ||
192 | /* See note at ARM ARM B1.14.4 */ | 192 | /* |
193 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). | ||
194 | */ | ||
193 | static bool access_dcsw(struct kvm_vcpu *vcpu, | 195 | static bool access_dcsw(struct kvm_vcpu *vcpu, |
194 | const struct coproc_params *p, | 196 | const struct coproc_params *p, |
195 | const struct coproc_reg *r) | 197 | const struct coproc_reg *r) |
196 | { | 198 | { |
197 | unsigned long val; | ||
198 | int cpu; | ||
199 | |||
200 | if (!p->is_write) | 199 | if (!p->is_write) |
201 | return read_from_write_only(vcpu, p); | 200 | return read_from_write_only(vcpu, p); |
202 | 201 | ||
203 | cpu = get_cpu(); | 202 | kvm_set_way_flush(vcpu); |
204 | |||
205 | cpumask_setall(&vcpu->arch.require_dcache_flush); | ||
206 | cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); | ||
207 | |||
208 | /* If we were already preempted, take the long way around */ | ||
209 | if (cpu != vcpu->arch.last_pcpu) { | ||
210 | flush_cache_all(); | ||
211 | goto done; | ||
212 | } | ||
213 | |||
214 | val = *vcpu_reg(vcpu, p->Rt1); | ||
215 | |||
216 | switch (p->CRm) { | ||
217 | case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ | ||
218 | case 14: /* DCCISW */ | ||
219 | asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val)); | ||
220 | break; | ||
221 | |||
222 | case 10: /* DCCSW */ | ||
223 | asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val)); | ||
224 | break; | ||
225 | } | ||
226 | |||
227 | done: | ||
228 | put_cpu(); | ||
229 | |||
230 | return true; | 203 | return true; |
231 | } | 204 | } |
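
The removed hunk emulated each set/way operation inline (upgrading DCISW to
DCCISW per HCR.SWIO, with a flush_cache_all() fallback across preemption);
the new kvm_set_way_flush() helper, added alongside these changes in the mmu
code, tracks the guest's cache state and performs the flush at the VM level
instead. For reference, the trapped operations are plain CRn 7 MCRs, matching
the DC{C,I,CI}SW table entries further down; a guest clean by set/way looks
like the following (setway is an illustrative operand variable):

    /* DCCSW: clean data cache line by set/way (p15, 0, c7, c10, 2) */
    asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (setway));
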
232 | 205 | ||
233 | /* | 206 | /* |
234 | * Generic accessor for VM registers. Only called as long as HCR_TVM | 207 | * Generic accessor for VM registers. Only called as long as HCR_TVM |
235 | * is set. | 208 | * is set. If the guest enables the MMU, we stop trapping the VM |
209 | * sys_regs and leave it in complete control of the caches. | ||
210 | * | ||
211 | * Used by the cpu-specific code. | ||
236 | */ | 212 | */ |
237 | static bool access_vm_reg(struct kvm_vcpu *vcpu, | 213 | bool access_vm_reg(struct kvm_vcpu *vcpu, |
238 | const struct coproc_params *p, | 214 | const struct coproc_params *p, |
239 | const struct coproc_reg *r) | 215 | const struct coproc_reg *r) |
240 | { | 216 | { |
217 | bool was_enabled = vcpu_has_cache_enabled(vcpu); | ||
218 | |||
241 | BUG_ON(!p->is_write); | 219 | BUG_ON(!p->is_write); |
242 | 220 | ||
243 | vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1); | 221 | vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1); |
244 | if (p->is_64bit) | 222 | if (p->is_64bit) |
245 | vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2); | 223 | vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2); |
246 | 224 | ||
247 | return true; | 225 | kvm_toggle_cache(vcpu, was_enabled); |
248 | } | ||
249 | |||
250 | /* | ||
251 | * SCTLR accessor. Only called as long as HCR_TVM is set. If the | ||
252 | * guest enables the MMU, we stop trapping the VM sys_regs and leave | ||
253 | * it in complete control of the caches. | ||
254 | * | ||
255 | * Used by the cpu-specific code. | ||
256 | */ | ||
257 | bool access_sctlr(struct kvm_vcpu *vcpu, | ||
258 | const struct coproc_params *p, | ||
259 | const struct coproc_reg *r) | ||
260 | { | ||
261 | access_vm_reg(vcpu, p, r); | ||
262 | |||
263 | if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ | ||
264 | vcpu->arch.hcr &= ~HCR_TVM; | ||
265 | stage2_flush_vm(vcpu->kvm); | ||
266 | } | ||
267 | |||
268 | return true; | 226 | return true; |
269 | } | 227 | } |
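
The was_enabled snapshot before the write and the kvm_toggle_cache() call
after it replace the SCTLR-only accessor deleted below: the handler can now
notice the MMU/cache enable transition on any trapped VM register, not just
SCTLR. A minimal sketch of the enable test, assuming vcpu_has_cache_enabled()
(defined in the kvm_emulate.h changes of this series) checks the SCTLR M and
C bits:

    /* Sketch: "cache enabled" == both SCTLR.M (bit 0) and SCTLR.C (bit 2) */
    static inline bool has_cache_enabled_sketch(u32 sctlr)
    {
            u32 mc = (1 << 0) | (1 << 2);   /* M and C bits */
            return (sctlr & mc) == mc;
    }
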
270 | 228 | ||
271 | /* | 229 | /* |
272 | * We could trap ID_DFR0 and tell the guest we don't support performance | 230 | * We could trap ID_DFR0 and tell the guest we don't support performance |
273 | * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was | 231 | * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was |
274 | * NAKed, so it will read the PMCR anyway. | 232 | * NAKed, so it will read the PMCR anyway. |
275 | * | 233 | * |
276 | * Therefore we tell the guest we have 0 counters. Unfortunately, we | 234 | * Therefore we tell the guest we have 0 counters. Unfortunately, we |
277 | * must always support PMCCNTR (the cycle counter): we just RAZ/WI for | 235 | * must always support PMCCNTR (the cycle counter): we just RAZ/WI for |
278 | * all PM registers, which doesn't crash the guest kernel at least. | 236 | * all PM registers, which doesn't crash the guest kernel at least. |
279 | */ | 237 | */ |
280 | static bool pm_fake(struct kvm_vcpu *vcpu, | 238 | static bool pm_fake(struct kvm_vcpu *vcpu, |
281 | const struct coproc_params *p, | 239 | const struct coproc_params *p, |
282 | const struct coproc_reg *r) | 240 | const struct coproc_reg *r) |
283 | { | 241 | { |
284 | if (p->is_write) | 242 | if (p->is_write) |
285 | return ignore_write(vcpu, p); | 243 | return ignore_write(vcpu, p); |
286 | else | 244 | else |
287 | return read_zero(vcpu, p); | 245 | return read_zero(vcpu, p); |
288 | } | 246 | } |
289 | 247 | ||
290 | #define access_pmcr pm_fake | 248 | #define access_pmcr pm_fake |
291 | #define access_pmcntenset pm_fake | 249 | #define access_pmcntenset pm_fake |
292 | #define access_pmcntenclr pm_fake | 250 | #define access_pmcntenclr pm_fake |
293 | #define access_pmovsr pm_fake | 251 | #define access_pmovsr pm_fake |
294 | #define access_pmselr pm_fake | 252 | #define access_pmselr pm_fake |
295 | #define access_pmceid0 pm_fake | 253 | #define access_pmceid0 pm_fake |
296 | #define access_pmceid1 pm_fake | 254 | #define access_pmceid1 pm_fake |
297 | #define access_pmccntr pm_fake | 255 | #define access_pmccntr pm_fake |
298 | #define access_pmxevtyper pm_fake | 256 | #define access_pmxevtyper pm_fake |
299 | #define access_pmxevcntr pm_fake | 257 | #define access_pmxevcntr pm_fake |
300 | #define access_pmuserenr pm_fake | 258 | #define access_pmuserenr pm_fake |
301 | #define access_pmintenset pm_fake | 259 | #define access_pmintenset pm_fake |
302 | #define access_pmintenclr pm_fake | 260 | #define access_pmintenclr pm_fake |
303 | 261 | ||
304 | /* Architected CP15 registers. | 262 | /* Architected CP15 registers. |
305 | * CRn denotes the primary register number, but is copied to the CRm in the | 263 | * CRn denotes the primary register number, but is copied to the CRm in the |
306 | * user space API for 64-bit register access in line with the terminology used | 264 | * user space API for 64-bit register access in line with the terminology used |
307 | * in the ARM ARM. | 265 | * in the ARM ARM. |
308 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit | 266 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit |
309 | * registers preceding 32-bit ones. | 267 | * registers preceding 32-bit ones. |
310 | */ | 268 | */ |
311 | static const struct coproc_reg cp15_regs[] = { | 269 | static const struct coproc_reg cp15_regs[] = { |
312 | /* MPIDR: we use VMPIDR for guest access. */ | 270 | /* MPIDR: we use VMPIDR for guest access. */ |
313 | { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32, | 271 | { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32, |
314 | NULL, reset_mpidr, c0_MPIDR }, | 272 | NULL, reset_mpidr, c0_MPIDR }, |
315 | 273 | ||
316 | /* CSSELR: swapped by interrupt.S. */ | 274 | /* CSSELR: swapped by interrupt.S. */ |
317 | { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32, | 275 | { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32, |
318 | NULL, reset_unknown, c0_CSSELR }, | 276 | NULL, reset_unknown, c0_CSSELR }, |
319 | 277 | ||
320 | /* ACTLR: trapped by HCR.TAC bit. */ | 278 | /* ACTLR: trapped by HCR.TAC bit. */ |
321 | { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, | 279 | { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, |
322 | access_actlr, reset_actlr, c1_ACTLR }, | 280 | access_actlr, reset_actlr, c1_ACTLR }, |
323 | 281 | ||
324 | /* CPACR: swapped by interrupt.S. */ | 282 | /* CPACR: swapped by interrupt.S. */ |
325 | { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, | 283 | { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, |
326 | NULL, reset_val, c1_CPACR, 0x00000000 }, | 284 | NULL, reset_val, c1_CPACR, 0x00000000 }, |
327 | 285 | ||
328 | /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */ | 286 | /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */ |
329 | { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 }, | 287 | { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 }, |
330 | { CRn(2), CRm( 0), Op1( 0), Op2( 0), is32, | 288 | { CRn(2), CRm( 0), Op1( 0), Op2( 0), is32, |
331 | access_vm_reg, reset_unknown, c2_TTBR0 }, | 289 | access_vm_reg, reset_unknown, c2_TTBR0 }, |
332 | { CRn(2), CRm( 0), Op1( 0), Op2( 1), is32, | 290 | { CRn(2), CRm( 0), Op1( 0), Op2( 1), is32, |
333 | access_vm_reg, reset_unknown, c2_TTBR1 }, | 291 | access_vm_reg, reset_unknown, c2_TTBR1 }, |
334 | { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, | 292 | { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, |
335 | access_vm_reg, reset_val, c2_TTBCR, 0x00000000 }, | 293 | access_vm_reg, reset_val, c2_TTBCR, 0x00000000 }, |
336 | { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 }, | 294 | { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 }, |
337 | 295 | ||
338 | 296 | ||
339 | /* DACR: swapped by interrupt.S. */ | 297 | /* DACR: swapped by interrupt.S. */ |
340 | { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32, | 298 | { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32, |
341 | access_vm_reg, reset_unknown, c3_DACR }, | 299 | access_vm_reg, reset_unknown, c3_DACR }, |
342 | 300 | ||
343 | /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */ | 301 | /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */ |
344 | { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32, | 302 | { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32, |
345 | access_vm_reg, reset_unknown, c5_DFSR }, | 303 | access_vm_reg, reset_unknown, c5_DFSR }, |
346 | { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32, | 304 | { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32, |
347 | access_vm_reg, reset_unknown, c5_IFSR }, | 305 | access_vm_reg, reset_unknown, c5_IFSR }, |
348 | { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32, | 306 | { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32, |
349 | access_vm_reg, reset_unknown, c5_ADFSR }, | 307 | access_vm_reg, reset_unknown, c5_ADFSR }, |
350 | { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32, | 308 | { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32, |
351 | access_vm_reg, reset_unknown, c5_AIFSR }, | 309 | access_vm_reg, reset_unknown, c5_AIFSR }, |
352 | 310 | ||
353 | /* DFAR/IFAR: swapped by interrupt.S. */ | 311 | /* DFAR/IFAR: swapped by interrupt.S. */ |
354 | { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32, | 312 | { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32, |
355 | access_vm_reg, reset_unknown, c6_DFAR }, | 313 | access_vm_reg, reset_unknown, c6_DFAR }, |
356 | { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, | 314 | { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, |
357 | access_vm_reg, reset_unknown, c6_IFAR }, | 315 | access_vm_reg, reset_unknown, c6_IFAR }, |
358 | 316 | ||
359 | /* PAR swapped by interrupt.S */ | 317 | /* PAR swapped by interrupt.S */ |
360 | { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, | 318 | { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, |
361 | 319 | ||
362 | /* | 320 | /* |
363 | * DC{C,I,CI}SW operations: | 321 | * DC{C,I,CI}SW operations: |
364 | */ | 322 | */ |
365 | { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw}, | 323 | { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw}, |
366 | { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw}, | 324 | { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw}, |
367 | { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw}, | 325 | { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw}, |
368 | /* | 326 | /* |
369 | * L2CTLR access (guest wants to know #CPUs). | 327 | * L2CTLR access (guest wants to know #CPUs). |
370 | */ | 328 | */ |
371 | { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, | 329 | { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, |
372 | access_l2ctlr, reset_l2ctlr, c9_L2CTLR }, | 330 | access_l2ctlr, reset_l2ctlr, c9_L2CTLR }, |
373 | { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr}, | 331 | { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr}, |
374 | 332 | ||
375 | /* | 333 | /* |
376 | * Dummy performance monitor implementation. | 334 | * Dummy performance monitor implementation. |
377 | */ | 335 | */ |
378 | { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr}, | 336 | { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr}, |
379 | { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset}, | 337 | { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset}, |
380 | { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr}, | 338 | { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr}, |
381 | { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr}, | 339 | { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr}, |
382 | { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr}, | 340 | { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr}, |
383 | { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0}, | 341 | { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0}, |
384 | { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1}, | 342 | { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1}, |
385 | { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr}, | 343 | { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr}, |
386 | { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper}, | 344 | { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper}, |
387 | { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr}, | 345 | { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr}, |
388 | { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr}, | 346 | { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr}, |
389 | { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset}, | 347 | { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset}, |
390 | { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr}, | 348 | { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr}, |
391 | 349 | ||
392 | /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */ | 350 | /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */ |
393 | { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32, | 351 | { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32, |
394 | access_vm_reg, reset_unknown, c10_PRRR}, | 352 | access_vm_reg, reset_unknown, c10_PRRR}, |
395 | { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32, | 353 | { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32, |
396 | access_vm_reg, reset_unknown, c10_NMRR}, | 354 | access_vm_reg, reset_unknown, c10_NMRR}, |
397 | 355 | ||
398 | /* AMAIR0/AMAIR1: swapped by interrupt.S. */ | 356 | /* AMAIR0/AMAIR1: swapped by interrupt.S. */ |
399 | { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32, | 357 | { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32, |
400 | access_vm_reg, reset_unknown, c10_AMAIR0}, | 358 | access_vm_reg, reset_unknown, c10_AMAIR0}, |
401 | { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32, | 359 | { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32, |
402 | access_vm_reg, reset_unknown, c10_AMAIR1}, | 360 | access_vm_reg, reset_unknown, c10_AMAIR1}, |
403 | 361 | ||
404 | /* VBAR: swapped by interrupt.S. */ | 362 | /* VBAR: swapped by interrupt.S. */ |
405 | { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, | 363 | { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, |
406 | NULL, reset_val, c12_VBAR, 0x00000000 }, | 364 | NULL, reset_val, c12_VBAR, 0x00000000 }, |
407 | 365 | ||
408 | /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */ | 366 | /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */ |
409 | { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32, | 367 | { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32, |
410 | access_vm_reg, reset_val, c13_CID, 0x00000000 }, | 368 | access_vm_reg, reset_val, c13_CID, 0x00000000 }, |
411 | { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32, | 369 | { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32, |
412 | NULL, reset_unknown, c13_TID_URW }, | 370 | NULL, reset_unknown, c13_TID_URW }, |
413 | { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32, | 371 | { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32, |
414 | NULL, reset_unknown, c13_TID_URO }, | 372 | NULL, reset_unknown, c13_TID_URO }, |
415 | { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32, | 373 | { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32, |
416 | NULL, reset_unknown, c13_TID_PRIV }, | 374 | NULL, reset_unknown, c13_TID_PRIV }, |
417 | 375 | ||
418 | /* CNTKCTL: swapped by interrupt.S. */ | 376 | /* CNTKCTL: swapped by interrupt.S. */ |
419 | { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32, | 377 | { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32, |
420 | NULL, reset_val, c14_CNTKCTL, 0x00000000 }, | 378 | NULL, reset_val, c14_CNTKCTL, 0x00000000 }, |
421 | 379 | ||
422 | /* The Configuration Base Address Register. */ | 380 | /* The Configuration Base Address Register. */ |
423 | { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, | 381 | { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, |
424 | }; | 382 | }; |
425 | 383 | ||
426 | /* Target specific emulation tables */ | 384 | /* Target specific emulation tables */ |
427 | static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS]; | 385 | static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS]; |
428 | 386 | ||
429 | void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table) | 387 | void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table) |
430 | { | 388 | { |
431 | unsigned int i; | 389 | unsigned int i; |
432 | 390 | ||
433 | for (i = 1; i < table->num; i++) | 391 | for (i = 1; i < table->num; i++) |
434 | BUG_ON(cmp_reg(&table->table[i-1], | 392 | BUG_ON(cmp_reg(&table->table[i-1], |
435 | &table->table[i]) >= 0); | 393 | &table->table[i]) >= 0); |
436 | 394 | ||
437 | target_tables[table->target] = table; | 395 | target_tables[table->target] = table; |
438 | } | 396 | } |
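
The BUG_ON above enforces the ordering contract stated before cp15_regs:
entries ascend by CRn, CRm, Op1, Op2, with 64-bit registers preceding 32-bit
ones. cmp_reg() itself is defined in coproc.h; a plausible sketch of the
comparison it encodes (names here are illustrative):

    static int cmp_reg_sketch(const struct coproc_reg *a,
                              const struct coproc_reg *b)
    {
            if (a->CRn != b->CRn)
                    return a->CRn - b->CRn;
            if (a->CRm != b->CRm)
                    return a->CRm - b->CRm;
            if (a->Op1 != b->Op1)
                    return a->Op1 - b->Op1;
            if (a->Op2 != b->Op2)
                    return a->Op2 - b->Op2;
            return b->is_64 - a->is_64; /* 64-bit entries sort first */
    }
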
439 | 397 | ||
440 | /* Get specific register table for this target. */ | 398 | /* Get specific register table for this target. */ |
441 | static const struct coproc_reg *get_target_table(unsigned target, size_t *num) | 399 | static const struct coproc_reg *get_target_table(unsigned target, size_t *num) |
442 | { | 400 | { |
443 | struct kvm_coproc_target_table *table; | 401 | struct kvm_coproc_target_table *table; |
444 | 402 | ||
445 | table = target_tables[target]; | 403 | table = target_tables[target]; |
446 | *num = table->num; | 404 | *num = table->num; |
447 | return table->table; | 405 | return table->table; |
448 | } | 406 | } |
449 | 407 | ||
450 | static const struct coproc_reg *find_reg(const struct coproc_params *params, | 408 | static const struct coproc_reg *find_reg(const struct coproc_params *params, |
451 | const struct coproc_reg table[], | 409 | const struct coproc_reg table[], |
452 | unsigned int num) | 410 | unsigned int num) |
453 | { | 411 | { |
454 | unsigned int i; | 412 | unsigned int i; |
455 | 413 | ||
456 | for (i = 0; i < num; i++) { | 414 | for (i = 0; i < num; i++) { |
457 | const struct coproc_reg *r = &table[i]; | 415 | const struct coproc_reg *r = &table[i]; |
458 | 416 | ||
459 | if (params->is_64bit != r->is_64) | 417 | if (params->is_64bit != r->is_64) |
460 | continue; | 418 | continue; |
461 | if (params->CRn != r->CRn) | 419 | if (params->CRn != r->CRn) |
462 | continue; | 420 | continue; |
463 | if (params->CRm != r->CRm) | 421 | if (params->CRm != r->CRm) |
464 | continue; | 422 | continue; |
465 | if (params->Op1 != r->Op1) | 423 | if (params->Op1 != r->Op1) |
466 | continue; | 424 | continue; |
467 | if (params->Op2 != r->Op2) | 425 | if (params->Op2 != r->Op2) |
468 | continue; | 426 | continue; |
469 | 427 | ||
470 | return r; | 428 | return r; |
471 | } | 429 | } |
472 | return NULL; | 430 | return NULL; |
473 | } | 431 | } |
474 | 432 | ||
475 | static int emulate_cp15(struct kvm_vcpu *vcpu, | 433 | static int emulate_cp15(struct kvm_vcpu *vcpu, |
476 | const struct coproc_params *params) | 434 | const struct coproc_params *params) |
477 | { | 435 | { |
478 | size_t num; | 436 | size_t num; |
479 | const struct coproc_reg *table, *r; | 437 | const struct coproc_reg *table, *r; |
480 | 438 | ||
481 | trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn, | 439 | trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn, |
482 | params->CRm, params->Op2, params->is_write); | 440 | params->CRm, params->Op2, params->is_write); |
483 | 441 | ||
484 | table = get_target_table(vcpu->arch.target, &num); | 442 | table = get_target_table(vcpu->arch.target, &num); |
485 | 443 | ||
486 | /* Search target-specific then generic table. */ | 444 | /* Search target-specific then generic table. */ |
487 | r = find_reg(params, table, num); | 445 | r = find_reg(params, table, num); |
488 | if (!r) | 446 | if (!r) |
489 | r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); | 447 | r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); |
490 | 448 | ||
491 | if (likely(r)) { | 449 | if (likely(r)) { |
492 | /* If we don't have an accessor, we should never get here! */ | 450 | /* If we don't have an accessor, we should never get here! */ |
493 | BUG_ON(!r->access); | 451 | BUG_ON(!r->access); |
494 | 452 | ||
495 | if (likely(r->access(vcpu, params, r))) { | 453 | if (likely(r->access(vcpu, params, r))) { |
496 | /* Skip instruction, since it was emulated */ | 454 | /* Skip instruction, since it was emulated */ |
497 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | 455 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
498 | return 1; | 456 | return 1; |
499 | } | 457 | } |
500 | /* If access function fails, it should complain. */ | 458 | /* If access function fails, it should complain. */ |
501 | } else { | 459 | } else { |
502 | kvm_err("Unsupported guest CP15 access at: %08lx\n", | 460 | kvm_err("Unsupported guest CP15 access at: %08lx\n", |
503 | *vcpu_pc(vcpu)); | 461 | *vcpu_pc(vcpu)); |
504 | print_cp_instr(params); | 462 | print_cp_instr(params); |
505 | } | 463 | } |
506 | kvm_inject_undefined(vcpu); | 464 | kvm_inject_undefined(vcpu); |
507 | return 1; | 465 | return 1; |
508 | } | 466 | } |
509 | 467 | ||
510 | /** | 468 | /** |
511 | * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access | 469 | * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access |
512 | * @vcpu: The VCPU pointer | 470 | * @vcpu: The VCPU pointer |
513 | * @run: The kvm_run struct | 471 | * @run: The kvm_run struct |
514 | */ | 472 | */ |
515 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) | 473 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) |
516 | { | 474 | { |
517 | struct coproc_params params; | 475 | struct coproc_params params; |
518 | 476 | ||
519 | params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; | 477 | params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; |
520 | params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; | 478 | params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; |
521 | params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); | 479 | params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); |
522 | params.is_64bit = true; | 480 | params.is_64bit = true; |
523 | 481 | ||
524 | params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf; | 482 | params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf; |
525 | params.Op2 = 0; | 483 | params.Op2 = 0; |
526 | params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; | 484 | params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; |
527 | params.CRm = 0; | 485 | params.CRm = 0; |
528 | 486 | ||
529 | return emulate_cp15(vcpu, ¶ms); | 487 | return emulate_cp15(vcpu, ¶ms); |
530 | } | 488 | } |
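
A worked decode of the extraction above: for a guest
"mcrr p15, 0, r0, r1, c2" (a 64-bit TTBR0 write), the HSR carries Op1 at
shift 16, Rt2 at shift 10, Rt1 at shift 5, CRm at shift 1, and the direction
in bit 0 (0 means write). That yields Op1 = 0, Rt2 = 1, Rt1 = 0,
is_write = true, and CRm = 2 -- which this handler stores in params.CRn, per
the 64-bit naming convention noted ahead of cp15_regs, so the lookup matches
the "CRm64( 2), Op1( 0)" TTBR0 entry.
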
531 | 489 | ||
532 | static void reset_coproc_regs(struct kvm_vcpu *vcpu, | 490 | static void reset_coproc_regs(struct kvm_vcpu *vcpu, |
533 | const struct coproc_reg *table, size_t num) | 491 | const struct coproc_reg *table, size_t num) |
534 | { | 492 | { |
535 | unsigned long i; | 493 | unsigned long i; |
536 | 494 | ||
537 | for (i = 0; i < num; i++) | 495 | for (i = 0; i < num; i++) |
538 | if (table[i].reset) | 496 | if (table[i].reset) |
539 | table[i].reset(vcpu, &table[i]); | 497 | table[i].reset(vcpu, &table[i]); |
540 | } | 498 | } |
541 | 499 | ||
542 | /** | 500 | /** |
543 | * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access | 501 | * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access |
544 | * @vcpu: The VCPU pointer | 502 | * @vcpu: The VCPU pointer |
545 | * @run: The kvm_run struct | 503 | * @run: The kvm_run struct |
546 | */ | 504 | */ |
547 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | 505 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) |
548 | { | 506 | { |
549 | struct coproc_params params; | 507 | struct coproc_params params; |
550 | 508 | ||
551 | params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; | 509 | params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; |
552 | params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; | 510 | params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; |
553 | params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); | 511 | params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); |
554 | params.is_64bit = false; | 512 | params.is_64bit = false; |
555 | 513 | ||
556 | params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; | 514 | params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; |
557 | params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7; | 515 | params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7; |
558 | params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7; | 516 | params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7; |
559 | params.Rt2 = 0; | 517 | params.Rt2 = 0; |
560 | 518 | ||
561 | return emulate_cp15(vcpu, ¶ms); | 519 | return emulate_cp15(vcpu, ¶ms); |
562 | } | 520 | } |
563 | 521 | ||
564 | /****************************************************************************** | 522 | /****************************************************************************** |
565 | * Userspace API | 523 | * Userspace API |
566 | *****************************************************************************/ | 524 | *****************************************************************************/ |
567 | 525 | ||
568 | static bool index_to_params(u64 id, struct coproc_params *params) | 526 | static bool index_to_params(u64 id, struct coproc_params *params) |
569 | { | 527 | { |
570 | switch (id & KVM_REG_SIZE_MASK) { | 528 | switch (id & KVM_REG_SIZE_MASK) { |
571 | case KVM_REG_SIZE_U32: | 529 | case KVM_REG_SIZE_U32: |
572 | /* If any unused index bits are set, the id is not valid. */ | 530 | /* If any unused index bits are set, the id is not valid. */ |
573 | if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | 531 | if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK |
574 | | KVM_REG_ARM_COPROC_MASK | 532 | | KVM_REG_ARM_COPROC_MASK |
575 | | KVM_REG_ARM_32_CRN_MASK | 533 | | KVM_REG_ARM_32_CRN_MASK |
576 | | KVM_REG_ARM_CRM_MASK | 534 | | KVM_REG_ARM_CRM_MASK |
577 | | KVM_REG_ARM_OPC1_MASK | 535 | | KVM_REG_ARM_OPC1_MASK |
578 | | KVM_REG_ARM_32_OPC2_MASK)) | 536 | | KVM_REG_ARM_32_OPC2_MASK)) |
579 | return false; | 537 | return false; |
580 | 538 | ||
581 | params->is_64bit = false; | 539 | params->is_64bit = false; |
582 | params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK) | 540 | params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK) |
583 | >> KVM_REG_ARM_32_CRN_SHIFT); | 541 | >> KVM_REG_ARM_32_CRN_SHIFT); |
584 | params->CRm = ((id & KVM_REG_ARM_CRM_MASK) | 542 | params->CRm = ((id & KVM_REG_ARM_CRM_MASK) |
585 | >> KVM_REG_ARM_CRM_SHIFT); | 543 | >> KVM_REG_ARM_CRM_SHIFT); |
586 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) | 544 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) |
587 | >> KVM_REG_ARM_OPC1_SHIFT); | 545 | >> KVM_REG_ARM_OPC1_SHIFT); |
588 | params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK) | 546 | params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK) |
589 | >> KVM_REG_ARM_32_OPC2_SHIFT); | 547 | >> KVM_REG_ARM_32_OPC2_SHIFT); |
590 | return true; | 548 | return true; |
591 | case KVM_REG_SIZE_U64: | 549 | case KVM_REG_SIZE_U64: |
592 | /* If any unused index bits are set, the id is not valid. */ | 550 | /* If any unused index bits are set, the id is not valid. */ |
593 | if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | 551 | if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK |
594 | | KVM_REG_ARM_COPROC_MASK | 552 | | KVM_REG_ARM_COPROC_MASK |
595 | | KVM_REG_ARM_CRM_MASK | 553 | | KVM_REG_ARM_CRM_MASK |
596 | | KVM_REG_ARM_OPC1_MASK)) | 554 | | KVM_REG_ARM_OPC1_MASK)) |
597 | return false; | 555 | return false; |
598 | params->is_64bit = true; | 556 | params->is_64bit = true; |
599 | /* CRm to CRn: see cp15_to_index for details */ | 557 | /* CRm to CRn: see cp15_to_index for details */ |
600 | params->CRn = ((id & KVM_REG_ARM_CRM_MASK) | 558 | params->CRn = ((id & KVM_REG_ARM_CRM_MASK) |
601 | >> KVM_REG_ARM_CRM_SHIFT); | 559 | >> KVM_REG_ARM_CRM_SHIFT); |
602 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) | 560 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) |
603 | >> KVM_REG_ARM_OPC1_SHIFT); | 561 | >> KVM_REG_ARM_OPC1_SHIFT); |
604 | params->Op2 = 0; | 562 | params->Op2 = 0; |
605 | params->CRm = 0; | 563 | params->CRm = 0; |
606 | return true; | 564 | return true; |
607 | default: | 565 | default: |
608 | return false; | 566 | return false; |
609 | } | 567 | } |
610 | } | 568 | } |
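
Going the other way, userspace builds these ids from the UAPI masks in
asm/kvm.h before calling KVM_GET_ONE_REG/KVM_SET_ONE_REG. A sketch of
composing the id for a 32-bit cp15 register (crn/crm/op1/op2 are
illustrative variables):

    u64 id = KVM_REG_ARM | KVM_REG_SIZE_U32
           | (15ULL << KVM_REG_ARM_COPROC_SHIFT)   /* coprocessor 15 */
           | ((u64)crn << KVM_REG_ARM_32_CRN_SHIFT)
           | ((u64)crm << KVM_REG_ARM_CRM_SHIFT)
           | ((u64)op1 << KVM_REG_ARM_OPC1_SHIFT)
           | ((u64)op2 << KVM_REG_ARM_32_OPC2_SHIFT);
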
611 | 569 | ||
612 | /* Decode an index value, and find the cp15 coproc_reg entry. */ | 570 | /* Decode an index value, and find the cp15 coproc_reg entry. */ |
613 | static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu, | 571 | static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu, |
614 | u64 id) | 572 | u64 id) |
615 | { | 573 | { |
616 | size_t num; | 574 | size_t num; |
617 | const struct coproc_reg *table, *r; | 575 | const struct coproc_reg *table, *r; |
618 | struct coproc_params params; | 576 | struct coproc_params params; |
619 | 577 | ||
620 | /* We only do cp15 for now. */ | 578 | /* We only do cp15 for now. */ |
621 | if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15) | 579 | if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15) |
622 | return NULL; | 580 | return NULL; |
623 | 581 | ||
624 | if (!index_to_params(id, ¶ms)) | 582 | if (!index_to_params(id, ¶ms)) |
625 | return NULL; | 583 | return NULL; |
626 | 584 | ||
627 | table = get_target_table(vcpu->arch.target, &num); | 585 | table = get_target_table(vcpu->arch.target, &num); |
628 | r = find_reg(¶ms, table, num); | 586 | r = find_reg(¶ms, table, num); |
629 | if (!r) | 587 | if (!r) |
630 | r = find_reg(¶ms, cp15_regs, ARRAY_SIZE(cp15_regs)); | 588 | r = find_reg(¶ms, cp15_regs, ARRAY_SIZE(cp15_regs)); |
631 | 589 | ||
632 | /* Not saved in the cp15 array? */ | 590 | /* Not saved in the cp15 array? */ |
633 | if (r && !r->reg) | 591 | if (r && !r->reg) |
634 | r = NULL; | 592 | r = NULL; |
635 | 593 | ||
636 | return r; | 594 | return r; |
637 | } | 595 | } |
638 | 596 | ||
639 | /* | 597 | /* |
640 | * These are the invariant cp15 registers: we let the guest see the host | 598 | * These are the invariant cp15 registers: we let the guest see the host |
641 | * versions of these, so they're part of the guest state. | 599 | * versions of these, so they're part of the guest state. |
642 | * | 600 | * |
643 | * A future CPU may provide a mechanism to present different values to | 601 | * A future CPU may provide a mechanism to present different values to |
644 | * the guest, or a future kvm may trap them. | 602 | * the guest, or a future kvm may trap them. |
645 | */ | 603 | */ |
646 | /* Unfortunately, there's no register-argument for mrc, so generate. */ | 604 | /* Unfortunately, there's no register-argument for mrc, so generate. */ |
647 | #define FUNCTION_FOR32(crn, crm, op1, op2, name) \ | 605 | #define FUNCTION_FOR32(crn, crm, op1, op2, name) \ |
648 | static void get_##name(struct kvm_vcpu *v, \ | 606 | static void get_##name(struct kvm_vcpu *v, \ |
649 | const struct coproc_reg *r) \ | 607 | const struct coproc_reg *r) \ |
650 | { \ | 608 | { \ |
651 | u32 val; \ | 609 | u32 val; \ |
652 | \ | 610 | \ |
653 | asm volatile("mrc p15, " __stringify(op1) \ | 611 | asm volatile("mrc p15, " __stringify(op1) \ |
654 | ", %0, c" __stringify(crn) \ | 612 | ", %0, c" __stringify(crn) \ |
655 | ", c" __stringify(crm) \ | 613 | ", c" __stringify(crm) \ |
656 | ", " __stringify(op2) "\n" : "=r" (val)); \ | 614 | ", " __stringify(op2) "\n" : "=r" (val)); \ |
657 | ((struct coproc_reg *)r)->val = val; \ | 615 | ((struct coproc_reg *)r)->val = val; \ |
658 | } | 616 | } |
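
For instance, the first invocation below, FUNCTION_FOR32(0, 0, 0, 0, MIDR),
expands to a helper that reads the host MIDR encoding directly:

    /* Expansion of FUNCTION_FOR32(0, 0, 0, 0, MIDR) */
    static void get_MIDR(struct kvm_vcpu *v, const struct coproc_reg *r)
    {
            u32 val;

            asm volatile("mrc p15, 0, %0, c0, c0, 0\n" : "=r" (val));
            ((struct coproc_reg *)r)->val = val;
    }
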
659 | 617 | ||
660 | FUNCTION_FOR32(0, 0, 0, 0, MIDR) | 618 | FUNCTION_FOR32(0, 0, 0, 0, MIDR) |
661 | FUNCTION_FOR32(0, 0, 0, 1, CTR) | 619 | FUNCTION_FOR32(0, 0, 0, 1, CTR) |
662 | FUNCTION_FOR32(0, 0, 0, 2, TCMTR) | 620 | FUNCTION_FOR32(0, 0, 0, 2, TCMTR) |
663 | FUNCTION_FOR32(0, 0, 0, 3, TLBTR) | 621 | FUNCTION_FOR32(0, 0, 0, 3, TLBTR) |
664 | FUNCTION_FOR32(0, 0, 0, 6, REVIDR) | 622 | FUNCTION_FOR32(0, 0, 0, 6, REVIDR) |
665 | FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0) | 623 | FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0) |
666 | FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1) | 624 | FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1) |
667 | FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0) | 625 | FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0) |
668 | FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0) | 626 | FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0) |
669 | FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0) | 627 | FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0) |
670 | FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1) | 628 | FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1) |
671 | FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2) | 629 | FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2) |
672 | FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3) | 630 | FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3) |
673 | FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0) | 631 | FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0) |
674 | FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1) | 632 | FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1) |
675 | FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2) | 633 | FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2) |
676 | FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3) | 634 | FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3) |
677 | FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4) | 635 | FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4) |
678 | FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5) | 636 | FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5) |
679 | FUNCTION_FOR32(0, 0, 1, 1, CLIDR) | 637 | FUNCTION_FOR32(0, 0, 1, 1, CLIDR) |
680 | FUNCTION_FOR32(0, 0, 1, 7, AIDR) | 638 | FUNCTION_FOR32(0, 0, 1, 7, AIDR) |
681 | 639 | ||
682 | /* ->val is filled in by kvm_invariant_coproc_table_init() */ | 640 | /* ->val is filled in by kvm_invariant_coproc_table_init() */ |
683 | static struct coproc_reg invariant_cp15[] = { | 641 | static struct coproc_reg invariant_cp15[] = { |
684 | { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR }, | 642 | { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR }, |
685 | { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR }, | 643 | { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR }, |
686 | { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR }, | 644 | { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR }, |
687 | { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR }, | 645 | { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR }, |
688 | { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR }, | 646 | { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR }, |
689 | 647 | ||
690 | { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 }, | 648 | { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 }, |
691 | { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 }, | 649 | { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 }, |
692 | { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 }, | 650 | { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 }, |
693 | { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 }, | 651 | { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 }, |
694 | { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 }, | 652 | { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 }, |
695 | { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 }, | 653 | { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 }, |
696 | { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 }, | 654 | { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 }, |
697 | { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 }, | 655 | { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 }, |
698 | 656 | ||
699 | { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 }, | 657 | { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 }, |
700 | { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 }, | 658 | { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 }, |
701 | { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 }, | 659 | { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 }, |
702 | { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 }, | 660 | { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 }, |
703 | { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 }, | 661 | { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 }, |
704 | { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 }, | 662 | { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 }, |
705 | 663 | ||
706 | { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR }, | 664 | { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR }, |
707 | { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, | 665 | { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, |
708 | }; | 666 | }; |
709 | 667 | ||
710 | /* | 668 | /* |
711 | * Reads a register value from a userspace address to a kernel | 669 | * Reads a register value from a userspace address to a kernel |
712 | * variable. Make sure that register size matches sizeof(*val). | 670 | * variable. Make sure that register size matches sizeof(*val). |
713 | */ | 671 | */ |
714 | static int reg_from_user(void *val, const void __user *uaddr, u64 id) | 672 | static int reg_from_user(void *val, const void __user *uaddr, u64 id) |
715 | { | 673 | { |
716 | if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) | 674 | if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) |
717 | return -EFAULT; | 675 | return -EFAULT; |
718 | return 0; | 676 | return 0; |
719 | } | 677 | } |
720 | 678 | ||
721 | /* | 679 | /* |
722 | * Writes a register value to a userspace address from a kernel variable. | 680 | * Writes a register value to a userspace address from a kernel variable. |
723 | * Make sure that register size matches sizeof(*val). | 681 | * Make sure that register size matches sizeof(*val). |
724 | */ | 682 | */ |
725 | static int reg_to_user(void __user *uaddr, const void *val, u64 id) | 683 | static int reg_to_user(void __user *uaddr, const void *val, u64 id) |
726 | { | 684 | { |
727 | if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) | 685 | if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) |
728 | return -EFAULT; | 686 | return -EFAULT; |
729 | return 0; | 687 | return 0; |
730 | } | 688 | } |
731 | 689 | ||
732 | static int get_invariant_cp15(u64 id, void __user *uaddr) | 690 | static int get_invariant_cp15(u64 id, void __user *uaddr) |
733 | { | 691 | { |
734 | struct coproc_params params; | 692 | struct coproc_params params; |
735 | const struct coproc_reg *r; | 693 | const struct coproc_reg *r; |
736 | int ret; | 694 | int ret; |
737 | 695 | ||
738 | if (!index_to_params(id, ¶ms)) | 696 | if (!index_to_params(id, ¶ms)) |
739 | return -ENOENT; | 697 | return -ENOENT; |
740 | 698 | ||
741 | r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); | 699 | r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); |
742 | if (!r) | 700 | if (!r) |
743 | return -ENOENT; | 701 | return -ENOENT; |
744 | 702 | ||
745 | ret = -ENOENT; | 703 | ret = -ENOENT; |
746 | if (KVM_REG_SIZE(id) == 4) { | 704 | if (KVM_REG_SIZE(id) == 4) { |
747 | u32 val = r->val; | 705 | u32 val = r->val; |
748 | 706 | ||
749 | ret = reg_to_user(uaddr, &val, id); | 707 | ret = reg_to_user(uaddr, &val, id); |
750 | } else if (KVM_REG_SIZE(id) == 8) { | 708 | } else if (KVM_REG_SIZE(id) == 8) { |
751 | ret = reg_to_user(uaddr, &r->val, id); | 709 | ret = reg_to_user(uaddr, &r->val, id); |
752 | } | 710 | } |
753 | return ret; | 711 | return ret; |
754 | } | 712 | } |
755 | 713 | ||
756 | static int set_invariant_cp15(u64 id, void __user *uaddr) | 714 | static int set_invariant_cp15(u64 id, void __user *uaddr) |
757 | { | 715 | { |
758 | struct coproc_params params; | 716 | struct coproc_params params; |
759 | const struct coproc_reg *r; | 717 | const struct coproc_reg *r; |
760 | int err; | 718 | int err; |
761 | u64 val; | 719 | u64 val; |
762 | 720 | ||
763 | if (!index_to_params(id, ¶ms)) | 721 | if (!index_to_params(id, ¶ms)) |
764 | return -ENOENT; | 722 | return -ENOENT; |
765 | r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); | 723 | r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); |
766 | if (!r) | 724 | if (!r) |
767 | return -ENOENT; | 725 | return -ENOENT; |
768 | 726 | ||
769 | err = -ENOENT; | 727 | err = -ENOENT; |
770 | if (KVM_REG_SIZE(id) == 4) { | 728 | if (KVM_REG_SIZE(id) == 4) { |
771 | u32 val32; | 729 | u32 val32; |
772 | 730 | ||
773 | err = reg_from_user(&val32, uaddr, id); | 731 | err = reg_from_user(&val32, uaddr, id); |
774 | if (!err) | 732 | if (!err) |
775 | val = val32; | 733 | val = val32; |
776 | } else if (KVM_REG_SIZE(id) == 8) { | 734 | } else if (KVM_REG_SIZE(id) == 8) { |
777 | err = reg_from_user(&val, uaddr, id); | 735 | err = reg_from_user(&val, uaddr, id); |
778 | } | 736 | } |
779 | if (err) | 737 | if (err) |
780 | return err; | 738 | return err; |
781 | 739 | ||
782 | /* This is what we mean by invariant: you can't change it. */ | 740 | /* This is what we mean by invariant: you can't change it. */ |
783 | if (r->val != val) | 741 | if (r->val != val) |
784 | return -EINVAL; | 742 | return -EINVAL; |
785 | 743 | ||
786 | return 0; | 744 | return 0; |
787 | } | 745 | } |
788 | 746 | ||
789 | static bool is_valid_cache(u32 val) | 747 | static bool is_valid_cache(u32 val) |
790 | { | 748 | { |
791 | u32 level, ctype; | 749 | u32 level, ctype; |
792 | 750 | ||
793 | if (val >= CSSELR_MAX) | 751 | if (val >= CSSELR_MAX) |
794 | return false; | 752 | return false; |
795 | 753 | ||
796 | /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ | 754 | /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ |
797 | level = (val >> 1); | 755 | level = (val >> 1); |
798 | ctype = (cache_levels >> (level * 3)) & 7; | 756 | ctype = (cache_levels >> (level * 3)) & 7; |
799 | 757 | ||
800 | switch (ctype) { | 758 | switch (ctype) { |
801 | case 0: /* No cache */ | 759 | case 0: /* No cache */ |
802 | return false; | 760 | return false; |
803 | case 1: /* Instruction cache only */ | 761 | case 1: /* Instruction cache only */ |
804 | return (val & 1); | 762 | return (val & 1); |
805 | case 2: /* Data cache only */ | 763 | case 2: /* Data cache only */ |
806 | case 4: /* Unified cache */ | 764 | case 4: /* Unified cache */ |
807 | return !(val & 1); | 765 | return !(val & 1); |
808 | case 3: /* Separate instruction and data caches */ | 766 | case 3: /* Separate instruction and data caches */ |
809 | return true; | 767 | return true; |
810 | default: /* Reserved: we can't know instruction or data. */ | 768 | default: /* Reserved: we can't know instruction or data. */ |
811 | return false; | 769 | return false; |
812 | } | 770 | } |
813 | } | 771 | } |
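
A worked example of the decoding above: csselr = 3 selects level 3 >> 1 = 1
(the L2 cache) with the bottom bit set, i.e. its instruction side. The
level-1 cache type is then (cache_levels >> 3) & 7:

    /* csselr = 3 -> level 1 (L2), InD = 1 (instruction side)   */
    /*   ctype 1 (I-only) or 3 (separate I/D) -> valid          */
    /*   ctype 2 (D-only) or 4 (unified) -> invalid (no I side) */
    /*   ctype 0 (no cache) or reserved  -> invalid             */
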
814 | 772 | ||
815 | /* Which cache CCSIDR represents depends on CSSELR value. */ | 773 | /* Which cache CCSIDR represents depends on CSSELR value. */ |
816 | static u32 get_ccsidr(u32 csselr) | 774 | static u32 get_ccsidr(u32 csselr) |
817 | { | 775 | { |
818 | u32 ccsidr; | 776 | u32 ccsidr; |
819 | 777 | ||
820 | /* Make sure no one else changes CSSELR during this! */ | 778 | /* Make sure no one else changes CSSELR during this! */ |
821 | local_irq_disable(); | 779 | local_irq_disable(); |
822 | /* Put value into CSSELR */ | 780 | /* Put value into CSSELR */ |
823 | asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr)); | 781 | asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr)); |
824 | isb(); | 782 | isb(); |
825 | /* Read result out of CCSIDR */ | 783 | /* Read result out of CCSIDR */ |
826 | asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr)); | 784 | asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr)); |
827 | local_irq_enable(); | 785 | local_irq_enable(); |
828 | 786 | ||
829 | return ccsidr; | 787 | return ccsidr; |
830 | } | 788 | } |
831 | 789 | ||
832 | static int demux_c15_get(u64 id, void __user *uaddr) | 790 | static int demux_c15_get(u64 id, void __user *uaddr) |
833 | { | 791 | { |
834 | u32 val; | 792 | u32 val; |
835 | u32 __user *uval = uaddr; | 793 | u32 __user *uval = uaddr; |
836 | 794 | ||
837 | /* Fail if we have unknown bits set. */ | 795 | /* Fail if we have unknown bits set. */ |
838 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | 796 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK |
839 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | 797 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) |
840 | return -ENOENT; | 798 | return -ENOENT; |
841 | 799 | ||
842 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { | 800 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { |
843 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: | 801 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: |
844 | if (KVM_REG_SIZE(id) != 4) | 802 | if (KVM_REG_SIZE(id) != 4) |
845 | return -ENOENT; | 803 | return -ENOENT; |
846 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) | 804 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) |
847 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; | 805 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; |
848 | if (!is_valid_cache(val)) | 806 | if (!is_valid_cache(val)) |
849 | return -ENOENT; | 807 | return -ENOENT; |
850 | 808 | ||
851 | return put_user(get_ccsidr(val), uval); | 809 | return put_user(get_ccsidr(val), uval); |
852 | default: | 810 | default: |
853 | return -ENOENT; | 811 | return -ENOENT; |
854 | } | 812 | } |
855 | } | 813 | } |
856 | 814 | ||
857 | static int demux_c15_set(u64 id, void __user *uaddr) | 815 | static int demux_c15_set(u64 id, void __user *uaddr) |
858 | { | 816 | { |
859 | u32 val, newval; | 817 | u32 val, newval; |
860 | u32 __user *uval = uaddr; | 818 | u32 __user *uval = uaddr; |
861 | 819 | ||
862 | /* Fail if we have unknown bits set. */ | 820 | /* Fail if we have unknown bits set. */ |
863 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | 821 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK |
864 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | 822 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) |
865 | return -ENOENT; | 823 | return -ENOENT; |
866 | 824 | ||
867 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { | 825 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { |
868 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: | 826 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: |
869 | if (KVM_REG_SIZE(id) != 4) | 827 | if (KVM_REG_SIZE(id) != 4) |
870 | return -ENOENT; | 828 | return -ENOENT; |
871 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) | 829 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) |
872 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; | 830 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; |
873 | if (!is_valid_cache(val)) | 831 | if (!is_valid_cache(val)) |
874 | return -ENOENT; | 832 | return -ENOENT; |
875 | 833 | ||
876 | if (get_user(newval, uval)) | 834 | if (get_user(newval, uval)) |
877 | return -EFAULT; | 835 | return -EFAULT; |
878 | 836 | ||
879 | /* This is also invariant: you can't change it. */ | 837 | /* This is also invariant: you can't change it. */ |
880 | if (newval != get_ccsidr(val)) | 838 | if (newval != get_ccsidr(val)) |
881 | return -EINVAL; | 839 | return -EINVAL; |
882 | return 0; | 840 | return 0; |
883 | default: | 841 | default: |
884 | return -ENOENT; | 842 | return -ENOENT; |
885 | } | 843 | } |
886 | } | 844 | } |
887 | 845 | ||
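For context, the demux ids handled above are what userspace passes to the KVM_GET_ONE_REG ioctl. A minimal userspace sketch (editorial, not from the commit; it assumes a vcpu fd already obtained via the usual KVM_CREATE_VM/KVM_CREATE_VCPU sequence):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the CCSIDR selected by @csselr (0 = L1 data/unified cache).
 * Returns 0 on success; the ioctl fails with ENOENT for an invalid
 * selector, exactly as demux_c15_get() above decides. */
static int read_ccsidr(int vcpu_fd, uint64_t csselr, uint32_t *out)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX
		    | KVM_REG_ARM_DEMUX_ID_CCSIDR
		    | (csselr << KVM_REG_ARM_DEMUX_VAL_SHIFT),
		.addr = (uintptr_t)out,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}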
888 | #ifdef CONFIG_VFPv3 | 846 | #ifdef CONFIG_VFPv3 |
889 | static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC, | 847 | static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC, |
890 | KVM_REG_ARM_VFP_FPSCR, | 848 | KVM_REG_ARM_VFP_FPSCR, |
891 | KVM_REG_ARM_VFP_FPINST, | 849 | KVM_REG_ARM_VFP_FPINST, |
892 | KVM_REG_ARM_VFP_FPINST2, | 850 | KVM_REG_ARM_VFP_FPINST2, |
893 | KVM_REG_ARM_VFP_MVFR0, | 851 | KVM_REG_ARM_VFP_MVFR0, |
894 | KVM_REG_ARM_VFP_MVFR1, | 852 | KVM_REG_ARM_VFP_MVFR1, |
895 | KVM_REG_ARM_VFP_FPSID }; | 853 | KVM_REG_ARM_VFP_FPSID }; |
896 | 854 | ||
897 | static unsigned int num_fp_regs(void) | 855 | static unsigned int num_fp_regs(void) |
898 | { | 856 | { |
899 | if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2) | 857 | if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2) |
900 | return 32; | 858 | return 32; |
901 | else | 859 | else |
902 | return 16; | 860 | return 16; |
903 | } | 861 | } |
904 | 862 | ||
905 | static unsigned int num_vfp_regs(void) | 863 | static unsigned int num_vfp_regs(void) |
906 | { | 864 | { |
907 | /* Normal FP regs + control regs. */ | 865 | /* Normal FP regs + control regs. */ |
908 | return num_fp_regs() + ARRAY_SIZE(vfp_sysregs); | 866 | return num_fp_regs() + ARRAY_SIZE(vfp_sysregs); |
909 | } | 867 | } |
910 | 868 | ||
911 | static int copy_vfp_regids(u64 __user *uindices) | 869 | static int copy_vfp_regids(u64 __user *uindices) |
912 | { | 870 | { |
913 | unsigned int i; | 871 | unsigned int i; |
914 | const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP; | 872 | const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP; |
915 | const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP; | 873 | const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP; |
916 | 874 | ||
917 | for (i = 0; i < num_fp_regs(); i++) { | 875 | for (i = 0; i < num_fp_regs(); i++) { |
918 | if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i, | 876 | if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i, |
919 | uindices)) | 877 | uindices)) |
920 | return -EFAULT; | 878 | return -EFAULT; |
921 | uindices++; | 879 | uindices++; |
922 | } | 880 | } |
923 | 881 | ||
924 | for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) { | 882 | for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) { |
925 | if (put_user(u32reg | vfp_sysregs[i], uindices)) | 883 | if (put_user(u32reg | vfp_sysregs[i], uindices)) |
926 | return -EFAULT; | 884 | return -EFAULT; |
927 | uindices++; | 885 | uindices++; |
928 | } | 886 | } |
929 | 887 | ||
930 | return num_vfp_regs(); | 888 | return num_vfp_regs(); |
931 | } | 889 | } |
932 | 890 | ||
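The two flavours of index emitted above share the KVM_REG_ARM_VFP layout; an editorial sketch of the encodings, mirroring copy_vfp_regids():

#include <stdint.h>
#include <linux/kvm.h>

/* Double registers (d0..d15, or d0..d31 when MVFR0 reports the
 * 32-register Advanced SIMD bank) are 64 bits wide; d1 is simply
 * d0_id + 1, as the first loop above computes. */
static const uint64_t d0_id = KVM_REG_ARM | KVM_REG_SIZE_U64
			    | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_BASE_REG;

/* Control registers such as FPSCR are 32 bits wide: */
static const uint64_t fpscr_id = KVM_REG_ARM | KVM_REG_SIZE_U32
			       | KVM_REG_ARM_VFP | KVM_REG_ARM_VFP_FPSCR;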
933 | static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) | 891 | static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) |
934 | { | 892 | { |
935 | u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); | 893 | u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); |
936 | u32 val; | 894 | u32 val; |
937 | 895 | ||
938 | /* Fail if we have unknown bits set. */ | 896 | /* Fail if we have unknown bits set. */ |
939 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | 897 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK |
940 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | 898 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) |
941 | return -ENOENT; | 899 | return -ENOENT; |
942 | 900 | ||
943 | if (vfpid < num_fp_regs()) { | 901 | if (vfpid < num_fp_regs()) { |
944 | if (KVM_REG_SIZE(id) != 8) | 902 | if (KVM_REG_SIZE(id) != 8) |
945 | return -ENOENT; | 903 | return -ENOENT; |
946 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid], | 904 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid], |
947 | id); | 905 | id); |
948 | } | 906 | } |
949 | 907 | ||
950 | /* FP control registers are all 32 bit. */ | 908 | /* FP control registers are all 32 bit. */ |
951 | if (KVM_REG_SIZE(id) != 4) | 909 | if (KVM_REG_SIZE(id) != 4) |
952 | return -ENOENT; | 910 | return -ENOENT; |
953 | 911 | ||
954 | switch (vfpid) { | 912 | switch (vfpid) { |
955 | case KVM_REG_ARM_VFP_FPEXC: | 913 | case KVM_REG_ARM_VFP_FPEXC: |
956 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id); | 914 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id); |
957 | case KVM_REG_ARM_VFP_FPSCR: | 915 | case KVM_REG_ARM_VFP_FPSCR: |
958 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id); | 916 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id); |
959 | case KVM_REG_ARM_VFP_FPINST: | 917 | case KVM_REG_ARM_VFP_FPINST: |
960 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id); | 918 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id); |
961 | case KVM_REG_ARM_VFP_FPINST2: | 919 | case KVM_REG_ARM_VFP_FPINST2: |
962 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id); | 920 | return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id); |
963 | case KVM_REG_ARM_VFP_MVFR0: | 921 | case KVM_REG_ARM_VFP_MVFR0: |
964 | val = fmrx(MVFR0); | 922 | val = fmrx(MVFR0); |
965 | return reg_to_user(uaddr, &val, id); | 923 | return reg_to_user(uaddr, &val, id); |
966 | case KVM_REG_ARM_VFP_MVFR1: | 924 | case KVM_REG_ARM_VFP_MVFR1: |
967 | val = fmrx(MVFR1); | 925 | val = fmrx(MVFR1); |
968 | return reg_to_user(uaddr, &val, id); | 926 | return reg_to_user(uaddr, &val, id); |
969 | case KVM_REG_ARM_VFP_FPSID: | 927 | case KVM_REG_ARM_VFP_FPSID: |
970 | val = fmrx(FPSID); | 928 | val = fmrx(FPSID); |
971 | return reg_to_user(uaddr, &val, id); | 929 | return reg_to_user(uaddr, &val, id); |
972 | default: | 930 | default: |
973 | return -ENOENT; | 931 | return -ENOENT; |
974 | } | 932 | } |
975 | } | 933 | } |
976 | 934 | ||
977 | static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) | 935 | static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) |
978 | { | 936 | { |
979 | u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); | 937 | u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); |
980 | u32 val; | 938 | u32 val; |
981 | 939 | ||
982 | /* Fail if we have unknown bits set. */ | 940 | /* Fail if we have unknown bits set. */ |
983 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | 941 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK |
984 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | 942 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) |
985 | return -ENOENT; | 943 | return -ENOENT; |
986 | 944 | ||
987 | if (vfpid < num_fp_regs()) { | 945 | if (vfpid < num_fp_regs()) { |
988 | if (KVM_REG_SIZE(id) != 8) | 946 | if (KVM_REG_SIZE(id) != 8) |
989 | return -ENOENT; | 947 | return -ENOENT; |
990 | return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid], | 948 | return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid], |
991 | uaddr, id); | 949 | uaddr, id); |
992 | } | 950 | } |
993 | 951 | ||
994 | /* FP control registers are all 32 bit. */ | 952 | /* FP control registers are all 32 bit. */ |
995 | if (KVM_REG_SIZE(id) != 4) | 953 | if (KVM_REG_SIZE(id) != 4) |
996 | return -ENOENT; | 954 | return -ENOENT; |
997 | 955 | ||
998 | switch (vfpid) { | 956 | switch (vfpid) { |
999 | case KVM_REG_ARM_VFP_FPEXC: | 957 | case KVM_REG_ARM_VFP_FPEXC: |
1000 | return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id); | 958 | return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id); |
1001 | case KVM_REG_ARM_VFP_FPSCR: | 959 | case KVM_REG_ARM_VFP_FPSCR: |
1002 | return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id); | 960 | return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id); |
1003 | case KVM_REG_ARM_VFP_FPINST: | 961 | case KVM_REG_ARM_VFP_FPINST: |
1004 | return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id); | 962 | return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id); |
1005 | case KVM_REG_ARM_VFP_FPINST2: | 963 | case KVM_REG_ARM_VFP_FPINST2: |
1006 | return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id); | 964 | return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id); |
1007 | /* These are invariant. */ | 965 | /* These are invariant. */ |
1008 | case KVM_REG_ARM_VFP_MVFR0: | 966 | case KVM_REG_ARM_VFP_MVFR0: |
1009 | if (reg_from_user(&val, uaddr, id)) | 967 | if (reg_from_user(&val, uaddr, id)) |
1010 | return -EFAULT; | 968 | return -EFAULT; |
1011 | if (val != fmrx(MVFR0)) | 969 | if (val != fmrx(MVFR0)) |
1012 | return -EINVAL; | 970 | return -EINVAL; |
1013 | return 0; | 971 | return 0; |
1014 | case KVM_REG_ARM_VFP_MVFR1: | 972 | case KVM_REG_ARM_VFP_MVFR1: |
1015 | if (reg_from_user(&val, uaddr, id)) | 973 | if (reg_from_user(&val, uaddr, id)) |
1016 | return -EFAULT; | 974 | return -EFAULT; |
1017 | if (val != fmrx(MVFR1)) | 975 | if (val != fmrx(MVFR1)) |
1018 | return -EINVAL; | 976 | return -EINVAL; |
1019 | return 0; | 977 | return 0; |
1020 | case KVM_REG_ARM_VFP_FPSID: | 978 | case KVM_REG_ARM_VFP_FPSID: |
1021 | if (reg_from_user(&val, uaddr, id)) | 979 | if (reg_from_user(&val, uaddr, id)) |
1022 | return -EFAULT; | 980 | return -EFAULT; |
1023 | if (val != fmrx(FPSID)) | 981 | if (val != fmrx(FPSID)) |
1024 | return -EINVAL; | 982 | return -EINVAL; |
1025 | return 0; | 983 | return 0; |
1026 | default: | 984 | default: |
1027 | return -ENOENT; | 985 | return -ENOENT; |
1028 | } | 986 | } |
1029 | } | 987 | } |
1030 | #else /* !CONFIG_VFPv3 */ | 988 | #else /* !CONFIG_VFPv3 */ |
1031 | static unsigned int num_vfp_regs(void) | 989 | static unsigned int num_vfp_regs(void) |
1032 | { | 990 | { |
1033 | return 0; | 991 | return 0; |
1034 | } | 992 | } |
1035 | 993 | ||
1036 | static int copy_vfp_regids(u64 __user *uindices) | 994 | static int copy_vfp_regids(u64 __user *uindices) |
1037 | { | 995 | { |
1038 | return 0; | 996 | return 0; |
1039 | } | 997 | } |
1040 | 998 | ||
1041 | static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) | 999 | static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) |
1042 | { | 1000 | { |
1043 | return -ENOENT; | 1001 | return -ENOENT; |
1044 | } | 1002 | } |
1045 | 1003 | ||
1046 | static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) | 1004 | static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) |
1047 | { | 1005 | { |
1048 | return -ENOENT; | 1006 | return -ENOENT; |
1049 | } | 1007 | } |
1050 | #endif /* !CONFIG_VFPv3 */ | 1008 | #endif /* !CONFIG_VFPv3 */ |
1051 | 1009 | ||
1052 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | 1010 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
1053 | { | 1011 | { |
1054 | const struct coproc_reg *r; | 1012 | const struct coproc_reg *r; |
1055 | void __user *uaddr = (void __user *)(long)reg->addr; | 1013 | void __user *uaddr = (void __user *)(long)reg->addr; |
1056 | int ret; | 1014 | int ret; |
1057 | 1015 | ||
1058 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) | 1016 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) |
1059 | return demux_c15_get(reg->id, uaddr); | 1017 | return demux_c15_get(reg->id, uaddr); |
1060 | 1018 | ||
1061 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) | 1019 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) |
1062 | return vfp_get_reg(vcpu, reg->id, uaddr); | 1020 | return vfp_get_reg(vcpu, reg->id, uaddr); |
1063 | 1021 | ||
1064 | r = index_to_coproc_reg(vcpu, reg->id); | 1022 | r = index_to_coproc_reg(vcpu, reg->id); |
1065 | if (!r) | 1023 | if (!r) |
1066 | return get_invariant_cp15(reg->id, uaddr); | 1024 | return get_invariant_cp15(reg->id, uaddr); |
1067 | 1025 | ||
1068 | ret = -ENOENT; | 1026 | ret = -ENOENT; |
1069 | if (KVM_REG_SIZE(reg->id) == 8) { | 1027 | if (KVM_REG_SIZE(reg->id) == 8) { |
1070 | u64 val; | 1028 | u64 val; |
1071 | 1029 | ||
1072 | val = vcpu_cp15_reg64_get(vcpu, r); | 1030 | val = vcpu_cp15_reg64_get(vcpu, r); |
1073 | ret = reg_to_user(uaddr, &val, reg->id); | 1031 | ret = reg_to_user(uaddr, &val, reg->id); |
1074 | } else if (KVM_REG_SIZE(reg->id) == 4) { | 1032 | } else if (KVM_REG_SIZE(reg->id) == 4) { |
1075 | ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); | 1033 | ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); |
1076 | } | 1034 | } |
1077 | 1035 | ||
1078 | return ret; | 1036 | return ret; |
1079 | } | 1037 | } |
1080 | 1038 | ||
1081 | int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | 1039 | int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
1082 | { | 1040 | { |
1083 | const struct coproc_reg *r; | 1041 | const struct coproc_reg *r; |
1084 | void __user *uaddr = (void __user *)(long)reg->addr; | 1042 | void __user *uaddr = (void __user *)(long)reg->addr; |
1085 | int ret; | 1043 | int ret; |
1086 | 1044 | ||
1087 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) | 1045 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) |
1088 | return demux_c15_set(reg->id, uaddr); | 1046 | return demux_c15_set(reg->id, uaddr); |
1089 | 1047 | ||
1090 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) | 1048 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) |
1091 | return vfp_set_reg(vcpu, reg->id, uaddr); | 1049 | return vfp_set_reg(vcpu, reg->id, uaddr); |
1092 | 1050 | ||
1093 | r = index_to_coproc_reg(vcpu, reg->id); | 1051 | r = index_to_coproc_reg(vcpu, reg->id); |
1094 | if (!r) | 1052 | if (!r) |
1095 | return set_invariant_cp15(reg->id, uaddr); | 1053 | return set_invariant_cp15(reg->id, uaddr); |
1096 | 1054 | ||
1097 | ret = -ENOENT; | 1055 | ret = -ENOENT; |
1098 | if (KVM_REG_SIZE(reg->id) == 8) { | 1056 | if (KVM_REG_SIZE(reg->id) == 8) { |
1099 | u64 val; | 1057 | u64 val; |
1100 | 1058 | ||
1101 | ret = reg_from_user(&val, uaddr, reg->id); | 1059 | ret = reg_from_user(&val, uaddr, reg->id); |
1102 | if (!ret) | 1060 | if (!ret) |
1103 | vcpu_cp15_reg64_set(vcpu, r, val); | 1061 | vcpu_cp15_reg64_set(vcpu, r, val); |
1104 | } else if (KVM_REG_SIZE(reg->id) == 4) { | 1062 | } else if (KVM_REG_SIZE(reg->id) == 4) { |
1105 | ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); | 1063 | ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); |
1106 | } | 1064 | } |
1107 | 1065 | ||
1108 | return ret; | 1066 | return ret; |
1109 | } | 1067 | } |
1110 | 1068 | ||
1111 | static unsigned int num_demux_regs(void) | 1069 | static unsigned int num_demux_regs(void) |
1112 | { | 1070 | { |
1113 | unsigned int i, count = 0; | 1071 | unsigned int i, count = 0; |
1114 | 1072 | ||
1115 | for (i = 0; i < CSSELR_MAX; i++) | 1073 | for (i = 0; i < CSSELR_MAX; i++) |
1116 | if (is_valid_cache(i)) | 1074 | if (is_valid_cache(i)) |
1117 | count++; | 1075 | count++; |
1118 | 1076 | ||
1119 | return count; | 1077 | return count; |
1120 | } | 1078 | } |
1121 | 1079 | ||
1122 | static int write_demux_regids(u64 __user *uindices) | 1080 | static int write_demux_regids(u64 __user *uindices) |
1123 | { | 1081 | { |
1124 | u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; | 1082 | u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; |
1125 | unsigned int i; | 1083 | unsigned int i; |
1126 | 1084 | ||
1127 | val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; | 1085 | val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; |
1128 | for (i = 0; i < CSSELR_MAX; i++) { | 1086 | for (i = 0; i < CSSELR_MAX; i++) { |
1129 | if (!is_valid_cache(i)) | 1087 | if (!is_valid_cache(i)) |
1130 | continue; | 1088 | continue; |
1131 | if (put_user(val | i, uindices)) | 1089 | if (put_user(val | i, uindices)) |
1132 | return -EFAULT; | 1090 | return -EFAULT; |
1133 | uindices++; | 1091 | uindices++; |
1134 | } | 1092 | } |
1135 | return 0; | 1093 | return 0; |
1136 | } | 1094 | } |
1137 | 1095 | ||
1138 | static u64 cp15_to_index(const struct coproc_reg *reg) | 1096 | static u64 cp15_to_index(const struct coproc_reg *reg) |
1139 | { | 1097 | { |
1140 | u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); | 1098 | u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); |
1141 | if (reg->is_64) { | 1099 | if (reg->is_64) { |
1142 | val |= KVM_REG_SIZE_U64; | 1100 | val |= KVM_REG_SIZE_U64; |
1143 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); | 1101 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); |
1144 | /* | 1102 | /* |
1145 | * CRn always denotes the primary coproc. reg. nr. for the | 1103 | * CRn always denotes the primary coproc. reg. nr. for the |
1146 | * in-kernel representation, but the user space API uses the | 1104 | * in-kernel representation, but the user space API uses the |
1147 | * CRm for the encoding, because it is modelled after the | 1105 | * CRm for the encoding, because it is modelled after the |
1148 | * MRRC/MCRR instructions: see the ARM ARM rev. c page | 1106 | * MRRC/MCRR instructions: see the ARM ARM rev. c page |
1149 | * B3-1445 | 1107 | * B3-1445 |
1150 | */ | 1108 | */ |
1151 | val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT); | 1109 | val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT); |
1152 | } else { | 1110 | } else { |
1153 | val |= KVM_REG_SIZE_U32; | 1111 | val |= KVM_REG_SIZE_U32; |
1154 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); | 1112 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); |
1155 | val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT); | 1113 | val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT); |
1156 | val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); | 1114 | val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); |
1157 | val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT); | 1115 | val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT); |
1158 | } | 1116 | } |
1159 | return val; | 1117 | return val; |
1160 | } | 1118 | } |
1161 | 1119 | ||
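A worked example may help here (editorial; the SCTLR coordinates come from the a15_regs/a7_regs tables later in this diff): encoding the 32-bit SCTLR, with CRn=1, CRm=0, Op1=0, Op2=0, through the same fields cp15_to_index() fills in.

#include <stdio.h>
#include <stdint.h>
#include <linux/kvm.h>

int main(void)
{
	/* 32-bit SCTLR: CRn = 1, CRm = 0, Op1 = 0, Op2 = 0. */
	uint64_t id = KVM_REG_ARM | KVM_REG_SIZE_U32
		    | (15 << KVM_REG_ARM_COPROC_SHIFT)
		    | (0 << KVM_REG_ARM_OPC1_SHIFT)
		    | (0 << KVM_REG_ARM_32_OPC2_SHIFT)
		    | (0 << KVM_REG_ARM_CRM_SHIFT)
		    | (1 << KVM_REG_ARM_32_CRN_SHIFT);

	/* For a 64-bit register, cp15_to_index() instead folds CRn into
	 * the CRM field, matching the MRRC/MCRR encoding noted above. */
	printf("SCTLR index: %#llx\n", (unsigned long long)id);
	return 0;
}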
1162 | static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind) | 1120 | static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind) |
1163 | { | 1121 | { |
1164 | if (!*uind) | 1122 | if (!*uind) |
1165 | return true; | 1123 | return true; |
1166 | 1124 | ||
1167 | if (put_user(cp15_to_index(reg), *uind)) | 1125 | if (put_user(cp15_to_index(reg), *uind)) |
1168 | return false; | 1126 | return false; |
1169 | 1127 | ||
1170 | (*uind)++; | 1128 | (*uind)++; |
1171 | return true; | 1129 | return true; |
1172 | } | 1130 | } |
1173 | 1131 | ||
1174 | /* Assumed ordered tables, see kvm_coproc_table_init. */ | 1132 | /* Assumed ordered tables, see kvm_coproc_table_init. */ |
1175 | static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind) | 1133 | static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind) |
1176 | { | 1134 | { |
1177 | const struct coproc_reg *i1, *i2, *end1, *end2; | 1135 | const struct coproc_reg *i1, *i2, *end1, *end2; |
1178 | unsigned int total = 0; | 1136 | unsigned int total = 0; |
1179 | size_t num; | 1137 | size_t num; |
1180 | 1138 | ||
1181 | /* We check for duplicates here, to allow arch-specific overrides. */ | 1139 | /* We check for duplicates here, to allow arch-specific overrides. */ |
1182 | i1 = get_target_table(vcpu->arch.target, &num); | 1140 | i1 = get_target_table(vcpu->arch.target, &num); |
1183 | end1 = i1 + num; | 1141 | end1 = i1 + num; |
1184 | i2 = cp15_regs; | 1142 | i2 = cp15_regs; |
1185 | end2 = cp15_regs + ARRAY_SIZE(cp15_regs); | 1143 | end2 = cp15_regs + ARRAY_SIZE(cp15_regs); |
1186 | 1144 | ||
1187 | BUG_ON(i1 == end1 || i2 == end2); | 1145 | BUG_ON(i1 == end1 || i2 == end2); |
1188 | 1146 | ||
1189 | /* Walk carefully, as both tables may refer to the same register. */ | 1147 | /* Walk carefully, as both tables may refer to the same register. */ |
1190 | while (i1 || i2) { | 1148 | while (i1 || i2) { |
1191 | int cmp = cmp_reg(i1, i2); | 1149 | int cmp = cmp_reg(i1, i2); |
1192 | /* target-specific overrides generic entry. */ | 1150 | /* target-specific overrides generic entry. */ |
1193 | if (cmp <= 0) { | 1151 | if (cmp <= 0) { |
1194 | /* Ignore registers we trap but don't save. */ | 1152 | /* Ignore registers we trap but don't save. */ |
1195 | if (i1->reg) { | 1153 | if (i1->reg) { |
1196 | if (!copy_reg_to_user(i1, &uind)) | 1154 | if (!copy_reg_to_user(i1, &uind)) |
1197 | return -EFAULT; | 1155 | return -EFAULT; |
1198 | total++; | 1156 | total++; |
1199 | } | 1157 | } |
1200 | } else { | 1158 | } else { |
1201 | /* Ignore registers we trap but don't save. */ | 1159 | /* Ignore registers we trap but don't save. */ |
1202 | if (i2->reg) { | 1160 | if (i2->reg) { |
1203 | if (!copy_reg_to_user(i2, &uind)) | 1161 | if (!copy_reg_to_user(i2, &uind)) |
1204 | return -EFAULT; | 1162 | return -EFAULT; |
1205 | total++; | 1163 | total++; |
1206 | } | 1164 | } |
1207 | } | 1165 | } |
1208 | 1166 | ||
1209 | if (cmp <= 0 && ++i1 == end1) | 1167 | if (cmp <= 0 && ++i1 == end1) |
1210 | i1 = NULL; | 1168 | i1 = NULL; |
1211 | if (cmp >= 0 && ++i2 == end2) | 1169 | if (cmp >= 0 && ++i2 == end2) |
1212 | i2 = NULL; | 1170 | i2 = NULL; |
1213 | } | 1171 | } |
1214 | return total; | 1172 | return total; |
1215 | } | 1173 | } |
1216 | 1174 | ||
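A brief worked note on the merge above: it behaves like the final pass of a merge sort over two sorted tables. When cmp_reg() reports a tie (cmp == 0), both trailing ifs fire, so both cursors advance past the duplicate, but only the target-specific entry (i1) was copied out; that is the override mechanism the opening comment refers to. A strictly negative or positive cmp advances only one cursor, emitting entries unique to that table.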
1217 | unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) | 1175 | unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) |
1218 | { | 1176 | { |
1219 | return ARRAY_SIZE(invariant_cp15) | 1177 | return ARRAY_SIZE(invariant_cp15) |
1220 | + num_demux_regs() | 1178 | + num_demux_regs() |
1221 | + num_vfp_regs() | 1179 | + num_vfp_regs() |
1222 | + walk_cp15(vcpu, (u64 __user *)NULL); | 1180 | + walk_cp15(vcpu, (u64 __user *)NULL); |
1223 | } | 1181 | } |
1224 | 1182 | ||
1225 | int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | 1183 | int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) |
1226 | { | 1184 | { |
1227 | unsigned int i; | 1185 | unsigned int i; |
1228 | int err; | 1186 | int err; |
1229 | 1187 | ||
1230 | /* First give them all the invariant registers' indices. */ | 1188 | /* First give them all the invariant registers' indices. */ |
1231 | for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) { | 1189 | for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) { |
1232 | if (put_user(cp15_to_index(&invariant_cp15[i]), uindices)) | 1190 | if (put_user(cp15_to_index(&invariant_cp15[i]), uindices)) |
1233 | return -EFAULT; | 1191 | return -EFAULT; |
1234 | uindices++; | 1192 | uindices++; |
1235 | } | 1193 | } |
1236 | 1194 | ||
1237 | err = walk_cp15(vcpu, uindices); | 1195 | err = walk_cp15(vcpu, uindices); |
1238 | if (err < 0) | 1196 | if (err < 0) |
1239 | return err; | 1197 | return err; |
1240 | uindices += err; | 1198 | uindices += err; |
1241 | 1199 | ||
1242 | err = copy_vfp_regids(uindices); | 1200 | err = copy_vfp_regids(uindices); |
1243 | if (err < 0) | 1201 | if (err < 0) |
1244 | return err; | 1202 | return err; |
1245 | uindices += err; | 1203 | uindices += err; |
1246 | 1204 | ||
1247 | return write_demux_regids(uindices); | 1205 | return write_demux_regids(uindices); |
1248 | } | 1206 | } |
1249 | 1207 | ||
1250 | void kvm_coproc_table_init(void) | 1208 | void kvm_coproc_table_init(void) |
1251 | { | 1209 | { |
1252 | unsigned int i; | 1210 | unsigned int i; |
1253 | 1211 | ||
1254 | /* Make sure tables are unique and in order. */ | 1212 | /* Make sure tables are unique and in order. */ |
1255 | for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) | 1213 | for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) |
1256 | BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); | 1214 | BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); |
1257 | 1215 | ||
1258 | /* We abuse the reset function to overwrite the table itself. */ | 1216 | /* We abuse the reset function to overwrite the table itself. */ |
1259 | for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) | 1217 | for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) |
1260 | invariant_cp15[i].reset(NULL, &invariant_cp15[i]); | 1218 | invariant_cp15[i].reset(NULL, &invariant_cp15[i]); |
1261 | 1219 | ||
1262 | /* | 1220 | /* |
1263 | * CLIDR format is awkward, so clean it up. See ARM B4.1.20: | 1221 | * CLIDR format is awkward, so clean it up. See ARM B4.1.20: |
1264 | * | 1222 | * |
1265 | * If software reads the Cache Type fields from Ctype1 | 1223 | * If software reads the Cache Type fields from Ctype1 |
1266 | * upwards, once it has seen a value of 0b000, no caches | 1224 | * upwards, once it has seen a value of 0b000, no caches |
1267 | * exist at further-out levels of the hierarchy. So, for | 1225 | * exist at further-out levels of the hierarchy. So, for |
1268 | * example, if Ctype3 is the first Cache Type field with a | 1226 | * example, if Ctype3 is the first Cache Type field with a |
1269 | * value of 0b000, the values of Ctype4 to Ctype7 must be | 1227 | * value of 0b000, the values of Ctype4 to Ctype7 must be |
1270 | * ignored. | 1228 | * ignored. |
1271 | */ | 1229 | */ |
1272 | asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels)); | 1230 | asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels)); |
1273 | for (i = 0; i < 7; i++) | 1231 | for (i = 0; i < 7; i++) |
1274 | if (((cache_levels >> (i*3)) & 7) == 0) | 1232 | if (((cache_levels >> (i*3)) & 7) == 0) |
1275 | break; | 1233 | break; |
1276 | /* Clear all higher bits. */ | 1234 | /* Clear all higher bits. */ |
1277 | cache_levels &= (1 << (i*3))-1; | 1235 | cache_levels &= (1 << (i*3))-1; |
1278 | } | 1236 | } |
1279 | 1237 | ||
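To see the clean-up loop at work, take a hypothetical CLIDR whose low bits read 0b100011: Ctype1 = 0b011 (separate instruction and data caches), Ctype2 = 0b100 (unified), Ctype3 = 0b000. The loop stops at i = 2, the mask becomes (1 << 6) - 1 = 0x3f, and cache_levels ends up as 0x23, keeping exactly the two Ctype fields that describe real cache levels while clearing every field beyond the first 0b000 that the architecture says must be ignored.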
1280 | /** | 1238 | /** |
1281 | * kvm_reset_coprocs - sets cp15 registers to reset value | 1239 | * kvm_reset_coprocs - sets cp15 registers to reset value |
1282 | * @vcpu: The VCPU pointer | 1240 | * @vcpu: The VCPU pointer |
1283 | * | 1241 | * |
1284 | * This function finds the right table above and sets the registers on the | 1242 | * This function finds the right table above and sets the registers on the |
1285 | * virtual CPU struct to their architecturally defined reset values. | 1243 | * virtual CPU struct to their architecturally defined reset values. |
1286 | */ | 1244 | */ |
1287 | void kvm_reset_coprocs(struct kvm_vcpu *vcpu) | 1245 | void kvm_reset_coprocs(struct kvm_vcpu *vcpu) |
1288 | { | 1246 | { |
1289 | size_t num; | 1247 | size_t num; |
1290 | const struct coproc_reg *table; | 1248 | const struct coproc_reg *table; |
1291 | 1249 | ||
1292 | /* Catch someone adding a register without putting in a reset entry. */ | 1250 | /* Catch someone adding a register without putting in a reset entry. */ |
1293 | memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); | 1251 | memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); |
1294 | 1252 | ||
1295 | /* Generic chip reset first (so target could override). */ | 1253 | /* Generic chip reset first (so target could override). */ |
1296 | reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); | 1254 | reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); |
1297 | 1255 | ||
1298 | table = get_target_table(vcpu->arch.target, &num); | 1256 | table = get_target_table(vcpu->arch.target, &num); |
arch/arm/kvm/coproc.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | 2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
3 | * Authors: Christoffer Dall <c.dall@virtualopensystems.com> | 3 | * Authors: Christoffer Dall <c.dall@virtualopensystems.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License, version 2, as | 6 | * it under the terms of the GNU General Public License, version 2, as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #ifndef __ARM_KVM_COPROC_LOCAL_H__ | 19 | #ifndef __ARM_KVM_COPROC_LOCAL_H__ |
20 | #define __ARM_KVM_COPROC_LOCAL_H__ | 20 | #define __ARM_KVM_COPROC_LOCAL_H__ |
21 | 21 | ||
22 | struct coproc_params { | 22 | struct coproc_params { |
23 | unsigned long CRn; | 23 | unsigned long CRn; |
24 | unsigned long CRm; | 24 | unsigned long CRm; |
25 | unsigned long Op1; | 25 | unsigned long Op1; |
26 | unsigned long Op2; | 26 | unsigned long Op2; |
27 | unsigned long Rt1; | 27 | unsigned long Rt1; |
28 | unsigned long Rt2; | 28 | unsigned long Rt2; |
29 | bool is_64bit; | 29 | bool is_64bit; |
30 | bool is_write; | 30 | bool is_write; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | struct coproc_reg { | 33 | struct coproc_reg { |
34 | /* MRC/MCR/MRRC/MCRR instruction which accesses it. */ | 34 | /* MRC/MCR/MRRC/MCRR instruction which accesses it. */ |
35 | unsigned long CRn; | 35 | unsigned long CRn; |
36 | unsigned long CRm; | 36 | unsigned long CRm; |
37 | unsigned long Op1; | 37 | unsigned long Op1; |
38 | unsigned long Op2; | 38 | unsigned long Op2; |
39 | 39 | ||
40 | bool is_64; | 40 | bool is_64; |
41 | 41 | ||
42 | /* Trapped access from guest, if non-NULL. */ | 42 | /* Trapped access from guest, if non-NULL. */ |
43 | bool (*access)(struct kvm_vcpu *, | 43 | bool (*access)(struct kvm_vcpu *, |
44 | const struct coproc_params *, | 44 | const struct coproc_params *, |
45 | const struct coproc_reg *); | 45 | const struct coproc_reg *); |
46 | 46 | ||
47 | /* Initialization for vcpu. */ | 47 | /* Initialization for vcpu. */ |
48 | void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); | 48 | void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); |
49 | 49 | ||
50 | /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */ | 50 | /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */ |
51 | unsigned long reg; | 51 | unsigned long reg; |
52 | 52 | ||
53 | /* Value (usually reset value) */ | 53 | /* Value (usually reset value) */ |
54 | u64 val; | 54 | u64 val; |
55 | }; | 55 | }; |
56 | 56 | ||
57 | static inline void print_cp_instr(const struct coproc_params *p) | 57 | static inline void print_cp_instr(const struct coproc_params *p) |
58 | { | 58 | { |
59 | /* Look, we even formatted it for you to paste into the table! */ | 59 | /* Look, we even formatted it for you to paste into the table! */ |
60 | if (p->is_64bit) { | 60 | if (p->is_64bit) { |
61 | kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n", | 61 | kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n", |
62 | p->CRn, p->Op1, p->is_write ? "write" : "read"); | 62 | p->CRn, p->Op1, p->is_write ? "write" : "read"); |
63 | } else { | 63 | } else { |
64 | kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32," | 64 | kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32," |
65 | " func_%s },\n", | 65 | " func_%s },\n", |
66 | p->CRn, p->CRm, p->Op1, p->Op2, | 66 | p->CRn, p->CRm, p->Op1, p->Op2, |
67 | p->is_write ? "write" : "read"); | 67 | p->is_write ? "write" : "read"); |
68 | } | 68 | } |
69 | } | 69 | } |
70 | 70 | ||
71 | static inline bool ignore_write(struct kvm_vcpu *vcpu, | 71 | static inline bool ignore_write(struct kvm_vcpu *vcpu, |
72 | const struct coproc_params *p) | 72 | const struct coproc_params *p) |
73 | { | 73 | { |
74 | return true; | 74 | return true; |
75 | } | 75 | } |
76 | 76 | ||
77 | static inline bool read_zero(struct kvm_vcpu *vcpu, | 77 | static inline bool read_zero(struct kvm_vcpu *vcpu, |
78 | const struct coproc_params *p) | 78 | const struct coproc_params *p) |
79 | { | 79 | { |
80 | *vcpu_reg(vcpu, p->Rt1) = 0; | 80 | *vcpu_reg(vcpu, p->Rt1) = 0; |
81 | return true; | 81 | return true; |
82 | } | 82 | } |
83 | 83 | ||
84 | static inline bool write_to_read_only(struct kvm_vcpu *vcpu, | 84 | static inline bool write_to_read_only(struct kvm_vcpu *vcpu, |
85 | const struct coproc_params *params) | 85 | const struct coproc_params *params) |
86 | { | 86 | { |
87 | kvm_debug("CP15 write to read-only register at: %08lx\n", | 87 | kvm_debug("CP15 write to read-only register at: %08lx\n", |
88 | *vcpu_pc(vcpu)); | 88 | *vcpu_pc(vcpu)); |
89 | print_cp_instr(params); | 89 | print_cp_instr(params); |
90 | return false; | 90 | return false; |
91 | } | 91 | } |
92 | 92 | ||
93 | static inline bool read_from_write_only(struct kvm_vcpu *vcpu, | 93 | static inline bool read_from_write_only(struct kvm_vcpu *vcpu, |
94 | const struct coproc_params *params) | 94 | const struct coproc_params *params) |
95 | { | 95 | { |
96 | kvm_debug("CP15 read from write-only register at: %08lx\n", | 96 | kvm_debug("CP15 read from write-only register at: %08lx\n", |
97 | *vcpu_pc(vcpu)); | 97 | *vcpu_pc(vcpu)); |
98 | print_cp_instr(params); | 98 | print_cp_instr(params); |
99 | return false; | 99 | return false; |
100 | } | 100 | } |
101 | 101 | ||
102 | /* Reset functions */ | 102 | /* Reset functions */ |
103 | static inline void reset_unknown(struct kvm_vcpu *vcpu, | 103 | static inline void reset_unknown(struct kvm_vcpu *vcpu, |
104 | const struct coproc_reg *r) | 104 | const struct coproc_reg *r) |
105 | { | 105 | { |
106 | BUG_ON(!r->reg); | 106 | BUG_ON(!r->reg); |
107 | BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); | 107 | BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); |
108 | vcpu->arch.cp15[r->reg] = 0xdecafbad; | 108 | vcpu->arch.cp15[r->reg] = 0xdecafbad; |
109 | } | 109 | } |
110 | 110 | ||
111 | static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) | 111 | static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) |
112 | { | 112 | { |
113 | BUG_ON(!r->reg); | 113 | BUG_ON(!r->reg); |
114 | BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); | 114 | BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); |
115 | vcpu->arch.cp15[r->reg] = r->val; | 115 | vcpu->arch.cp15[r->reg] = r->val; |
116 | } | 116 | } |
117 | 117 | ||
118 | static inline void reset_unknown64(struct kvm_vcpu *vcpu, | 118 | static inline void reset_unknown64(struct kvm_vcpu *vcpu, |
119 | const struct coproc_reg *r) | 119 | const struct coproc_reg *r) |
120 | { | 120 | { |
121 | BUG_ON(!r->reg); | 121 | BUG_ON(!r->reg); |
122 | BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15)); | 122 | BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15)); |
123 | 123 | ||
124 | vcpu->arch.cp15[r->reg] = 0xdecafbad; | 124 | vcpu->arch.cp15[r->reg] = 0xdecafbad; |
125 | vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee; | 125 | vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee; |
126 | } | 126 | } |
127 | 127 | ||
128 | static inline int cmp_reg(const struct coproc_reg *i1, | 128 | static inline int cmp_reg(const struct coproc_reg *i1, |
129 | const struct coproc_reg *i2) | 129 | const struct coproc_reg *i2) |
130 | { | 130 | { |
131 | BUG_ON(i1 == i2); | 131 | BUG_ON(i1 == i2); |
132 | if (!i1) | 132 | if (!i1) |
133 | return 1; | 133 | return 1; |
134 | else if (!i2) | 134 | else if (!i2) |
135 | return -1; | 135 | return -1; |
136 | if (i1->CRn != i2->CRn) | 136 | if (i1->CRn != i2->CRn) |
137 | return i1->CRn - i2->CRn; | 137 | return i1->CRn - i2->CRn; |
138 | if (i1->CRm != i2->CRm) | 138 | if (i1->CRm != i2->CRm) |
139 | return i1->CRm - i2->CRm; | 139 | return i1->CRm - i2->CRm; |
140 | if (i1->Op1 != i2->Op1) | 140 | if (i1->Op1 != i2->Op1) |
141 | return i1->Op1 - i2->Op1; | 141 | return i1->Op1 - i2->Op1; |
142 | if (i1->Op2 != i2->Op2) | 142 | if (i1->Op2 != i2->Op2) |
143 | return i1->Op2 - i2->Op2; | 143 | return i1->Op2 - i2->Op2; |
144 | return i2->is_64 - i1->is_64; | 144 | return i2->is_64 - i1->is_64; |
145 | } | 145 | } |
146 | 146 | ||
147 | 147 | ||
148 | #define CRn(_x) .CRn = _x | 148 | #define CRn(_x) .CRn = _x |
149 | #define CRm(_x) .CRm = _x | 149 | #define CRm(_x) .CRm = _x |
150 | #define CRm64(_x) .CRn = _x, .CRm = 0 | 150 | #define CRm64(_x) .CRn = _x, .CRm = 0 |
151 | #define Op1(_x) .Op1 = _x | 151 | #define Op1(_x) .Op1 = _x |
152 | #define Op2(_x) .Op2 = _x | 152 | #define Op2(_x) .Op2 = _x |
153 | #define is64 .is_64 = true | 153 | #define is64 .is_64 = true |
154 | #define is32 .is_64 = false | 154 | #define is32 .is_64 = false |
155 | 155 | ||
156 | bool access_sctlr(struct kvm_vcpu *vcpu, | 156 | bool access_vm_reg(struct kvm_vcpu *vcpu, |
157 | const struct coproc_params *p, | 157 | const struct coproc_params *p, |
158 | const struct coproc_reg *r); | 158 | const struct coproc_reg *r); |
159 | 159 | ||
160 | #endif /* __ARM_KVM_COPROC_LOCAL_H__ */ | 160 | #endif /* __ARM_KVM_COPROC_LOCAL_H__ */ |
161 | 161 |
arch/arm/kvm/coproc_a15.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | 2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
3 | * Authors: Rusty Russell <rusty@rustcorp.au> | 3 | * Authors: Rusty Russell <rusty@rustcorp.au> |
4 | * Christoffer Dall <c.dall@virtualopensystems.com> | 4 | * Christoffer Dall <c.dall@virtualopensystems.com> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License, version 2, as | 7 | * it under the terms of the GNU General Public License, version 2, as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | 14 | * |
15 | * You should have received a copy of the GNU General Public License | 15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 17 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
18 | */ | 18 | */ |
19 | #include <linux/kvm_host.h> | 19 | #include <linux/kvm_host.h> |
20 | #include <asm/kvm_coproc.h> | 20 | #include <asm/kvm_coproc.h> |
21 | #include <asm/kvm_emulate.h> | 21 | #include <asm/kvm_emulate.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | 23 | ||
24 | #include "coproc.h" | 24 | #include "coproc.h" |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * A15-specific CP15 registers. | 27 | * A15-specific CP15 registers. |
28 | * CRn denotes the primary register number, but is copied to the CRm in the | 28 | * CRn denotes the primary register number, but is copied to the CRm in the |
29 | * user space API for 64-bit register access in line with the terminology used | 29 | * user space API for 64-bit register access in line with the terminology used |
30 | * in the ARM ARM. | 30 | * in the ARM ARM. |
31 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit | 31 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit |
32 | * registers preceding 32-bit ones. | 32 | * registers preceding 32-bit ones. |
33 | */ | 33 | */ |
34 | static const struct coproc_reg a15_regs[] = { | 34 | static const struct coproc_reg a15_regs[] = { |
35 | /* SCTLR: swapped by interrupt.S. */ | 35 | /* SCTLR: swapped by interrupt.S. */ |
36 | { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, | 36 | { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, |
37 | access_sctlr, reset_val, c1_SCTLR, 0x00C50078 }, | 37 | access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 }, |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static struct kvm_coproc_target_table a15_target_table = { | 40 | static struct kvm_coproc_target_table a15_target_table = { |
41 | .target = KVM_ARM_TARGET_CORTEX_A15, | 41 | .target = KVM_ARM_TARGET_CORTEX_A15, |
42 | .table = a15_regs, | 42 | .table = a15_regs, |
43 | .num = ARRAY_SIZE(a15_regs), | 43 | .num = ARRAY_SIZE(a15_regs), |
44 | }; | 44 | }; |
45 | 45 | ||
46 | static int __init coproc_a15_init(void) | 46 | static int __init coproc_a15_init(void) |
47 | { | 47 | { |
48 | kvm_register_target_coproc_table(&a15_target_table); | 48 | kvm_register_target_coproc_table(&a15_target_table); |
49 | return 0; | 49 | return 0; |
50 | } | 50 | } |
51 | late_initcall(coproc_a15_init); | 51 | late_initcall(coproc_a15_init); |
52 | 52 |
arch/arm/kvm/coproc_a7.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | 2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
3 | * Copyright (C) 2013 - ARM Ltd | 3 | * Copyright (C) 2013 - ARM Ltd |
4 | * | 4 | * |
5 | * Authors: Rusty Russell <rusty@rustcorp.au> | 5 | * Authors: Rusty Russell <rusty@rustcorp.au> |
6 | * Christoffer Dall <c.dall@virtualopensystems.com> | 6 | * Christoffer Dall <c.dall@virtualopensystems.com> |
7 | * Jonathan Austin <jonathan.austin@arm.com> | 7 | * Jonathan Austin <jonathan.austin@arm.com> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License, version 2, as | 10 | * it under the terms of the GNU General Public License, version 2, as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, | 13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software | 19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 20 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
21 | */ | 21 | */ |
22 | #include <linux/kvm_host.h> | 22 | #include <linux/kvm_host.h> |
23 | #include <asm/kvm_coproc.h> | 23 | #include <asm/kvm_coproc.h> |
24 | #include <asm/kvm_emulate.h> | 24 | #include <asm/kvm_emulate.h> |
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | 26 | ||
27 | #include "coproc.h" | 27 | #include "coproc.h" |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * Cortex-A7 specific CP15 registers. | 30 | * Cortex-A7 specific CP15 registers. |
31 | * CRn denotes the primary register number, but is copied to the CRm in the | 31 | * CRn denotes the primary register number, but is copied to the CRm in the |
32 | * user space API for 64-bit register access in line with the terminology used | 32 | * user space API for 64-bit register access in line with the terminology used |
33 | * in the ARM ARM. | 33 | * in the ARM ARM. |
34 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit | 34 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit |
35 | * registers preceding 32-bit ones. | 35 | * registers preceding 32-bit ones. |
36 | */ | 36 | */ |
37 | static const struct coproc_reg a7_regs[] = { | 37 | static const struct coproc_reg a7_regs[] = { |
38 | /* SCTLR: swapped by interrupt.S. */ | 38 | /* SCTLR: swapped by interrupt.S. */ |
39 | { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, | 39 | { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, |
40 | access_sctlr, reset_val, c1_SCTLR, 0x00C50878 }, | 40 | access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 }, |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static struct kvm_coproc_target_table a7_target_table = { | 43 | static struct kvm_coproc_target_table a7_target_table = { |
44 | .target = KVM_ARM_TARGET_CORTEX_A7, | 44 | .target = KVM_ARM_TARGET_CORTEX_A7, |
45 | .table = a7_regs, | 45 | .table = a7_regs, |
46 | .num = ARRAY_SIZE(a7_regs), | 46 | .num = ARRAY_SIZE(a7_regs), |
47 | }; | 47 | }; |
48 | 48 | ||
49 | static int __init coproc_a7_init(void) | 49 | static int __init coproc_a7_init(void) |
50 | { | 50 | { |
51 | kvm_register_target_coproc_table(&a7_target_table); | 51 | kvm_register_target_coproc_table(&a7_target_table); |
52 | return 0; | 52 | return 0; |
53 | } | 53 | } |
54 | late_initcall(coproc_a7_init); | 54 | late_initcall(coproc_a7_init); |
55 | 55 |
arch/arm/kvm/mmu.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | 2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | 3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License, version 2, as | 6 | * it under the terms of the GNU General Public License, version 2, as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/mman.h> | 19 | #include <linux/mman.h> |
20 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <linux/hugetlb.h> | 22 | #include <linux/hugetlb.h> |
23 | #include <trace/events/kvm.h> | 23 | #include <trace/events/kvm.h> |
24 | #include <asm/pgalloc.h> | 24 | #include <asm/pgalloc.h> |
25 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
26 | #include <asm/kvm_arm.h> | 26 | #include <asm/kvm_arm.h> |
27 | #include <asm/kvm_mmu.h> | 27 | #include <asm/kvm_mmu.h> |
28 | #include <asm/kvm_mmio.h> | 28 | #include <asm/kvm_mmio.h> |
29 | #include <asm/kvm_asm.h> | 29 | #include <asm/kvm_asm.h> |
30 | #include <asm/kvm_emulate.h> | 30 | #include <asm/kvm_emulate.h> |
31 | 31 | ||
32 | #include "trace.h" | 32 | #include "trace.h" |
33 | 33 | ||
34 | extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; | 34 | extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; |
35 | 35 | ||
36 | static pgd_t *boot_hyp_pgd; | 36 | static pgd_t *boot_hyp_pgd; |
37 | static pgd_t *hyp_pgd; | 37 | static pgd_t *hyp_pgd; |
38 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); | 38 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); |
39 | 39 | ||
40 | static void *init_bounce_page; | 40 | static void *init_bounce_page; |
41 | static unsigned long hyp_idmap_start; | 41 | static unsigned long hyp_idmap_start; |
42 | static unsigned long hyp_idmap_end; | 42 | static unsigned long hyp_idmap_end; |
43 | static phys_addr_t hyp_idmap_vector; | 43 | static phys_addr_t hyp_idmap_vector; |
44 | 44 | ||
45 | #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) | 45 | #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t)) |
46 | 46 | ||
47 | #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x)) | 47 | #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x)) |
48 | 48 | ||
49 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) | 49 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
50 | { | 50 | { |
51 | /* | 51 | /* |
52 | * This function also gets called when dealing with HYP page | 52 | * This function also gets called when dealing with HYP page |
53 | * tables. As HYP doesn't have an associated struct kvm (and | 53 | * tables. As HYP doesn't have an associated struct kvm (and |
54 | * the HYP page tables are fairly static), we don't do | 54 | * the HYP page tables are fairly static), we don't do |
55 | * anything there. | 55 | * anything there. |
56 | */ | 56 | */ |
57 | if (kvm) | 57 | if (kvm) |
58 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); | 58 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); |
59 | } | 59 | } |
60 | 60 | ||
61 | /* | ||
62 | * D-Cache management functions. They take the page table entries by | ||
63 | * value, as they are flushing the cache using the kernel mapping (or | ||
64 | * kmap on 32bit). | ||
65 | */ | ||
66 | static void kvm_flush_dcache_pte(pte_t pte) | ||
67 | { | ||
68 | __kvm_flush_dcache_pte(pte); | ||
69 | } | ||
70 | |||
71 | static void kvm_flush_dcache_pmd(pmd_t pmd) | ||
72 | { | ||
73 | __kvm_flush_dcache_pmd(pmd); | ||
74 | } | ||
75 | |||
76 | static void kvm_flush_dcache_pud(pud_t pud) | ||
77 | { | ||
78 | __kvm_flush_dcache_pud(pud); | ||
79 | } | ||
80 | |||
61 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, | 81 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, |
62 | int min, int max) | 82 | int min, int max) |
63 | { | 83 | { |
64 | void *page; | 84 | void *page; |
65 | 85 | ||
66 | BUG_ON(max > KVM_NR_MEM_OBJS); | 86 | BUG_ON(max > KVM_NR_MEM_OBJS); |
67 | if (cache->nobjs >= min) | 87 | if (cache->nobjs >= min) |
68 | return 0; | 88 | return 0; |
69 | while (cache->nobjs < max) { | 89 | while (cache->nobjs < max) { |
70 | page = (void *)__get_free_page(PGALLOC_GFP); | 90 | page = (void *)__get_free_page(PGALLOC_GFP); |
71 | if (!page) | 91 | if (!page) |
72 | return -ENOMEM; | 92 | return -ENOMEM; |
73 | cache->objects[cache->nobjs++] = page; | 93 | cache->objects[cache->nobjs++] = page; |
74 | } | 94 | } |
75 | return 0; | 95 | return 0; |
76 | } | 96 | } |
77 | 97 | ||
78 | static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) | 98 | static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) |
79 | { | 99 | { |
80 | while (mc->nobjs) | 100 | while (mc->nobjs) |
81 | free_page((unsigned long)mc->objects[--mc->nobjs]); | 101 | free_page((unsigned long)mc->objects[--mc->nobjs]); |
82 | } | 102 | } |
83 | 103 | ||
84 | static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) | 104 | static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) |
85 | { | 105 | { |
86 | void *p; | 106 | void *p; |
87 | 107 | ||
88 | BUG_ON(!mc || !mc->nobjs); | 108 | BUG_ON(!mc || !mc->nobjs); |
89 | p = mc->objects[--mc->nobjs]; | 109 | p = mc->objects[--mc->nobjs]; |
90 | return p; | 110 | return p; |
91 | } | 111 | } |
92 | 112 | ||
93 | static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) | 113 | static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr) |
94 | { | 114 | { |
95 | pud_t *pud_table __maybe_unused = pud_offset(pgd, 0); | 115 | pud_t *pud_table __maybe_unused = pud_offset(pgd, 0); |
96 | pgd_clear(pgd); | 116 | pgd_clear(pgd); |
97 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 117 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
98 | pud_free(NULL, pud_table); | 118 | pud_free(NULL, pud_table); |
99 | put_page(virt_to_page(pgd)); | 119 | put_page(virt_to_page(pgd)); |
100 | } | 120 | } |
101 | 121 | ||
102 | static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) | 122 | static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) |
103 | { | 123 | { |
104 | pmd_t *pmd_table = pmd_offset(pud, 0); | 124 | pmd_t *pmd_table = pmd_offset(pud, 0); |
105 | VM_BUG_ON(pud_huge(*pud)); | 125 | VM_BUG_ON(pud_huge(*pud)); |
106 | pud_clear(pud); | 126 | pud_clear(pud); |
107 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 127 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
108 | pmd_free(NULL, pmd_table); | 128 | pmd_free(NULL, pmd_table); |
109 | put_page(virt_to_page(pud)); | 129 | put_page(virt_to_page(pud)); |
110 | } | 130 | } |
111 | 131 | ||
112 | static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) | 132 | static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) |
113 | { | 133 | { |
114 | pte_t *pte_table = pte_offset_kernel(pmd, 0); | 134 | pte_t *pte_table = pte_offset_kernel(pmd, 0); |
115 | VM_BUG_ON(kvm_pmd_huge(*pmd)); | 135 | VM_BUG_ON(kvm_pmd_huge(*pmd)); |
116 | pmd_clear(pmd); | 136 | pmd_clear(pmd); |
117 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 137 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
118 | pte_free_kernel(NULL, pte_table); | 138 | pte_free_kernel(NULL, pte_table); |
119 | put_page(virt_to_page(pmd)); | 139 | put_page(virt_to_page(pmd)); |
120 | } | 140 | } |
121 | 141 | ||
142 | /* | ||
143 | * Unmapping vs dcache management: | ||
144 | * | ||
145 | * If a guest maps certain memory pages as uncached, all writes will | ||
146 | * bypass the data cache and go directly to RAM. However, the CPUs | ||
147 | * can still speculate reads (not writes) and fill cache lines with | ||
148 | * data. | ||
149 | * | ||
150 | * Those cache lines will be *clean* cache lines though, so a | ||
151 | * clean+invalidate operation is equivalent to an invalidate | ||
152 | * operation, because no cache lines are marked dirty. | ||
153 | * | ||
154 | * Those clean cache lines could be filled prior to an uncached write | ||
155 | * by the guest, and the cache coherent IO subsystem would therefore | ||
156 | * end up writing old data to disk. | ||
157 | * | ||
158 | * This is why right after unmapping a page/section and invalidating | ||
159 | * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure | ||
160 | * the IO subsystem will never hit in the cache. | ||
161 | */ | ||
122 | static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, | 162 | static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, |
123 | phys_addr_t addr, phys_addr_t end) | 163 | phys_addr_t addr, phys_addr_t end) |
124 | { | 164 | { |
125 | phys_addr_t start_addr = addr; | 165 | phys_addr_t start_addr = addr; |
126 | pte_t *pte, *start_pte; | 166 | pte_t *pte, *start_pte; |
127 | 167 | ||
128 | start_pte = pte = pte_offset_kernel(pmd, addr); | 168 | start_pte = pte = pte_offset_kernel(pmd, addr); |
129 | do { | 169 | do { |
130 | if (!pte_none(*pte)) { | 170 | if (!pte_none(*pte)) { |
171 | pte_t old_pte = *pte; | ||
172 | |||
131 | kvm_set_pte(pte, __pte(0)); | 173 | kvm_set_pte(pte, __pte(0)); |
132 | put_page(virt_to_page(pte)); | ||
133 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 174 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
175 | |||
176 | /* No need to invalidate the cache for device mappings */ | ||
177 | if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) | ||
178 | kvm_flush_dcache_pte(old_pte); | ||
179 | |||
180 | put_page(virt_to_page(pte)); | ||
134 | } | 181 | } |
135 | } while (pte++, addr += PAGE_SIZE, addr != end); | 182 | } while (pte++, addr += PAGE_SIZE, addr != end); |
136 | 183 | ||
137 | if (kvm_pte_table_empty(kvm, start_pte)) | 184 | if (kvm_pte_table_empty(kvm, start_pte)) |
138 | clear_pmd_entry(kvm, pmd, start_addr); | 185 | clear_pmd_entry(kvm, pmd, start_addr); |
139 | } | 186 | } |
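One subtlety in the new unmap_ptes() hunk is the device-mapping test: PAGE_S2_DEVICE is a multi-bit attribute, so the code masks and compares against the full value rather than testing a single bit. A hedged user-space illustration of that idiom — the bit values here are invented for the demo, not the real stage-2 encoding:

        #include <stdio.h>
        #include <stdint.h>

        #define ATTR_DEVICE 0x0c        /* invented two-bit attribute */

        static int is_device(uint64_t pte)
        {
                /* True only when every bit of the attribute is set. */
                return (pte & ATTR_DEVICE) == ATTR_DEVICE;
        }

        int main(void)
        {
                printf("%d %d %d\n",
                       is_device(0x0c), is_device(0x04), is_device(0x00));
                return 0;               /* prints: 1 0 0 */
        }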
140 | 187 | ||
141 | static void unmap_pmds(struct kvm *kvm, pud_t *pud, | 188 | static void unmap_pmds(struct kvm *kvm, pud_t *pud, |
142 | phys_addr_t addr, phys_addr_t end) | 189 | phys_addr_t addr, phys_addr_t end) |
143 | { | 190 | { |
144 | phys_addr_t next, start_addr = addr; | 191 | phys_addr_t next, start_addr = addr; |
145 | pmd_t *pmd, *start_pmd; | 192 | pmd_t *pmd, *start_pmd; |
146 | 193 | ||
147 | start_pmd = pmd = pmd_offset(pud, addr); | 194 | start_pmd = pmd = pmd_offset(pud, addr); |
148 | do { | 195 | do { |
149 | next = kvm_pmd_addr_end(addr, end); | 196 | next = kvm_pmd_addr_end(addr, end); |
150 | if (!pmd_none(*pmd)) { | 197 | if (!pmd_none(*pmd)) { |
151 | if (kvm_pmd_huge(*pmd)) { | 198 | if (kvm_pmd_huge(*pmd)) { |
199 | pmd_t old_pmd = *pmd; | ||
200 | |||
152 | pmd_clear(pmd); | 201 | pmd_clear(pmd); |
153 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 202 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
203 | |||
204 | kvm_flush_dcache_pmd(old_pmd); | ||
205 | |||
154 | put_page(virt_to_page(pmd)); | 206 | put_page(virt_to_page(pmd)); |
155 | } else { | 207 | } else { |
156 | unmap_ptes(kvm, pmd, addr, next); | 208 | unmap_ptes(kvm, pmd, addr, next); |
157 | } | 209 | } |
158 | } | 210 | } |
159 | } while (pmd++, addr = next, addr != end); | 211 | } while (pmd++, addr = next, addr != end); |
160 | 212 | ||
161 | if (kvm_pmd_table_empty(kvm, start_pmd)) | 213 | if (kvm_pmd_table_empty(kvm, start_pmd)) |
162 | clear_pud_entry(kvm, pud, start_addr); | 214 | clear_pud_entry(kvm, pud, start_addr); |
163 | } | 215 | } |
164 | 216 | ||
165 | static void unmap_puds(struct kvm *kvm, pgd_t *pgd, | 217 | static void unmap_puds(struct kvm *kvm, pgd_t *pgd, |
166 | phys_addr_t addr, phys_addr_t end) | 218 | phys_addr_t addr, phys_addr_t end) |
167 | { | 219 | { |
168 | phys_addr_t next, start_addr = addr; | 220 | phys_addr_t next, start_addr = addr; |
169 | pud_t *pud, *start_pud; | 221 | pud_t *pud, *start_pud; |
170 | 222 | ||
171 | start_pud = pud = pud_offset(pgd, addr); | 223 | start_pud = pud = pud_offset(pgd, addr); |
172 | do { | 224 | do { |
173 | next = kvm_pud_addr_end(addr, end); | 225 | next = kvm_pud_addr_end(addr, end); |
174 | if (!pud_none(*pud)) { | 226 | if (!pud_none(*pud)) { |
175 | if (pud_huge(*pud)) { | 227 | if (pud_huge(*pud)) { |
228 | pud_t old_pud = *pud; | ||
229 | |||
176 | pud_clear(pud); | 230 | pud_clear(pud); |
177 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 231 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
232 | |||
233 | kvm_flush_dcache_pud(old_pud); | ||
234 | |||
178 | put_page(virt_to_page(pud)); | 235 | put_page(virt_to_page(pud)); |
179 | } else { | 236 | } else { |
180 | unmap_pmds(kvm, pud, addr, next); | 237 | unmap_pmds(kvm, pud, addr, next); |
181 | } | 238 | } |
182 | } | 239 | } |
183 | } while (pud++, addr = next, addr != end); | 240 | } while (pud++, addr = next, addr != end); |
184 | 241 | ||
185 | if (kvm_pud_table_empty(kvm, start_pud)) | 242 | if (kvm_pud_table_empty(kvm, start_pud)) |
186 | clear_pgd_entry(kvm, pgd, start_addr); | 243 | clear_pgd_entry(kvm, pgd, start_addr); |
187 | } | 244 | } |
188 | 245 | ||
189 | 246 | ||
190 | static void unmap_range(struct kvm *kvm, pgd_t *pgdp, | 247 | static void unmap_range(struct kvm *kvm, pgd_t *pgdp, |
191 | phys_addr_t start, u64 size) | 248 | phys_addr_t start, u64 size) |
192 | { | 249 | { |
193 | pgd_t *pgd; | 250 | pgd_t *pgd; |
194 | phys_addr_t addr = start, end = start + size; | 251 | phys_addr_t addr = start, end = start + size; |
195 | phys_addr_t next; | 252 | phys_addr_t next; |
196 | 253 | ||
197 | pgd = pgdp + pgd_index(addr); | 254 | pgd = pgdp + pgd_index(addr); |
198 | do { | 255 | do { |
199 | next = kvm_pgd_addr_end(addr, end); | 256 | next = kvm_pgd_addr_end(addr, end); |
200 | if (!pgd_none(*pgd)) | 257 | if (!pgd_none(*pgd)) |
201 | unmap_puds(kvm, pgd, addr, next); | 258 | unmap_puds(kvm, pgd, addr, next); |
202 | } while (pgd++, addr = next, addr != end); | 259 | } while (pgd++, addr = next, addr != end); |
203 | } | 260 | } |
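unmap_range() and the per-level helpers all lean on the kvm_p*_addr_end() idiom: clamp the next level boundary to the end of the requested range, process one entry, then advance. A standalone sketch of just that arithmetic, with CHUNK_SIZE as an arbitrary stand-in for the PGDIR/PUD/PMD sizes:

        #include <stdio.h>
        #include <stdint.h>

        #define CHUNK_SHIFT 21                  /* stand-in for PMD_SHIFT */
        #define CHUNK_SIZE  (1ULL << CHUNK_SHIFT)
        #define CHUNK_MASK  (~(CHUNK_SIZE - 1))

        /*
         * Next chunk boundary, clamped to 'end'.  The kernel's
         * pmd_addr_end() additionally guards against address
         * wraparound; that is omitted here.
         */
        static uint64_t chunk_addr_end(uint64_t addr, uint64_t end)
        {
                uint64_t boundary = (addr + CHUNK_SIZE) & CHUNK_MASK;
                return boundary < end ? boundary : end;
        }

        int main(void)
        {
                uint64_t addr = 0x1ff000, end = 0x600000, next;

                do {
                        next = chunk_addr_end(addr, end);
                        printf("process [0x%llx, 0x%llx)\n",
                               (unsigned long long)addr,
                               (unsigned long long)next);
                } while (addr = next, addr != end);
                return 0;
        }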
204 | 261 | ||
205 | static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, | 262 | static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, |
206 | phys_addr_t addr, phys_addr_t end) | 263 | phys_addr_t addr, phys_addr_t end) |
207 | { | 264 | { |
208 | pte_t *pte; | 265 | pte_t *pte; |
209 | 266 | ||
210 | pte = pte_offset_kernel(pmd, addr); | 267 | pte = pte_offset_kernel(pmd, addr); |
211 | do { | 268 | do { |
212 | if (!pte_none(*pte)) { | 269 | if (!pte_none(*pte) && |
213 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | 270 | (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) |
214 | kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE); | 271 | kvm_flush_dcache_pte(*pte); |
215 | } | ||
216 | } while (pte++, addr += PAGE_SIZE, addr != end); | 272 | } while (pte++, addr += PAGE_SIZE, addr != end); |
217 | } | 273 | } |
218 | 274 | ||
219 | static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, | 275 | static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, |
220 | phys_addr_t addr, phys_addr_t end) | 276 | phys_addr_t addr, phys_addr_t end) |
221 | { | 277 | { |
222 | pmd_t *pmd; | 278 | pmd_t *pmd; |
223 | phys_addr_t next; | 279 | phys_addr_t next; |
224 | 280 | ||
225 | pmd = pmd_offset(pud, addr); | 281 | pmd = pmd_offset(pud, addr); |
226 | do { | 282 | do { |
227 | next = kvm_pmd_addr_end(addr, end); | 283 | next = kvm_pmd_addr_end(addr, end); |
228 | if (!pmd_none(*pmd)) { | 284 | if (!pmd_none(*pmd)) { |
229 | if (kvm_pmd_huge(*pmd)) { | 285 | if (kvm_pmd_huge(*pmd)) |
230 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | 286 | kvm_flush_dcache_pmd(*pmd); |
231 | kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE); | 287 | else |
232 | } else { | ||
233 | stage2_flush_ptes(kvm, pmd, addr, next); | 288 | stage2_flush_ptes(kvm, pmd, addr, next); |
234 | } | ||
235 | } | 289 | } |
236 | } while (pmd++, addr = next, addr != end); | 290 | } while (pmd++, addr = next, addr != end); |
237 | } | 291 | } |
238 | 292 | ||
239 | static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, | 293 | static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, |
240 | phys_addr_t addr, phys_addr_t end) | 294 | phys_addr_t addr, phys_addr_t end) |
241 | { | 295 | { |
242 | pud_t *pud; | 296 | pud_t *pud; |
243 | phys_addr_t next; | 297 | phys_addr_t next; |
244 | 298 | ||
245 | pud = pud_offset(pgd, addr); | 299 | pud = pud_offset(pgd, addr); |
246 | do { | 300 | do { |
247 | next = kvm_pud_addr_end(addr, end); | 301 | next = kvm_pud_addr_end(addr, end); |
248 | if (!pud_none(*pud)) { | 302 | if (!pud_none(*pud)) { |
249 | if (pud_huge(*pud)) { | 303 | if (pud_huge(*pud)) |
250 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | 304 | kvm_flush_dcache_pud(*pud); |
251 | kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE); | 305 | else |
252 | } else { | ||
253 | stage2_flush_pmds(kvm, pud, addr, next); | 306 | stage2_flush_pmds(kvm, pud, addr, next); |
254 | } | ||
255 | } | 307 | } |
256 | } while (pud++, addr = next, addr != end); | 308 | } while (pud++, addr = next, addr != end); |
257 | } | 309 | } |
258 | 310 | ||
259 | static void stage2_flush_memslot(struct kvm *kvm, | 311 | static void stage2_flush_memslot(struct kvm *kvm, |
260 | struct kvm_memory_slot *memslot) | 312 | struct kvm_memory_slot *memslot) |
261 | { | 313 | { |
262 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | 314 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; |
263 | phys_addr_t end = addr + PAGE_SIZE * memslot->npages; | 315 | phys_addr_t end = addr + PAGE_SIZE * memslot->npages; |
264 | phys_addr_t next; | 316 | phys_addr_t next; |
265 | pgd_t *pgd; | 317 | pgd_t *pgd; |
266 | 318 | ||
267 | pgd = kvm->arch.pgd + pgd_index(addr); | 319 | pgd = kvm->arch.pgd + pgd_index(addr); |
268 | do { | 320 | do { |
269 | next = kvm_pgd_addr_end(addr, end); | 321 | next = kvm_pgd_addr_end(addr, end); |
270 | stage2_flush_puds(kvm, pgd, addr, next); | 322 | stage2_flush_puds(kvm, pgd, addr, next); |
271 | } while (pgd++, addr = next, addr != end); | 323 | } while (pgd++, addr = next, addr != end); |
272 | } | 324 | } |
273 | 325 | ||
274 | /** | 326 | /** |
275 | * stage2_flush_vm - Invalidate cache for pages mapped in stage 2 | 327 | * stage2_flush_vm - Invalidate cache for pages mapped in stage 2 |
276 | * @kvm: The struct kvm pointer | 328 | * @kvm: The struct kvm pointer |
277 | * | 329 | * |
278 | * Go through the stage 2 page tables and invalidate any cache lines | 330 | * Go through the stage 2 page tables and invalidate any cache lines |
279 | * backing memory already mapped to the VM. | 331 | * backing memory already mapped to the VM. |
280 | */ | 332 | */ |
281 | void stage2_flush_vm(struct kvm *kvm) | 333 | static void stage2_flush_vm(struct kvm *kvm) |
282 | { | 334 | { |
283 | struct kvm_memslots *slots; | 335 | struct kvm_memslots *slots; |
284 | struct kvm_memory_slot *memslot; | 336 | struct kvm_memory_slot *memslot; |
285 | int idx; | 337 | int idx; |
286 | 338 | ||
287 | idx = srcu_read_lock(&kvm->srcu); | 339 | idx = srcu_read_lock(&kvm->srcu); |
288 | spin_lock(&kvm->mmu_lock); | 340 | spin_lock(&kvm->mmu_lock); |
289 | 341 | ||
290 | slots = kvm_memslots(kvm); | 342 | slots = kvm_memslots(kvm); |
291 | kvm_for_each_memslot(memslot, slots) | 343 | kvm_for_each_memslot(memslot, slots) |
292 | stage2_flush_memslot(kvm, memslot); | 344 | stage2_flush_memslot(kvm, memslot); |
293 | 345 | ||
294 | spin_unlock(&kvm->mmu_lock); | 346 | spin_unlock(&kvm->mmu_lock); |
295 | srcu_read_unlock(&kvm->srcu, idx); | 347 | srcu_read_unlock(&kvm->srcu, idx); |
296 | } | 348 | } |
297 | 349 | ||
298 | /** | 350 | /** |
299 | * free_boot_hyp_pgd - free HYP boot page tables | 351 | * free_boot_hyp_pgd - free HYP boot page tables |
300 | * | 352 | * |
301 | * Free the HYP boot page tables. The bounce page is also freed. | 353 | * Free the HYP boot page tables. The bounce page is also freed. |
302 | */ | 354 | */ |
303 | void free_boot_hyp_pgd(void) | 355 | void free_boot_hyp_pgd(void) |
304 | { | 356 | { |
305 | mutex_lock(&kvm_hyp_pgd_mutex); | 357 | mutex_lock(&kvm_hyp_pgd_mutex); |
306 | 358 | ||
307 | if (boot_hyp_pgd) { | 359 | if (boot_hyp_pgd) { |
308 | unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); | 360 | unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); |
309 | unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); | 361 | unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
310 | free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order); | 362 | free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order); |
311 | boot_hyp_pgd = NULL; | 363 | boot_hyp_pgd = NULL; |
312 | } | 364 | } |
313 | 365 | ||
314 | if (hyp_pgd) | 366 | if (hyp_pgd) |
315 | unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); | 367 | unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
316 | 368 | ||
317 | free_page((unsigned long)init_bounce_page); | 369 | free_page((unsigned long)init_bounce_page); |
318 | init_bounce_page = NULL; | 370 | init_bounce_page = NULL; |
319 | 371 | ||
320 | mutex_unlock(&kvm_hyp_pgd_mutex); | 372 | mutex_unlock(&kvm_hyp_pgd_mutex); |
321 | } | 373 | } |
322 | 374 | ||
323 | /** | 375 | /** |
324 | * free_hyp_pgds - free Hyp-mode page tables | 376 | * free_hyp_pgds - free Hyp-mode page tables |
325 | * | 377 | * |
326 | * Assumes hyp_pgd is a page table used strictly in Hyp-mode and | 378 | * Assumes hyp_pgd is a page table used strictly in Hyp-mode and |
327 | * therefore contains either mappings in the kernel memory area (above | 379 | * therefore contains either mappings in the kernel memory area (above |
328 | * PAGE_OFFSET), or device mappings in the vmalloc range (from | 380 | * PAGE_OFFSET), or device mappings in the vmalloc range (from |
329 | * VMALLOC_START to VMALLOC_END). | 381 | * VMALLOC_START to VMALLOC_END). |
330 | * | 382 | * |
331 | * boot_hyp_pgd should only map two pages for the init code. | 383 | * boot_hyp_pgd should only map two pages for the init code. |
332 | */ | 384 | */ |
333 | void free_hyp_pgds(void) | 385 | void free_hyp_pgds(void) |
334 | { | 386 | { |
335 | unsigned long addr; | 387 | unsigned long addr; |
336 | 388 | ||
337 | free_boot_hyp_pgd(); | 389 | free_boot_hyp_pgd(); |
338 | 390 | ||
339 | mutex_lock(&kvm_hyp_pgd_mutex); | 391 | mutex_lock(&kvm_hyp_pgd_mutex); |
340 | 392 | ||
341 | if (hyp_pgd) { | 393 | if (hyp_pgd) { |
342 | for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) | 394 | for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) |
343 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); | 395 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
344 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) | 396 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) |
345 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); | 397 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
346 | 398 | ||
347 | free_pages((unsigned long)hyp_pgd, hyp_pgd_order); | 399 | free_pages((unsigned long)hyp_pgd, hyp_pgd_order); |
348 | hyp_pgd = NULL; | 400 | hyp_pgd = NULL; |
349 | } | 401 | } |
350 | 402 | ||
351 | mutex_unlock(&kvm_hyp_pgd_mutex); | 403 | mutex_unlock(&kvm_hyp_pgd_mutex); |
352 | } | 404 | } |
353 | 405 | ||
354 | static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, | 406 | static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, |
355 | unsigned long end, unsigned long pfn, | 407 | unsigned long end, unsigned long pfn, |
356 | pgprot_t prot) | 408 | pgprot_t prot) |
357 | { | 409 | { |
358 | pte_t *pte; | 410 | pte_t *pte; |
359 | unsigned long addr; | 411 | unsigned long addr; |
360 | 412 | ||
361 | addr = start; | 413 | addr = start; |
362 | do { | 414 | do { |
363 | pte = pte_offset_kernel(pmd, addr); | 415 | pte = pte_offset_kernel(pmd, addr); |
364 | kvm_set_pte(pte, pfn_pte(pfn, prot)); | 416 | kvm_set_pte(pte, pfn_pte(pfn, prot)); |
365 | get_page(virt_to_page(pte)); | 417 | get_page(virt_to_page(pte)); |
366 | kvm_flush_dcache_to_poc(pte, sizeof(*pte)); | 418 | kvm_flush_dcache_to_poc(pte, sizeof(*pte)); |
367 | pfn++; | 419 | pfn++; |
368 | } while (addr += PAGE_SIZE, addr != end); | 420 | } while (addr += PAGE_SIZE, addr != end); |
369 | } | 421 | } |
370 | 422 | ||
371 | static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, | 423 | static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, |
372 | unsigned long end, unsigned long pfn, | 424 | unsigned long end, unsigned long pfn, |
373 | pgprot_t prot) | 425 | pgprot_t prot) |
374 | { | 426 | { |
375 | pmd_t *pmd; | 427 | pmd_t *pmd; |
376 | pte_t *pte; | 428 | pte_t *pte; |
377 | unsigned long addr, next; | 429 | unsigned long addr, next; |
378 | 430 | ||
379 | addr = start; | 431 | addr = start; |
380 | do { | 432 | do { |
381 | pmd = pmd_offset(pud, addr); | 433 | pmd = pmd_offset(pud, addr); |
382 | 434 | ||
383 | BUG_ON(pmd_sect(*pmd)); | 435 | BUG_ON(pmd_sect(*pmd)); |
384 | 436 | ||
385 | if (pmd_none(*pmd)) { | 437 | if (pmd_none(*pmd)) { |
386 | pte = pte_alloc_one_kernel(NULL, addr); | 438 | pte = pte_alloc_one_kernel(NULL, addr); |
387 | if (!pte) { | 439 | if (!pte) { |
388 | kvm_err("Cannot allocate Hyp pte\n"); | 440 | kvm_err("Cannot allocate Hyp pte\n"); |
389 | return -ENOMEM; | 441 | return -ENOMEM; |
390 | } | 442 | } |
391 | pmd_populate_kernel(NULL, pmd, pte); | 443 | pmd_populate_kernel(NULL, pmd, pte); |
392 | get_page(virt_to_page(pmd)); | 444 | get_page(virt_to_page(pmd)); |
393 | kvm_flush_dcache_to_poc(pmd, sizeof(*pmd)); | 445 | kvm_flush_dcache_to_poc(pmd, sizeof(*pmd)); |
394 | } | 446 | } |
395 | 447 | ||
396 | next = pmd_addr_end(addr, end); | 448 | next = pmd_addr_end(addr, end); |
397 | 449 | ||
398 | create_hyp_pte_mappings(pmd, addr, next, pfn, prot); | 450 | create_hyp_pte_mappings(pmd, addr, next, pfn, prot); |
399 | pfn += (next - addr) >> PAGE_SHIFT; | 451 | pfn += (next - addr) >> PAGE_SHIFT; |
400 | } while (addr = next, addr != end); | 452 | } while (addr = next, addr != end); |
401 | 453 | ||
402 | return 0; | 454 | return 0; |
403 | } | 455 | } |
404 | 456 | ||
405 | static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start, | 457 | static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start, |
406 | unsigned long end, unsigned long pfn, | 458 | unsigned long end, unsigned long pfn, |
407 | pgprot_t prot) | 459 | pgprot_t prot) |
408 | { | 460 | { |
409 | pud_t *pud; | 461 | pud_t *pud; |
410 | pmd_t *pmd; | 462 | pmd_t *pmd; |
411 | unsigned long addr, next; | 463 | unsigned long addr, next; |
412 | int ret; | 464 | int ret; |
413 | 465 | ||
414 | addr = start; | 466 | addr = start; |
415 | do { | 467 | do { |
416 | pud = pud_offset(pgd, addr); | 468 | pud = pud_offset(pgd, addr); |
417 | 469 | ||
418 | if (pud_none_or_clear_bad(pud)) { | 470 | if (pud_none_or_clear_bad(pud)) { |
419 | pmd = pmd_alloc_one(NULL, addr); | 471 | pmd = pmd_alloc_one(NULL, addr); |
420 | if (!pmd) { | 472 | if (!pmd) { |
421 | kvm_err("Cannot allocate Hyp pmd\n"); | 473 | kvm_err("Cannot allocate Hyp pmd\n"); |
422 | return -ENOMEM; | 474 | return -ENOMEM; |
423 | } | 475 | } |
424 | pud_populate(NULL, pud, pmd); | 476 | pud_populate(NULL, pud, pmd); |
425 | get_page(virt_to_page(pud)); | 477 | get_page(virt_to_page(pud)); |
426 | kvm_flush_dcache_to_poc(pud, sizeof(*pud)); | 478 | kvm_flush_dcache_to_poc(pud, sizeof(*pud)); |
427 | } | 479 | } |
428 | 480 | ||
429 | next = pud_addr_end(addr, end); | 481 | next = pud_addr_end(addr, end); |
430 | ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot); | 482 | ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot); |
431 | if (ret) | 483 | if (ret) |
432 | return ret; | 484 | return ret; |
433 | pfn += (next - addr) >> PAGE_SHIFT; | 485 | pfn += (next - addr) >> PAGE_SHIFT; |
434 | } while (addr = next, addr != end); | 486 | } while (addr = next, addr != end); |
435 | 487 | ||
436 | return 0; | 488 | return 0; |
437 | } | 489 | } |
438 | 490 | ||
439 | static int __create_hyp_mappings(pgd_t *pgdp, | 491 | static int __create_hyp_mappings(pgd_t *pgdp, |
440 | unsigned long start, unsigned long end, | 492 | unsigned long start, unsigned long end, |
441 | unsigned long pfn, pgprot_t prot) | 493 | unsigned long pfn, pgprot_t prot) |
442 | { | 494 | { |
443 | pgd_t *pgd; | 495 | pgd_t *pgd; |
444 | pud_t *pud; | 496 | pud_t *pud; |
445 | unsigned long addr, next; | 497 | unsigned long addr, next; |
446 | int err = 0; | 498 | int err = 0; |
447 | 499 | ||
448 | mutex_lock(&kvm_hyp_pgd_mutex); | 500 | mutex_lock(&kvm_hyp_pgd_mutex); |
449 | addr = start & PAGE_MASK; | 501 | addr = start & PAGE_MASK; |
450 | end = PAGE_ALIGN(end); | 502 | end = PAGE_ALIGN(end); |
451 | do { | 503 | do { |
452 | pgd = pgdp + pgd_index(addr); | 504 | pgd = pgdp + pgd_index(addr); |
453 | 505 | ||
454 | if (pgd_none(*pgd)) { | 506 | if (pgd_none(*pgd)) { |
455 | pud = pud_alloc_one(NULL, addr); | 507 | pud = pud_alloc_one(NULL, addr); |
456 | if (!pud) { | 508 | if (!pud) { |
457 | kvm_err("Cannot allocate Hyp pud\n"); | 509 | kvm_err("Cannot allocate Hyp pud\n"); |
458 | err = -ENOMEM; | 510 | err = -ENOMEM; |
459 | goto out; | 511 | goto out; |
460 | } | 512 | } |
461 | pgd_populate(NULL, pgd, pud); | 513 | pgd_populate(NULL, pgd, pud); |
462 | get_page(virt_to_page(pgd)); | 514 | get_page(virt_to_page(pgd)); |
463 | kvm_flush_dcache_to_poc(pgd, sizeof(*pgd)); | 515 | kvm_flush_dcache_to_poc(pgd, sizeof(*pgd)); |
464 | } | 516 | } |
465 | 517 | ||
466 | next = pgd_addr_end(addr, end); | 518 | next = pgd_addr_end(addr, end); |
467 | err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot); | 519 | err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot); |
468 | if (err) | 520 | if (err) |
469 | goto out; | 521 | goto out; |
470 | pfn += (next - addr) >> PAGE_SHIFT; | 522 | pfn += (next - addr) >> PAGE_SHIFT; |
471 | } while (addr = next, addr != end); | 523 | } while (addr = next, addr != end); |
472 | out: | 524 | out: |
473 | mutex_unlock(&kvm_hyp_pgd_mutex); | 525 | mutex_unlock(&kvm_hyp_pgd_mutex); |
474 | return err; | 526 | return err; |
475 | } | 527 | } |
476 | 528 | ||
477 | static phys_addr_t kvm_kaddr_to_phys(void *kaddr) | 529 | static phys_addr_t kvm_kaddr_to_phys(void *kaddr) |
478 | { | 530 | { |
479 | if (!is_vmalloc_addr(kaddr)) { | 531 | if (!is_vmalloc_addr(kaddr)) { |
480 | BUG_ON(!virt_addr_valid(kaddr)); | 532 | BUG_ON(!virt_addr_valid(kaddr)); |
481 | return __pa(kaddr); | 533 | return __pa(kaddr); |
482 | } else { | 534 | } else { |
483 | return page_to_phys(vmalloc_to_page(kaddr)) + | 535 | return page_to_phys(vmalloc_to_page(kaddr)) + |
484 | offset_in_page(kaddr); | 536 | offset_in_page(kaddr); |
485 | } | 537 | } |
486 | } | 538 | } |
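kvm_kaddr_to_phys() needs two strategies because lowmem is one linear mapping (__pa() is pure arithmetic), while vmalloc pages are physically scattered: each backing page must be looked up individually and only the in-page offset carried over. The offset split is plain masking; a small sketch, with the page lookup result invented for illustration:

        #include <stdio.h>

        #define PAGE_SHIFT 12
        #define PAGE_SIZE  (1UL << PAGE_SHIFT)

        /* Offset of an address within its page, as offset_in_page() does. */
        static unsigned long offset_in_page(unsigned long addr)
        {
                return addr & (PAGE_SIZE - 1);
        }

        int main(void)
        {
                unsigned long kaddr = 0xf0123abcUL;
                unsigned long page_phys = 0x80045000UL; /* invented lookup */

                /* phys = base of the backing page + offset within the page */
                printf("phys = 0x%lx\n", page_phys + offset_in_page(kaddr));
                return 0;
        }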
487 | 539 | ||
488 | /** | 540 | /** |
489 | * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode | 541 | * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode |
490 | * @from: The virtual kernel start address of the range | 542 | * @from: The virtual kernel start address of the range |
491 | * @to: The virtual kernel end address of the range (exclusive) | 543 | * @to: The virtual kernel end address of the range (exclusive) |
492 | * | 544 | * |
493 | * The Hyp-mode mapping uses the same virtual addresses as the kernel | 545 | * The Hyp-mode mapping uses the same virtual addresses as the kernel |
494 | * (modulo HYP_PAGE_OFFSET) and points at the same underlying | 546 | * (modulo HYP_PAGE_OFFSET) and points at the same underlying |
495 | * physical pages. | 547 | * physical pages. |
496 | */ | 548 | */ |
497 | int create_hyp_mappings(void *from, void *to) | 549 | int create_hyp_mappings(void *from, void *to) |
498 | { | 550 | { |
499 | phys_addr_t phys_addr; | 551 | phys_addr_t phys_addr; |
500 | unsigned long virt_addr; | 552 | unsigned long virt_addr; |
501 | unsigned long start = KERN_TO_HYP((unsigned long)from); | 553 | unsigned long start = KERN_TO_HYP((unsigned long)from); |
502 | unsigned long end = KERN_TO_HYP((unsigned long)to); | 554 | unsigned long end = KERN_TO_HYP((unsigned long)to); |
503 | 555 | ||
504 | start = start & PAGE_MASK; | 556 | start = start & PAGE_MASK; |
505 | end = PAGE_ALIGN(end); | 557 | end = PAGE_ALIGN(end); |
506 | 558 | ||
507 | for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) { | 559 | for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) { |
508 | int err; | 560 | int err; |
509 | 561 | ||
510 | phys_addr = kvm_kaddr_to_phys(from + virt_addr - start); | 562 | phys_addr = kvm_kaddr_to_phys(from + virt_addr - start); |
511 | err = __create_hyp_mappings(hyp_pgd, virt_addr, | 563 | err = __create_hyp_mappings(hyp_pgd, virt_addr, |
512 | virt_addr + PAGE_SIZE, | 564 | virt_addr + PAGE_SIZE, |
513 | __phys_to_pfn(phys_addr), | 565 | __phys_to_pfn(phys_addr), |
514 | PAGE_HYP); | 566 | PAGE_HYP); |
515 | if (err) | 567 | if (err) |
516 | return err; | 568 | return err; |
517 | } | 569 | } |
518 | 570 | ||
519 | return 0; | 571 | return 0; |
520 | } | 572 | } |
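create_hyp_mappings() first normalizes the range — round the start down with PAGE_MASK, round the end up with PAGE_ALIGN — and then resolves the physical address one page at a time, since the kernel range may span both linearly mapped and vmalloc'd memory. The rounding arithmetic in isolation (addresses are illustrative):

        #include <stdio.h>

        #define PAGE_SIZE     4096UL
        #define PAGE_MASK     (~(PAGE_SIZE - 1))
        #define PAGE_ALIGN(a) (((a) + PAGE_SIZE - 1) & PAGE_MASK)

        int main(void)
        {
                unsigned long from = 0xc0001234UL, to = 0xc0003456UL;
                unsigned long start = from & PAGE_MASK; /* round down */
                unsigned long end = PAGE_ALIGN(to);     /* round up   */
                unsigned long va, pages = 0;

                for (va = start; va < end; va += PAGE_SIZE)
                        pages++;                /* one mapping per page */
                printf("[0x%lx, 0x%lx): %lu pages\n", start, end, pages);
                return 0;
        }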
521 | 573 | ||
522 | /** | 574 | /** |
523 | * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode | 575 | * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode |
524 | * @from: The kernel start VA of the range | 576 | * @from: The kernel start VA of the range |
525 | * @to: The kernel end VA of the range (exclusive) | 577 | * @to: The kernel end VA of the range (exclusive) |
526 | * @phys_addr: The physical start address which gets mapped | 578 | * @phys_addr: The physical start address which gets mapped |
527 | * | 579 | * |
528 | * The resulting HYP VA is the same as the kernel VA, modulo | 580 | * The resulting HYP VA is the same as the kernel VA, modulo |
529 | * HYP_PAGE_OFFSET. | 581 | * HYP_PAGE_OFFSET. |
530 | */ | 582 | */ |
531 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) | 583 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) |
532 | { | 584 | { |
533 | unsigned long start = KERN_TO_HYP((unsigned long)from); | 585 | unsigned long start = KERN_TO_HYP((unsigned long)from); |
534 | unsigned long end = KERN_TO_HYP((unsigned long)to); | 586 | unsigned long end = KERN_TO_HYP((unsigned long)to); |
535 | 587 | ||
536 | /* Check for a valid kernel IO mapping */ | 588 | /* Check for a valid kernel IO mapping */ |
537 | if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)) | 589 | if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)) |
538 | return -EINVAL; | 590 | return -EINVAL; |
539 | 591 | ||
540 | return __create_hyp_mappings(hyp_pgd, start, end, | 592 | return __create_hyp_mappings(hyp_pgd, start, end, |
541 | __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); | 593 | __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE); |
542 | } | 594 | } |
543 | 595 | ||
544 | /** | 596 | /** |
545 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. | 597 | * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. |
546 | * @kvm: The KVM struct pointer for the VM. | 598 | * @kvm: The KVM struct pointer for the VM. |
547 | * | 599 | * |
548 | * Allocates only the 1st level table, of the size defined by S2_PGD_ORDER (it | 600 | * Allocates only the 1st level table, of the size defined by S2_PGD_ORDER (it |
549 | * can support either full 40-bit input addresses or be limited to 32-bit input | 601 | * can support either full 40-bit input addresses or be limited to 32-bit input |
550 | * addresses). Clears the allocated pages. | 602 | * addresses). Clears the allocated pages. |
551 | * | 603 | * |
552 | * Note we don't need locking here as this is only called when the VM is | 604 | * Note we don't need locking here as this is only called when the VM is |
553 | * created, which can only be done once. | 605 | * created, which can only be done once. |
554 | */ | 606 | */ |
555 | int kvm_alloc_stage2_pgd(struct kvm *kvm) | 607 | int kvm_alloc_stage2_pgd(struct kvm *kvm) |
556 | { | 608 | { |
557 | int ret; | 609 | int ret; |
558 | pgd_t *pgd; | 610 | pgd_t *pgd; |
559 | 611 | ||
560 | if (kvm->arch.pgd != NULL) { | 612 | if (kvm->arch.pgd != NULL) { |
561 | kvm_err("kvm_arch already initialized?\n"); | 613 | kvm_err("kvm_arch already initialized?\n"); |
562 | return -EINVAL; | 614 | return -EINVAL; |
563 | } | 615 | } |
564 | 616 | ||
565 | if (KVM_PREALLOC_LEVEL > 0) { | 617 | if (KVM_PREALLOC_LEVEL > 0) { |
566 | /* | 618 | /* |
567 | * Allocate fake pgd for the page table manipulation macros to | 619 | * Allocate fake pgd for the page table manipulation macros to |
568 | * work. This is not used by the hardware and we have no | 620 | * work. This is not used by the hardware and we have no |
569 | * alignment requirement for this allocation. | 621 | * alignment requirement for this allocation. |
570 | */ | 622 | */ |
571 | pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), | 623 | pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t), |
572 | GFP_KERNEL | __GFP_ZERO); | 624 | GFP_KERNEL | __GFP_ZERO); |
573 | } else { | 625 | } else { |
574 | /* | 626 | /* |
575 | * Allocate actual first-level Stage-2 page table used by the | 627 | * Allocate actual first-level Stage-2 page table used by the |
576 | * hardware for Stage-2 page table walks. | 628 | * hardware for Stage-2 page table walks. |
577 | */ | 629 | */ |
578 | pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); | 630 | pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER); |
579 | } | 631 | } |
580 | 632 | ||
581 | if (!pgd) | 633 | if (!pgd) |
582 | return -ENOMEM; | 634 | return -ENOMEM; |
583 | 635 | ||
584 | ret = kvm_prealloc_hwpgd(kvm, pgd); | 636 | ret = kvm_prealloc_hwpgd(kvm, pgd); |
585 | if (ret) | 637 | if (ret) |
586 | goto out_err; | 638 | goto out_err; |
587 | 639 | ||
588 | kvm_clean_pgd(pgd); | 640 | kvm_clean_pgd(pgd); |
589 | kvm->arch.pgd = pgd; | 641 | kvm->arch.pgd = pgd; |
590 | return 0; | 642 | return 0; |
591 | out_err: | 643 | out_err: |
592 | if (KVM_PREALLOC_LEVEL > 0) | 644 | if (KVM_PREALLOC_LEVEL > 0) |
593 | kfree(pgd); | 645 | kfree(pgd); |
594 | else | 646 | else |
595 | free_pages((unsigned long)pgd, S2_PGD_ORDER); | 647 | free_pages((unsigned long)pgd, S2_PGD_ORDER); |
596 | return ret; | 648 | return ret; |
597 | } | 649 | } |
598 | 650 | ||
599 | /** | 651 | /** |
600 | * unmap_stage2_range -- Clear stage2 page table entries to unmap a range | 652 | * unmap_stage2_range -- Clear stage2 page table entries to unmap a range |
601 | * @kvm: The VM pointer | 653 | * @kvm: The VM pointer |
602 | * @start: The intermediate physical base address of the range to unmap | 654 | * @start: The intermediate physical base address of the range to unmap |
603 | * @size: The size of the area to unmap | 655 | * @size: The size of the area to unmap |
604 | * | 656 | * |
605 | * Clear a range of stage-2 mappings, lowering the various ref-counts. Must | 657 | * Clear a range of stage-2 mappings, lowering the various ref-counts. Must |
606 | * be called while holding mmu_lock (except when freeing the stage2 pgd before | 658 | * be called while holding mmu_lock (except when freeing the stage2 pgd before |
607 | * destroying the VM), otherwise another faulting VCPU may come in and mess | 659 | * destroying the VM), otherwise another faulting VCPU may come in and mess |
608 | * with things behind our backs. | 660 | * with things behind our backs. |
609 | */ | 661 | */ |
610 | static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) | 662 | static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) |
611 | { | 663 | { |
612 | unmap_range(kvm, kvm->arch.pgd, start, size); | 664 | unmap_range(kvm, kvm->arch.pgd, start, size); |
613 | } | 665 | } |
614 | 666 | ||
615 | static void stage2_unmap_memslot(struct kvm *kvm, | 667 | static void stage2_unmap_memslot(struct kvm *kvm, |
616 | struct kvm_memory_slot *memslot) | 668 | struct kvm_memory_slot *memslot) |
617 | { | 669 | { |
618 | hva_t hva = memslot->userspace_addr; | 670 | hva_t hva = memslot->userspace_addr; |
619 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; | 671 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; |
620 | phys_addr_t size = PAGE_SIZE * memslot->npages; | 672 | phys_addr_t size = PAGE_SIZE * memslot->npages; |
621 | hva_t reg_end = hva + size; | 673 | hva_t reg_end = hva + size; |
622 | 674 | ||
623 | /* | 675 | /* |
624 | * A memory region could potentially cover multiple VMAs, and any holes | 676 | * A memory region could potentially cover multiple VMAs, and any holes |
625 | * between them, so iterate over all of them to find out if we should | 677 | * between them, so iterate over all of them to find out if we should |
626 | * unmap any of them. | 678 | * unmap any of them. |
627 | * | 679 | * |
628 | * +--------------------------------------------+ | 680 | * +--------------------------------------------+ |
629 | * +---------------+----------------+ +----------------+ | 681 | * +---------------+----------------+ +----------------+ |
630 | * | : VMA 1 | VMA 2 | | VMA 3 : | | 682 | * | : VMA 1 | VMA 2 | | VMA 3 : | |
631 | * +---------------+----------------+ +----------------+ | 683 | * +---------------+----------------+ +----------------+ |
632 | * | memory region | | 684 | * | memory region | |
633 | * +--------------------------------------------+ | 685 | * +--------------------------------------------+ |
634 | */ | 686 | */ |
635 | do { | 687 | do { |
636 | struct vm_area_struct *vma = find_vma(current->mm, hva); | 688 | struct vm_area_struct *vma = find_vma(current->mm, hva); |
637 | hva_t vm_start, vm_end; | 689 | hva_t vm_start, vm_end; |
638 | 690 | ||
639 | if (!vma || vma->vm_start >= reg_end) | 691 | if (!vma || vma->vm_start >= reg_end) |
640 | break; | 692 | break; |
641 | 693 | ||
642 | /* | 694 | /* |
643 | * Take the intersection of this VMA with the memory region | 695 | * Take the intersection of this VMA with the memory region |
644 | */ | 696 | */ |
645 | vm_start = max(hva, vma->vm_start); | 697 | vm_start = max(hva, vma->vm_start); |
646 | vm_end = min(reg_end, vma->vm_end); | 698 | vm_end = min(reg_end, vma->vm_end); |
647 | 699 | ||
648 | if (!(vma->vm_flags & VM_PFNMAP)) { | 700 | if (!(vma->vm_flags & VM_PFNMAP)) { |
649 | gpa_t gpa = addr + (vm_start - memslot->userspace_addr); | 701 | gpa_t gpa = addr + (vm_start - memslot->userspace_addr); |
650 | unmap_stage2_range(kvm, gpa, vm_end - vm_start); | 702 | unmap_stage2_range(kvm, gpa, vm_end - vm_start); |
651 | } | 703 | } |
652 | hva = vm_end; | 704 | hva = vm_end; |
653 | } while (hva < reg_end); | 705 | } while (hva < reg_end); |
654 | } | 706 | } |
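The VMA walk in stage2_unmap_memslot() is interval intersection: clamp each VMA against the memslot's [hva, reg_end) window, unmap the overlap, and advance — holes fall out naturally because find_vma() returns the next VMA at or after the address. A self-contained sketch of the clamping loop over an invented, sorted VMA array:

        #include <stdio.h>

        struct vma { unsigned long start, end; };

        int main(void)
        {
                /* invented VMAs, with a hole between the 2nd and 3rd */
                struct vma vmas[] = {
                        { 0x1000, 0x4000 },
                        { 0x4000, 0x7000 },
                        { 0x9000, 0xc000 },
                };
                unsigned long hva = 0x2000, reg_end = 0xa000;
                unsigned int i = 0, n = sizeof(vmas) / sizeof(vmas[0]);

                while (hva < reg_end) {
                        /* find_vma(): first VMA whose end lies above hva */
                        while (i < n && vmas[i].end <= hva)
                                i++;
                        if (i == n || vmas[i].start >= reg_end)
                                break;

                        unsigned long vm_start =
                                hva > vmas[i].start ? hva : vmas[i].start;
                        unsigned long vm_end =
                                reg_end < vmas[i].end ? reg_end : vmas[i].end;

                        printf("unmap [0x%lx, 0x%lx)\n", vm_start, vm_end);
                        hva = vm_end;
                }
                return 0;
        }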
655 | 707 | ||
656 | /** | 708 | /** |
657 | * stage2_unmap_vm - Unmap Stage-2 RAM mappings | 709 | * stage2_unmap_vm - Unmap Stage-2 RAM mappings |
658 | * @kvm: The struct kvm pointer | 710 | * @kvm: The struct kvm pointer |
659 | * | 711 | * |
660 | * Go through the memregions and unmap any regular RAM | 712 | * Go through the memregions and unmap any regular RAM |
661 | * backing memory already mapped to the VM. | 713 | * backing memory already mapped to the VM. |
662 | */ | 714 | */ |
663 | void stage2_unmap_vm(struct kvm *kvm) | 715 | void stage2_unmap_vm(struct kvm *kvm) |
664 | { | 716 | { |
665 | struct kvm_memslots *slots; | 717 | struct kvm_memslots *slots; |
666 | struct kvm_memory_slot *memslot; | 718 | struct kvm_memory_slot *memslot; |
667 | int idx; | 719 | int idx; |
668 | 720 | ||
669 | idx = srcu_read_lock(&kvm->srcu); | 721 | idx = srcu_read_lock(&kvm->srcu); |
670 | spin_lock(&kvm->mmu_lock); | 722 | spin_lock(&kvm->mmu_lock); |
671 | 723 | ||
672 | slots = kvm_memslots(kvm); | 724 | slots = kvm_memslots(kvm); |
673 | kvm_for_each_memslot(memslot, slots) | 725 | kvm_for_each_memslot(memslot, slots) |
674 | stage2_unmap_memslot(kvm, memslot); | 726 | stage2_unmap_memslot(kvm, memslot); |
675 | 727 | ||
676 | spin_unlock(&kvm->mmu_lock); | 728 | spin_unlock(&kvm->mmu_lock); |
677 | srcu_read_unlock(&kvm->srcu, idx); | 729 | srcu_read_unlock(&kvm->srcu, idx); |
678 | } | 730 | } |
679 | 731 | ||
680 | /** | 732 | /** |
681 | * kvm_free_stage2_pgd - free all stage-2 tables | 733 | * kvm_free_stage2_pgd - free all stage-2 tables |
682 | * @kvm: The KVM struct pointer for the VM. | 734 | * @kvm: The KVM struct pointer for the VM. |
683 | * | 735 | * |
684 | * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all | 736 | * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all |
685 | * underlying level-2 and level-3 tables before freeing the actual level-1 table | 737 | * underlying level-2 and level-3 tables before freeing the actual level-1 table |
686 | * and setting the struct pointer to NULL. | 738 | * and setting the struct pointer to NULL. |
687 | * | 739 | * |
688 | * Note we don't need locking here as this is only called when the VM is | 740 | * Note we don't need locking here as this is only called when the VM is |
689 | * destroyed, which can only be done once. | 741 | * destroyed, which can only be done once. |
690 | */ | 742 | */ |
691 | void kvm_free_stage2_pgd(struct kvm *kvm) | 743 | void kvm_free_stage2_pgd(struct kvm *kvm) |
692 | { | 744 | { |
693 | if (kvm->arch.pgd == NULL) | 745 | if (kvm->arch.pgd == NULL) |
694 | return; | 746 | return; |
695 | 747 | ||
696 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); | 748 | unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); |
697 | kvm_free_hwpgd(kvm); | 749 | kvm_free_hwpgd(kvm); |
698 | if (KVM_PREALLOC_LEVEL > 0) | 750 | if (KVM_PREALLOC_LEVEL > 0) |
699 | kfree(kvm->arch.pgd); | 751 | kfree(kvm->arch.pgd); |
700 | else | 752 | else |
701 | free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); | 753 | free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); |
702 | kvm->arch.pgd = NULL; | 754 | kvm->arch.pgd = NULL; |
703 | } | 755 | } |
704 | 756 | ||
705 | static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | 757 | static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
706 | phys_addr_t addr) | 758 | phys_addr_t addr) |
707 | { | 759 | { |
708 | pgd_t *pgd; | 760 | pgd_t *pgd; |
709 | pud_t *pud; | 761 | pud_t *pud; |
710 | 762 | ||
711 | pgd = kvm->arch.pgd + pgd_index(addr); | 763 | pgd = kvm->arch.pgd + pgd_index(addr); |
712 | if (WARN_ON(pgd_none(*pgd))) { | 764 | if (WARN_ON(pgd_none(*pgd))) { |
713 | if (!cache) | 765 | if (!cache) |
714 | return NULL; | 766 | return NULL; |
715 | pud = mmu_memory_cache_alloc(cache); | 767 | pud = mmu_memory_cache_alloc(cache); |
716 | pgd_populate(NULL, pgd, pud); | 768 | pgd_populate(NULL, pgd, pud); |
717 | get_page(virt_to_page(pgd)); | 769 | get_page(virt_to_page(pgd)); |
718 | } | 770 | } |
719 | 771 | ||
720 | return pud_offset(pgd, addr); | 772 | return pud_offset(pgd, addr); |
721 | } | 773 | } |
722 | 774 | ||
723 | static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | 775 | static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
724 | phys_addr_t addr) | 776 | phys_addr_t addr) |
725 | { | 777 | { |
726 | pud_t *pud; | 778 | pud_t *pud; |
727 | pmd_t *pmd; | 779 | pmd_t *pmd; |
728 | 780 | ||
729 | pud = stage2_get_pud(kvm, cache, addr); | 781 | pud = stage2_get_pud(kvm, cache, addr); |
730 | if (pud_none(*pud)) { | 782 | if (pud_none(*pud)) { |
731 | if (!cache) | 783 | if (!cache) |
732 | return NULL; | 784 | return NULL; |
733 | pmd = mmu_memory_cache_alloc(cache); | 785 | pmd = mmu_memory_cache_alloc(cache); |
734 | pud_populate(NULL, pud, pmd); | 786 | pud_populate(NULL, pud, pmd); |
735 | get_page(virt_to_page(pud)); | 787 | get_page(virt_to_page(pud)); |
736 | } | 788 | } |
737 | 789 | ||
738 | return pmd_offset(pud, addr); | 790 | return pmd_offset(pud, addr); |
739 | } | 791 | } |
740 | 792 | ||
741 | static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache | 793 | static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache |
742 | *cache, phys_addr_t addr, const pmd_t *new_pmd) | 794 | *cache, phys_addr_t addr, const pmd_t *new_pmd) |
743 | { | 795 | { |
744 | pmd_t *pmd, old_pmd; | 796 | pmd_t *pmd, old_pmd; |
745 | 797 | ||
746 | pmd = stage2_get_pmd(kvm, cache, addr); | 798 | pmd = stage2_get_pmd(kvm, cache, addr); |
747 | VM_BUG_ON(!pmd); | 799 | VM_BUG_ON(!pmd); |
748 | 800 | ||
749 | /* | 801 | /* |
750 | * Mapping in huge pages should only happen through a fault. If a | 802 | * Mapping in huge pages should only happen through a fault. If a |
751 | * page is merged into a transparent huge page, the individual | 803 | * page is merged into a transparent huge page, the individual |
752 | * subpages of that huge page should be unmapped through MMU | 804 | * subpages of that huge page should be unmapped through MMU |
753 | * notifiers before we get here. | 805 | * notifiers before we get here. |
754 | * | 806 | * |
755 | * Merging of CompoundPages is not supported; they should be split | 807 | * Merging of CompoundPages is not supported; they should be split |
756 | * first, unmapped, merged, and mapped back in on-demand. | 808 | * first, unmapped, merged, and mapped back in on-demand. |
757 | */ | 809 | */ |
758 | VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); | 810 | VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd)); |
759 | 811 | ||
760 | old_pmd = *pmd; | 812 | old_pmd = *pmd; |
761 | kvm_set_pmd(pmd, *new_pmd); | 813 | kvm_set_pmd(pmd, *new_pmd); |
762 | if (pmd_present(old_pmd)) | 814 | if (pmd_present(old_pmd)) |
763 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 815 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
764 | else | 816 | else |
765 | get_page(virt_to_page(pmd)); | 817 | get_page(virt_to_page(pmd)); |
766 | return 0; | 818 | return 0; |
767 | } | 819 | } |
768 | 820 | ||
769 | static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, | 821 | static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
770 | phys_addr_t addr, const pte_t *new_pte, bool iomap) | 822 | phys_addr_t addr, const pte_t *new_pte, bool iomap) |
771 | { | 823 | { |
772 | pmd_t *pmd; | 824 | pmd_t *pmd; |
773 | pte_t *pte, old_pte; | 825 | pte_t *pte, old_pte; |
774 | 826 | ||
775 | /* Create stage-2 page table mapping - Levels 0 and 1 */ | 827 | /* Create stage-2 page table mapping - Levels 0 and 1 */ |
776 | pmd = stage2_get_pmd(kvm, cache, addr); | 828 | pmd = stage2_get_pmd(kvm, cache, addr); |
777 | if (!pmd) { | 829 | if (!pmd) { |
778 | /* | 830 | /* |
779 | * Ignore calls from kvm_set_spte_hva for unallocated | 831 | * Ignore calls from kvm_set_spte_hva for unallocated |
780 | * address ranges. | 832 | * address ranges. |
781 | */ | 833 | */ |
782 | return 0; | 834 | return 0; |
783 | } | 835 | } |
784 | 836 | ||
785 | /* Create stage-2 page mappings - Level 2 */ | 837 | /* Create stage-2 page mappings - Level 2 */ |
786 | if (pmd_none(*pmd)) { | 838 | if (pmd_none(*pmd)) { |
787 | if (!cache) | 839 | if (!cache) |
788 | return 0; /* ignore calls from kvm_set_spte_hva */ | 840 | return 0; /* ignore calls from kvm_set_spte_hva */ |
789 | pte = mmu_memory_cache_alloc(cache); | 841 | pte = mmu_memory_cache_alloc(cache); |
790 | kvm_clean_pte(pte); | 842 | kvm_clean_pte(pte); |
791 | pmd_populate_kernel(NULL, pmd, pte); | 843 | pmd_populate_kernel(NULL, pmd, pte); |
792 | get_page(virt_to_page(pmd)); | 844 | get_page(virt_to_page(pmd)); |
793 | } | 845 | } |
794 | 846 | ||
795 | pte = pte_offset_kernel(pmd, addr); | 847 | pte = pte_offset_kernel(pmd, addr); |
796 | 848 | ||
797 | if (iomap && pte_present(*pte)) | 849 | if (iomap && pte_present(*pte)) |
798 | return -EFAULT; | 850 | return -EFAULT; |
799 | 851 | ||
800 | /* Create 2nd stage page table mapping - Level 3 */ | 852 | /* Create 2nd stage page table mapping - Level 3 */ |
801 | old_pte = *pte; | 853 | old_pte = *pte; |
802 | kvm_set_pte(pte, *new_pte); | 854 | kvm_set_pte(pte, *new_pte); |
803 | if (pte_present(old_pte)) | 855 | if (pte_present(old_pte)) |
804 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 856 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
805 | else | 857 | else |
806 | get_page(virt_to_page(pte)); | 858 | get_page(virt_to_page(pte)); |
807 | 859 | ||
808 | return 0; | 860 | return 0; |
809 | } | 861 | } |
810 | 862 | ||
811 | /** | 863 | /** |
812 | * kvm_phys_addr_ioremap - map a device range to guest IPA | 864 | * kvm_phys_addr_ioremap - map a device range to guest IPA |
813 | * | 865 | * |
814 | * @kvm: The KVM pointer | 866 | * @kvm: The KVM pointer |
815 | * @guest_ipa: The IPA at which to insert the mapping | 867 | * @guest_ipa: The IPA at which to insert the mapping |
816 | * @pa: The physical address of the device | 868 | * @pa: The physical address of the device |
817 | * @size: The size of the mapping | 869 | * @size: The size of the mapping |
818 | */ | 870 | */ |
819 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | 871 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, |
820 | phys_addr_t pa, unsigned long size, bool writable) | 872 | phys_addr_t pa, unsigned long size, bool writable) |
821 | { | 873 | { |
822 | phys_addr_t addr, end; | 874 | phys_addr_t addr, end; |
823 | int ret = 0; | 875 | int ret = 0; |
824 | unsigned long pfn; | 876 | unsigned long pfn; |
825 | struct kvm_mmu_memory_cache cache = { 0, }; | 877 | struct kvm_mmu_memory_cache cache = { 0, }; |
826 | 878 | ||
827 | end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; | 879 | end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; |
828 | pfn = __phys_to_pfn(pa); | 880 | pfn = __phys_to_pfn(pa); |
829 | 881 | ||
830 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { | 882 | for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { |
831 | pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); | 883 | pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); |
832 | 884 | ||
833 | if (writable) | 885 | if (writable) |
834 | kvm_set_s2pte_writable(&pte); | 886 | kvm_set_s2pte_writable(&pte); |
835 | 887 | ||
836 | ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES, | 888 | ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES, |
837 | KVM_NR_MEM_OBJS); | 889 | KVM_NR_MEM_OBJS); |
838 | if (ret) | 890 | if (ret) |
839 | goto out; | 891 | goto out; |
840 | spin_lock(&kvm->mmu_lock); | 892 | spin_lock(&kvm->mmu_lock); |
841 | ret = stage2_set_pte(kvm, &cache, addr, &pte, true); | 893 | ret = stage2_set_pte(kvm, &cache, addr, &pte, true); |
842 | spin_unlock(&kvm->mmu_lock); | 894 | spin_unlock(&kvm->mmu_lock); |
843 | if (ret) | 895 | if (ret) |
844 | goto out; | 896 | goto out; |
845 | 897 | ||
846 | pfn++; | 898 | pfn++; |
847 | } | 899 | } |
848 | 900 | ||
849 | out: | 901 | out: |
850 | mmu_free_memory_cache(&cache); | 902 | mmu_free_memory_cache(&cache); |
851 | return ret; | 903 | return ret; |
852 | } | 904 | } |
853 | 905 | ||
854 | static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap) | 906 | static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap) |
855 | { | 907 | { |
856 | pfn_t pfn = *pfnp; | 908 | pfn_t pfn = *pfnp; |
857 | gfn_t gfn = *ipap >> PAGE_SHIFT; | 909 | gfn_t gfn = *ipap >> PAGE_SHIFT; |
858 | 910 | ||
859 | if (PageTransCompound(pfn_to_page(pfn))) { | 911 | if (PageTransCompound(pfn_to_page(pfn))) { |
860 | unsigned long mask; | 912 | unsigned long mask; |
861 | /* | 913 | /* |
862 | * The address we faulted on is backed by a transparent huge | 914 | * The address we faulted on is backed by a transparent huge |
863 | * page. However, because we map the compound huge page and | 915 | * page. However, because we map the compound huge page and |
864 | * not the individual tail page, we need to transfer the | 916 | * not the individual tail page, we need to transfer the |
865 | * refcount to the head page. We have to be careful that the | 917 | * refcount to the head page. We have to be careful that the |
866 | * THP doesn't start to split while we are adjusting the | 918 | * THP doesn't start to split while we are adjusting the |
867 | * refcounts. | 919 | * refcounts. |
868 | * | 920 | * |
869 | * We are sure this doesn't happen, because mmu_notifier_retry | 921 | * We are sure this doesn't happen, because mmu_notifier_retry |
870 | * was successful and we are holding the mmu_lock, so if this | 922 | * was successful and we are holding the mmu_lock, so if this |
871 | * THP is trying to split, it will be blocked in the mmu | 923 | * THP is trying to split, it will be blocked in the mmu |
872 | * notifier before touching any of the pages, specifically | 924 | * notifier before touching any of the pages, specifically |
873 | * before being able to call __split_huge_page_refcount(). | 925 | * before being able to call __split_huge_page_refcount(). |
874 | * | 926 | * |
875 | * We can therefore safely transfer the refcount from PG_tail | 927 | * We can therefore safely transfer the refcount from PG_tail |
876 | * to PG_head and switch the pfn from a tail page to the head | 928 | * to PG_head and switch the pfn from a tail page to the head |
877 | * page accordingly. | 929 | * page accordingly. |
878 | */ | 930 | */ |
879 | mask = PTRS_PER_PMD - 1; | 931 | mask = PTRS_PER_PMD - 1; |
880 | VM_BUG_ON((gfn & mask) != (pfn & mask)); | 932 | VM_BUG_ON((gfn & mask) != (pfn & mask)); |
881 | if (pfn & mask) { | 933 | if (pfn & mask) { |
882 | *ipap &= PMD_MASK; | 934 | *ipap &= PMD_MASK; |
883 | kvm_release_pfn_clean(pfn); | 935 | kvm_release_pfn_clean(pfn); |
884 | pfn &= ~mask; | 936 | pfn &= ~mask; |
885 | kvm_get_pfn(pfn); | 937 | kvm_get_pfn(pfn); |
886 | *pfnp = pfn; | 938 | *pfnp = pfn; |
887 | } | 939 | } |
888 | 940 | ||
889 | return true; | 941 | return true; |
890 | } | 942 | } |
891 | 943 | ||
892 | return false; | 944 | return false; |
893 | } | 945 | } |
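The arithmetic at the end of transparent_hugepage_adjust() rounds the faulting pfn and IPA down to the start of the huge page: with 4K pages a PMD covers PTRS_PER_PMD == 512 entries, so the low nine bits select the tail page, and guest and host offsets into the block must agree. A sketch of just that masking, assuming 4K pages:

        #include <stdio.h>
        #include <stdint.h>

        #define PTRS_PER_PMD 512ULL
        #define PMD_SIZE     (PTRS_PER_PMD * 4096ULL)
        #define PMD_MASK     (~(PMD_SIZE - 1))

        int main(void)
        {
                uint64_t pfn = 0x80237, ipa = 0x40237000;
                uint64_t mask = PTRS_PER_PMD - 1;

                /* guest and host offsets into the huge page must agree */
                if (((ipa >> 12) & mask) != (pfn & mask))
                        return 1;

                if (pfn & mask) {       /* faulted on a tail page */
                        ipa &= PMD_MASK; /* round IPA down to the block */
                        pfn &= ~mask;    /* switch pfn to the head page */
                }
                printf("pfn=0x%llx ipa=0x%llx\n",
                       (unsigned long long)pfn, (unsigned long long)ipa);
                return 0;       /* pfn=0x80200 ipa=0x40200000 */
        }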
894 | 946 | ||
895 | static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) | 947 | static bool kvm_is_write_fault(struct kvm_vcpu *vcpu) |
896 | { | 948 | { |
897 | if (kvm_vcpu_trap_is_iabt(vcpu)) | 949 | if (kvm_vcpu_trap_is_iabt(vcpu)) |
898 | return false; | 950 | return false; |
899 | 951 | ||
900 | return kvm_vcpu_dabt_iswrite(vcpu); | 952 | return kvm_vcpu_dabt_iswrite(vcpu); |
901 | } | 953 | } |
902 | 954 | ||
903 | static bool kvm_is_device_pfn(unsigned long pfn) | 955 | static bool kvm_is_device_pfn(unsigned long pfn) |
904 | { | 956 | { |
905 | return !pfn_valid(pfn); | 957 | return !pfn_valid(pfn); |
906 | } | 958 | } |
907 | 959 | ||
960 | static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, | ||
961 | unsigned long size, bool uncached) | ||
962 | { | ||
963 | __coherent_cache_guest_page(vcpu, pfn, size, uncached); | ||
964 | } | ||
965 | |||
908 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | 966 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
909 | struct kvm_memory_slot *memslot, unsigned long hva, | 967 | struct kvm_memory_slot *memslot, unsigned long hva, |
910 | unsigned long fault_status) | 968 | unsigned long fault_status) |
911 | { | 969 | { |
912 | int ret; | 970 | int ret; |
913 | bool write_fault, writable, hugetlb = false, force_pte = false; | 971 | bool write_fault, writable, hugetlb = false, force_pte = false; |
914 | unsigned long mmu_seq; | 972 | unsigned long mmu_seq; |
915 | gfn_t gfn = fault_ipa >> PAGE_SHIFT; | 973 | gfn_t gfn = fault_ipa >> PAGE_SHIFT; |
916 | struct kvm *kvm = vcpu->kvm; | 974 | struct kvm *kvm = vcpu->kvm; |
917 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; | 975 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
918 | struct vm_area_struct *vma; | 976 | struct vm_area_struct *vma; |
919 | pfn_t pfn; | 977 | pfn_t pfn; |
920 | pgprot_t mem_type = PAGE_S2; | 978 | pgprot_t mem_type = PAGE_S2; |
921 | bool fault_ipa_uncached; | 979 | bool fault_ipa_uncached; |
922 | 980 | ||
923 | write_fault = kvm_is_write_fault(vcpu); | 981 | write_fault = kvm_is_write_fault(vcpu); |
924 | if (fault_status == FSC_PERM && !write_fault) { | 982 | if (fault_status == FSC_PERM && !write_fault) { |
925 | kvm_err("Unexpected L2 read permission error\n"); | 983 | kvm_err("Unexpected L2 read permission error\n"); |
926 | return -EFAULT; | 984 | return -EFAULT; |
927 | } | 985 | } |
928 | 986 | ||
929 | /* Let's check if we will get back a huge page backed by hugetlbfs */ | 987 | /* Let's check if we will get back a huge page backed by hugetlbfs */ |
930 | down_read(¤t->mm->mmap_sem); | 988 | down_read(¤t->mm->mmap_sem); |
931 | vma = find_vma_intersection(current->mm, hva, hva + 1); | 989 | vma = find_vma_intersection(current->mm, hva, hva + 1); |
932 | if (unlikely(!vma)) { | 990 | if (unlikely(!vma)) { |
933 | kvm_err("Failed to find VMA for hva 0x%lx\n", hva); | 991 | kvm_err("Failed to find VMA for hva 0x%lx\n", hva); |
934 | up_read(¤t->mm->mmap_sem); | 992 | up_read(¤t->mm->mmap_sem); |
935 | return -EFAULT; | 993 | return -EFAULT; |
936 | } | 994 | } |
937 | 995 | ||
938 | if (is_vm_hugetlb_page(vma)) { | 996 | if (is_vm_hugetlb_page(vma)) { |
939 | hugetlb = true; | 997 | hugetlb = true; |
940 | gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; | 998 | gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT; |
941 | } else { | 999 | } else { |
942 | /* | 1000 | /* |
943 | * Pages belonging to memslots that don't have the same | 1001 | * Pages belonging to memslots that don't have the same |
944 | * alignment for userspace and IPA cannot be mapped using | 1002 | * alignment for userspace and IPA cannot be mapped using |
945 | * block descriptors even if the pages belong to a THP for | 1003 | * block descriptors even if the pages belong to a THP for |
946 | * the process, because the stage-2 block descriptor will | 1004 | * the process, because the stage-2 block descriptor will |
947 | * cover more than a single THP and we lose atomicity for | 1005 | * cover more than a single THP and we lose atomicity for |
948 | * unmapping, updates, and splits of the THP or other pages | 1006 | * unmapping, updates, and splits of the THP or other pages |
949 | * in the stage-2 block range. | 1007 | * in the stage-2 block range. |
950 | */ | 1008 | */ |
951 | if ((memslot->userspace_addr & ~PMD_MASK) != | 1009 | if ((memslot->userspace_addr & ~PMD_MASK) != |
952 | ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK)) | 1010 | ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK)) |
953 | force_pte = true; | 1011 | force_pte = true; |
954 | } | 1012 | } |
955 | up_read(¤t->mm->mmap_sem); | 1013 | up_read(¤t->mm->mmap_sem); |
956 | 1014 | ||
957 | /* We need minimum second+third level pages */ | 1015 | /* We need minimum second+third level pages */ |
958 | ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES, | 1016 | ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES, |
959 | KVM_NR_MEM_OBJS); | 1017 | KVM_NR_MEM_OBJS); |
960 | if (ret) | 1018 | if (ret) |
961 | return ret; | 1019 | return ret; |
962 | 1020 | ||
963 | mmu_seq = vcpu->kvm->mmu_notifier_seq; | 1021 | mmu_seq = vcpu->kvm->mmu_notifier_seq; |
964 | /* | 1022 | /* |
965 | * Ensure the read of mmu_notifier_seq happens before we call | 1023 | * Ensure the read of mmu_notifier_seq happens before we call |
966 | * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk | 1024 | * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk |
967 | * the page we just got a reference to gets unmapped before we have a | 1025 | * the page we just got a reference to gets unmapped before we have a |
968 | * chance to grab the mmu_lock, which ensures that if the page gets | 1026 | * chance to grab the mmu_lock, which ensures that if the page gets |
969 | * unmapped afterwards, the call to kvm_unmap_hva will take it away | 1027 | * unmapped afterwards, the call to kvm_unmap_hva will take it away |
970 | * from us again properly. This smp_rmb() interacts with the smp_wmb() | 1028 | * from us again properly. This smp_rmb() interacts with the smp_wmb() |
971 | * in kvm_mmu_notifier_invalidate_<page|range_end>. | 1029 | * in kvm_mmu_notifier_invalidate_<page|range_end>. |
972 | */ | 1030 | */ |
973 | smp_rmb(); | 1031 | smp_rmb(); |
974 | 1032 | ||
975 | pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); | 1033 | pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable); |
976 | if (is_error_pfn(pfn)) | 1034 | if (is_error_pfn(pfn)) |
977 | return -EFAULT; | 1035 | return -EFAULT; |
978 | 1036 | ||
979 | if (kvm_is_device_pfn(pfn)) | 1037 | if (kvm_is_device_pfn(pfn)) |
980 | mem_type = PAGE_S2_DEVICE; | 1038 | mem_type = PAGE_S2_DEVICE; |
981 | 1039 | ||
982 | spin_lock(&kvm->mmu_lock); | 1040 | spin_lock(&kvm->mmu_lock); |
983 | if (mmu_notifier_retry(kvm, mmu_seq)) | 1041 | if (mmu_notifier_retry(kvm, mmu_seq)) |
984 | goto out_unlock; | 1042 | goto out_unlock; |
985 | if (!hugetlb && !force_pte) | 1043 | if (!hugetlb && !force_pte) |
986 | hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); | 1044 | hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa); |
987 | 1045 | ||
988 | fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT; | 1046 | fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT; |
989 | 1047 | ||
990 | if (hugetlb) { | 1048 | if (hugetlb) { |
991 | pmd_t new_pmd = pfn_pmd(pfn, mem_type); | 1049 | pmd_t new_pmd = pfn_pmd(pfn, mem_type); |
992 | new_pmd = pmd_mkhuge(new_pmd); | 1050 | new_pmd = pmd_mkhuge(new_pmd); |
993 | if (writable) { | 1051 | if (writable) { |
994 | kvm_set_s2pmd_writable(&new_pmd); | 1052 | kvm_set_s2pmd_writable(&new_pmd); |
995 | kvm_set_pfn_dirty(pfn); | 1053 | kvm_set_pfn_dirty(pfn); |
996 | } | 1054 | } |
997 | coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE, | 1055 | coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached); |
998 | fault_ipa_uncached); | ||
999 | ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); | 1056 | ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); |
1000 | } else { | 1057 | } else { |
1001 | pte_t new_pte = pfn_pte(pfn, mem_type); | 1058 | pte_t new_pte = pfn_pte(pfn, mem_type); |
1002 | if (writable) { | 1059 | if (writable) { |
1003 | kvm_set_s2pte_writable(&new_pte); | 1060 | kvm_set_s2pte_writable(&new_pte); |
1004 | kvm_set_pfn_dirty(pfn); | 1061 | kvm_set_pfn_dirty(pfn); |
1005 | } | 1062 | } |
1006 | coherent_cache_guest_page(vcpu, hva, PAGE_SIZE, | 1063 | coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached); |
1007 | fault_ipa_uncached); | ||
1008 | ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, | 1064 | ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, |
1009 | pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE)); | 1065 | pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE)); |
1010 | } | 1066 | } |
1011 | 1067 | ||
1012 | 1068 | ||
1013 | out_unlock: | 1069 | out_unlock: |
1014 | spin_unlock(&kvm->mmu_lock); | 1070 | spin_unlock(&kvm->mmu_lock); |
1015 | kvm_release_pfn_clean(pfn); | 1071 | kvm_release_pfn_clean(pfn); |
1016 | return ret; | 1072 | return ret; |
1017 | } | 1073 | } |
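The force_pte path above only permits stage-2 block (PMD) mappings when the userspace address and the IPA share the same offset within a PMD-sized region; otherwise a single block descriptor would span more than one THP. A minimal, self-contained sketch of that alignment test follows; the PMD_SHIFT value and sample addresses are illustrative assumptions, not kernel values.

/* Sketch of the userspace/IPA alignment test that gates huge mappings.
 * All constants here are illustrative, not taken from the kernel. */
#include <stdbool.h>
#include <stdio.h>

#define PMD_SHIFT 21                          /* 2 MiB blocks */
#define PMD_MASK  (~((1UL << PMD_SHIFT) - 1))

/* A block mapping is only safe when both addresses share the same
 * offset inside a PMD-sized region. */
static bool must_force_pte(unsigned long uaddr, unsigned long ipa)
{
	return (uaddr & ~PMD_MASK) != (ipa & ~PMD_MASK);
}

int main(void)
{
	printf("%d\n", must_force_pte(0x40200000, 0x80200000)); /* 0: aligned   */
	printf("%d\n", must_force_pte(0x40210000, 0x80200000)); /* 1: force PTE */
	return 0;
}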
1018 | 1074 | ||
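The mmu_notifier_seq comment in user_mem_abort() describes a read-seq / lookup / re-check-under-lock protocol. The toy userspace model below captures just that ordering; the names, the atomics stand-in for smp_rmb()/mmu_lock, and the single-threaded main() are all assumptions for illustration, not the kernel implementation.

/* Toy model of the mmu_notifier_seq retry protocol. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong notifier_seq;      /* bumped by each invalidation */
static unsigned long toy_pfn = 42;     /* stand-in lookup result */

static unsigned long lookup_pfn(void)  /* models gfn_to_pfn_prot() */
{
	return toy_pfn;
}

static bool try_map_once(void)
{
	unsigned long seq = atomic_load(&notifier_seq);
	/* smp_rmb() in the real code orders this load before the lookup */
	unsigned long pfn = lookup_pfn();

	/* under "mmu_lock": bail out if an invalidation ran in between */
	if (atomic_load(&notifier_seq) != seq)
		return false;

	printf("installed pfn %lu\n", pfn);
	return true;
}

int main(void)
{
	while (!try_map_once())
		;	/* the fault handler simply retries */
	return 0;
}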
1019 | /** | 1075 | /** |
1020 | * kvm_handle_guest_abort - handles all 2nd stage aborts | 1076 | * kvm_handle_guest_abort - handles all 2nd stage aborts |
1021 | * @vcpu: the VCPU pointer | 1077 | * @vcpu: the VCPU pointer |
1022 | * @run: the kvm_run structure | 1078 | * @run: the kvm_run structure |
1023 | * | 1079 | * |
1024 | * Any abort that gets to the host is almost guaranteed to be caused by a | 1080 | * Any abort that gets to the host is almost guaranteed to be caused by a |
1025 | * missing second stage translation table entry, which means either that the | 1081 | * missing second stage translation table entry, which means either that the |
1026 | * guest simply needs more memory and we must allocate an appropriate page, or | 1082 | * guest simply needs more memory and we must allocate an appropriate page, or |
1027 | * that the guest tried to access I/O memory, which is emulated by user | 1083 | * that the guest tried to access I/O memory, which is emulated by user |
1028 | * space. The distinction is based on the IPA causing the fault and whether this | 1084 | * space. The distinction is based on the IPA causing the fault and whether this |
1029 | * memory region has been registered as standard RAM by user space. | 1085 | * memory region has been registered as standard RAM by user space. |
1030 | */ | 1086 | */ |
1031 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) | 1087 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) |
1032 | { | 1088 | { |
1033 | unsigned long fault_status; | 1089 | unsigned long fault_status; |
1034 | phys_addr_t fault_ipa; | 1090 | phys_addr_t fault_ipa; |
1035 | struct kvm_memory_slot *memslot; | 1091 | struct kvm_memory_slot *memslot; |
1036 | unsigned long hva; | 1092 | unsigned long hva; |
1037 | bool is_iabt, write_fault, writable; | 1093 | bool is_iabt, write_fault, writable; |
1038 | gfn_t gfn; | 1094 | gfn_t gfn; |
1039 | int ret, idx; | 1095 | int ret, idx; |
1040 | 1096 | ||
1041 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); | 1097 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
1042 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); | 1098 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); |
1043 | 1099 | ||
1044 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), | 1100 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu), |
1045 | kvm_vcpu_get_hfar(vcpu), fault_ipa); | 1101 | kvm_vcpu_get_hfar(vcpu), fault_ipa); |
1046 | 1102 | ||
1047 | /* Check that the stage-2 fault is a translation or permission fault */ | 1103 | /* Check that the stage-2 fault is a translation or permission fault */ |
1048 | fault_status = kvm_vcpu_trap_get_fault_type(vcpu); | 1104 | fault_status = kvm_vcpu_trap_get_fault_type(vcpu); |
1049 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { | 1105 | if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { |
1050 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", | 1106 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n", |
1051 | kvm_vcpu_trap_get_class(vcpu), | 1107 | kvm_vcpu_trap_get_class(vcpu), |
1052 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), | 1108 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), |
1053 | (unsigned long)kvm_vcpu_get_hsr(vcpu)); | 1109 | (unsigned long)kvm_vcpu_get_hsr(vcpu)); |
1054 | return -EFAULT; | 1110 | return -EFAULT; |
1055 | } | 1111 | } |
1056 | 1112 | ||
1057 | idx = srcu_read_lock(&vcpu->kvm->srcu); | 1113 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
1058 | 1114 | ||
1059 | gfn = fault_ipa >> PAGE_SHIFT; | 1115 | gfn = fault_ipa >> PAGE_SHIFT; |
1060 | memslot = gfn_to_memslot(vcpu->kvm, gfn); | 1116 | memslot = gfn_to_memslot(vcpu->kvm, gfn); |
1061 | hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); | 1117 | hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); |
1062 | write_fault = kvm_is_write_fault(vcpu); | 1118 | write_fault = kvm_is_write_fault(vcpu); |
1063 | if (kvm_is_error_hva(hva) || (write_fault && !writable)) { | 1119 | if (kvm_is_error_hva(hva) || (write_fault && !writable)) { |
1064 | if (is_iabt) { | 1120 | if (is_iabt) { |
1065 | /* Prefetch Abort on I/O address */ | 1121 | /* Prefetch Abort on I/O address */ |
1066 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); | 1122 | kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu)); |
1067 | ret = 1; | 1123 | ret = 1; |
1068 | goto out_unlock; | 1124 | goto out_unlock; |
1069 | } | 1125 | } |
1070 | 1126 | ||
1071 | /* | 1127 | /* |
1072 | * The IPA is reported as [MAX:12], so we need to | 1128 | * The IPA is reported as [MAX:12], so we need to |
1073 | * complement it with the bottom 12 bits from the | 1129 | * complement it with the bottom 12 bits from the |
1074 | * faulting VA. This is always 12 bits, irrespective | 1130 | * faulting VA. This is always 12 bits, irrespective |
1075 | * of the page size. | 1131 | * of the page size. |
1076 | */ | 1132 | */ |
1077 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); | 1133 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1); |
1078 | ret = io_mem_abort(vcpu, run, fault_ipa); | 1134 | ret = io_mem_abort(vcpu, run, fault_ipa); |
1079 | goto out_unlock; | 1135 | goto out_unlock; |
1080 | } | 1136 | } |
1081 | 1137 | ||
1082 | /* Userspace should not be able to register out-of-bounds IPAs */ | 1138 | /* Userspace should not be able to register out-of-bounds IPAs */ |
1083 | VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE); | 1139 | VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE); |
1084 | 1140 | ||
1085 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); | 1141 | ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status); |
1086 | if (ret == 0) | 1142 | if (ret == 0) |
1087 | ret = 1; | 1143 | ret = 1; |
1088 | out_unlock: | 1144 | out_unlock: |
1089 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | 1145 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
1090 | return ret; | 1146 | return ret; |
1091 | } | 1147 | } |
1092 | 1148 | ||
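As the comment in kvm_handle_guest_abort() notes, the reported IPA only carries bits [MAX:12], so the page offset is spliced in from the low 12 bits of the faulting VA before handing the address to the MMIO emulation path. A tiny arithmetic demo of that splice (the sample addresses are made up):

/* Reconstructing the full faulting IPA from the page-granular IPA
 * and the low 12 bits of the faulting VA. Values are illustrative. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fault_ipa = 0x8badf000;  /* IPA reported at page granularity */
	uint64_t hfar      = 0x76543a5c;  /* faulting VA; low 12 bits valid */

	fault_ipa |= hfar & ((1u << 12) - 1);   /* splice in the page offset */
	printf("full IPA: 0x%llx\n", (unsigned long long)fault_ipa); /* 0x8badfa5c */
	return 0;
}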
1093 | static void handle_hva_to_gpa(struct kvm *kvm, | 1149 | static void handle_hva_to_gpa(struct kvm *kvm, |
1094 | unsigned long start, | 1150 | unsigned long start, |
1095 | unsigned long end, | 1151 | unsigned long end, |
1096 | void (*handler)(struct kvm *kvm, | 1152 | void (*handler)(struct kvm *kvm, |
1097 | gpa_t gpa, void *data), | 1153 | gpa_t gpa, void *data), |
1098 | void *data) | 1154 | void *data) |
1099 | { | 1155 | { |
1100 | struct kvm_memslots *slots; | 1156 | struct kvm_memslots *slots; |
1101 | struct kvm_memory_slot *memslot; | 1157 | struct kvm_memory_slot *memslot; |
1102 | 1158 | ||
1103 | slots = kvm_memslots(kvm); | 1159 | slots = kvm_memslots(kvm); |
1104 | 1160 | ||
1105 | /* we only care about the pages that the guest sees */ | 1161 | /* we only care about the pages that the guest sees */ |
1106 | kvm_for_each_memslot(memslot, slots) { | 1162 | kvm_for_each_memslot(memslot, slots) { |
1107 | unsigned long hva_start, hva_end; | 1163 | unsigned long hva_start, hva_end; |
1108 | gfn_t gfn, gfn_end; | 1164 | gfn_t gfn, gfn_end; |
1109 | 1165 | ||
1110 | hva_start = max(start, memslot->userspace_addr); | 1166 | hva_start = max(start, memslot->userspace_addr); |
1111 | hva_end = min(end, memslot->userspace_addr + | 1167 | hva_end = min(end, memslot->userspace_addr + |
1112 | (memslot->npages << PAGE_SHIFT)); | 1168 | (memslot->npages << PAGE_SHIFT)); |
1113 | if (hva_start >= hva_end) | 1169 | if (hva_start >= hva_end) |
1114 | continue; | 1170 | continue; |
1115 | 1171 | ||
1116 | /* | 1172 | /* |
1117 | * {gfn(page) | page intersects with [hva_start, hva_end)} = | 1173 | * {gfn(page) | page intersects with [hva_start, hva_end)} = |
1118 | * {gfn_start, gfn_start+1, ..., gfn_end-1}. | 1174 | * {gfn_start, gfn_start+1, ..., gfn_end-1}. |
1119 | */ | 1175 | */ |
1120 | gfn = hva_to_gfn_memslot(hva_start, memslot); | 1176 | gfn = hva_to_gfn_memslot(hva_start, memslot); |
1121 | gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); | 1177 | gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); |
1122 | 1178 | ||
1123 | for (; gfn < gfn_end; ++gfn) { | 1179 | for (; gfn < gfn_end; ++gfn) { |
1124 | gpa_t gpa = gfn << PAGE_SHIFT; | 1180 | gpa_t gpa = gfn << PAGE_SHIFT; |
1125 | handler(kvm, gpa, data); | 1181 | handler(kvm, gpa, data); |
1126 | } | 1182 | } |
1127 | } | 1183 | } |
1128 | } | 1184 | } |
1129 | 1185 | ||
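The gfn range computed in handle_hva_to_gpa() covers every guest page that intersects [hva_start, hva_end): rounding hva_end up by PAGE_SIZE - 1 makes gfn_end exclusive past the last intersecting page. A self-contained model of that computation, with a hypothetical hva_to_gfn() standing in for hva_to_gfn_memslot() and toy slot values:

/* Userspace model of the hva -> gfn range computation. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct slot { unsigned long userspace_addr, base_gfn, npages; };

/* Hypothetical stand-in for hva_to_gfn_memslot(). */
static unsigned long hva_to_gfn(unsigned long hva, const struct slot *s)
{
	return s->base_gfn + ((hva - s->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct slot s = { .userspace_addr = 0x400000, .base_gfn = 0x100,
			  .npages = 16 };
	unsigned long hva_start = 0x401800;     /* starts mid-page */
	unsigned long hva_end   = 0x403800;     /* exclusive end   */

	unsigned long gfn     = hva_to_gfn(hva_start, &s);
	unsigned long gfn_end = hva_to_gfn(hva_end + PAGE_SIZE - 1, &s);

	/* Pages 0x101..0x103 intersect the range, so gfn_end is 0x104. */
	for (; gfn < gfn_end; ++gfn)
		printf("handle gfn 0x%lx\n", gfn);
	return 0;
}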
1130 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) | 1186 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) |
1131 | { | 1187 | { |
1132 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); | 1188 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); |
1133 | } | 1189 | } |
1134 | 1190 | ||
1135 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 1191 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) |
1136 | { | 1192 | { |
1137 | unsigned long end = hva + PAGE_SIZE; | 1193 | unsigned long end = hva + PAGE_SIZE; |
1138 | 1194 | ||
1139 | if (!kvm->arch.pgd) | 1195 | if (!kvm->arch.pgd) |
1140 | return 0; | 1196 | return 0; |
1141 | 1197 | ||
1142 | trace_kvm_unmap_hva(hva); | 1198 | trace_kvm_unmap_hva(hva); |
1143 | handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); | 1199 | handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); |
1144 | return 0; | 1200 | return 0; |
1145 | } | 1201 | } |
1146 | 1202 | ||
1147 | int kvm_unmap_hva_range(struct kvm *kvm, | 1203 | int kvm_unmap_hva_range(struct kvm *kvm, |
1148 | unsigned long start, unsigned long end) | 1204 | unsigned long start, unsigned long end) |
1149 | { | 1205 | { |
1150 | if (!kvm->arch.pgd) | 1206 | if (!kvm->arch.pgd) |
1151 | return 0; | 1207 | return 0; |
1152 | 1208 | ||
1153 | trace_kvm_unmap_hva_range(start, end); | 1209 | trace_kvm_unmap_hva_range(start, end); |
1154 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); | 1210 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); |
1155 | return 0; | 1211 | return 0; |
1156 | } | 1212 | } |
1157 | 1213 | ||
1158 | static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) | 1214 | static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) |
1159 | { | 1215 | { |
1160 | pte_t *pte = (pte_t *)data; | 1216 | pte_t *pte = (pte_t *)data; |
1161 | 1217 | ||
1162 | stage2_set_pte(kvm, NULL, gpa, pte, false); | 1218 | stage2_set_pte(kvm, NULL, gpa, pte, false); |
1163 | } | 1219 | } |
1164 | 1220 | ||
1165 | 1221 | ||
1166 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) | 1222 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) |
1167 | { | 1223 | { |
1168 | unsigned long end = hva + PAGE_SIZE; | 1224 | unsigned long end = hva + PAGE_SIZE; |
1169 | pte_t stage2_pte; | 1225 | pte_t stage2_pte; |
1170 | 1226 | ||
1171 | if (!kvm->arch.pgd) | 1227 | if (!kvm->arch.pgd) |
1172 | return; | 1228 | return; |
1173 | 1229 | ||
1174 | trace_kvm_set_spte_hva(hva); | 1230 | trace_kvm_set_spte_hva(hva); |
1175 | stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); | 1231 | stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); |
1176 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); | 1232 | handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); |
1177 | } | 1233 | } |
1178 | 1234 | ||
1179 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) | 1235 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) |
1180 | { | 1236 | { |
1181 | mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); | 1237 | mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); |
1182 | } | 1238 | } |
1183 | 1239 | ||
1184 | phys_addr_t kvm_mmu_get_httbr(void) | 1240 | phys_addr_t kvm_mmu_get_httbr(void) |
1185 | { | 1241 | { |
1186 | return virt_to_phys(hyp_pgd); | 1242 | return virt_to_phys(hyp_pgd); |
1187 | } | 1243 | } |
1188 | 1244 | ||
1189 | phys_addr_t kvm_mmu_get_boot_httbr(void) | 1245 | phys_addr_t kvm_mmu_get_boot_httbr(void) |
1190 | { | 1246 | { |
1191 | return virt_to_phys(boot_hyp_pgd); | 1247 | return virt_to_phys(boot_hyp_pgd); |
1192 | } | 1248 | } |
1193 | 1249 | ||
1194 | phys_addr_t kvm_get_idmap_vector(void) | 1250 | phys_addr_t kvm_get_idmap_vector(void) |
1195 | { | 1251 | { |
1196 | return hyp_idmap_vector; | 1252 | return hyp_idmap_vector; |
1197 | } | 1253 | } |
1198 | 1254 | ||
1199 | int kvm_mmu_init(void) | 1255 | int kvm_mmu_init(void) |
1200 | { | 1256 | { |
1201 | int err; | 1257 | int err; |
1202 | 1258 | ||
1203 | hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start); | 1259 | hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start); |
1204 | hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end); | 1260 | hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end); |
1205 | hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init); | 1261 | hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init); |
1206 | 1262 | ||
1207 | if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) { | 1263 | if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) { |
1208 | /* | 1264 | /* |
1209 | * Our init code is crossing a page boundary. Allocate | 1265 | * Our init code is crossing a page boundary. Allocate |
1210 | * a bounce page, copy the code over and use that. | 1266 | * a bounce page, copy the code over and use that. |
1211 | */ | 1267 | */ |
1212 | size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; | 1268 | size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start; |
1213 | phys_addr_t phys_base; | 1269 | phys_addr_t phys_base; |
1214 | 1270 | ||
1215 | init_bounce_page = (void *)__get_free_page(GFP_KERNEL); | 1271 | init_bounce_page = (void *)__get_free_page(GFP_KERNEL); |
1216 | if (!init_bounce_page) { | 1272 | if (!init_bounce_page) { |
1217 | kvm_err("Couldn't allocate HYP init bounce page\n"); | 1273 | kvm_err("Couldn't allocate HYP init bounce page\n"); |
1218 | err = -ENOMEM; | 1274 | err = -ENOMEM; |
1219 | goto out; | 1275 | goto out; |
1220 | } | 1276 | } |
1221 | 1277 | ||
1222 | memcpy(init_bounce_page, __hyp_idmap_text_start, len); | 1278 | memcpy(init_bounce_page, __hyp_idmap_text_start, len); |
1223 | /* | 1279 | /* |
1224 | * Warning: the code we just copied to the bounce page | 1280 | * Warning: the code we just copied to the bounce page |
1225 | * must be flushed to the point of coherency. | 1281 | * must be flushed to the point of coherency. |
1226 | * Otherwise, the data may be sitting in L2, and HYP | 1282 | * Otherwise, the data may be sitting in L2, and HYP |
1227 | * mode won't be able to observe it as it runs with | 1283 | * mode won't be able to observe it as it runs with |
1228 | * caches off at that point. | 1284 | * caches off at that point. |
1229 | */ | 1285 | */ |
1230 | kvm_flush_dcache_to_poc(init_bounce_page, len); | 1286 | kvm_flush_dcache_to_poc(init_bounce_page, len); |
1231 | 1287 | ||
1232 | phys_base = kvm_virt_to_phys(init_bounce_page); | 1288 | phys_base = kvm_virt_to_phys(init_bounce_page); |
1233 | hyp_idmap_vector += phys_base - hyp_idmap_start; | 1289 | hyp_idmap_vector += phys_base - hyp_idmap_start; |
1234 | hyp_idmap_start = phys_base; | 1290 | hyp_idmap_start = phys_base; |
1235 | hyp_idmap_end = phys_base + len; | 1291 | hyp_idmap_end = phys_base + len; |
1236 | 1292 | ||
1237 | kvm_info("Using HYP init bounce page @%lx\n", | 1293 | kvm_info("Using HYP init bounce page @%lx\n", |
1238 | (unsigned long)phys_base); | 1294 | (unsigned long)phys_base); |
1239 | } | 1295 | } |
1240 | 1296 | ||
1241 | hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); | 1297 | hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); |
1242 | boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); | 1298 | boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); |
1243 | 1299 | ||
1244 | if (!hyp_pgd || !boot_hyp_pgd) { | 1300 | if (!hyp_pgd || !boot_hyp_pgd) { |
1245 | kvm_err("Hyp mode PGD not allocated\n"); | 1301 | kvm_err("Hyp mode PGD not allocated\n"); |
1246 | err = -ENOMEM; | 1302 | err = -ENOMEM; |
1247 | goto out; | 1303 | goto out; |
1248 | } | 1304 | } |
1249 | 1305 | ||
1250 | /* Create the idmap in the boot page tables */ | 1306 | /* Create the idmap in the boot page tables */ |
1251 | err = __create_hyp_mappings(boot_hyp_pgd, | 1307 | err = __create_hyp_mappings(boot_hyp_pgd, |
1252 | hyp_idmap_start, hyp_idmap_end, | 1308 | hyp_idmap_start, hyp_idmap_end, |
1253 | __phys_to_pfn(hyp_idmap_start), | 1309 | __phys_to_pfn(hyp_idmap_start), |
1254 | PAGE_HYP); | 1310 | PAGE_HYP); |
1255 | 1311 | ||
1256 | if (err) { | 1312 | if (err) { |
1257 | kvm_err("Failed to idmap %lx-%lx\n", | 1313 | kvm_err("Failed to idmap %lx-%lx\n", |
1258 | hyp_idmap_start, hyp_idmap_end); | 1314 | hyp_idmap_start, hyp_idmap_end); |
1259 | goto out; | 1315 | goto out; |
1260 | } | 1316 | } |
1261 | 1317 | ||
1262 | /* Map the very same page at the trampoline VA */ | 1318 | /* Map the very same page at the trampoline VA */ |
1263 | err = __create_hyp_mappings(boot_hyp_pgd, | 1319 | err = __create_hyp_mappings(boot_hyp_pgd, |
1264 | TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, | 1320 | TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, |
1265 | __phys_to_pfn(hyp_idmap_start), | 1321 | __phys_to_pfn(hyp_idmap_start), |
1266 | PAGE_HYP); | 1322 | PAGE_HYP); |
1267 | if (err) { | 1323 | if (err) { |
1268 | kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n", | 1324 | kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n", |
1269 | TRAMPOLINE_VA); | 1325 | TRAMPOLINE_VA); |
1270 | goto out; | 1326 | goto out; |
1271 | } | 1327 | } |
1272 | 1328 | ||
1273 | /* Map the same page again into the runtime page tables */ | 1329 | /* Map the same page again into the runtime page tables */ |
1274 | err = __create_hyp_mappings(hyp_pgd, | 1330 | err = __create_hyp_mappings(hyp_pgd, |
1275 | TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, | 1331 | TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, |
1276 | __phys_to_pfn(hyp_idmap_start), | 1332 | __phys_to_pfn(hyp_idmap_start), |
1277 | PAGE_HYP); | 1333 | PAGE_HYP); |
1278 | if (err) { | 1334 | if (err) { |
1279 | kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n", | 1335 | kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n", |
1280 | TRAMPOLINE_VA); | 1336 | TRAMPOLINE_VA); |
1281 | goto out; | 1337 | goto out; |
1282 | } | 1338 | } |
1283 | 1339 | ||
1284 | return 0; | 1340 | return 0; |
1285 | out: | 1341 | out: |
1286 | free_hyp_pgds(); | 1342 | free_hyp_pgds(); |
1287 | return err; | 1343 | return err; |
1288 | } | 1344 | } |
1289 | 1345 | ||
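The bounce-page branch in kvm_mmu_init() is triggered by the test (hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK: if the two addresses differ in any bit above the page offset, the init code straddles a page boundary. A small demo of that bit trick, with an assumed 4 KiB page size and made-up addresses:

/* Demo of the page-crossing test used in kvm_mmu_init(). */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

/* True if start and end do not lie within the same page. */
static bool crosses_page(unsigned long start, unsigned long end)
{
	return ((start ^ end) & PAGE_MASK) != 0;
}

int main(void)
{
	printf("%d\n", crosses_page(0x1f80, 0x1ff0)); /* 0: same page */
	printf("%d\n", crosses_page(0x1f80, 0x2010)); /* 1: crosses   */
	return 0;
}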
1290 | void kvm_arch_commit_memory_region(struct kvm *kvm, | 1346 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
1291 | struct kvm_userspace_memory_region *mem, | 1347 | struct kvm_userspace_memory_region *mem, |
1292 | const struct kvm_memory_slot *old, | 1348 | const struct kvm_memory_slot *old, |
1293 | enum kvm_mr_change change) | 1349 | enum kvm_mr_change change) |
1294 | { | 1350 | { |
1295 | } | 1351 | } |
1296 | 1352 | ||
1297 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | 1353 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
1298 | struct kvm_memory_slot *memslot, | 1354 | struct kvm_memory_slot *memslot, |
1299 | struct kvm_userspace_memory_region *mem, | 1355 | struct kvm_userspace_memory_region *mem, |
1300 | enum kvm_mr_change change) | 1356 | enum kvm_mr_change change) |
1301 | { | 1357 | { |
1302 | hva_t hva = mem->userspace_addr; | 1358 | hva_t hva = mem->userspace_addr; |
1303 | hva_t reg_end = hva + mem->memory_size; | 1359 | hva_t reg_end = hva + mem->memory_size; |
1304 | bool writable = !(mem->flags & KVM_MEM_READONLY); | 1360 | bool writable = !(mem->flags & KVM_MEM_READONLY); |
1305 | int ret = 0; | 1361 | int ret = 0; |
1306 | 1362 | ||
1307 | if (change != KVM_MR_CREATE && change != KVM_MR_MOVE) | 1363 | if (change != KVM_MR_CREATE && change != KVM_MR_MOVE) |
1308 | return 0; | 1364 | return 0; |
1309 | 1365 | ||
1310 | /* | 1366 | /* |
1311 | * Prevent userspace from creating a memory region outside of the IPA | 1367 | * Prevent userspace from creating a memory region outside of the IPA |
1312 | * space addressable by the KVM guest IPA space. | 1368 | * space addressable by the KVM guest IPA space. |
1313 | */ | 1369 | */ |
1314 | if (memslot->base_gfn + memslot->npages >= | 1370 | if (memslot->base_gfn + memslot->npages >= |
1315 | (KVM_PHYS_SIZE >> PAGE_SHIFT)) | 1371 | (KVM_PHYS_SIZE >> PAGE_SHIFT)) |
1316 | return -EFAULT; | 1372 | return -EFAULT; |
1317 | 1373 | ||
1318 | /* | 1374 | /* |
1319 | * A memory region could potentially cover multiple VMAs, and any holes | 1375 | * A memory region could potentially cover multiple VMAs, and any holes |
1320 | * between them, so iterate over all of them to find out if we can map | 1376 | * between them, so iterate over all of them to find out if we can map |
1321 | * any of them right now. | 1377 | * any of them right now. |
1322 | * | 1378 | * |
1323 | * +--------------------------------------------+ | 1379 | * +--------------------------------------------+ |
1324 | * +---------------+----------------+ +----------------+ | 1380 | * +---------------+----------------+ +----------------+ |
1325 | * | : VMA 1 | VMA 2 | | VMA 3 : | | 1381 | * | : VMA 1 | VMA 2 | | VMA 3 : | |
1326 | * +---------------+----------------+ +----------------+ | 1382 | * +---------------+----------------+ +----------------+ |
1327 | * | memory region | | 1383 | * | memory region | |
1328 | * +--------------------------------------------+ | 1384 | * +--------------------------------------------+ |
1329 | */ | 1385 | */ |
1330 | do { | 1386 | do { |
1331 | struct vm_area_struct *vma = find_vma(current->mm, hva); | 1387 | struct vm_area_struct *vma = find_vma(current->mm, hva); |
1332 | hva_t vm_start, vm_end; | 1388 | hva_t vm_start, vm_end; |
1333 | 1389 | ||
1334 | if (!vma || vma->vm_start >= reg_end) | 1390 | if (!vma || vma->vm_start >= reg_end) |
1335 | break; | 1391 | break; |
1336 | 1392 | ||
1337 | /* | 1393 | /* |
1338 | * Mapping a read-only VMA is only allowed if the | 1394 | * Mapping a read-only VMA is only allowed if the |
1339 | * memory region is configured as read-only. | 1395 | * memory region is configured as read-only. |
1340 | */ | 1396 | */ |
1341 | if (writable && !(vma->vm_flags & VM_WRITE)) { | 1397 | if (writable && !(vma->vm_flags & VM_WRITE)) { |
1342 | ret = -EPERM; | 1398 | ret = -EPERM; |
1343 | break; | 1399 | break; |
1344 | } | 1400 | } |
1345 | 1401 | ||
1346 | /* | 1402 | /* |
1347 | * Take the intersection of this VMA with the memory region | 1403 | * Take the intersection of this VMA with the memory region |
1348 | */ | 1404 | */ |
1349 | vm_start = max(hva, vma->vm_start); | 1405 | vm_start = max(hva, vma->vm_start); |
1350 | vm_end = min(reg_end, vma->vm_end); | 1406 | vm_end = min(reg_end, vma->vm_end); |
1351 | 1407 | ||
1352 | if (vma->vm_flags & VM_PFNMAP) { | 1408 | if (vma->vm_flags & VM_PFNMAP) { |
1353 | gpa_t gpa = mem->guest_phys_addr + | 1409 | gpa_t gpa = mem->guest_phys_addr + |
1354 | (vm_start - mem->userspace_addr); | 1410 | (vm_start - mem->userspace_addr); |
1355 | phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) + | 1411 | phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) + |
1356 | vm_start - vma->vm_start; | 1412 | vm_start - vma->vm_start; |
1357 | 1413 | ||
1358 | ret = kvm_phys_addr_ioremap(kvm, gpa, pa, | 1414 | ret = kvm_phys_addr_ioremap(kvm, gpa, pa, |
1359 | vm_end - vm_start, | 1415 | vm_end - vm_start, |
1360 | writable); | 1416 | writable); |
1361 | if (ret) | 1417 | if (ret) |
1362 | break; | 1418 | break; |
1363 | } | 1419 | } |
1364 | hva = vm_end; | 1420 | hva = vm_end; |
1365 | } while (hva < reg_end); | 1421 | } while (hva < reg_end); |
1366 | 1422 | ||
1367 | spin_lock(&kvm->mmu_lock); | 1423 | spin_lock(&kvm->mmu_lock); |
1368 | if (ret) | 1424 | if (ret) |
1369 | unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size); | 1425 | unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size); |
1370 | else | 1426 | else |
1371 | stage2_flush_memslot(kvm, memslot); | 1427 | stage2_flush_memslot(kvm, memslot); |
1372 | spin_unlock(&kvm->mmu_lock); | 1428 | spin_unlock(&kvm->mmu_lock); |
1373 | return ret; | 1429 | return ret; |
1374 | } | 1430 | } |
1375 | 1431 | ||
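The do/while loop in kvm_arch_prepare_memory_region() walks the VMAs underlying the region, clamping each one to the region with max()/min() and advancing hva past holes, exactly as the ASCII diagram shows. A self-contained sketch of that walk over a toy VMA list (all addresses and the simplified find_vma() are assumptions for illustration):

/* Userspace model of the VMA walk: intersect each VMA with the
 * memory region [hva, reg_end) and step across any holes. */
#include <stddef.h>
#include <stdio.h>

struct vma { unsigned long start, end; };

static const struct vma vmas[] = {
	{ 0x1000, 0x3000 }, { 0x3000, 0x5000 }, { 0x6000, 0x9000 },
};

/* First VMA ending above addr, like the kernel's find_vma(). */
static const struct vma *toy_find_vma(unsigned long addr)
{
	for (unsigned i = 0; i < sizeof(vmas) / sizeof(vmas[0]); i++)
		if (addr < vmas[i].end)
			return &vmas[i];
	return NULL;
}

int main(void)
{
	unsigned long hva = 0x2800, reg_end = 0x8000;

	do {
		const struct vma *v = toy_find_vma(hva);
		if (!v || v->start >= reg_end)
			break;
		unsigned long vm_start = hva > v->start ? hva : v->start;
		unsigned long vm_end   = reg_end < v->end ? reg_end : v->end;

		printf("map [%#lx, %#lx)\n", vm_start, vm_end);
		hva = vm_end;           /* continue past this VMA (or hole) */
	} while (hva < reg_end);
	return 0;
}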
1376 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, | 1432 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, |
1377 | struct kvm_memory_slot *dont) | 1433 | struct kvm_memory_slot *dont) |
1378 | { | 1434 | { |
1379 | } | 1435 | } |
1380 | 1436 | ||
1381 | int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, | 1437 | int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, |
1382 | unsigned long npages) | 1438 | unsigned long npages) |
1383 | { | 1439 | { |
1384 | /* | 1440 | /* |
1385 | * Readonly memslots are not incoherent with the caches by definition, | 1441 | * Readonly memslots are not incoherent with the caches by definition, |
1386 | * but in practice, they are used mostly to emulate ROMs or NOR flashes | 1442 | * but in practice, they are used mostly to emulate ROMs or NOR flashes |
1387 | * that the guest may consider devices and hence map as uncached. | 1443 | * that the guest may consider devices and hence map as uncached. |
1388 | * To prevent incoherency issues in these cases, tag all readonly | 1444 | * To prevent incoherency issues in these cases, tag all readonly |
1389 | * regions as incoherent. | 1445 | * regions as incoherent. |
1390 | */ | 1446 | */ |
1391 | if (slot->flags & KVM_MEM_READONLY) | 1447 | if (slot->flags & KVM_MEM_READONLY) |
1392 | slot->flags |= KVM_MEMSLOT_INCOHERENT; | 1448 | slot->flags |= KVM_MEMSLOT_INCOHERENT; |
1393 | return 0; | 1449 | return 0; |
1394 | } | 1450 | } |
1395 | 1451 | ||
1396 | void kvm_arch_memslots_updated(struct kvm *kvm) | 1452 | void kvm_arch_memslots_updated(struct kvm *kvm) |
1397 | { | 1453 | { |
1398 | } | 1454 | } |
1399 | 1455 | ||
1400 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | 1456 | void kvm_arch_flush_shadow_all(struct kvm *kvm) |
1401 | { | 1457 | { |
1402 | } | 1458 | } |
1403 | 1459 | ||
1404 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | 1460 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, |
1405 | struct kvm_memory_slot *slot) | 1461 | struct kvm_memory_slot *slot) |
1406 | { | 1462 | { |
1407 | gpa_t gpa = slot->base_gfn << PAGE_SHIFT; | 1463 | gpa_t gpa = slot->base_gfn << PAGE_SHIFT; |
1408 | phys_addr_t size = slot->npages << PAGE_SHIFT; | 1464 | phys_addr_t size = slot->npages << PAGE_SHIFT; |
1409 | 1465 | ||
1410 | spin_lock(&kvm->mmu_lock); | 1466 | spin_lock(&kvm->mmu_lock); |
1411 | unmap_stage2_range(kvm, gpa, size); | 1467 | unmap_stage2_range(kvm, gpa, size); |
1412 | spin_unlock(&kvm->mmu_lock); | 1468 | spin_unlock(&kvm->mmu_lock); |
1469 | } | ||
1470 | |||
1471 | /* | ||
1472 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). | ||
1473 | * | ||
1474 | * Main problems: | ||
1475 | * - S/W ops are local to a CPU (not broadcast) | ||
1476 | * - We have line migration behind our back (speculation) | ||
1477 | * - System caches don't support S/W at all (damn!) | ||
1478 | * | ||
1479 | * In the face of the above, the best we can do is to try and convert | ||
1480 | * S/W ops to VA ops. Because the guest is not allowed to infer the | ||
1481 | * S/W to PA mapping, it can only use S/W to nuke the whole cache, | ||
1482 | * which is a rather good thing for us. | ||
1483 | * | ||
1484 | * Also, S/W ops are only used when turning the caches on/off ("The expected | ||
1485 | * usage of the cache maintenance instructions that operate by set/way | ||
1486 | * is associated with the cache maintenance instructions associated | ||
1487 | * with the powerdown and powerup of caches, if this is required by | ||
1488 | * the implementation."). | ||
1489 | * | ||
1490 | * We use the following policy: | ||
1491 | * | ||
1492 | * - If we trap a S/W operation, we enable VM trapping to detect | ||
1493 | * caches being turned on/off, and do a full clean. | ||
1494 | * | ||
1495 | * - We flush the caches both when they are turned on and when they are turned off. | ||
1496 | * | ||
1497 | * - Once the caches are enabled, we stop trapping VM ops. | ||
1498 | */ | ||
1499 | void kvm_set_way_flush(struct kvm_vcpu *vcpu) | ||
1500 | { | ||
1501 | unsigned long hcr = vcpu_get_hcr(vcpu); | ||
1502 | |||
1503 | /* | ||
1504 | * If this is the first time we do a S/W operation | ||
1505 | * (i.e. HCR_TVM not set), flush the whole of guest memory and enable | ||
1506 | * VM trapping. | ||
1507 | * | ||
1508 | * Otherwise, rely on the VM trapping to wait for the MMU + | ||
1509 | * Caches to be turned off. At that point, we'll be able to | ||
1510 | * clean the caches again. | ||
1511 | */ | ||
1512 | if (!(hcr & HCR_TVM)) { | ||
1513 | trace_kvm_set_way_flush(*vcpu_pc(vcpu), | ||
1514 | vcpu_has_cache_enabled(vcpu)); | ||
1515 | stage2_flush_vm(vcpu->kvm); | ||
1516 | vcpu_set_hcr(vcpu, hcr | HCR_TVM); | ||
1517 | } | ||
1518 | } | ||
1519 | |||
1520 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) | ||
1521 | { | ||
1522 | bool now_enabled = vcpu_has_cache_enabled(vcpu); | ||
1523 | |||
1524 | /* | ||
1525 | * If switching the MMU+caches on, need to invalidate the caches. | ||
1526 | * If switching it off, need to clean the caches. | ||
1527 | * Clean + invalidate always does the trick. | ||
1528 | */ | ||
1529 | if (now_enabled != was_enabled) | ||
1530 | stage2_flush_vm(vcpu->kvm); |
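Taken together, kvm_set_way_flush() and kvm_toggle_cache() implement the policy spelled out in the big comment above: the first trapped S/W op triggers a full flush and turns on HCR_TVM, and each subsequent cache on/off transition flushes again. The toy state machine below models just that flow; it deliberately omits the "stop trapping once caches are on" step mentioned in the policy, and every name in it is a hypothetical stand-in:

/* Tiny state-machine model of the set/way trapping policy. */
#include <stdbool.h>
#include <stdio.h>

static bool trap_vm_ops;    /* models HCR_TVM */
static bool cache_enabled;  /* models the guest-visible cache enable bit */

static void flush_vm(void) { puts("full stage-2 clean+invalidate"); }

/* Guest executed a set/way cache op. */
static void on_set_way_op(void)
{
	if (!trap_vm_ops) {     /* first S/W op: flush, start trapping */
		flush_vm();
		trap_vm_ops = true;
	}
}

/* Guest toggled the MMU/caches via a (now trapped) VM register write. */
static void on_cache_toggle(bool now_enabled)
{
	if (now_enabled != cache_enabled)
		flush_vm();     /* clean+invalidate covers both directions */
	cache_enabled = now_enabled;
}

int main(void)
{
	on_set_way_op();        /* flush + enable trapping */
	on_set_way_op();        /* already trapping: nothing to do */
	on_cache_toggle(true);  /* caches switched on: flush again */
	return 0;
}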
arch/arm/kvm/trace.h
1 | #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) | 1 | #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) |
2 | #define _TRACE_KVM_H | 2 | #define _TRACE_KVM_H |
3 | 3 | ||
4 | #include <linux/tracepoint.h> | 4 | #include <linux/tracepoint.h> |
5 | 5 | ||
6 | #undef TRACE_SYSTEM | 6 | #undef TRACE_SYSTEM |
7 | #define TRACE_SYSTEM kvm | 7 | #define TRACE_SYSTEM kvm |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * Tracepoints for entry/exit to guest | 10 | * Tracepoints for entry/exit to guest |
11 | */ | 11 | */ |
12 | TRACE_EVENT(kvm_entry, | 12 | TRACE_EVENT(kvm_entry, |
13 | TP_PROTO(unsigned long vcpu_pc), | 13 | TP_PROTO(unsigned long vcpu_pc), |
14 | TP_ARGS(vcpu_pc), | 14 | TP_ARGS(vcpu_pc), |
15 | 15 | ||
16 | TP_STRUCT__entry( | 16 | TP_STRUCT__entry( |
17 | __field( unsigned long, vcpu_pc ) | 17 | __field( unsigned long, vcpu_pc ) |
18 | ), | 18 | ), |
19 | 19 | ||
20 | TP_fast_assign( | 20 | TP_fast_assign( |
21 | __entry->vcpu_pc = vcpu_pc; | 21 | __entry->vcpu_pc = vcpu_pc; |
22 | ), | 22 | ), |
23 | 23 | ||
24 | TP_printk("PC: 0x%08lx", __entry->vcpu_pc) | 24 | TP_printk("PC: 0x%08lx", __entry->vcpu_pc) |
25 | ); | 25 | ); |
26 | 26 | ||
27 | TRACE_EVENT(kvm_exit, | 27 | TRACE_EVENT(kvm_exit, |
28 | TP_PROTO(unsigned long vcpu_pc), | 28 | TP_PROTO(unsigned long vcpu_pc), |
29 | TP_ARGS(vcpu_pc), | 29 | TP_ARGS(vcpu_pc), |
30 | 30 | ||
31 | TP_STRUCT__entry( | 31 | TP_STRUCT__entry( |
32 | __field( unsigned long, vcpu_pc ) | 32 | __field( unsigned long, vcpu_pc ) |
33 | ), | 33 | ), |
34 | 34 | ||
35 | TP_fast_assign( | 35 | TP_fast_assign( |
36 | __entry->vcpu_pc = vcpu_pc; | 36 | __entry->vcpu_pc = vcpu_pc; |
37 | ), | 37 | ), |
38 | 38 | ||
39 | TP_printk("PC: 0x%08lx", __entry->vcpu_pc) | 39 | TP_printk("PC: 0x%08lx", __entry->vcpu_pc) |
40 | ); | 40 | ); |
41 | 41 | ||
42 | TRACE_EVENT(kvm_guest_fault, | 42 | TRACE_EVENT(kvm_guest_fault, |
43 | TP_PROTO(unsigned long vcpu_pc, unsigned long hsr, | 43 | TP_PROTO(unsigned long vcpu_pc, unsigned long hsr, |
44 | unsigned long hxfar, | 44 | unsigned long hxfar, |
45 | unsigned long long ipa), | 45 | unsigned long long ipa), |
46 | TP_ARGS(vcpu_pc, hsr, hxfar, ipa), | 46 | TP_ARGS(vcpu_pc, hsr, hxfar, ipa), |
47 | 47 | ||
48 | TP_STRUCT__entry( | 48 | TP_STRUCT__entry( |
49 | __field( unsigned long, vcpu_pc ) | 49 | __field( unsigned long, vcpu_pc ) |
50 | __field( unsigned long, hsr ) | 50 | __field( unsigned long, hsr ) |
51 | __field( unsigned long, hxfar ) | 51 | __field( unsigned long, hxfar ) |
52 | __field( unsigned long long, ipa ) | 52 | __field( unsigned long long, ipa ) |
53 | ), | 53 | ), |
54 | 54 | ||
55 | TP_fast_assign( | 55 | TP_fast_assign( |
56 | __entry->vcpu_pc = vcpu_pc; | 56 | __entry->vcpu_pc = vcpu_pc; |
57 | __entry->hsr = hsr; | 57 | __entry->hsr = hsr; |
58 | __entry->hxfar = hxfar; | 58 | __entry->hxfar = hxfar; |
59 | __entry->ipa = ipa; | 59 | __entry->ipa = ipa; |
60 | ), | 60 | ), |
61 | 61 | ||
62 | TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx", | 62 | TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx", |
63 | __entry->ipa, __entry->hsr, | 63 | __entry->ipa, __entry->hsr, |
64 | __entry->hxfar, __entry->vcpu_pc) | 64 | __entry->hxfar, __entry->vcpu_pc) |
65 | ); | 65 | ); |
66 | 66 | ||
67 | TRACE_EVENT(kvm_irq_line, | 67 | TRACE_EVENT(kvm_irq_line, |
68 | TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level), | 68 | TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level), |
69 | TP_ARGS(type, vcpu_idx, irq_num, level), | 69 | TP_ARGS(type, vcpu_idx, irq_num, level), |
70 | 70 | ||
71 | TP_STRUCT__entry( | 71 | TP_STRUCT__entry( |
72 | __field( unsigned int, type ) | 72 | __field( unsigned int, type ) |
73 | __field( int, vcpu_idx ) | 73 | __field( int, vcpu_idx ) |
74 | __field( int, irq_num ) | 74 | __field( int, irq_num ) |
75 | __field( int, level ) | 75 | __field( int, level ) |
76 | ), | 76 | ), |
77 | 77 | ||
78 | TP_fast_assign( | 78 | TP_fast_assign( |
79 | __entry->type = type; | 79 | __entry->type = type; |
80 | __entry->vcpu_idx = vcpu_idx; | 80 | __entry->vcpu_idx = vcpu_idx; |
81 | __entry->irq_num = irq_num; | 81 | __entry->irq_num = irq_num; |
82 | __entry->level = level; | 82 | __entry->level = level; |
83 | ), | 83 | ), |
84 | 84 | ||
85 | TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d", | 85 | TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d", |
86 | (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" : | 86 | (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" : |
87 | (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" : | 87 | (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" : |
88 | (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN", | 88 | (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN", |
89 | __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level) | 89 | __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level) |
90 | ); | 90 | ); |
91 | 91 | ||
92 | TRACE_EVENT(kvm_mmio_emulate, | 92 | TRACE_EVENT(kvm_mmio_emulate, |
93 | TP_PROTO(unsigned long vcpu_pc, unsigned long instr, | 93 | TP_PROTO(unsigned long vcpu_pc, unsigned long instr, |
94 | unsigned long cpsr), | 94 | unsigned long cpsr), |
95 | TP_ARGS(vcpu_pc, instr, cpsr), | 95 | TP_ARGS(vcpu_pc, instr, cpsr), |
96 | 96 | ||
97 | TP_STRUCT__entry( | 97 | TP_STRUCT__entry( |
98 | __field( unsigned long, vcpu_pc ) | 98 | __field( unsigned long, vcpu_pc ) |
99 | __field( unsigned long, instr ) | 99 | __field( unsigned long, instr ) |
100 | __field( unsigned long, cpsr ) | 100 | __field( unsigned long, cpsr ) |
101 | ), | 101 | ), |
102 | 102 | ||
103 | TP_fast_assign( | 103 | TP_fast_assign( |
104 | __entry->vcpu_pc = vcpu_pc; | 104 | __entry->vcpu_pc = vcpu_pc; |
105 | __entry->instr = instr; | 105 | __entry->instr = instr; |
106 | __entry->cpsr = cpsr; | 106 | __entry->cpsr = cpsr; |
107 | ), | 107 | ), |
108 | 108 | ||
109 | TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)", | 109 | TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)", |
110 | __entry->vcpu_pc, __entry->instr, __entry->cpsr) | 110 | __entry->vcpu_pc, __entry->instr, __entry->cpsr) |
111 | ); | 111 | ); |
112 | 112 | ||
113 | /* Architecturally implementation defined CP15 register access */ | 113 | /* Architecturally implementation defined CP15 register access */ |
114 | TRACE_EVENT(kvm_emulate_cp15_imp, | 114 | TRACE_EVENT(kvm_emulate_cp15_imp, |
115 | TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn, | 115 | TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn, |
116 | unsigned long CRm, unsigned long Op2, bool is_write), | 116 | unsigned long CRm, unsigned long Op2, bool is_write), |
117 | TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write), | 117 | TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write), |
118 | 118 | ||
119 | TP_STRUCT__entry( | 119 | TP_STRUCT__entry( |
120 | __field( unsigned int, Op1 ) | 120 | __field( unsigned int, Op1 ) |
121 | __field( unsigned int, Rt1 ) | 121 | __field( unsigned int, Rt1 ) |
122 | __field( unsigned int, CRn ) | 122 | __field( unsigned int, CRn ) |
123 | __field( unsigned int, CRm ) | 123 | __field( unsigned int, CRm ) |
124 | __field( unsigned int, Op2 ) | 124 | __field( unsigned int, Op2 ) |
125 | __field( bool, is_write ) | 125 | __field( bool, is_write ) |
126 | ), | 126 | ), |
127 | 127 | ||
128 | TP_fast_assign( | 128 | TP_fast_assign( |
129 | __entry->is_write = is_write; | 129 | __entry->is_write = is_write; |
130 | __entry->Op1 = Op1; | 130 | __entry->Op1 = Op1; |
131 | __entry->Rt1 = Rt1; | 131 | __entry->Rt1 = Rt1; |
132 | __entry->CRn = CRn; | 132 | __entry->CRn = CRn; |
133 | __entry->CRm = CRm; | 133 | __entry->CRm = CRm; |
134 | __entry->Op2 = Op2; | 134 | __entry->Op2 = Op2; |
135 | ), | 135 | ), |
136 | 136 | ||
137 | TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u", | 137 | TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u", |
138 | (__entry->is_write) ? "mcr" : "mrc", | 138 | (__entry->is_write) ? "mcr" : "mrc", |
139 | __entry->Op1, __entry->Rt1, __entry->CRn, | 139 | __entry->Op1, __entry->Rt1, __entry->CRn, |
140 | __entry->CRm, __entry->Op2) | 140 | __entry->CRm, __entry->Op2) |
141 | ); | 141 | ); |
142 | 142 | ||
143 | TRACE_EVENT(kvm_wfi, | 143 | TRACE_EVENT(kvm_wfi, |
144 | TP_PROTO(unsigned long vcpu_pc), | 144 | TP_PROTO(unsigned long vcpu_pc), |
145 | TP_ARGS(vcpu_pc), | 145 | TP_ARGS(vcpu_pc), |
146 | 146 | ||
147 | TP_STRUCT__entry( | 147 | TP_STRUCT__entry( |
148 | __field( unsigned long, vcpu_pc ) | 148 | __field( unsigned long, vcpu_pc ) |
149 | ), | 149 | ), |
150 | 150 | ||
151 | TP_fast_assign( | 151 | TP_fast_assign( |
152 | __entry->vcpu_pc = vcpu_pc; | 152 | __entry->vcpu_pc = vcpu_pc; |
153 | ), | 153 | ), |
154 | 154 | ||
155 | TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc) | 155 | TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc) |
156 | ); | 156 | ); |
157 | 157 | ||
158 | TRACE_EVENT(kvm_unmap_hva, | 158 | TRACE_EVENT(kvm_unmap_hva, |
159 | TP_PROTO(unsigned long hva), | 159 | TP_PROTO(unsigned long hva), |
160 | TP_ARGS(hva), | 160 | TP_ARGS(hva), |
161 | 161 | ||
162 | TP_STRUCT__entry( | 162 | TP_STRUCT__entry( |
163 | __field( unsigned long, hva ) | 163 | __field( unsigned long, hva ) |
164 | ), | 164 | ), |
165 | 165 | ||
166 | TP_fast_assign( | 166 | TP_fast_assign( |
167 | __entry->hva = hva; | 167 | __entry->hva = hva; |
168 | ), | 168 | ), |
169 | 169 | ||
170 | TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva) | 170 | TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva) |
171 | ); | 171 | ); |
172 | 172 | ||
173 | TRACE_EVENT(kvm_unmap_hva_range, | 173 | TRACE_EVENT(kvm_unmap_hva_range, |
174 | TP_PROTO(unsigned long start, unsigned long end), | 174 | TP_PROTO(unsigned long start, unsigned long end), |
175 | TP_ARGS(start, end), | 175 | TP_ARGS(start, end), |
176 | 176 | ||
177 | TP_STRUCT__entry( | 177 | TP_STRUCT__entry( |
178 | __field( unsigned long, start ) | 178 | __field( unsigned long, start ) |
179 | __field( unsigned long, end ) | 179 | __field( unsigned long, end ) |
180 | ), | 180 | ), |
181 | 181 | ||
182 | TP_fast_assign( | 182 | TP_fast_assign( |
183 | __entry->start = start; | 183 | __entry->start = start; |
184 | __entry->end = end; | 184 | __entry->end = end; |
185 | ), | 185 | ), |
186 | 186 | ||
187 | TP_printk("mmu notifier unmap range: %#08lx -- %#08lx", | 187 | TP_printk("mmu notifier unmap range: %#08lx -- %#08lx", |
188 | __entry->start, __entry->end) | 188 | __entry->start, __entry->end) |
189 | ); | 189 | ); |
190 | 190 | ||
191 | TRACE_EVENT(kvm_set_spte_hva, | 191 | TRACE_EVENT(kvm_set_spte_hva, |
192 | TP_PROTO(unsigned long hva), | 192 | TP_PROTO(unsigned long hva), |
193 | TP_ARGS(hva), | 193 | TP_ARGS(hva), |
194 | 194 | ||
195 | TP_STRUCT__entry( | 195 | TP_STRUCT__entry( |
196 | __field( unsigned long, hva ) | 196 | __field( unsigned long, hva ) |
197 | ), | 197 | ), |
198 | 198 | ||
199 | TP_fast_assign( | 199 | TP_fast_assign( |
200 | __entry->hva = hva; | 200 | __entry->hva = hva; |
201 | ), | 201 | ), |
202 | 202 | ||
203 | TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) | 203 | TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) |
204 | ); | 204 | ); |
205 | 205 | ||
206 | TRACE_EVENT(kvm_hvc, | 206 | TRACE_EVENT(kvm_hvc, |
207 | TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm), | 207 | TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm), |
208 | TP_ARGS(vcpu_pc, r0, imm), | 208 | TP_ARGS(vcpu_pc, r0, imm), |
209 | 209 | ||
210 | TP_STRUCT__entry( | 210 | TP_STRUCT__entry( |
211 | __field( unsigned long, vcpu_pc ) | 211 | __field( unsigned long, vcpu_pc ) |
212 | __field( unsigned long, r0 ) | 212 | __field( unsigned long, r0 ) |
213 | __field( unsigned long, imm ) | 213 | __field( unsigned long, imm ) |
214 | ), | 214 | ), |
215 | 215 | ||
216 | TP_fast_assign( | 216 | TP_fast_assign( |
217 | __entry->vcpu_pc = vcpu_pc; | 217 | __entry->vcpu_pc = vcpu_pc; |
218 | __entry->r0 = r0; | 218 | __entry->r0 = r0; |
219 | __entry->imm = imm; | 219 | __entry->imm = imm; |
220 | ), | 220 | ), |
221 | 221 | ||
222 | TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)", | 222 | TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)", |
223 | __entry->vcpu_pc, __entry->r0, __entry->imm) | 223 | __entry->vcpu_pc, __entry->r0, __entry->imm) |
224 | ); | 224 | ); |
225 | 225 | ||
226 | TRACE_EVENT(kvm_set_way_flush, | ||
227 | TP_PROTO(unsigned long vcpu_pc, bool cache), | ||
228 | TP_ARGS(vcpu_pc, cache), | ||
229 | |||
230 | TP_STRUCT__entry( | ||
231 | __field( unsigned long, vcpu_pc ) | ||
232 | __field( bool, cache ) | ||
233 | ), | ||
234 | |||
235 | TP_fast_assign( | ||
236 | __entry->vcpu_pc = vcpu_pc; | ||
237 | __entry->cache = cache; | ||
238 | ), | ||
239 | |||
240 | TP_printk("S/W flush at 0x%016lx (cache %s)", | ||
241 | __entry->vcpu_pc, __entry->cache ? "on" : "off") | ||
242 | ); | ||
243 | |||
244 | TRACE_EVENT(kvm_toggle_cache, | ||
245 | TP_PROTO(unsigned long vcpu_pc, bool was, bool now), | ||
246 | TP_ARGS(vcpu_pc, was, now), | ||
247 | |||
248 | TP_STRUCT__entry( | ||
249 | __field( unsigned long, vcpu_pc ) | ||
250 | __field( bool, was ) | ||
251 | __field( bool, now ) | ||
252 | ), | ||
253 | |||
254 | TP_fast_assign( | ||
255 | __entry->vcpu_pc = vcpu_pc; | ||
256 | __entry->was = was; | ||
257 | __entry->now = now; | ||
258 | ), | ||
259 | |||
260 | TP_printk("VM op at 0x%016lx (cache was %s, now %s)", | ||
261 | __entry->vcpu_pc, __entry->was ? "on" : "off", | ||
262 | __entry->now ? "on" : "off") | ||
263 | ); | ||
264 | |||
226 | #endif /* _TRACE_KVM_H */ | 265 | #endif /* _TRACE_KVM_H */ |
227 | 266 | ||
228 | #undef TRACE_INCLUDE_PATH | 267 | #undef TRACE_INCLUDE_PATH |
229 | #define TRACE_INCLUDE_PATH arch/arm/kvm | 268 | #define TRACE_INCLUDE_PATH arch/arm/kvm |
230 | #undef TRACE_INCLUDE_FILE | 269 | #undef TRACE_INCLUDE_FILE |
231 | #define TRACE_INCLUDE_FILE trace | 270 | #define TRACE_INCLUDE_FILE trace |
232 | 271 | ||
233 | /* This part must be outside protection */ | 272 | /* This part must be outside protection */ |
234 | #include <trace/define_trace.h> | 273 | #include <trace/define_trace.h> |
235 | 274 |
arch/arm64/include/asm/kvm_emulate.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2012,2013 - ARM Ltd | 2 | * Copyright (C) 2012,2013 - ARM Ltd |
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | 3 | * Author: Marc Zyngier <marc.zyngier@arm.com> |
4 | * | 4 | * |
5 | * Derived from arch/arm/include/kvm_emulate.h | 5 | * Derived from arch/arm/include/kvm_emulate.h |
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | 6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | 7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, | 13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #ifndef __ARM64_KVM_EMULATE_H__ | 22 | #ifndef __ARM64_KVM_EMULATE_H__ |
23 | #define __ARM64_KVM_EMULATE_H__ | 23 | #define __ARM64_KVM_EMULATE_H__ |
24 | 24 | ||
25 | #include <linux/kvm_host.h> | 25 | #include <linux/kvm_host.h> |
26 | #include <asm/kvm_asm.h> | 26 | #include <asm/kvm_asm.h> |
27 | #include <asm/kvm_arm.h> | 27 | #include <asm/kvm_arm.h> |
28 | #include <asm/kvm_mmio.h> | 28 | #include <asm/kvm_mmio.h> |
29 | #include <asm/ptrace.h> | 29 | #include <asm/ptrace.h> |
30 | 30 | ||
31 | unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num); | 31 | unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num); |
32 | unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu); | 32 | unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu); |
33 | 33 | ||
34 | bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); | 34 | bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); |
35 | void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr); | 35 | void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr); |
36 | 36 | ||
37 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); | 37 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); |
38 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); | 38 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); |
39 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); | 39 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); |
40 | 40 | ||
41 | static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) | 41 | static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) |
42 | { | 42 | { |
43 | vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; | 43 | vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; |
44 | if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) | 44 | if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) |
45 | vcpu->arch.hcr_el2 &= ~HCR_RW; | 45 | vcpu->arch.hcr_el2 &= ~HCR_RW; |
46 | } | 46 | } |
47 | 47 | ||
48 | static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu) | ||
49 | { | ||
50 | return vcpu->arch.hcr_el2; | ||
51 | } | ||
52 | |||
53 | static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr) | ||
54 | { | ||
55 | vcpu->arch.hcr_el2 = hcr; | ||
56 | } | ||
57 | |||
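The new vcpu_get_hcr()/vcpu_set_hcr() accessors exist so that common code such as kvm_set_way_flush() earlier in this diff can read-modify-write the HCR without touching the arch-specific field directly. A toy, self-contained model of that pattern; the struct, the accessors and the HCR_TVM bit position are illustrative assumptions:

/* Minimal model of the HCR accessor read-modify-write pattern. */
#include <stdio.h>

#define HCR_TVM (1UL << 26)   /* illustrative bit position */

struct toy_vcpu { unsigned long hcr; };

static unsigned long toy_get_hcr(struct toy_vcpu *v) { return v->hcr; }
static void toy_set_hcr(struct toy_vcpu *v, unsigned long hcr) { v->hcr = hcr; }

int main(void)
{
	struct toy_vcpu v = { .hcr = 0 };

	/* Read-modify-write, as done when enabling VM-register trapping. */
	toy_set_hcr(&v, toy_get_hcr(&v) | HCR_TVM);
	printf("hcr = %#lx\n", v.hcr);
	return 0;
}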
48 | static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) | 58 | static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) |
49 | { | 59 | { |
50 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; | 60 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; |
51 | } | 61 | } |
52 | 62 | ||
53 | static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu) | 63 | static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu) |
54 | { | 64 | { |
55 | return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1; | 65 | return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1; |
56 | } | 66 | } |
57 | 67 | ||
58 | static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) | 68 | static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) |
59 | { | 69 | { |
60 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate; | 70 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate; |
61 | } | 71 | } |
62 | 72 | ||
63 | static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) | 73 | static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) |
64 | { | 74 | { |
65 | return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT); | 75 | return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT); |
66 | } | 76 | } |
67 | 77 | ||
68 | static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) | 78 | static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) |
69 | { | 79 | { |
70 | if (vcpu_mode_is_32bit(vcpu)) | 80 | if (vcpu_mode_is_32bit(vcpu)) |
71 | return kvm_condition_valid32(vcpu); | 81 | return kvm_condition_valid32(vcpu); |
72 | 82 | ||
73 | return true; | 83 | return true; |
74 | } | 84 | } |
75 | 85 | ||
76 | static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) | 86 | static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) |
77 | { | 87 | { |
78 | if (vcpu_mode_is_32bit(vcpu)) | 88 | if (vcpu_mode_is_32bit(vcpu)) |
79 | kvm_skip_instr32(vcpu, is_wide_instr); | 89 | kvm_skip_instr32(vcpu, is_wide_instr); |
80 | else | 90 | else |
81 | *vcpu_pc(vcpu) += 4; | 91 | *vcpu_pc(vcpu) += 4; |
82 | } | 92 | } |
83 | 93 | ||
84 | static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) | 94 | static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) |
85 | { | 95 | { |
86 | *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; | 96 | *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT; |
87 | } | 97 | } |
88 | 98 | ||
89 | static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) | 99 | static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) |
90 | { | 100 | { |
91 | if (vcpu_mode_is_32bit(vcpu)) | 101 | if (vcpu_mode_is_32bit(vcpu)) |
92 | return vcpu_reg32(vcpu, reg_num); | 102 | return vcpu_reg32(vcpu, reg_num); |
93 | 103 | ||
94 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; | 104 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; |
95 | } | 105 | } |
96 | 106 | ||
97 | /* Get vcpu SPSR for current mode */ | 107 | /* Get vcpu SPSR for current mode */ |
98 | static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu) | 108 | static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu) |
99 | { | 109 | { |
100 | if (vcpu_mode_is_32bit(vcpu)) | 110 | if (vcpu_mode_is_32bit(vcpu)) |
101 | return vcpu_spsr32(vcpu); | 111 | return vcpu_spsr32(vcpu); |
102 | 112 | ||
103 | return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1]; | 113 | return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1]; |
104 | } | 114 | } |
105 | 115 | ||
106 | static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) | 116 | static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) |
107 | { | 117 | { |
108 | u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; | 118 | u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; |
109 | 119 | ||
110 | if (vcpu_mode_is_32bit(vcpu)) | 120 | if (vcpu_mode_is_32bit(vcpu)) |
111 | return mode > COMPAT_PSR_MODE_USR; | 121 | return mode > COMPAT_PSR_MODE_USR; |
112 | 122 | ||
113 | return mode != PSR_MODE_EL0t; | 123 | return mode != PSR_MODE_EL0t; |
114 | } | 124 | } |
115 | 125 | ||
116 | static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) | 126 | static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu) |
117 | { | 127 | { |
118 | return vcpu->arch.fault.esr_el2; | 128 | return vcpu->arch.fault.esr_el2; |
119 | } | 129 | } |
120 | 130 | ||
121 | static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) | 131 | static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) |
122 | { | 132 | { |
123 | return vcpu->arch.fault.far_el2; | 133 | return vcpu->arch.fault.far_el2; |
124 | } | 134 | } |
125 | 135 | ||
126 | static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) | 136 | static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) |
127 | { | 137 | { |
128 | return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8; | 138 | return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8; |
129 | } | 139 | } |
130 | 140 | ||
131 | static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) | 141 | static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) |
132 | { | 142 | { |
133 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV); | 143 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV); |
134 | } | 144 | } |
135 | 145 | ||
136 | static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) | 146 | static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) |
137 | { | 147 | { |
138 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR); | 148 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR); |
139 | } | 149 | } |
140 | 150 | ||
141 | static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) | 151 | static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) |
142 | { | 152 | { |
143 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE); | 153 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE); |
144 | } | 154 | } |
145 | 155 | ||
146 | static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) | 156 | static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) |
147 | { | 157 | { |
148 | return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT; | 158 | return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT; |
149 | } | 159 | } |
150 | 160 | ||
151 | static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) | 161 | static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu) |
152 | { | 162 | { |
153 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA); | 163 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA); |
154 | } | 164 | } |
155 | 165 | ||
156 | static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) | 166 | static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) |
157 | { | 167 | { |
158 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW); | 168 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW); |
159 | } | 169 | } |
160 | 170 | ||
161 | static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) | 171 | static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) |
162 | { | 172 | { |
163 | return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT); | 173 | return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT); |
164 | } | 174 | } |
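
The decode these helpers perform can be exercised in isolation. This sketch assumes the ISS layout behind the ESR_EL2_* constants (ISV at bit 24, SAS at bits [23:22], SRT at bits [20:16], WNR at bit 6); the sample syndrome is fabricated, not taken from hardware:

#include <stdint.h>
#include <stdio.h>

#define ESR_ISV         (1u << 24)
#define ESR_SAS         (3u << 22)
#define ESR_SAS_SHIFT   22
#define ESR_SRT         (0x1fu << 16)
#define ESR_SRT_SHIFT   16
#define ESR_WNR         (1u << 6)

int main(void)
{
        /* a hypothetical 2-byte write whose transfer register is x3 */
        uint32_t esr = ESR_ISV | (1u << ESR_SAS_SHIFT) |
                       (3u << ESR_SRT_SHIFT) | ESR_WNR;

        printf("valid=%d write=%d rd=x%u size=%d bytes\n",
               !!(esr & ESR_ISV), !!(esr & ESR_WNR),
               (esr & ESR_SRT) >> ESR_SRT_SHIFT,
               1 << ((esr & ESR_SAS) >> ESR_SAS_SHIFT));
        return 0;
}
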
165 | 175 | ||
166 | /* This one is not specific to Data Abort */ | 176 | /* This one is not specific to Data Abort */ |
167 | static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) | 177 | static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) |
168 | { | 178 | { |
169 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL); | 179 | return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL); |
170 | } | 180 | } |
171 | 181 | ||
172 | static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) | 182 | static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) |
173 | { | 183 | { |
174 | return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT; | 184 | return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT; |
175 | } | 185 | } |
176 | 186 | ||
177 | static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) | 187 | static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) |
178 | { | 188 | { |
179 | return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT; | 189 | return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT; |
180 | } | 190 | } |
181 | 191 | ||
182 | static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) | 192 | static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) |
183 | { | 193 | { |
184 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC; | 194 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC; |
185 | } | 195 | } |
186 | 196 | ||
187 | static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) | 197 | static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu) |
188 | { | 198 | { |
189 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; | 199 | return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE; |
190 | } | 200 | } |
191 | 201 | ||
192 | static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) | 202 | static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu) |
193 | { | 203 | { |
194 | return vcpu_sys_reg(vcpu, MPIDR_EL1); | 204 | return vcpu_sys_reg(vcpu, MPIDR_EL1); |
195 | } | 205 | } |
196 | 206 | ||
197 | static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) | 207 | static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) |
198 | { | 208 | { |
199 | if (vcpu_mode_is_32bit(vcpu)) | 209 | if (vcpu_mode_is_32bit(vcpu)) |
200 | *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT; | 210 | *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT; |
201 | else | 211 | else |
202 | vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25); | 212 | vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25); |
203 | } | 213 | } |
204 | 214 | ||
205 | static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) | 215 | static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) |
206 | { | 216 | { |
207 | if (vcpu_mode_is_32bit(vcpu)) | 217 | if (vcpu_mode_is_32bit(vcpu)) |
208 | return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT); | 218 | return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT); |
209 | 219 | ||
210 | return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); | 220 | return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); |
211 | } | 221 | } |
212 | 222 | ||
213 | static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, | 223 | static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, |
214 | unsigned long data, | 224 | unsigned long data, |
215 | unsigned int len) | 225 | unsigned int len) |
216 | { | 226 | { |
217 | if (kvm_vcpu_is_be(vcpu)) { | 227 | if (kvm_vcpu_is_be(vcpu)) { |
218 | switch (len) { | 228 | switch (len) { |
219 | case 1: | 229 | case 1: |
220 | return data & 0xff; | 230 | return data & 0xff; |
221 | case 2: | 231 | case 2: |
222 | return be16_to_cpu(data & 0xffff); | 232 | return be16_to_cpu(data & 0xffff); |
223 | case 4: | 233 | case 4: |
224 | return be32_to_cpu(data & 0xffffffff); | 234 | return be32_to_cpu(data & 0xffffffff); |
225 | default: | 235 | default: |
226 | return be64_to_cpu(data); | 236 | return be64_to_cpu(data); |
227 | } | 237 | } |
228 | } else { | 238 | } else { |
229 | switch (len) { | 239 | switch (len) { |
230 | case 1: | 240 | case 1: |
231 | return data & 0xff; | 241 | return data & 0xff; |
232 | case 2: | 242 | case 2: |
233 | return le16_to_cpu(data & 0xffff); | 243 | return le16_to_cpu(data & 0xffff); |
234 | case 4: | 244 | case 4: |
235 | return le32_to_cpu(data & 0xffffffff); | 245 | return le32_to_cpu(data & 0xffffffff); |
236 | default: | 246 | default: |
237 | return le64_to_cpu(data); | 247 | return le64_to_cpu(data); |
238 | } | 248 | } |
239 | } | 249 | } |
240 | 250 | ||
241 | return data; /* Leave LE untouched */ | 251 | return data; /* Leave LE untouched */ |
242 | } | 252 | } |
243 | 253 | ||
244 | static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, | 254 | static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, |
245 | unsigned long data, | 255 | unsigned long data, |
246 | unsigned int len) | 256 | unsigned int len) |
247 | { | 257 | { |
248 | if (kvm_vcpu_is_be(vcpu)) { | 258 | if (kvm_vcpu_is_be(vcpu)) { |
249 | switch (len) { | 259 | switch (len) { |
250 | case 1: | 260 | case 1: |
251 | return data & 0xff; | 261 | return data & 0xff; |
252 | case 2: | 262 | case 2: |
253 | return cpu_to_be16(data & 0xffff); | 263 | return cpu_to_be16(data & 0xffff); |
254 | case 4: | 264 | case 4: |
255 | return cpu_to_be32(data & 0xffffffff); | 265 | return cpu_to_be32(data & 0xffffffff); |
256 | default: | 266 | default: |
257 | return cpu_to_be64(data); | 267 | return cpu_to_be64(data); |
258 | } | 268 | } |
259 | } else { | 269 | } else { |
260 | switch (len) { | 270 | switch (len) { |
261 | case 1: | 271 | case 1: |
262 | return data & 0xff; | 272 | return data & 0xff; |
263 | case 2: | 273 | case 2: |
264 | return cpu_to_le16(data & 0xffff); | 274 | return cpu_to_le16(data & 0xffff); |
265 | case 4: | 275 | case 4: |
266 | return cpu_to_le32(data & 0xffffffff); | 276 | return cpu_to_le32(data & 0xffffffff); |
267 | default: | 277 | default: |
268 | return cpu_to_le64(data); | 278 | return cpu_to_le64(data); |
269 | } | 279 | } |
270 | } | 280 | } |
271 | 281 | ||
272 | return data; /* Leave LE untouched */ | 282 | return data; /* Leave LE untouched */ |
273 | } | 283 | } |
274 | 284 | ||
275 | #endif /* __ARM64_KVM_EMULATE_H__ */ | 285 | #endif /* __ARM64_KVM_EMULATE_H__ */ |
276 | 286 |
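
The two conversion helpers above boil down to a byte swap whenever guest and host endianness differ. A self-contained sketch of the 32-bit case, using htonl() as a userspace stand-in for cpu_to_be32() (an assumption of this sketch; the kernel helpers are not available outside the kernel):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>  /* htonl stands in for cpu_to_be32 */

int main(void)
{
        uint32_t host_val = 0x11223344;
        uint32_t guest_be = htonl(host_val);    /* the len == 4 case */

        printf("host 0x%08x -> guest(BE) 0x%08x\n", host_val, guest_be);
        /* on a little-endian host this prints 0x44332211; on a
         * big-endian host the conversion is the identity */
        return 0;
}
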
arch/arm64/include/asm/kvm_host.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2012,2013 - ARM Ltd | 2 | * Copyright (C) 2012,2013 - ARM Ltd |
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | 3 | * Author: Marc Zyngier <marc.zyngier@arm.com> |
4 | * | 4 | * |
5 | * Derived from arch/arm/include/asm/kvm_host.h: | 5 | * Derived from arch/arm/include/asm/kvm_host.h: |
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | 6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> | 7 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, | 13 | * This program is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | * GNU General Public License for more details. | 16 | * GNU General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #ifndef __ARM64_KVM_HOST_H__ | 22 | #ifndef __ARM64_KVM_HOST_H__ |
23 | #define __ARM64_KVM_HOST_H__ | 23 | #define __ARM64_KVM_HOST_H__ |
24 | 24 | ||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/kvm_types.h> | 26 | #include <linux/kvm_types.h> |
27 | #include <asm/kvm.h> | 27 | #include <asm/kvm.h> |
28 | #include <asm/kvm_asm.h> | 28 | #include <asm/kvm_asm.h> |
29 | #include <asm/kvm_mmio.h> | 29 | #include <asm/kvm_mmio.h> |
30 | 30 | ||
31 | #if defined(CONFIG_KVM_ARM_MAX_VCPUS) | 31 | #if defined(CONFIG_KVM_ARM_MAX_VCPUS) |
32 | #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS | 32 | #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS |
33 | #else | 33 | #else |
34 | #define KVM_MAX_VCPUS 0 | 34 | #define KVM_MAX_VCPUS 0 |
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | #define KVM_USER_MEM_SLOTS 32 | 37 | #define KVM_USER_MEM_SLOTS 32 |
38 | #define KVM_PRIVATE_MEM_SLOTS 4 | 38 | #define KVM_PRIVATE_MEM_SLOTS 4 |
39 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | 39 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 |
40 | 40 | ||
41 | #include <kvm/arm_vgic.h> | 41 | #include <kvm/arm_vgic.h> |
42 | #include <kvm/arm_arch_timer.h> | 42 | #include <kvm/arm_arch_timer.h> |
43 | 43 | ||
44 | #define KVM_VCPU_MAX_FEATURES 3 | 44 | #define KVM_VCPU_MAX_FEATURES 3 |
45 | 45 | ||
46 | int __attribute_const__ kvm_target_cpu(void); | 46 | int __attribute_const__ kvm_target_cpu(void); |
47 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); | 47 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); |
48 | int kvm_arch_dev_ioctl_check_extension(long ext); | 48 | int kvm_arch_dev_ioctl_check_extension(long ext); |
49 | 49 | ||
50 | struct kvm_arch { | 50 | struct kvm_arch { |
51 | /* The VMID generation used for the virt. memory system */ | 51 | /* The VMID generation used for the virt. memory system */ |
52 | u64 vmid_gen; | 52 | u64 vmid_gen; |
53 | u32 vmid; | 53 | u32 vmid; |
54 | 54 | ||
55 | /* 1-level 2nd stage table and lock */ | 55 | /* 1-level 2nd stage table and lock */ |
56 | spinlock_t pgd_lock; | 56 | spinlock_t pgd_lock; |
57 | pgd_t *pgd; | 57 | pgd_t *pgd; |
58 | 58 | ||
59 | /* VTTBR value associated with above pgd and vmid */ | 59 | /* VTTBR value associated with above pgd and vmid */ |
60 | u64 vttbr; | 60 | u64 vttbr; |
61 | 61 | ||
62 | /* Interrupt controller */ | 62 | /* Interrupt controller */ |
63 | struct vgic_dist vgic; | 63 | struct vgic_dist vgic; |
64 | 64 | ||
65 | /* Timer */ | 65 | /* Timer */ |
66 | struct arch_timer_kvm timer; | 66 | struct arch_timer_kvm timer; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | #define KVM_NR_MEM_OBJS 40 | 69 | #define KVM_NR_MEM_OBJS 40 |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * We don't want allocation failures within the mmu code, so we preallocate | 72 | * We don't want allocation failures within the mmu code, so we preallocate |
73 | * enough memory for a single page fault in a cache. | 73 | * enough memory for a single page fault in a cache. |
74 | */ | 74 | */ |
75 | struct kvm_mmu_memory_cache { | 75 | struct kvm_mmu_memory_cache { |
76 | int nobjs; | 76 | int nobjs; |
77 | void *objects[KVM_NR_MEM_OBJS]; | 77 | void *objects[KVM_NR_MEM_OBJS]; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | struct kvm_vcpu_fault_info { | 80 | struct kvm_vcpu_fault_info { |
81 | u32 esr_el2; /* Hyp Syndrome Register */ | 81 | u32 esr_el2; /* Hyp Syndrome Register */ |
82 | u64 far_el2; /* Hyp Fault Address Register */ | 82 | u64 far_el2; /* Hyp Fault Address Register */ |
83 | u64 hpfar_el2; /* Hyp IPA Fault Address Register */ | 83 | u64 hpfar_el2; /* Hyp IPA Fault Address Register */ |
84 | }; | 84 | }; |
85 | 85 | ||
86 | struct kvm_cpu_context { | 86 | struct kvm_cpu_context { |
87 | struct kvm_regs gp_regs; | 87 | struct kvm_regs gp_regs; |
88 | union { | 88 | union { |
89 | u64 sys_regs[NR_SYS_REGS]; | 89 | u64 sys_regs[NR_SYS_REGS]; |
90 | u32 copro[NR_COPRO_REGS]; | 90 | u32 copro[NR_COPRO_REGS]; |
91 | }; | 91 | }; |
92 | }; | 92 | }; |
93 | 93 | ||
94 | typedef struct kvm_cpu_context kvm_cpu_context_t; | 94 | typedef struct kvm_cpu_context kvm_cpu_context_t; |
95 | 95 | ||
96 | struct kvm_vcpu_arch { | 96 | struct kvm_vcpu_arch { |
97 | struct kvm_cpu_context ctxt; | 97 | struct kvm_cpu_context ctxt; |
98 | 98 | ||
99 | /* HYP configuration */ | 99 | /* HYP configuration */ |
100 | u64 hcr_el2; | 100 | u64 hcr_el2; |
101 | 101 | ||
102 | /* Exception Information */ | 102 | /* Exception Information */ |
103 | struct kvm_vcpu_fault_info fault; | 103 | struct kvm_vcpu_fault_info fault; |
104 | 104 | ||
105 | /* Debug state */ | 105 | /* Debug state */ |
106 | u64 debug_flags; | 106 | u64 debug_flags; |
107 | 107 | ||
108 | /* Pointer to host CPU context */ | 108 | /* Pointer to host CPU context */ |
109 | kvm_cpu_context_t *host_cpu_context; | 109 | kvm_cpu_context_t *host_cpu_context; |
110 | 110 | ||
111 | /* VGIC state */ | 111 | /* VGIC state */ |
112 | struct vgic_cpu vgic_cpu; | 112 | struct vgic_cpu vgic_cpu; |
113 | struct arch_timer_cpu timer_cpu; | 113 | struct arch_timer_cpu timer_cpu; |
114 | 114 | ||
115 | /* | 115 | /* |
116 | * Anything that is not used directly from assembly code goes | 116 | * Anything that is not used directly from assembly code goes |
117 | * here. | 117 | * here. |
118 | */ | 118 | */ |
119 | /* dcache set/way operation pending */ | ||
120 | int last_pcpu; | ||
121 | cpumask_t require_dcache_flush; | ||
122 | 119 | ||
123 | /* Don't run the guest */ | 120 | /* Don't run the guest */ |
124 | bool pause; | 121 | bool pause; |
125 | 122 | ||
126 | /* IO related fields */ | 123 | /* IO related fields */ |
127 | struct kvm_decode mmio_decode; | 124 | struct kvm_decode mmio_decode; |
128 | 125 | ||
129 | /* Interrupt related fields */ | 126 | /* Interrupt related fields */ |
130 | u64 irq_lines; /* IRQ and FIQ levels */ | 127 | u64 irq_lines; /* IRQ and FIQ levels */ |
131 | 128 | ||
132 | /* Cache some mmu pages needed inside spinlock regions */ | 129 | /* Cache some mmu pages needed inside spinlock regions */ |
133 | struct kvm_mmu_memory_cache mmu_page_cache; | 130 | struct kvm_mmu_memory_cache mmu_page_cache; |
134 | 131 | ||
135 | /* Target CPU and feature flags */ | 132 | /* Target CPU and feature flags */ |
136 | int target; | 133 | int target; |
137 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); | 134 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); |
138 | 135 | ||
139 | /* Detect first run of a vcpu */ | 136 | /* Detect first run of a vcpu */ |
140 | bool has_run_once; | 137 | bool has_run_once; |
141 | }; | 138 | }; |
142 | 139 | ||
143 | #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) | 140 | #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) |
144 | #define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) | 141 | #define vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)]) |
145 | /* | 142 | /* |
146 | * CP14 and CP15 live in the same array, as they are backed by the | 143 | * CP14 and CP15 live in the same array, as they are backed by the |
147 | * same system registers. | 144 | * same system registers. |
148 | */ | 145 | */ |
149 | #define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) | 146 | #define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) |
150 | #define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) | 147 | #define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) |
151 | 148 | ||
152 | #ifdef CONFIG_CPU_BIG_ENDIAN | 149 | #ifdef CONFIG_CPU_BIG_ENDIAN |
153 | #define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r)) | 150 | #define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r)) |
154 | #define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r) + 1) | 151 | #define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r) + 1) |
155 | #else | 152 | #else |
156 | #define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r) + 1) | 153 | #define vcpu_cp15_64_high(v,r) vcpu_cp15((v),(r) + 1) |
157 | #define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r)) | 154 | #define vcpu_cp15_64_low(v,r) vcpu_cp15((v),(r)) |
158 | #endif | 155 | #endif |
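
The _64_low/_64_high macros above split one 64-bit AArch32 register across two consecutive u32 slots of the copro[] array, with the slot order picked by host endianness. A small sketch of the little-endian arrangement:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t copro[2] = { 0 };
        uint64_t val = 0x1122334455667788ULL;

        copro[0] = (uint32_t)val;               /* _64_low on LE hosts */
        copro[1] = (uint32_t)(val >> 32);       /* _64_high on LE hosts */

        printf("low=0x%08x high=0x%08x\n", copro[0], copro[1]);
        /* low=0x55667788 high=0x11223344; a big-endian host swaps
         * which slot each macro names, as the #ifdef above shows */
        return 0;
}
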
159 | 156 | ||
160 | struct kvm_vm_stat { | 157 | struct kvm_vm_stat { |
161 | u32 remote_tlb_flush; | 158 | u32 remote_tlb_flush; |
162 | }; | 159 | }; |
163 | 160 | ||
164 | struct kvm_vcpu_stat { | 161 | struct kvm_vcpu_stat { |
165 | u32 halt_wakeup; | 162 | u32 halt_wakeup; |
166 | }; | 163 | }; |
167 | 164 | ||
168 | int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); | 165 | int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); |
169 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); | 166 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); |
170 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); | 167 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); |
171 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | 168 | int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); |
172 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); | 169 | int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); |
173 | 170 | ||
174 | #define KVM_ARCH_WANT_MMU_NOTIFIER | 171 | #define KVM_ARCH_WANT_MMU_NOTIFIER |
175 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); | 172 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); |
176 | int kvm_unmap_hva_range(struct kvm *kvm, | 173 | int kvm_unmap_hva_range(struct kvm *kvm, |
177 | unsigned long start, unsigned long end); | 174 | unsigned long start, unsigned long end); |
178 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); | 175 | void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); |
179 | 176 | ||
180 | /* We do not have shadow page tables, hence the empty hooks */ | 177 | /* We do not have shadow page tables, hence the empty hooks */ |
181 | static inline int kvm_age_hva(struct kvm *kvm, unsigned long start, | 178 | static inline int kvm_age_hva(struct kvm *kvm, unsigned long start, |
182 | unsigned long end) | 179 | unsigned long end) |
183 | { | 180 | { |
184 | return 0; | 181 | return 0; |
185 | } | 182 | } |
186 | 183 | ||
187 | static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | 184 | static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) |
188 | { | 185 | { |
189 | return 0; | 186 | return 0; |
190 | } | 187 | } |
191 | 188 | ||
192 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | 189 | static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, |
193 | unsigned long address) | 190 | unsigned long address) |
194 | { | 191 | { |
195 | } | 192 | } |
196 | 193 | ||
197 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); | 194 | struct kvm_vcpu *kvm_arm_get_running_vcpu(void); |
198 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); | 195 | struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); |
199 | 196 | ||
200 | u64 kvm_call_hyp(void *hypfn, ...); | 197 | u64 kvm_call_hyp(void *hypfn, ...); |
201 | void force_vm_exit(const cpumask_t *mask); | 198 | void force_vm_exit(const cpumask_t *mask); |
202 | 199 | ||
203 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, | 200 | int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, |
204 | int exception_index); | 201 | int exception_index); |
205 | 202 | ||
206 | int kvm_perf_init(void); | 203 | int kvm_perf_init(void); |
207 | int kvm_perf_teardown(void); | 204 | int kvm_perf_teardown(void); |
208 | 205 | ||
209 | static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, | 206 | static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, |
210 | phys_addr_t pgd_ptr, | 207 | phys_addr_t pgd_ptr, |
211 | unsigned long hyp_stack_ptr, | 208 | unsigned long hyp_stack_ptr, |
212 | unsigned long vector_ptr) | 209 | unsigned long vector_ptr) |
213 | { | 210 | { |
214 | /* | 211 | /* |
215 | * Call initialization code, and switch to the full-blown | 212 | * Call initialization code, and switch to the full-blown |
216 | * HYP code. | 213 | * HYP code. |
217 | */ | 214 | */ |
218 | kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr, | 215 | kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr, |
219 | hyp_stack_ptr, vector_ptr); | 216 | hyp_stack_ptr, vector_ptr); |
220 | } | 217 | } |
221 | 218 | ||
222 | struct vgic_sr_vectors { | 219 | struct vgic_sr_vectors { |
223 | void *save_vgic; | 220 | void *save_vgic; |
224 | void *restore_vgic; | 221 | void *restore_vgic; |
225 | }; | 222 | }; |
226 | 223 | ||
227 | static inline void vgic_arch_setup(const struct vgic_params *vgic) | 224 | static inline void vgic_arch_setup(const struct vgic_params *vgic) |
228 | { | 225 | { |
229 | extern struct vgic_sr_vectors __vgic_sr_vectors; | 226 | extern struct vgic_sr_vectors __vgic_sr_vectors; |
230 | 227 | ||
231 | switch(vgic->type) | 228 | switch(vgic->type) |
232 | { | 229 | { |
233 | case VGIC_V2: | 230 | case VGIC_V2: |
234 | __vgic_sr_vectors.save_vgic = __save_vgic_v2_state; | 231 | __vgic_sr_vectors.save_vgic = __save_vgic_v2_state; |
235 | __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state; | 232 | __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state; |
236 | break; | 233 | break; |
237 | 234 | ||
238 | #ifdef CONFIG_ARM_GIC_V3 | 235 | #ifdef CONFIG_ARM_GIC_V3 |
239 | case VGIC_V3: | 236 | case VGIC_V3: |
240 | __vgic_sr_vectors.save_vgic = __save_vgic_v3_state; | 237 | __vgic_sr_vectors.save_vgic = __save_vgic_v3_state; |
241 | __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state; | 238 | __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state; |
242 | break; | 239 | break; |
243 | #endif | 240 | #endif |
244 | 241 | ||
245 | default: | 242 | default: |
246 | BUG(); | 243 | BUG(); |
247 | } | 244 | } |
248 | } | 245 | } |
249 | 246 | ||
250 | static inline void kvm_arch_hardware_disable(void) {} | 247 | static inline void kvm_arch_hardware_disable(void) {} |
251 | static inline void kvm_arch_hardware_unsetup(void) {} | 248 | static inline void kvm_arch_hardware_unsetup(void) {} |
252 | static inline void kvm_arch_sync_events(struct kvm *kvm) {} | 249 | static inline void kvm_arch_sync_events(struct kvm *kvm) {} |
253 | static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} | 250 | static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} |
254 | static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} | 251 | static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} |
255 | 252 | ||
256 | #endif /* __ARM64_KVM_HOST_H__ */ | 253 | #endif /* __ARM64_KVM_HOST_H__ */ |
257 | 254 |
arch/arm64/include/asm/kvm_mmu.h
1 | /* | 1 | /* |
2 | * Copyright (C) 2012,2013 - ARM Ltd | 2 | * Copyright (C) 2012,2013 - ARM Ltd |
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | 3 | * Author: Marc Zyngier <marc.zyngier@arm.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #ifndef __ARM64_KVM_MMU_H__ | 18 | #ifndef __ARM64_KVM_MMU_H__ |
19 | #define __ARM64_KVM_MMU_H__ | 19 | #define __ARM64_KVM_MMU_H__ |
20 | 20 | ||
21 | #include <asm/page.h> | 21 | #include <asm/page.h> |
22 | #include <asm/memory.h> | 22 | #include <asm/memory.h> |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * As we only have the TTBR0_EL2 register, we cannot express | 25 | * As we only have the TTBR0_EL2 register, we cannot express |
26 | * "negative" addresses. This makes it impossible to directly share | 26 | * "negative" addresses. This makes it impossible to directly share |
27 | * mappings with the kernel. | 27 | * mappings with the kernel. |
28 | * | 28 | * |
29 | * Instead, give the HYP mode its own VA region at a fixed offset from | 29 | * Instead, give the HYP mode its own VA region at a fixed offset from |
30 | * the kernel by just masking the top bits (which are all ones for a | 30 | * the kernel by just masking the top bits (which are all ones for a |
31 | * kernel address). | 31 | * kernel address). |
32 | */ | 32 | */ |
33 | #define HYP_PAGE_OFFSET_SHIFT VA_BITS | 33 | #define HYP_PAGE_OFFSET_SHIFT VA_BITS |
34 | #define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1) | 34 | #define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1) |
35 | #define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK) | 35 | #define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK) |
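
A worked example of the masking described above, assuming VA_BITS = 39 (one possible configuration, not the only one): clearing the top bits of a kernel VA, which are all ones, yields the fixed-offset HYP alias of the same address.

#include <stdint.h>
#include <stdio.h>

#define VA_BITS                 39      /* assumed configuration */
#define HYP_PAGE_OFFSET_MASK    ((1ULL << VA_BITS) - 1)

int main(void)
{
        uint64_t kva = 0xffffffc000123456ULL;   /* a made-up kernel VA */
        uint64_t hyp = kva & HYP_PAGE_OFFSET_MASK;

        printf("kern VA 0x%llx -> HYP VA 0x%llx\n",
               (unsigned long long)kva, (unsigned long long)hyp);
        /* 0xffffffc000123456 -> 0x4000123456 with 39-bit VAs */
        return 0;
}
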
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Our virtual mapping for the idmap-ed MMU-enable code. Must be | 38 | * Our virtual mapping for the idmap-ed MMU-enable code. Must be |
39 | * shared across all the page-tables. Conveniently, we use the last | 39 | * shared across all the page-tables. Conveniently, we use the last |
40 | * possible page, where no kernel mapping will ever exist. | 40 | * possible page, where no kernel mapping will ever exist. |
41 | */ | 41 | */ |
42 | #define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK) | 42 | #define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK) |
43 | 43 | ||
44 | /* | 44 | /* |
45 | * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation | 45 | * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation |
46 | * levels in addition to the PGD and potentially the PUD which are | 46 | * levels in addition to the PGD and potentially the PUD which are |
47 | * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2 | 47 | * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2 |
48 | * tables use one level fewer than the kernel). | 48 | * tables use one level fewer than the kernel). |
49 | */ | 49 | */ |
50 | #ifdef CONFIG_ARM64_64K_PAGES | 50 | #ifdef CONFIG_ARM64_64K_PAGES |
51 | #define KVM_MMU_CACHE_MIN_PAGES 1 | 51 | #define KVM_MMU_CACHE_MIN_PAGES 1 |
52 | #else | 52 | #else |
53 | #define KVM_MMU_CACHE_MIN_PAGES 2 | 53 | #define KVM_MMU_CACHE_MIN_PAGES 2 |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #ifdef __ASSEMBLY__ | 56 | #ifdef __ASSEMBLY__ |
57 | 57 | ||
58 | /* | 58 | /* |
59 | * Convert a kernel VA into a HYP VA. | 59 | * Convert a kernel VA into a HYP VA. |
60 | * reg: VA to be converted. | 60 | * reg: VA to be converted. |
61 | */ | 61 | */ |
62 | .macro kern_hyp_va reg | 62 | .macro kern_hyp_va reg |
63 | and \reg, \reg, #HYP_PAGE_OFFSET_MASK | 63 | and \reg, \reg, #HYP_PAGE_OFFSET_MASK |
64 | .endm | 64 | .endm |
65 | 65 | ||
66 | #else | 66 | #else |
67 | 67 | ||
68 | #include <asm/pgalloc.h> | 68 | #include <asm/pgalloc.h> |
69 | #include <asm/cachetype.h> | 69 | #include <asm/cachetype.h> |
70 | #include <asm/cacheflush.h> | 70 | #include <asm/cacheflush.h> |
71 | 71 | ||
72 | #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) | 72 | #define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET) |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * We currently only support a 40bit IPA. | 75 | * We currently only support a 40bit IPA. |
76 | */ | 76 | */ |
77 | #define KVM_PHYS_SHIFT (40) | 77 | #define KVM_PHYS_SHIFT (40) |
78 | #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) | 78 | #define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT) |
79 | #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) | 79 | #define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL) |
80 | 80 | ||
81 | int create_hyp_mappings(void *from, void *to); | 81 | int create_hyp_mappings(void *from, void *to); |
82 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t); | 82 | int create_hyp_io_mappings(void *from, void *to, phys_addr_t); |
83 | void free_boot_hyp_pgd(void); | 83 | void free_boot_hyp_pgd(void); |
84 | void free_hyp_pgds(void); | 84 | void free_hyp_pgds(void); |
85 | 85 | ||
86 | void stage2_unmap_vm(struct kvm *kvm); | 86 | void stage2_unmap_vm(struct kvm *kvm); |
87 | int kvm_alloc_stage2_pgd(struct kvm *kvm); | 87 | int kvm_alloc_stage2_pgd(struct kvm *kvm); |
88 | void kvm_free_stage2_pgd(struct kvm *kvm); | 88 | void kvm_free_stage2_pgd(struct kvm *kvm); |
89 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, | 89 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, |
90 | phys_addr_t pa, unsigned long size, bool writable); | 90 | phys_addr_t pa, unsigned long size, bool writable); |
91 | 91 | ||
92 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); | 92 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); |
93 | 93 | ||
94 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); | 94 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); |
95 | 95 | ||
96 | phys_addr_t kvm_mmu_get_httbr(void); | 96 | phys_addr_t kvm_mmu_get_httbr(void); |
97 | phys_addr_t kvm_mmu_get_boot_httbr(void); | 97 | phys_addr_t kvm_mmu_get_boot_httbr(void); |
98 | phys_addr_t kvm_get_idmap_vector(void); | 98 | phys_addr_t kvm_get_idmap_vector(void); |
99 | int kvm_mmu_init(void); | 99 | int kvm_mmu_init(void); |
100 | void kvm_clear_hyp_idmap(void); | 100 | void kvm_clear_hyp_idmap(void); |
101 | 101 | ||
102 | #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) | 102 | #define kvm_set_pte(ptep, pte) set_pte(ptep, pte) |
103 | #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) | 103 | #define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd) |
104 | 104 | ||
105 | static inline void kvm_clean_pgd(pgd_t *pgd) {} | 105 | static inline void kvm_clean_pgd(pgd_t *pgd) {} |
106 | static inline void kvm_clean_pmd(pmd_t *pmd) {} | 106 | static inline void kvm_clean_pmd(pmd_t *pmd) {} |
107 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} | 107 | static inline void kvm_clean_pmd_entry(pmd_t *pmd) {} |
108 | static inline void kvm_clean_pte(pte_t *pte) {} | 108 | static inline void kvm_clean_pte(pte_t *pte) {} |
109 | static inline void kvm_clean_pte_entry(pte_t *pte) {} | 109 | static inline void kvm_clean_pte_entry(pte_t *pte) {} |
110 | 110 | ||
111 | static inline void kvm_set_s2pte_writable(pte_t *pte) | 111 | static inline void kvm_set_s2pte_writable(pte_t *pte) |
112 | { | 112 | { |
113 | pte_val(*pte) |= PTE_S2_RDWR; | 113 | pte_val(*pte) |= PTE_S2_RDWR; |
114 | } | 114 | } |
115 | 115 | ||
116 | static inline void kvm_set_s2pmd_writable(pmd_t *pmd) | 116 | static inline void kvm_set_s2pmd_writable(pmd_t *pmd) |
117 | { | 117 | { |
118 | pmd_val(*pmd) |= PMD_S2_RDWR; | 118 | pmd_val(*pmd) |= PMD_S2_RDWR; |
119 | } | 119 | } |
120 | 120 | ||
121 | #define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end) | 121 | #define kvm_pgd_addr_end(addr, end) pgd_addr_end(addr, end) |
122 | #define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end) | 122 | #define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end) |
123 | #define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end) | 123 | #define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end) |
124 | 124 | ||
125 | /* | 125 | /* |
126 | * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can address | 126 | * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can address |
127 | * the entire IPA input range with a single pgd entry, so only one is | 127 | * the entire IPA input range with a single pgd entry, so only one is |
128 | * needed. Note that in this case, the pgd is actually not used by | 128 | * needed. Note that in this case, the pgd is actually not used by |
129 | * the MMU for Stage-2 translations, but is merely a fake pgd used as a data | 129 | * the MMU for Stage-2 translations, but is merely a fake pgd used as a data |
130 | * structure for the kernel pgtable macros to work. | 130 | * structure for the kernel pgtable macros to work. |
131 | */ | 131 | */ |
132 | #if PGDIR_SHIFT > KVM_PHYS_SHIFT | 132 | #if PGDIR_SHIFT > KVM_PHYS_SHIFT |
133 | #define PTRS_PER_S2_PGD_SHIFT 0 | 133 | #define PTRS_PER_S2_PGD_SHIFT 0 |
134 | #else | 134 | #else |
135 | #define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT) | 135 | #define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT) |
136 | #endif | 136 | #endif |
137 | #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) | 137 | #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT) |
138 | #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) | 138 | #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) |
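
To make the arithmetic concrete, here is the computation for one assumed configuration: 4K pages with three translation levels (PGDIR_SHIFT = 30) against the 40-bit IPA defined above.

#include <stdio.h>

#define KVM_PHYS_SHIFT  40
#define PGDIR_SHIFT     30      /* 4K pages, 3 levels (assumed) */

int main(void)
{
        int shift = (PGDIR_SHIFT > KVM_PHYS_SHIFT) ?
                    0 : KVM_PHYS_SHIFT - PGDIR_SHIFT;

        printf("PTRS_PER_S2_PGD_SHIFT = %d, PTRS_PER_S2_PGD = %d\n",
               shift, 1 << shift);
        /* 10 and 1024: the stage-2 "pgd" spans 1024 entries here,
         * so KVM_PREALLOC_LEVEL below resolves to 0 for this config */
        return 0;
}
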
139 | 139 | ||
140 | /* | 140 | /* |
141 | * If we are concatenating first level stage-2 page tables, we would have at | 141 | * If we are concatenating first level stage-2 page tables, we would have at |
142 | * most 16 pointers in the fake PGD, because that's what the | 142 | * most 16 pointers in the fake PGD, because that's what the |
143 | * architecture allows. In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS) | 143 | * architecture allows. In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS) |
144 | * represents the first level for the host, and we add 1 to go to the next | 144 | * represents the first level for the host, and we add 1 to go to the next |
145 | * level (which uses concatenation) for the stage-2 tables. | 145 | * level (which uses concatenation) for the stage-2 tables. |
146 | */ | 146 | */ |
147 | #if PTRS_PER_S2_PGD <= 16 | 147 | #if PTRS_PER_S2_PGD <= 16 |
148 | #define KVM_PREALLOC_LEVEL (4 - CONFIG_ARM64_PGTABLE_LEVELS + 1) | 148 | #define KVM_PREALLOC_LEVEL (4 - CONFIG_ARM64_PGTABLE_LEVELS + 1) |
149 | #else | 149 | #else |
150 | #define KVM_PREALLOC_LEVEL (0) | 150 | #define KVM_PREALLOC_LEVEL (0) |
151 | #endif | 151 | #endif |
152 | 152 | ||
153 | /** | 153 | /** |
154 | * kvm_prealloc_hwpgd - allocate initial table for VTTBR | 154 | * kvm_prealloc_hwpgd - allocate initial table for VTTBR |
155 | * @kvm: The KVM struct pointer for the VM. | 155 | * @kvm: The KVM struct pointer for the VM. |
156 | * @pgd: The kernel pseudo pgd | 156 | * @pgd: The kernel pseudo pgd |
157 | * | 157 | * |
158 | * When the kernel uses more levels of page tables than the guest, we allocate | 158 | * When the kernel uses more levels of page tables than the guest, we allocate |
159 | * a fake PGD and pre-populate it to point to the next-level page table, which | 159 | * a fake PGD and pre-populate it to point to the next-level page table, which |
160 | * will be the real initial page table pointed to by the VTTBR. | 160 | * will be the real initial page table pointed to by the VTTBR. |
161 | * | 161 | * |
162 | * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and | 162 | * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and |
163 | * the kernel will use a folded PUD. When KVM_PREALLOC_LEVEL==1, we | 163 | * the kernel will use a folded PUD. When KVM_PREALLOC_LEVEL==1, we |
164 | * allocate 2 consecutive PUD pages. | 164 | * allocate 2 consecutive PUD pages. |
165 | */ | 165 | */ |
166 | static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) | 166 | static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd) |
167 | { | 167 | { |
168 | unsigned int i; | 168 | unsigned int i; |
169 | unsigned long hwpgd; | 169 | unsigned long hwpgd; |
170 | 170 | ||
171 | if (KVM_PREALLOC_LEVEL == 0) | 171 | if (KVM_PREALLOC_LEVEL == 0) |
172 | return 0; | 172 | return 0; |
173 | 173 | ||
174 | hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT); | 174 | hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT); |
175 | if (!hwpgd) | 175 | if (!hwpgd) |
176 | return -ENOMEM; | 176 | return -ENOMEM; |
177 | 177 | ||
178 | for (i = 0; i < PTRS_PER_S2_PGD; i++) { | 178 | for (i = 0; i < PTRS_PER_S2_PGD; i++) { |
179 | if (KVM_PREALLOC_LEVEL == 1) | 179 | if (KVM_PREALLOC_LEVEL == 1) |
180 | pgd_populate(NULL, pgd + i, | 180 | pgd_populate(NULL, pgd + i, |
181 | (pud_t *)hwpgd + i * PTRS_PER_PUD); | 181 | (pud_t *)hwpgd + i * PTRS_PER_PUD); |
182 | else if (KVM_PREALLOC_LEVEL == 2) | 182 | else if (KVM_PREALLOC_LEVEL == 2) |
183 | pud_populate(NULL, pud_offset(pgd, 0) + i, | 183 | pud_populate(NULL, pud_offset(pgd, 0) + i, |
184 | (pmd_t *)hwpgd + i * PTRS_PER_PMD); | 184 | (pmd_t *)hwpgd + i * PTRS_PER_PMD); |
185 | } | 185 | } |
186 | 186 | ||
187 | return 0; | 187 | return 0; |
188 | } | 188 | } |
189 | 189 | ||
190 | static inline void *kvm_get_hwpgd(struct kvm *kvm) | 190 | static inline void *kvm_get_hwpgd(struct kvm *kvm) |
191 | { | 191 | { |
192 | pgd_t *pgd = kvm->arch.pgd; | 192 | pgd_t *pgd = kvm->arch.pgd; |
193 | pud_t *pud; | 193 | pud_t *pud; |
194 | 194 | ||
195 | if (KVM_PREALLOC_LEVEL == 0) | 195 | if (KVM_PREALLOC_LEVEL == 0) |
196 | return pgd; | 196 | return pgd; |
197 | 197 | ||
198 | pud = pud_offset(pgd, 0); | 198 | pud = pud_offset(pgd, 0); |
199 | if (KVM_PREALLOC_LEVEL == 1) | 199 | if (KVM_PREALLOC_LEVEL == 1) |
200 | return pud; | 200 | return pud; |
201 | 201 | ||
202 | BUG_ON(KVM_PREALLOC_LEVEL != 2); | 202 | BUG_ON(KVM_PREALLOC_LEVEL != 2); |
203 | return pmd_offset(pud, 0); | 203 | return pmd_offset(pud, 0); |
204 | } | 204 | } |
205 | 205 | ||
206 | static inline void kvm_free_hwpgd(struct kvm *kvm) | 206 | static inline void kvm_free_hwpgd(struct kvm *kvm) |
207 | { | 207 | { |
208 | if (KVM_PREALLOC_LEVEL > 0) { | 208 | if (KVM_PREALLOC_LEVEL > 0) { |
209 | unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); | 209 | unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm); |
210 | free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); | 210 | free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT); |
211 | } | 211 | } |
212 | } | 212 | } |
213 | 213 | ||
214 | static inline bool kvm_page_empty(void *ptr) | 214 | static inline bool kvm_page_empty(void *ptr) |
215 | { | 215 | { |
216 | struct page *ptr_page = virt_to_page(ptr); | 216 | struct page *ptr_page = virt_to_page(ptr); |
217 | return page_count(ptr_page) == 1; | 217 | return page_count(ptr_page) == 1; |
218 | } | 218 | } |
219 | 219 | ||
220 | #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) | 220 | #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep) |
221 | 221 | ||
222 | #ifdef __PAGETABLE_PMD_FOLDED | 222 | #ifdef __PAGETABLE_PMD_FOLDED |
223 | #define kvm_pmd_table_empty(kvm, pmdp) (0) | 223 | #define kvm_pmd_table_empty(kvm, pmdp) (0) |
224 | #else | 224 | #else |
225 | #define kvm_pmd_table_empty(kvm, pmdp) \ | 225 | #define kvm_pmd_table_empty(kvm, pmdp) \ |
226 | (kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2)) | 226 | (kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2)) |
227 | #endif | 227 | #endif |
228 | 228 | ||
229 | #ifdef __PAGETABLE_PUD_FOLDED | 229 | #ifdef __PAGETABLE_PUD_FOLDED |
230 | #define kvm_pud_table_empty(kvm, pudp) (0) | 230 | #define kvm_pud_table_empty(kvm, pudp) (0) |
231 | #else | 231 | #else |
232 | #define kvm_pud_table_empty(kvm, pudp) \ | 232 | #define kvm_pud_table_empty(kvm, pudp) \ |
233 | (kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1)) | 233 | (kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1)) |
234 | #endif | 234 | #endif |
235 | 235 | ||
236 | 236 | ||
237 | struct kvm; | 237 | struct kvm; |
238 | 238 | ||
239 | #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) | 239 | #define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) |
240 | 240 | ||
241 | static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) | 241 | static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) |
242 | { | 242 | { |
243 | return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; | 243 | return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; |
244 | } | 244 | } |
245 | 245 | ||
246 | static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, | 246 | static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, |
247 | unsigned long size, | 247 | unsigned long size, |
248 | bool ipa_uncached) | 248 | bool ipa_uncached) |
249 | { | 249 | { |
250 | void *va = page_address(pfn_to_page(pfn)); | ||
251 | |||
250 | if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) | 252 | if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) |
251 | kvm_flush_dcache_to_poc((void *)hva, size); | 253 | kvm_flush_dcache_to_poc(va, size); |
252 | 254 | ||
253 | if (!icache_is_aliasing()) { /* PIPT */ | 255 | if (!icache_is_aliasing()) { /* PIPT */ |
254 | flush_icache_range(hva, hva + size); | 256 | flush_icache_range((unsigned long)va, |
257 | (unsigned long)va + size); | ||
255 | } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ | 258 | } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ |
256 | /* any kind of VIPT cache */ | 259 | /* any kind of VIPT cache */ |
257 | __flush_icache_all(); | 260 | __flush_icache_all(); |
258 | } | 261 | } |
259 | } | 262 | } |
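
The flush policy in the new __coherent_cache_guest_page() can be summarized as a small decision table. This behavioural sketch stands in for the real predicates (vcpu_has_cache_enabled(), the ipa_uncached argument, icache_is_aliasing(), icache_is_aivivt()) and only prints what the kernel code would do:

#include <stdbool.h>
#include <stdio.h>

static void coherent_page(bool cache_on, bool ipa_uncached,
                          bool icache_aliasing, bool icache_aivivt)
{
        if (!cache_on || ipa_uncached)
                printf("dcache: flush to PoC\n");

        if (!icache_aliasing)
                printf("icache: flush by range (PIPT)\n");
        else if (!icache_aivivt)
                printf("icache: flush all (VIPT)\n");
        else
                printf("icache: nothing (ASID-tagged VIVT)\n");
}

int main(void)
{
        /* guest MMU still off, PIPT icache: both caches get flushed */
        coherent_page(false, false, false, false);
        return 0;
}
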
260 | 263 | ||
264 | static inline void __kvm_flush_dcache_pte(pte_t pte) | ||
265 | { | ||
266 | struct page *page = pte_page(pte); | ||
267 | kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE); | ||
268 | } | ||
269 | |||
270 | static inline void __kvm_flush_dcache_pmd(pmd_t pmd) | ||
271 | { | ||
272 | struct page *page = pmd_page(pmd); | ||
273 | kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE); | ||
274 | } | ||
275 | |||
276 | static inline void __kvm_flush_dcache_pud(pud_t pud) | ||
277 | { | ||
278 | struct page *page = pud_page(pud); | ||
279 | kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE); | ||
280 | } | ||
281 | |||
261 | #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) | 282 | #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) |
262 | 283 | ||
263 | void stage2_flush_vm(struct kvm *kvm); | 284 | void kvm_set_way_flush(struct kvm_vcpu *vcpu); |
285 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); | ||
264 | 286 | ||
265 | #endif /* __ASSEMBLY__ */ | 287 | #endif /* __ASSEMBLY__ */ |
266 | #endif /* __ARM64_KVM_MMU_H__ */ | 288 | #endif /* __ARM64_KVM_MMU_H__ */ |
267 | 289 |
arch/arm64/kvm/sys_regs.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2012,2013 - ARM Ltd | 2 | * Copyright (C) 2012,2013 - ARM Ltd |
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | 3 | * Author: Marc Zyngier <marc.zyngier@arm.com> |
4 | * | 4 | * |
5 | * Derived from arch/arm/kvm/coproc.c: | 5 | * Derived from arch/arm/kvm/coproc.c: |
6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University | 6 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
7 | * Authors: Rusty Russell <rusty@rustcorp.com.au> | 7 | * Authors: Rusty Russell <rusty@rustcorp.com.au> |
8 | * Christoffer Dall <c.dall@virtualopensystems.com> | 8 | * Christoffer Dall <c.dall@virtualopensystems.com> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License, version 2, as | 11 | * it under the terms of the GNU General Public License, version 2, as |
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | * | 13 | * |
14 | * This program is distributed in the hope that it will be useful, | 14 | * This program is distributed in the hope that it will be useful, |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
17 | * GNU General Public License for more details. | 17 | * GNU General Public License for more details. |
18 | * | 18 | * |
19 | * You should have received a copy of the GNU General Public License | 19 | * You should have received a copy of the GNU General Public License |
20 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 20 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | #include <linux/uaccess.h> | 25 | #include <linux/uaccess.h> |
26 | #include <asm/kvm_arm.h> | 26 | #include <asm/kvm_arm.h> |
27 | #include <asm/kvm_host.h> | 27 | #include <asm/kvm_host.h> |
28 | #include <asm/kvm_emulate.h> | 28 | #include <asm/kvm_emulate.h> |
29 | #include <asm/kvm_coproc.h> | 29 | #include <asm/kvm_coproc.h> |
30 | #include <asm/kvm_mmu.h> | 30 | #include <asm/kvm_mmu.h> |
31 | #include <asm/cacheflush.h> | 31 | #include <asm/cacheflush.h> |
32 | #include <asm/cputype.h> | 32 | #include <asm/cputype.h> |
33 | #include <asm/debug-monitors.h> | 33 | #include <asm/debug-monitors.h> |
34 | #include <trace/events/kvm.h> | 34 | #include <trace/events/kvm.h> |
35 | 35 | ||
36 | #include "sys_regs.h" | 36 | #include "sys_regs.h" |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * All of this file is extremely similar to the ARM coproc.c, but the | 39 | * All of this file is extremely similar to the ARM coproc.c, but the |
40 | * types are different. My gut feeling is that it should be pretty | 40 | * types are different. My gut feeling is that it should be pretty |
41 | * easy to merge, but that would be an ABI breakage -- again. VFP | 41 | * easy to merge, but that would be an ABI breakage -- again. VFP |
42 | * would also need to be abstracted. | 42 | * would also need to be abstracted. |
43 | * | 43 | * |
44 | * For AArch32, we only take care of what is being trapped. Anything | 44 | * For AArch32, we only take care of what is being trapped. Anything |
45 | * that has to do with init and userspace access has to go via the | 45 | * that has to do with init and userspace access has to go via the |
46 | * 64bit interface. | 46 | * 64bit interface. |
47 | */ | 47 | */ |
48 | 48 | ||
49 | /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ | 49 | /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ |
50 | static u32 cache_levels; | 50 | static u32 cache_levels; |
51 | 51 | ||
52 | /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ | 52 | /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ |
53 | #define CSSELR_MAX 12 | 53 | #define CSSELR_MAX 12 |
54 | 54 | ||
55 | /* Which cache CCSIDR represents depends on CSSELR value. */ | 55 | /* Which cache CCSIDR represents depends on CSSELR value. */ |
56 | static u32 get_ccsidr(u32 csselr) | 56 | static u32 get_ccsidr(u32 csselr) |
57 | { | 57 | { |
58 | u32 ccsidr; | 58 | u32 ccsidr; |
59 | 59 | ||
60 | /* Make sure no one else changes CSSELR during this! */ | 60 | /* Make sure no one else changes CSSELR during this! */ |
61 | local_irq_disable(); | 61 | local_irq_disable(); |
62 | /* Put value into CSSELR */ | 62 | /* Put value into CSSELR */ |
63 | asm volatile("msr csselr_el1, %x0" : : "r" (csselr)); | 63 | asm volatile("msr csselr_el1, %x0" : : "r" (csselr)); |
64 | isb(); | 64 | isb(); |
65 | /* Read result out of CCSIDR */ | 65 | /* Read result out of CCSIDR */ |
66 | asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr)); | 66 | asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr)); |
67 | local_irq_enable(); | 67 | local_irq_enable(); |
68 | 68 | ||
69 | return ccsidr; | 69 | return ccsidr; |
70 | } | 70 | } |
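
Decoding the value get_ccsidr() returns follows the ARMv8 CCSIDR_EL1 layout (LineSize at bits [2:0], Associativity at [12:3], NumSets at [27:13]). A sketch with a fabricated encoding for a 32KB, 4-way, 64-byte-line cache:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* fabricated encoding: 128 sets, 4 ways, 64-byte lines */
        uint32_t ccsidr = (127u << 13) | (3u << 3) | 2u;

        unsigned line = 1u << ((ccsidr & 0x7) + 4);     /* bytes/line */
        unsigned ways = ((ccsidr >> 3) & 0x3ff) + 1;
        unsigned sets = ((ccsidr >> 13) & 0x7fff) + 1;

        printf("line=%uB ways=%u sets=%u size=%uKB\n",
               line, ways, sets, line * ways * sets / 1024);
        /* line=64B ways=4 sets=128 size=32KB */
        return 0;
}
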
71 | 71 | ||
72 | static void do_dc_cisw(u32 val) | 72 | /* |
73 | { | 73 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). |
74 | asm volatile("dc cisw, %x0" : : "r" (val)); | 74 | */ |
75 | dsb(ish); | ||
76 | } | ||
77 | |||
78 | static void do_dc_csw(u32 val) | ||
79 | { | ||
80 | asm volatile("dc csw, %x0" : : "r" (val)); | ||
81 | dsb(ish); | ||
82 | } | ||
83 | |||
84 | /* See note at ARM ARM B1.14.4 */ | ||
85 | static bool access_dcsw(struct kvm_vcpu *vcpu, | 75 | static bool access_dcsw(struct kvm_vcpu *vcpu, |
86 | const struct sys_reg_params *p, | 76 | const struct sys_reg_params *p, |
87 | const struct sys_reg_desc *r) | 77 | const struct sys_reg_desc *r) |
88 | { | 78 | { |
89 | unsigned long val; | ||
90 | int cpu; | ||
91 | |||
92 | if (!p->is_write) | 79 | if (!p->is_write) |
93 | return read_from_write_only(vcpu, p); | 80 | return read_from_write_only(vcpu, p); |
94 | 81 | ||
95 | cpu = get_cpu(); | 82 | kvm_set_way_flush(vcpu); |
96 | |||
97 | cpumask_setall(&vcpu->arch.require_dcache_flush); | ||
98 | cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); | ||
99 | |||
100 | /* If we were already preempted, take the long way around */ | ||
101 | if (cpu != vcpu->arch.last_pcpu) { | ||
102 | flush_cache_all(); | ||
103 | goto done; | ||
104 | } | ||
105 | |||
106 | val = *vcpu_reg(vcpu, p->Rt); | ||
107 | |||
108 | switch (p->CRm) { | ||
109 | case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ | ||
110 | case 14: /* DCCISW */ | ||
111 | do_dc_cisw(val); | ||
112 | break; | ||
113 | |||
114 | case 10: /* DCCSW */ | ||
115 | do_dc_csw(val); | ||
116 | break; | ||
117 | } | ||
118 | |||
119 | done: | ||
120 | put_cpu(); | ||
121 | |||
122 | return true; | 83 | return true; |
123 | } | 84 | } |
124 | 85 | ||
125 | /* | 86 | /* |
126 | * Generic accessor for VM registers. Only called as long as HCR_TVM | 87 | * Generic accessor for VM registers. Only called as long as HCR_TVM |
127 | * is set. | 88 | * is set. If the guest enables the MMU, we stop trapping the VM |
89 | * sys_regs and leave it in complete control of the caches. | ||
128 | */ | 90 | */ |
129 | static bool access_vm_reg(struct kvm_vcpu *vcpu, | 91 | static bool access_vm_reg(struct kvm_vcpu *vcpu, |
130 | const struct sys_reg_params *p, | 92 | const struct sys_reg_params *p, |
131 | const struct sys_reg_desc *r) | 93 | const struct sys_reg_desc *r) |
132 | { | 94 | { |
133 | unsigned long val; | 95 | unsigned long val; |
96 | bool was_enabled = vcpu_has_cache_enabled(vcpu); | ||
134 | 97 | ||
135 | BUG_ON(!p->is_write); | 98 | BUG_ON(!p->is_write); |
136 | 99 | ||
137 | val = *vcpu_reg(vcpu, p->Rt); | 100 | val = *vcpu_reg(vcpu, p->Rt); |
138 | if (!p->is_aarch32) { | 101 | if (!p->is_aarch32) { |
139 | vcpu_sys_reg(vcpu, r->reg) = val; | 102 | vcpu_sys_reg(vcpu, r->reg) = val; |
140 | } else { | 103 | } else { |
141 | if (!p->is_32bit) | 104 | if (!p->is_32bit) |
142 | vcpu_cp15_64_high(vcpu, r->reg) = val >> 32; | 105 | vcpu_cp15_64_high(vcpu, r->reg) = val >> 32; |
143 | vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; | 106 | vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; |
144 | } | 107 | } |
145 | 108 | ||
109 | kvm_toggle_cache(vcpu, was_enabled); | ||
146 | return true; | 110 | return true; |
147 | } | 111 | } |
148 | 112 | ||
149 | /* | ||
150 | * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the | ||
151 | * guest enables the MMU, we stop trapping the VM sys_regs and leave | ||
152 | * it in complete control of the caches. | ||
153 | */ | ||
154 | static bool access_sctlr(struct kvm_vcpu *vcpu, | ||
155 | const struct sys_reg_params *p, | ||
156 | const struct sys_reg_desc *r) | ||
157 | { | ||
158 | access_vm_reg(vcpu, p, r); | ||
159 | |||
160 | if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ | ||
161 | vcpu->arch.hcr_el2 &= ~HCR_TVM; | ||
162 | stage2_flush_vm(vcpu->kvm); | ||
163 | } | ||
164 | |||
165 | return true; | ||
166 | } | ||
167 | |||
168 | static bool trap_raz_wi(struct kvm_vcpu *vcpu, | 113 | static bool trap_raz_wi(struct kvm_vcpu *vcpu, |
169 | const struct sys_reg_params *p, | 114 | const struct sys_reg_params *p, |
170 | const struct sys_reg_desc *r) | 115 | const struct sys_reg_desc *r) |
171 | { | 116 | { |
172 | if (p->is_write) | 117 | if (p->is_write) |
173 | return ignore_write(vcpu, p); | 118 | return ignore_write(vcpu, p); |
174 | else | 119 | else |
175 | return read_zero(vcpu, p); | 120 | return read_zero(vcpu, p); |
176 | } | 121 | } |
177 | 122 | ||
178 | static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, | 123 | static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, |
179 | const struct sys_reg_params *p, | 124 | const struct sys_reg_params *p, |
180 | const struct sys_reg_desc *r) | 125 | const struct sys_reg_desc *r) |
181 | { | 126 | { |
182 | if (p->is_write) { | 127 | if (p->is_write) { |
183 | return ignore_write(vcpu, p); | 128 | return ignore_write(vcpu, p); |
184 | } else { | 129 | } else { |
185 | *vcpu_reg(vcpu, p->Rt) = (1 << 3); | 130 | *vcpu_reg(vcpu, p->Rt) = (1 << 3); |
186 | return true; | 131 | return true; |
187 | } | 132 | } |
188 | } | 133 | } |
189 | 134 | ||
190 | static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, | 135 | static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, |
191 | const struct sys_reg_params *p, | 136 | const struct sys_reg_params *p, |
192 | const struct sys_reg_desc *r) | 137 | const struct sys_reg_desc *r) |
193 | { | 138 | { |
194 | if (p->is_write) { | 139 | if (p->is_write) { |
195 | return ignore_write(vcpu, p); | 140 | return ignore_write(vcpu, p); |
196 | } else { | 141 | } else { |
197 | u32 val; | 142 | u32 val; |
198 | asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val)); | 143 | asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val)); |
199 | *vcpu_reg(vcpu, p->Rt) = val; | 144 | *vcpu_reg(vcpu, p->Rt) = val; |
200 | return true; | 145 | return true; |
201 | } | 146 | } |
202 | } | 147 | } |
203 | 148 | ||
204 | /* | 149 | /* |
205 | * We want to avoid world-switching all the DBG registers all the | 150 | * We want to avoid world-switching all the DBG registers all the |
206 | * time: | 151 | * time: |
207 | * | 152 | * |
208 | * - If we've touched any debug register, it is likely that we're | 153 | * - If we've touched any debug register, it is likely that we're |
209 | * going to touch more of them. It then makes sense to disable the | 154 | * going to touch more of them. It then makes sense to disable the |
210 | * traps and start doing the save/restore dance | 155 | * traps and start doing the save/restore dance |
211 | * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is | 156 | * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is |
212 | * then mandatory to save/restore the registers, as the guest | 157 | * then mandatory to save/restore the registers, as the guest |
213 | * depends on them. | 158 | * depends on them. |
214 | * | 159 | * |
215 | * For this, we use a DIRTY bit, indicating the guest has modified the | 160 | * For this, we use a DIRTY bit, indicating the guest has modified the |
216 | * debug registers, used as follows: | 161 | * debug registers, used as follows: |
217 | * | 162 | * |
218 | * On guest entry: | 163 | * On guest entry: |
219 | * - If the dirty bit is set (because we're coming back from trapping), | 164 | * - If the dirty bit is set (because we're coming back from trapping), |
220 | * disable the traps, save host registers, restore guest registers. | 165 | * disable the traps, save host registers, restore guest registers. |
221 | * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), | 166 | * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), |
222 | * set the dirty bit, disable the traps, save host registers, | 167 | * set the dirty bit, disable the traps, save host registers, |
223 | * restore guest registers. | 168 | * restore guest registers. |
224 | * - Otherwise, enable the traps | 169 | * - Otherwise, enable the traps |
225 | * | 170 | * |
226 | * On guest exit: | 171 | * On guest exit: |
227 | * - If the dirty bit is set, save guest registers, restore host | 172 | * - If the dirty bit is set, save guest registers, restore host |
228 | * registers and clear the dirty bit. This ensures that the host can | 173 | * registers and clear the dirty bit. This ensures that the host can |
229 | * now use the debug registers. | 174 | * now use the debug registers. |
230 | */ | 175 | */ |
231 | static bool trap_debug_regs(struct kvm_vcpu *vcpu, | 176 | static bool trap_debug_regs(struct kvm_vcpu *vcpu, |
232 | const struct sys_reg_params *p, | 177 | const struct sys_reg_params *p, |
233 | const struct sys_reg_desc *r) | 178 | const struct sys_reg_desc *r) |
234 | { | 179 | { |
235 | if (p->is_write) { | 180 | if (p->is_write) { |
236 | vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); | 181 | vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); |
237 | vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; | 182 | vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; |
238 | } else { | 183 | } else { |
239 | *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg); | 184 | *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg); |
240 | } | 185 | } |
241 | 186 | ||
242 | return true; | 187 | return true; |
243 | } | 188 | } |
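
A minimal sketch of the guest-entry decision described in the comment above trap_debug_regs(); this is illustrative only, and the trap/save/restore helpers named here are hypothetical placeholders, not functions from this patch:

/* Sketch, assuming the dirty-bit protocol documented above; the
 * *_debug_traps()/*_debug_regs() helpers are hypothetical. */
static void sketch_debug_entry(struct kvm_vcpu *vcpu)
{
	/* Debug actively in use? Then the guest depends on the registers. */
	if (vcpu_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;

	if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) {
		disable_debug_traps(vcpu);		/* hypothetical */
		save_host_debug_regs(vcpu);		/* hypothetical */
		restore_guest_debug_regs(vcpu);		/* hypothetical */
	} else {
		enable_debug_traps(vcpu);		/* hypothetical */
	}
}

On exit, the same dirty bit drives the reverse dance: save guest registers, restore host registers, clear the bit.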
244 | 189 | ||
245 | static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) | 190 | static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) |
246 | { | 191 | { |
247 | u64 amair; | 192 | u64 amair; |
248 | 193 | ||
249 | asm volatile("mrs %0, amair_el1\n" : "=r" (amair)); | 194 | asm volatile("mrs %0, amair_el1\n" : "=r" (amair)); |
250 | vcpu_sys_reg(vcpu, AMAIR_EL1) = amair; | 195 | vcpu_sys_reg(vcpu, AMAIR_EL1) = amair; |
251 | } | 196 | } |
252 | 197 | ||
253 | static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) | 198 | static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) |
254 | { | 199 | { |
255 | /* | 200 | /* |
256 | * Simply map the vcpu_id into the Aff0 field of the MPIDR. | 201 | * Simply map the vcpu_id into the Aff0 field of the MPIDR. |
257 | */ | 202 | */ |
258 | vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff); | 203 | vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff); |
259 | } | 204 | } |
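
As a worked instance of the reset value computed above:

/* For vcpu_id == 5, reset_mpidr() yields
 * (1UL << 31) | (5 & 0xff) == 0x80000005,
 * i.e. bit 31 set (RES1 in the ARMv8 MPIDR layout) and Aff0 = 5. */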
260 | 205 | ||
261 | /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ | 206 | /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ |
262 | #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ | 207 | #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ |
263 | /* DBGBVRn_EL1 */ \ | 208 | /* DBGBVRn_EL1 */ \ |
264 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \ | 209 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \ |
265 | trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \ | 210 | trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \ |
266 | /* DBGBCRn_EL1 */ \ | 211 | /* DBGBCRn_EL1 */ \ |
267 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \ | 212 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \ |
268 | trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \ | 213 | trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \ |
269 | /* DBGWVRn_EL1 */ \ | 214 | /* DBGWVRn_EL1 */ \ |
270 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \ | 215 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \ |
271 | trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \ | 216 | trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \ |
272 | /* DBGWCRn_EL1 */ \ | 217 | /* DBGWCRn_EL1 */ \ |
273 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \ | 218 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \ |
274 | trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 } | 219 | trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 } |
275 | 220 | ||
276 | /* | 221 | /* |
277 | * Architected system registers. | 222 | * Architected system registers. |
278 | * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 | 223 | * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 |
279 | * | 224 | * |
280 | * We could trap ID_DFR0 and tell the guest we don't support performance | 225 | * We could trap ID_DFR0 and tell the guest we don't support performance |
281 | * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was | 226 | * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was |
282 | * NAKed, so it will read the PMCR anyway. | 227 | * NAKed, so it will read the PMCR anyway. |
283 | * | 228 | * |
284 | * Therefore we tell the guest we have 0 counters. Unfortunately, we | 229 | * Therefore we tell the guest we have 0 counters. Unfortunately, we |
285 | * must always support PMCCNTR (the cycle counter): we just RAZ/WI for | 230 | * must always support PMCCNTR (the cycle counter): we just RAZ/WI for |
286 | * all PM registers, which doesn't crash the guest kernel at least. | 231 | * all PM registers, which doesn't crash the guest kernel at least. |
287 | * | 232 | * |
288 | * Debug handling: We do trap most, if not all, debug-related system | 233 | * Debug handling: We do trap most, if not all, debug-related system |
289 | * registers. The implementation is good enough to ensure that a guest | 234 | * registers. The implementation is good enough to ensure that a guest |
290 | * can use these with minimal performance degradation. The drawback is | 235 | * can use these with minimal performance degradation. The drawback is |
291 | * that we don't implement any of the external debug architecture, nor | 236 | * that we don't implement any of the external debug architecture, nor |
292 | * the OS Lock protocol. This should be revisited if we ever encounter a | 237 | * the OS Lock protocol. This should be revisited if we ever encounter a |
293 | * more demanding guest... | 238 | * more demanding guest... |
294 | */ | 239 | */ |
295 | static const struct sys_reg_desc sys_reg_descs[] = { | 240 | static const struct sys_reg_desc sys_reg_descs[] = { |
296 | /* DC ISW */ | 241 | /* DC ISW */ |
297 | { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010), | 242 | { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010), |
298 | access_dcsw }, | 243 | access_dcsw }, |
299 | /* DC CSW */ | 244 | /* DC CSW */ |
300 | { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010), | 245 | { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010), |
301 | access_dcsw }, | 246 | access_dcsw }, |
302 | /* DC CISW */ | 247 | /* DC CISW */ |
303 | { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010), | 248 | { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010), |
304 | access_dcsw }, | 249 | access_dcsw }, |
305 | 250 | ||
306 | DBG_BCR_BVR_WCR_WVR_EL1(0), | 251 | DBG_BCR_BVR_WCR_WVR_EL1(0), |
307 | DBG_BCR_BVR_WCR_WVR_EL1(1), | 252 | DBG_BCR_BVR_WCR_WVR_EL1(1), |
308 | /* MDCCINT_EL1 */ | 253 | /* MDCCINT_EL1 */ |
309 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000), | 254 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000), |
310 | trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, | 255 | trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, |
311 | /* MDSCR_EL1 */ | 256 | /* MDSCR_EL1 */ |
312 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), | 257 | { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), |
313 | trap_debug_regs, reset_val, MDSCR_EL1, 0 }, | 258 | trap_debug_regs, reset_val, MDSCR_EL1, 0 }, |
314 | DBG_BCR_BVR_WCR_WVR_EL1(2), | 259 | DBG_BCR_BVR_WCR_WVR_EL1(2), |
315 | DBG_BCR_BVR_WCR_WVR_EL1(3), | 260 | DBG_BCR_BVR_WCR_WVR_EL1(3), |
316 | DBG_BCR_BVR_WCR_WVR_EL1(4), | 261 | DBG_BCR_BVR_WCR_WVR_EL1(4), |
317 | DBG_BCR_BVR_WCR_WVR_EL1(5), | 262 | DBG_BCR_BVR_WCR_WVR_EL1(5), |
318 | DBG_BCR_BVR_WCR_WVR_EL1(6), | 263 | DBG_BCR_BVR_WCR_WVR_EL1(6), |
319 | DBG_BCR_BVR_WCR_WVR_EL1(7), | 264 | DBG_BCR_BVR_WCR_WVR_EL1(7), |
320 | DBG_BCR_BVR_WCR_WVR_EL1(8), | 265 | DBG_BCR_BVR_WCR_WVR_EL1(8), |
321 | DBG_BCR_BVR_WCR_WVR_EL1(9), | 266 | DBG_BCR_BVR_WCR_WVR_EL1(9), |
322 | DBG_BCR_BVR_WCR_WVR_EL1(10), | 267 | DBG_BCR_BVR_WCR_WVR_EL1(10), |
323 | DBG_BCR_BVR_WCR_WVR_EL1(11), | 268 | DBG_BCR_BVR_WCR_WVR_EL1(11), |
324 | DBG_BCR_BVR_WCR_WVR_EL1(12), | 269 | DBG_BCR_BVR_WCR_WVR_EL1(12), |
325 | DBG_BCR_BVR_WCR_WVR_EL1(13), | 270 | DBG_BCR_BVR_WCR_WVR_EL1(13), |
326 | DBG_BCR_BVR_WCR_WVR_EL1(14), | 271 | DBG_BCR_BVR_WCR_WVR_EL1(14), |
327 | DBG_BCR_BVR_WCR_WVR_EL1(15), | 272 | DBG_BCR_BVR_WCR_WVR_EL1(15), |
328 | 273 | ||
329 | /* MDRAR_EL1 */ | 274 | /* MDRAR_EL1 */ |
330 | { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), | 275 | { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), |
331 | trap_raz_wi }, | 276 | trap_raz_wi }, |
332 | /* OSLAR_EL1 */ | 277 | /* OSLAR_EL1 */ |
333 | { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100), | 278 | { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100), |
334 | trap_raz_wi }, | 279 | trap_raz_wi }, |
335 | /* OSLSR_EL1 */ | 280 | /* OSLSR_EL1 */ |
336 | { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100), | 281 | { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100), |
337 | trap_oslsr_el1 }, | 282 | trap_oslsr_el1 }, |
338 | /* OSDLR_EL1 */ | 283 | /* OSDLR_EL1 */ |
339 | { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100), | 284 | { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100), |
340 | trap_raz_wi }, | 285 | trap_raz_wi }, |
341 | /* DBGPRCR_EL1 */ | 286 | /* DBGPRCR_EL1 */ |
342 | { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100), | 287 | { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100), |
343 | trap_raz_wi }, | 288 | trap_raz_wi }, |
344 | /* DBGCLAIMSET_EL1 */ | 289 | /* DBGCLAIMSET_EL1 */ |
345 | { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110), | 290 | { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110), |
346 | trap_raz_wi }, | 291 | trap_raz_wi }, |
347 | /* DBGCLAIMCLR_EL1 */ | 292 | /* DBGCLAIMCLR_EL1 */ |
348 | { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110), | 293 | { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110), |
349 | trap_raz_wi }, | 294 | trap_raz_wi }, |
350 | /* DBGAUTHSTATUS_EL1 */ | 295 | /* DBGAUTHSTATUS_EL1 */ |
351 | { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110), | 296 | { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110), |
352 | trap_dbgauthstatus_el1 }, | 297 | trap_dbgauthstatus_el1 }, |
353 | 298 | ||
354 | /* TEECR32_EL1 */ | 299 | /* TEECR32_EL1 */ |
355 | { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), | 300 | { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), |
356 | NULL, reset_val, TEECR32_EL1, 0 }, | 301 | NULL, reset_val, TEECR32_EL1, 0 }, |
357 | /* TEEHBR32_EL1 */ | 302 | /* TEEHBR32_EL1 */ |
358 | { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000), | 303 | { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000), |
359 | NULL, reset_val, TEEHBR32_EL1, 0 }, | 304 | NULL, reset_val, TEEHBR32_EL1, 0 }, |
360 | 305 | ||
361 | /* MDCCSR_EL1 */ | 306 | /* MDCCSR_EL1 */ |
362 | { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000), | 307 | { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000), |
363 | trap_raz_wi }, | 308 | trap_raz_wi }, |
364 | /* DBGDTR_EL0 */ | 309 | /* DBGDTR_EL0 */ |
365 | { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000), | 310 | { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000), |
366 | trap_raz_wi }, | 311 | trap_raz_wi }, |
367 | /* DBGDTR[TR]X_EL0 */ | 312 | /* DBGDTR[TR]X_EL0 */ |
368 | { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000), | 313 | { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000), |
369 | trap_raz_wi }, | 314 | trap_raz_wi }, |
370 | 315 | ||
371 | /* DBGVCR32_EL2 */ | 316 | /* DBGVCR32_EL2 */ |
372 | { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000), | 317 | { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000), |
373 | NULL, reset_val, DBGVCR32_EL2, 0 }, | 318 | NULL, reset_val, DBGVCR32_EL2, 0 }, |
374 | 319 | ||
375 | /* MPIDR_EL1 */ | 320 | /* MPIDR_EL1 */ |
376 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101), | 321 | { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101), |
377 | NULL, reset_mpidr, MPIDR_EL1 }, | 322 | NULL, reset_mpidr, MPIDR_EL1 }, |
378 | /* SCTLR_EL1 */ | 323 | /* SCTLR_EL1 */ |
379 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), | 324 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), |
380 | access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 }, | 325 | access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, |
381 | /* CPACR_EL1 */ | 326 | /* CPACR_EL1 */ |
382 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), | 327 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), |
383 | NULL, reset_val, CPACR_EL1, 0 }, | 328 | NULL, reset_val, CPACR_EL1, 0 }, |
384 | /* TTBR0_EL1 */ | 329 | /* TTBR0_EL1 */ |
385 | { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000), | 330 | { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000), |
386 | access_vm_reg, reset_unknown, TTBR0_EL1 }, | 331 | access_vm_reg, reset_unknown, TTBR0_EL1 }, |
387 | /* TTBR1_EL1 */ | 332 | /* TTBR1_EL1 */ |
388 | { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001), | 333 | { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001), |
389 | access_vm_reg, reset_unknown, TTBR1_EL1 }, | 334 | access_vm_reg, reset_unknown, TTBR1_EL1 }, |
390 | /* TCR_EL1 */ | 335 | /* TCR_EL1 */ |
391 | { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010), | 336 | { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010), |
392 | access_vm_reg, reset_val, TCR_EL1, 0 }, | 337 | access_vm_reg, reset_val, TCR_EL1, 0 }, |
393 | 338 | ||
394 | /* AFSR0_EL1 */ | 339 | /* AFSR0_EL1 */ |
395 | { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000), | 340 | { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000), |
396 | access_vm_reg, reset_unknown, AFSR0_EL1 }, | 341 | access_vm_reg, reset_unknown, AFSR0_EL1 }, |
397 | /* AFSR1_EL1 */ | 342 | /* AFSR1_EL1 */ |
398 | { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001), | 343 | { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001), |
399 | access_vm_reg, reset_unknown, AFSR1_EL1 }, | 344 | access_vm_reg, reset_unknown, AFSR1_EL1 }, |
400 | /* ESR_EL1 */ | 345 | /* ESR_EL1 */ |
401 | { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000), | 346 | { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000), |
402 | access_vm_reg, reset_unknown, ESR_EL1 }, | 347 | access_vm_reg, reset_unknown, ESR_EL1 }, |
403 | /* FAR_EL1 */ | 348 | /* FAR_EL1 */ |
404 | { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), | 349 | { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), |
405 | access_vm_reg, reset_unknown, FAR_EL1 }, | 350 | access_vm_reg, reset_unknown, FAR_EL1 }, |
406 | /* PAR_EL1 */ | 351 | /* PAR_EL1 */ |
407 | { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), | 352 | { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), |
408 | NULL, reset_unknown, PAR_EL1 }, | 353 | NULL, reset_unknown, PAR_EL1 }, |
409 | 354 | ||
410 | /* PMINTENSET_EL1 */ | 355 | /* PMINTENSET_EL1 */ |
411 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), | 356 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), |
412 | trap_raz_wi }, | 357 | trap_raz_wi }, |
413 | /* PMINTENCLR_EL1 */ | 358 | /* PMINTENCLR_EL1 */ |
414 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), | 359 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), |
415 | trap_raz_wi }, | 360 | trap_raz_wi }, |
416 | 361 | ||
417 | /* MAIR_EL1 */ | 362 | /* MAIR_EL1 */ |
418 | { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), | 363 | { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), |
419 | access_vm_reg, reset_unknown, MAIR_EL1 }, | 364 | access_vm_reg, reset_unknown, MAIR_EL1 }, |
420 | /* AMAIR_EL1 */ | 365 | /* AMAIR_EL1 */ |
421 | { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000), | 366 | { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000), |
422 | access_vm_reg, reset_amair_el1, AMAIR_EL1 }, | 367 | access_vm_reg, reset_amair_el1, AMAIR_EL1 }, |
423 | 368 | ||
424 | /* VBAR_EL1 */ | 369 | /* VBAR_EL1 */ |
425 | { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), | 370 | { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), |
426 | NULL, reset_val, VBAR_EL1, 0 }, | 371 | NULL, reset_val, VBAR_EL1, 0 }, |
427 | 372 | ||
428 | /* ICC_SRE_EL1 */ | 373 | /* ICC_SRE_EL1 */ |
429 | { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), | 374 | { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), |
430 | trap_raz_wi }, | 375 | trap_raz_wi }, |
431 | 376 | ||
432 | /* CONTEXTIDR_EL1 */ | 377 | /* CONTEXTIDR_EL1 */ |
433 | { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), | 378 | { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), |
434 | access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, | 379 | access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, |
435 | /* TPIDR_EL1 */ | 380 | /* TPIDR_EL1 */ |
436 | { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100), | 381 | { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100), |
437 | NULL, reset_unknown, TPIDR_EL1 }, | 382 | NULL, reset_unknown, TPIDR_EL1 }, |
438 | 383 | ||
439 | /* CNTKCTL_EL1 */ | 384 | /* CNTKCTL_EL1 */ |
440 | { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000), | 385 | { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000), |
441 | NULL, reset_val, CNTKCTL_EL1, 0}, | 386 | NULL, reset_val, CNTKCTL_EL1, 0}, |
442 | 387 | ||
443 | /* CSSELR_EL1 */ | 388 | /* CSSELR_EL1 */ |
444 | { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), | 389 | { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), |
445 | NULL, reset_unknown, CSSELR_EL1 }, | 390 | NULL, reset_unknown, CSSELR_EL1 }, |
446 | 391 | ||
447 | /* PMCR_EL0 */ | 392 | /* PMCR_EL0 */ |
448 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), | 393 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), |
449 | trap_raz_wi }, | 394 | trap_raz_wi }, |
450 | /* PMCNTENSET_EL0 */ | 395 | /* PMCNTENSET_EL0 */ |
451 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), | 396 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), |
452 | trap_raz_wi }, | 397 | trap_raz_wi }, |
453 | /* PMCNTENCLR_EL0 */ | 398 | /* PMCNTENCLR_EL0 */ |
454 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), | 399 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), |
455 | trap_raz_wi }, | 400 | trap_raz_wi }, |
456 | /* PMOVSCLR_EL0 */ | 401 | /* PMOVSCLR_EL0 */ |
457 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), | 402 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), |
458 | trap_raz_wi }, | 403 | trap_raz_wi }, |
459 | /* PMSWINC_EL0 */ | 404 | /* PMSWINC_EL0 */ |
460 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), | 405 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), |
461 | trap_raz_wi }, | 406 | trap_raz_wi }, |
462 | /* PMSELR_EL0 */ | 407 | /* PMSELR_EL0 */ |
463 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), | 408 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), |
464 | trap_raz_wi }, | 409 | trap_raz_wi }, |
465 | /* PMCEID0_EL0 */ | 410 | /* PMCEID0_EL0 */ |
466 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), | 411 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), |
467 | trap_raz_wi }, | 412 | trap_raz_wi }, |
468 | /* PMCEID1_EL0 */ | 413 | /* PMCEID1_EL0 */ |
469 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111), | 414 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111), |
470 | trap_raz_wi }, | 415 | trap_raz_wi }, |
471 | /* PMCCNTR_EL0 */ | 416 | /* PMCCNTR_EL0 */ |
472 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), | 417 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), |
473 | trap_raz_wi }, | 418 | trap_raz_wi }, |
474 | /* PMXEVTYPER_EL0 */ | 419 | /* PMXEVTYPER_EL0 */ |
475 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), | 420 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), |
476 | trap_raz_wi }, | 421 | trap_raz_wi }, |
477 | /* PMXEVCNTR_EL0 */ | 422 | /* PMXEVCNTR_EL0 */ |
478 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), | 423 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), |
479 | trap_raz_wi }, | 424 | trap_raz_wi }, |
480 | /* PMUSERENR_EL0 */ | 425 | /* PMUSERENR_EL0 */ |
481 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), | 426 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), |
482 | trap_raz_wi }, | 427 | trap_raz_wi }, |
483 | /* PMOVSSET_EL0 */ | 428 | /* PMOVSSET_EL0 */ |
484 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), | 429 | { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), |
485 | trap_raz_wi }, | 430 | trap_raz_wi }, |
486 | 431 | ||
487 | /* TPIDR_EL0 */ | 432 | /* TPIDR_EL0 */ |
488 | { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010), | 433 | { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010), |
489 | NULL, reset_unknown, TPIDR_EL0 }, | 434 | NULL, reset_unknown, TPIDR_EL0 }, |
490 | /* TPIDRRO_EL0 */ | 435 | /* TPIDRRO_EL0 */ |
491 | { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011), | 436 | { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011), |
492 | NULL, reset_unknown, TPIDRRO_EL0 }, | 437 | NULL, reset_unknown, TPIDRRO_EL0 }, |
493 | 438 | ||
494 | /* DACR32_EL2 */ | 439 | /* DACR32_EL2 */ |
495 | { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000), | 440 | { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000), |
496 | NULL, reset_unknown, DACR32_EL2 }, | 441 | NULL, reset_unknown, DACR32_EL2 }, |
497 | /* IFSR32_EL2 */ | 442 | /* IFSR32_EL2 */ |
498 | { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001), | 443 | { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001), |
499 | NULL, reset_unknown, IFSR32_EL2 }, | 444 | NULL, reset_unknown, IFSR32_EL2 }, |
500 | /* FPEXC32_EL2 */ | 445 | /* FPEXC32_EL2 */ |
501 | { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000), | 446 | { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000), |
502 | NULL, reset_val, FPEXC32_EL2, 0x70 }, | 447 | NULL, reset_val, FPEXC32_EL2, 0x70 }, |
503 | }; | 448 | }; |
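
The "must be sorted ascending by Op0, Op1, CRn, CRm, Op2" rule stated above the table is easy to violate when adding entries. A hedged sketch of an init-time sanity check (not part of this patch; both helpers are hypothetical):

/* Sketch only: compare two descriptors lexicographically by encoding. */
static int sketch_cmp_sys_reg(const struct sys_reg_desc *a,
			      const struct sys_reg_desc *b)
{
	if (a->Op0 != b->Op0)
		return a->Op0 - b->Op0;
	if (a->Op1 != b->Op1)
		return a->Op1 - b->Op1;
	if (a->CRn != b->CRn)
		return a->CRn - b->CRn;
	if (a->CRm != b->CRm)
		return a->CRm - b->CRm;
	return a->Op2 - b->Op2;
}

/* Sketch only: verify strict ascending order across the whole table. */
static bool sketch_table_is_sorted(const struct sys_reg_desc *table,
				   size_t num)
{
	size_t i;

	for (i = 1; i < num; i++)
		if (sketch_cmp_sys_reg(&table[i - 1], &table[i]) >= 0)
			return false;
	return true;
}

Something like WARN_ON(!sketch_table_is_sorted(sys_reg_descs, ARRAY_SIZE(sys_reg_descs))) at init time would catch ordering mistakes early.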
504 | 449 | ||
505 | static bool trap_dbgidr(struct kvm_vcpu *vcpu, | 450 | static bool trap_dbgidr(struct kvm_vcpu *vcpu, |
506 | const struct sys_reg_params *p, | 451 | const struct sys_reg_params *p, |
507 | const struct sys_reg_desc *r) | 452 | const struct sys_reg_desc *r) |
508 | { | 453 | { |
509 | if (p->is_write) { | 454 | if (p->is_write) { |
510 | return ignore_write(vcpu, p); | 455 | return ignore_write(vcpu, p); |
511 | } else { | 456 | } else { |
512 | u64 dfr = read_cpuid(ID_AA64DFR0_EL1); | 457 | u64 dfr = read_cpuid(ID_AA64DFR0_EL1); |
513 | u64 pfr = read_cpuid(ID_AA64PFR0_EL1); | 458 | u64 pfr = read_cpuid(ID_AA64PFR0_EL1); |
514 | u32 el3 = !!((pfr >> 12) & 0xf); | 459 | u32 el3 = !!((pfr >> 12) & 0xf); |
515 | 460 | ||
516 | *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) | | 461 | *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) | |
517 | (((dfr >> 12) & 0xf) << 24) | | 462 | (((dfr >> 12) & 0xf) << 24) | |
518 | (((dfr >> 28) & 0xf) << 20) | | 463 | (((dfr >> 28) & 0xf) << 20) | |
519 | (6 << 16) | (el3 << 14) | (el3 << 12)); | 464 | (6 << 16) | (el3 << 14) | (el3 << 12)); |
520 | return true; | 465 | return true; |
521 | } | 466 | } |
522 | } | 467 | } |
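
For readers decoding the shifts in trap_dbgidr(), the assumed field map is sketched below; this is a reading aid based on the AArch32 DBGIDR and ID_AA64DFR0_EL1 layouts, not text from the patch, so consult the ARM ARM before relying on it:

/* Assumed field map for the DBGIDR value composed above:
 *   [31:28] WRPs     <- ID_AA64DFR0_EL1.WRPs     (dfr >> 20)
 *   [27:24] BRPs     <- ID_AA64DFR0_EL1.BRPs     (dfr >> 12)
 *   [23:20] CTX_CMPs <- ID_AA64DFR0_EL1.CTX_CMPs (dfr >> 28)
 *   [19:16] Version  =  6 (ARMv8 debug architecture)
 *   [14], [12]          set when EL3 is implemented (ID_AA64PFR0_EL1.EL3)
 */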
523 | 468 | ||
524 | static bool trap_debug32(struct kvm_vcpu *vcpu, | 469 | static bool trap_debug32(struct kvm_vcpu *vcpu, |
525 | const struct sys_reg_params *p, | 470 | const struct sys_reg_params *p, |
526 | const struct sys_reg_desc *r) | 471 | const struct sys_reg_desc *r) |
527 | { | 472 | { |
528 | if (p->is_write) { | 473 | if (p->is_write) { |
529 | vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); | 474 | vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); |
530 | vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; | 475 | vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; |
531 | } else { | 476 | } else { |
532 | *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg); | 477 | *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg); |
533 | } | 478 | } |
534 | 479 | ||
535 | return true; | 480 | return true; |
536 | } | 481 | } |
537 | 482 | ||
538 | #define DBG_BCR_BVR_WCR_WVR(n) \ | 483 | #define DBG_BCR_BVR_WCR_WVR(n) \ |
539 | /* DBGBVRn */ \ | 484 | /* DBGBVRn */ \ |
540 | { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \ | 485 | { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \ |
541 | NULL, (cp14_DBGBVR0 + (n) * 2) }, \ | 486 | NULL, (cp14_DBGBVR0 + (n) * 2) }, \ |
542 | /* DBGBCRn */ \ | 487 | /* DBGBCRn */ \ |
543 | { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \ | 488 | { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \ |
544 | NULL, (cp14_DBGBCR0 + (n) * 2) }, \ | 489 | NULL, (cp14_DBGBCR0 + (n) * 2) }, \ |
545 | /* DBGWVRn */ \ | 490 | /* DBGWVRn */ \ |
546 | { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \ | 491 | { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \ |
547 | NULL, (cp14_DBGWVR0 + (n) * 2) }, \ | 492 | NULL, (cp14_DBGWVR0 + (n) * 2) }, \ |
548 | /* DBGWCRn */ \ | 493 | /* DBGWCRn */ \ |
549 | { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \ | 494 | { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \ |
550 | NULL, (cp14_DBGWCR0 + (n) * 2) } | 495 | NULL, (cp14_DBGWCR0 + (n) * 2) } |
551 | 496 | ||
552 | #define DBGBXVR(n) \ | 497 | #define DBGBXVR(n) \ |
553 | { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \ | 498 | { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \ |
554 | NULL, cp14_DBGBXVR0 + n * 2 } | 499 | NULL, cp14_DBGBXVR0 + n * 2 } |
555 | 500 | ||
556 | /* | 501 | /* |
557 | * Trapped cp14 registers. We generally ignore most of the external | 502 | * Trapped cp14 registers. We generally ignore most of the external |
558 | * debug registers, on the principle that they don't really make sense to | 503 | * debug registers, on the principle that they don't really make sense to |
559 | * a guest. Revisit this one day, should this principle change. | 504 | * a guest. Revisit this one day, should this principle change. |
560 | */ | 505 | */ |
561 | static const struct sys_reg_desc cp14_regs[] = { | 506 | static const struct sys_reg_desc cp14_regs[] = { |
562 | /* DBGIDR */ | 507 | /* DBGIDR */ |
563 | { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr }, | 508 | { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr }, |
564 | /* DBGDTRRXext */ | 509 | /* DBGDTRRXext */ |
565 | { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, | 510 | { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, |
566 | 511 | ||
567 | DBG_BCR_BVR_WCR_WVR(0), | 512 | DBG_BCR_BVR_WCR_WVR(0), |
568 | /* DBGDSCRint */ | 513 | /* DBGDSCRint */ |
569 | { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, | 514 | { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, |
570 | DBG_BCR_BVR_WCR_WVR(1), | 515 | DBG_BCR_BVR_WCR_WVR(1), |
571 | /* DBGDCCINT */ | 516 | /* DBGDCCINT */ |
572 | { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, | 517 | { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, |
573 | /* DBGDSCRext */ | 518 | /* DBGDSCRext */ |
574 | { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, | 519 | { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, |
575 | DBG_BCR_BVR_WCR_WVR(2), | 520 | DBG_BCR_BVR_WCR_WVR(2), |
576 | /* DBGDTR[RT]Xint */ | 521 | /* DBGDTR[RT]Xint */ |
577 | { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, | 522 | { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, |
578 | /* DBGDTR[RT]Xext */ | 523 | /* DBGDTR[RT]Xext */ |
579 | { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi }, | 524 | { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi }, |
580 | DBG_BCR_BVR_WCR_WVR(3), | 525 | DBG_BCR_BVR_WCR_WVR(3), |
581 | DBG_BCR_BVR_WCR_WVR(4), | 526 | DBG_BCR_BVR_WCR_WVR(4), |
582 | DBG_BCR_BVR_WCR_WVR(5), | 527 | DBG_BCR_BVR_WCR_WVR(5), |
583 | /* DBGWFAR */ | 528 | /* DBGWFAR */ |
584 | { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi }, | 529 | { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi }, |
585 | /* DBGOSECCR */ | 530 | /* DBGOSECCR */ |
586 | { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, | 531 | { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, |
587 | DBG_BCR_BVR_WCR_WVR(6), | 532 | DBG_BCR_BVR_WCR_WVR(6), |
588 | /* DBGVCR */ | 533 | /* DBGVCR */ |
589 | { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, | 534 | { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, |
590 | DBG_BCR_BVR_WCR_WVR(7), | 535 | DBG_BCR_BVR_WCR_WVR(7), |
591 | DBG_BCR_BVR_WCR_WVR(8), | 536 | DBG_BCR_BVR_WCR_WVR(8), |
592 | DBG_BCR_BVR_WCR_WVR(9), | 537 | DBG_BCR_BVR_WCR_WVR(9), |
593 | DBG_BCR_BVR_WCR_WVR(10), | 538 | DBG_BCR_BVR_WCR_WVR(10), |
594 | DBG_BCR_BVR_WCR_WVR(11), | 539 | DBG_BCR_BVR_WCR_WVR(11), |
595 | DBG_BCR_BVR_WCR_WVR(12), | 540 | DBG_BCR_BVR_WCR_WVR(12), |
596 | DBG_BCR_BVR_WCR_WVR(13), | 541 | DBG_BCR_BVR_WCR_WVR(13), |
597 | DBG_BCR_BVR_WCR_WVR(14), | 542 | DBG_BCR_BVR_WCR_WVR(14), |
598 | DBG_BCR_BVR_WCR_WVR(15), | 543 | DBG_BCR_BVR_WCR_WVR(15), |
599 | 544 | ||
600 | /* DBGDRAR (32bit) */ | 545 | /* DBGDRAR (32bit) */ |
601 | { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi }, | 546 | { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi }, |
602 | 547 | ||
603 | DBGBXVR(0), | 548 | DBGBXVR(0), |
604 | /* DBGOSLAR */ | 549 | /* DBGOSLAR */ |
605 | { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi }, | 550 | { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi }, |
606 | DBGBXVR(1), | 551 | DBGBXVR(1), |
607 | /* DBGOSLSR */ | 552 | /* DBGOSLSR */ |
608 | { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 }, | 553 | { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 }, |
609 | DBGBXVR(2), | 554 | DBGBXVR(2), |
610 | DBGBXVR(3), | 555 | DBGBXVR(3), |
611 | /* DBGOSDLR */ | 556 | /* DBGOSDLR */ |
612 | { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi }, | 557 | { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi }, |
613 | DBGBXVR(4), | 558 | DBGBXVR(4), |
614 | /* DBGPRCR */ | 559 | /* DBGPRCR */ |
615 | { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi }, | 560 | { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi }, |
616 | DBGBXVR(5), | 561 | DBGBXVR(5), |
617 | DBGBXVR(6), | 562 | DBGBXVR(6), |
618 | DBGBXVR(7), | 563 | DBGBXVR(7), |
619 | DBGBXVR(8), | 564 | DBGBXVR(8), |
620 | DBGBXVR(9), | 565 | DBGBXVR(9), |
621 | DBGBXVR(10), | 566 | DBGBXVR(10), |
622 | DBGBXVR(11), | 567 | DBGBXVR(11), |
623 | DBGBXVR(12), | 568 | DBGBXVR(12), |
624 | DBGBXVR(13), | 569 | DBGBXVR(13), |
625 | DBGBXVR(14), | 570 | DBGBXVR(14), |
626 | DBGBXVR(15), | 571 | DBGBXVR(15), |
627 | 572 | ||
628 | /* DBGDSAR (32bit) */ | 573 | /* DBGDSAR (32bit) */ |
629 | { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi }, | 574 | { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi }, |
630 | 575 | ||
631 | /* DBGDEVID2 */ | 576 | /* DBGDEVID2 */ |
632 | { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi }, | 577 | { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi }, |
633 | /* DBGDEVID1 */ | 578 | /* DBGDEVID1 */ |
634 | { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi }, | 579 | { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi }, |
635 | /* DBGDEVID */ | 580 | /* DBGDEVID */ |
636 | { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi }, | 581 | { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi }, |
637 | /* DBGCLAIMSET */ | 582 | /* DBGCLAIMSET */ |
638 | { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi }, | 583 | { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi }, |
639 | /* DBGCLAIMCLR */ | 584 | /* DBGCLAIMCLR */ |
640 | { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi }, | 585 | { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi }, |
641 | /* DBGAUTHSTATUS */ | 586 | /* DBGAUTHSTATUS */ |
642 | { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 }, | 587 | { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 }, |
643 | }; | 588 | }; |
644 | 589 | ||
645 | /* Trapped cp14 64bit registers */ | 590 | /* Trapped cp14 64bit registers */ |
646 | static const struct sys_reg_desc cp14_64_regs[] = { | 591 | static const struct sys_reg_desc cp14_64_regs[] = { |
647 | /* DBGDRAR (64bit) */ | 592 | /* DBGDRAR (64bit) */ |
648 | { Op1( 0), CRm( 1), .access = trap_raz_wi }, | 593 | { Op1( 0), CRm( 1), .access = trap_raz_wi }, |
649 | 594 | ||
650 | /* DBGDSAR (64bit) */ | 595 | /* DBGDSAR (64bit) */ |
651 | { Op1( 0), CRm( 2), .access = trap_raz_wi }, | 596 | { Op1( 0), CRm( 2), .access = trap_raz_wi }, |
652 | }; | 597 | }; |
653 | 598 | ||
654 | /* | 599 | /* |
655 | * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, | 600 | * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, |
656 | * depending on the way they are accessed (as a 32bit or a 64bit | 601 | * depending on the way they are accessed (as a 32bit or a 64bit |
657 | * register). | 602 | * register). |
658 | */ | 603 | */ |
659 | static const struct sys_reg_desc cp15_regs[] = { | 604 | static const struct sys_reg_desc cp15_regs[] = { |
660 | { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR }, | 605 | { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, |
661 | { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, | 606 | { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, |
662 | { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, | 607 | { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, |
663 | { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, | 608 | { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, |
664 | { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, | 609 | { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, |
665 | { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, | 610 | { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, |
666 | { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, | 611 | { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, |
667 | { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR }, | 612 | { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR }, |
668 | { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR }, | 613 | { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR }, |
669 | { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR }, | 614 | { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR }, |
670 | { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR }, | 615 | { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR }, |
671 | 616 | ||
672 | /* | 617 | /* |
673 | * DC{C,I,CI}SW operations: | 618 | * DC{C,I,CI}SW operations: |
674 | */ | 619 | */ |
675 | { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, | 620 | { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, |
676 | { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, | 621 | { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, |
677 | { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, | 622 | { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, |
678 | 623 | ||
679 | /* PMU */ | 624 | /* PMU */ |
680 | { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi }, | 625 | { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi }, |
681 | { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi }, | 626 | { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi }, |
682 | { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, | 627 | { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, |
683 | { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, | 628 | { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, |
684 | { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi }, | 629 | { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi }, |
685 | { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi }, | 630 | { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi }, |
686 | { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi }, | 631 | { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi }, |
687 | { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi }, | 632 | { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi }, |
688 | { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi }, | 633 | { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi }, |
689 | { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi }, | 634 | { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi }, |
690 | { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, | 635 | { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, |
691 | { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi }, | 636 | { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi }, |
692 | { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi }, | 637 | { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi }, |
693 | 638 | ||
694 | { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, | 639 | { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, |
695 | { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, | 640 | { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, |
696 | { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, | 641 | { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, |
697 | { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, | 642 | { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, |
698 | 643 | ||
699 | /* ICC_SRE */ | 644 | /* ICC_SRE */ |
700 | { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi }, | 645 | { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi }, |
701 | 646 | ||
702 | { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, | 647 | { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, |
703 | }; | 648 | }; |
704 | 649 | ||
705 | static const struct sys_reg_desc cp15_64_regs[] = { | 650 | static const struct sys_reg_desc cp15_64_regs[] = { |
706 | { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, | 651 | { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, |
707 | { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, | 652 | { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, |
708 | }; | 653 | }; |
709 | 654 | ||
710 | /* Target specific emulation tables */ | 655 | /* Target specific emulation tables */ |
711 | static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS]; | 656 | static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS]; |
712 | 657 | ||
713 | void kvm_register_target_sys_reg_table(unsigned int target, | 658 | void kvm_register_target_sys_reg_table(unsigned int target, |
714 | struct kvm_sys_reg_target_table *table) | 659 | struct kvm_sys_reg_target_table *table) |
715 | { | 660 | { |
716 | target_tables[target] = table; | 661 | target_tables[target] = table; |
717 | } | 662 | } |
718 | 663 | ||
719 | /* Get specific register table for this target. */ | 664 | /* Get specific register table for this target. */ |
720 | static const struct sys_reg_desc *get_target_table(unsigned target, | 665 | static const struct sys_reg_desc *get_target_table(unsigned target, |
721 | bool mode_is_64, | 666 | bool mode_is_64, |
722 | size_t *num) | 667 | size_t *num) |
723 | { | 668 | { |
724 | struct kvm_sys_reg_target_table *table; | 669 | struct kvm_sys_reg_target_table *table; |
725 | 670 | ||
726 | table = target_tables[target]; | 671 | table = target_tables[target]; |
727 | if (mode_is_64) { | 672 | if (mode_is_64) { |
728 | *num = table->table64.num; | 673 | *num = table->table64.num; |
729 | return table->table64.table; | 674 | return table->table64.table; |
730 | } else { | 675 | } else { |
731 | *num = table->table32.num; | 676 | *num = table->table32.num; |
732 | return table->table32.table; | 677 | return table->table32.table; |
733 | } | 678 | } |
734 | } | 679 | } |
735 | 680 | ||
736 | static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, | 681 | static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, |
737 | const struct sys_reg_desc table[], | 682 | const struct sys_reg_desc table[], |
738 | unsigned int num) | 683 | unsigned int num) |
739 | { | 684 | { |
740 | unsigned int i; | 685 | unsigned int i; |
741 | 686 | ||
742 | for (i = 0; i < num; i++) { | 687 | for (i = 0; i < num; i++) { |
743 | const struct sys_reg_desc *r = &table[i]; | 688 | const struct sys_reg_desc *r = &table[i]; |
744 | 689 | ||
745 | if (params->Op0 != r->Op0) | 690 | if (params->Op0 != r->Op0) |
746 | continue; | 691 | continue; |
747 | if (params->Op1 != r->Op1) | 692 | if (params->Op1 != r->Op1) |
748 | continue; | 693 | continue; |
749 | if (params->CRn != r->CRn) | 694 | if (params->CRn != r->CRn) |
750 | continue; | 695 | continue; |
751 | if (params->CRm != r->CRm) | 696 | if (params->CRm != r->CRm) |
752 | continue; | 697 | continue; |
753 | if (params->Op2 != r->Op2) | 698 | if (params->Op2 != r->Op2) |
754 | continue; | 699 | continue; |
755 | 700 | ||
756 | return r; | 701 | return r; |
757 | } | 702 | } |
758 | return NULL; | 703 | return NULL; |
759 | } | 704 | } |
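
To make the match criteria concrete, here is an illustrative use of find_reg() (sketch_lookup_mdscr() is hypothetical): looking up the MDSCR_EL1 descriptor by the encoding listed for it in sys_reg_descs above.

/* Sketch only: exact-match lookup of MDSCR_EL1
 * (Op0=0b10, Op1=0b000, CRn=0b0000, CRm=0b0010, Op2=0b010). */
static const struct sys_reg_desc *sketch_lookup_mdscr(void)
{
	struct sys_reg_params p = {
		.Op0 = 0b10,
		.Op1 = 0b000,
		.CRn = 0b0000,
		.CRm = 0b0010,
		.Op2 = 0b010,
	};

	return find_reg(&p, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}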
760 | 705 | ||
761 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) | 706 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) |
762 | { | 707 | { |
763 | kvm_inject_undefined(vcpu); | 708 | kvm_inject_undefined(vcpu); |
764 | return 1; | 709 | return 1; |
765 | } | 710 | } |
766 | 711 | ||
767 | /* | 712 | /* |
768 | * emulate_cp -- tries to match a sys_reg access in a handling table, and | 713 | * emulate_cp -- tries to match a sys_reg access in a handling table, and |
769 | * call the corresponding trap handler. | 714 | * call the corresponding trap handler. |
770 | * | 715 | * |
771 | * @params: pointer to the descriptor of the access | 716 | * @params: pointer to the descriptor of the access |
772 | * @table: array of trap descriptors | 717 | * @table: array of trap descriptors |
773 | * @num: size of the trap descriptor array | 718 | * @num: size of the trap descriptor array |
774 | * | 719 | * |
775 | * Return 0 if the access has been handled, and -1 if not. | 720 | * Return 0 if the access has been handled, and -1 if not. |
776 | */ | 721 | */ |
777 | static int emulate_cp(struct kvm_vcpu *vcpu, | 722 | static int emulate_cp(struct kvm_vcpu *vcpu, |
778 | const struct sys_reg_params *params, | 723 | const struct sys_reg_params *params, |
779 | const struct sys_reg_desc *table, | 724 | const struct sys_reg_desc *table, |
780 | size_t num) | 725 | size_t num) |
781 | { | 726 | { |
782 | const struct sys_reg_desc *r; | 727 | const struct sys_reg_desc *r; |
783 | 728 | ||
784 | if (!table) | 729 | if (!table) |
785 | return -1; /* Not handled */ | 730 | return -1; /* Not handled */ |
786 | 731 | ||
787 | r = find_reg(params, table, num); | 732 | r = find_reg(params, table, num); |
788 | 733 | ||
789 | if (r) { | 734 | if (r) { |
790 | /* | 735 | /* |
791 | * Not having an accessor means that we have | 736 | * Not having an accessor means that we have |
792 | * configured a trap that we don't know how to | 737 | * configured a trap that we don't know how to |
793 | * handle. This certainly qualifies as a gross bug | 738 | * handle. This certainly qualifies as a gross bug |
794 | * that should be fixed right away. | 739 | * that should be fixed right away. |
795 | */ | 740 | */ |
796 | BUG_ON(!r->access); | 741 | BUG_ON(!r->access); |
797 | 742 | ||
798 | if (likely(r->access(vcpu, params, r))) { | 743 | if (likely(r->access(vcpu, params, r))) { |
799 | /* Skip instruction, since it was emulated */ | 744 | /* Skip instruction, since it was emulated */ |
800 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | 745 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
801 | } | 746 | } |
802 | 747 | ||
803 | /* Handled */ | 748 | /* Handled */ |
804 | return 0; | 749 | return 0; |
805 | } | 750 | } |
806 | 751 | ||
807 | /* Not handled */ | 752 | /* Not handled */ |
808 | return -1; | 753 | return -1; |
809 | } | 754 | } |
810 | 755 | ||
811 | static void unhandled_cp_access(struct kvm_vcpu *vcpu, | 756 | static void unhandled_cp_access(struct kvm_vcpu *vcpu, |
812 | struct sys_reg_params *params) | 757 | struct sys_reg_params *params) |
813 | { | 758 | { |
814 | u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); | 759 | u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); |
815 | int cp; | 760 | int cp; |
816 | 761 | ||
817 | switch (hsr_ec) { | 762 | switch (hsr_ec) { |
818 | case ESR_EL2_EC_CP15_32: | 763 | case ESR_EL2_EC_CP15_32: |
819 | case ESR_EL2_EC_CP15_64: | 764 | case ESR_EL2_EC_CP15_64: |
820 | cp = 15; | 765 | cp = 15; |
821 | break; | 766 | break; |
822 | case ESR_EL2_EC_CP14_MR: | 767 | case ESR_EL2_EC_CP14_MR: |
823 | case ESR_EL2_EC_CP14_64: | 768 | case ESR_EL2_EC_CP14_64: |
824 | cp = 14; | 769 | cp = 14; |
825 | break; | 770 | break; |
826 | default: | 771 | default: |
827 | WARN_ON((cp = -1)); | 772 | WARN_ON((cp = -1)); |
828 | } | 773 | } |
829 | 774 | ||
830 | kvm_err("Unsupported guest CP%d access at: %08lx\n", | 775 | kvm_err("Unsupported guest CP%d access at: %08lx\n", |
831 | cp, *vcpu_pc(vcpu)); | 776 | cp, *vcpu_pc(vcpu)); |
832 | print_sys_reg_instr(params); | 777 | print_sys_reg_instr(params); |
833 | kvm_inject_undefined(vcpu); | 778 | kvm_inject_undefined(vcpu); |
834 | } | 779 | } |
835 | 780 | ||
836 | /** | 781 | /** |
837 | * kvm_handle_cp_64 -- handles an mrrc/mcrr trap on a guest CP15 access | 782 | * kvm_handle_cp_64 -- handles an mrrc/mcrr trap on a guest CP15 access |
838 | * @vcpu: The VCPU pointer | 783 | * @vcpu: The VCPU pointer |
839 | * @run: The kvm_run struct | 784 | * @run: The kvm_run struct |
840 | */ | 785 | */ |
841 | static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, | 786 | static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, |
842 | const struct sys_reg_desc *global, | 787 | const struct sys_reg_desc *global, |
843 | size_t nr_global, | 788 | size_t nr_global, |
844 | const struct sys_reg_desc *target_specific, | 789 | const struct sys_reg_desc *target_specific, |
845 | size_t nr_specific) | 790 | size_t nr_specific) |
846 | { | 791 | { |
847 | struct sys_reg_params params; | 792 | struct sys_reg_params params; |
848 | u32 hsr = kvm_vcpu_get_hsr(vcpu); | 793 | u32 hsr = kvm_vcpu_get_hsr(vcpu); |
849 | int Rt2 = (hsr >> 10) & 0xf; | 794 | int Rt2 = (hsr >> 10) & 0xf; |
850 | 795 | ||
851 | params.is_aarch32 = true; | 796 | params.is_aarch32 = true; |
852 | params.is_32bit = false; | 797 | params.is_32bit = false; |
853 | params.CRm = (hsr >> 1) & 0xf; | 798 | params.CRm = (hsr >> 1) & 0xf; |
854 | params.Rt = (hsr >> 5) & 0xf; | 799 | params.Rt = (hsr >> 5) & 0xf; |
855 | params.is_write = ((hsr & 1) == 0); | 800 | params.is_write = ((hsr & 1) == 0); |
856 | 801 | ||
857 | params.Op0 = 0; | 802 | params.Op0 = 0; |
858 | params.Op1 = (hsr >> 16) & 0xf; | 803 | params.Op1 = (hsr >> 16) & 0xf; |
859 | params.Op2 = 0; | 804 | params.Op2 = 0; |
860 | params.CRn = 0; | 805 | params.CRn = 0; |
861 | 806 | ||
862 | /* | 807 | /* |
863 | * Massive hack here. Store Rt2 in the top 32 bits so we only | 808 | * Massive hack here. Store Rt2 in the top 32 bits so we only |
864 | * have one register to deal with. As we use the same trap | 809 | * have one register to deal with. As we use the same trap |
865 | * backends between AArch32 and AArch64, we get away with it. | 810 | * backends between AArch32 and AArch64, we get away with it. |
866 | */ | 811 | */ |
867 | if (params.is_write) { | 812 | if (params.is_write) { |
868 | u64 val = *vcpu_reg(vcpu, params.Rt); | 813 | u64 val = *vcpu_reg(vcpu, params.Rt); |
869 | val &= 0xffffffff; | 814 | val &= 0xffffffff; |
870 | val |= *vcpu_reg(vcpu, Rt2) << 32; | 815 | val |= *vcpu_reg(vcpu, Rt2) << 32; |
871 | *vcpu_reg(vcpu, params.Rt) = val; | 816 | *vcpu_reg(vcpu, params.Rt) = val; |
872 | } | 817 | } |
873 | 818 | ||
874 | if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific)) | 819 | if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific)) |
875 | goto out; | 820 | goto out; |
876 | if (!emulate_cp(vcpu, ¶ms, global, nr_global)) | 821 | if (!emulate_cp(vcpu, ¶ms, global, nr_global)) |
877 | goto out; | 822 | goto out; |
878 | 823 | ||
879 | unhandled_cp_access(vcpu, ¶ms); | 824 | unhandled_cp_access(vcpu, ¶ms); |
880 | 825 | ||
881 | out: | 826 | out: |
882 | /* Do the opposite hack for the read side */ | 827 | /* Do the opposite hack for the read side */ |
883 | if (!params.is_write) { | 828 | if (!params.is_write) { |
884 | u64 val = *vcpu_reg(vcpu, params.Rt); | 829 | u64 val = *vcpu_reg(vcpu, params.Rt); |
885 | val >>= 32; | 830 | val >>= 32; |
886 | *vcpu_reg(vcpu, Rt2) = val; | 831 | *vcpu_reg(vcpu, Rt2) = val; |
887 | } | 832 | } |
888 | 833 | ||
889 | return 1; | 834 | return 1; |
890 | } | 835 | } |
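
The "massive hack" noted inside the function folds the AArch32 register pair into one 64-bit value so the AArch64 trap backends can be reused unchanged. The round trip looks roughly like this (an illustrative sketch, not code from the patch):

/* Sketch only: Rt supplies the low word, Rt2 the high word. */
static u64 sketch_pack_rt_rt2(u32 rt, u32 rt2)
{
	/* Write path: combine before calling the backend. */
	return ((u64)rt2 << 32) | rt;
}

static void sketch_unpack_rt_rt2(u64 val, u32 *rt, u32 *rt2)
{
	/* Read path: hand the top 32 bits back through Rt2. */
	*rt = val & 0xffffffff;
	*rt2 = val >> 32;
}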
891 | 836 | ||
892 | /** | 837 | /** |
893 | * kvm_handle_cp_32 -- handles an mrc/mcr trap on a guest CP15 access | 838 | * kvm_handle_cp_32 -- handles an mrc/mcr trap on a guest CP15 access |
894 | * @vcpu: The VCPU pointer | 839 | * @vcpu: The VCPU pointer |
895 | * @run: The kvm_run struct | 840 | * @run: The kvm_run struct |
896 | */ | 841 | */ |
897 | static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, | 842 | static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, |
898 | const struct sys_reg_desc *global, | 843 | const struct sys_reg_desc *global, |
899 | size_t nr_global, | 844 | size_t nr_global, |
900 | const struct sys_reg_desc *target_specific, | 845 | const struct sys_reg_desc *target_specific, |
901 | size_t nr_specific) | 846 | size_t nr_specific) |
902 | { | 847 | { |
903 | struct sys_reg_params params; | 848 | struct sys_reg_params params; |
904 | u32 hsr = kvm_vcpu_get_hsr(vcpu); | 849 | u32 hsr = kvm_vcpu_get_hsr(vcpu); |
905 | 850 | ||
906 | params.is_aarch32 = true; | 851 | params.is_aarch32 = true; |
907 | params.is_32bit = true; | 852 | params.is_32bit = true; |
908 | params.CRm = (hsr >> 1) & 0xf; | 853 | params.CRm = (hsr >> 1) & 0xf; |
909 | params.Rt = (hsr >> 5) & 0xf; | 854 | params.Rt = (hsr >> 5) & 0xf; |
910 | params.is_write = ((hsr & 1) == 0); | 855 | params.is_write = ((hsr & 1) == 0); |
911 | params.CRn = (hsr >> 10) & 0xf; | 856 | params.CRn = (hsr >> 10) & 0xf; |
912 | params.Op0 = 0; | 857 | params.Op0 = 0; |
913 | params.Op1 = (hsr >> 14) & 0x7; | 858 | params.Op1 = (hsr >> 14) & 0x7; |
914 | params.Op2 = (hsr >> 17) & 0x7; | 859 | params.Op2 = (hsr >> 17) & 0x7; |
915 | 860 | ||
916 | if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific)) | 861 | if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific)) |
917 | return 1; | 862 | return 1; |
918 | if (!emulate_cp(vcpu, ¶ms, global, nr_global)) | 863 | if (!emulate_cp(vcpu, ¶ms, global, nr_global)) |
919 | return 1; | 864 | return 1; |
920 | 865 | ||
921 | unhandled_cp_access(vcpu, ¶ms); | 866 | unhandled_cp_access(vcpu, ¶ms); |
922 | return 1; | 867 | return 1; |
923 | } | 868 | } |
924 | 869 | ||
925 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) | 870 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) |
926 | { | 871 | { |
927 | const struct sys_reg_desc *target_specific; | 872 | const struct sys_reg_desc *target_specific; |
928 | size_t num; | 873 | size_t num; |
929 | 874 | ||
930 | target_specific = get_target_table(vcpu->arch.target, false, &num); | 875 | target_specific = get_target_table(vcpu->arch.target, false, &num); |
931 | return kvm_handle_cp_64(vcpu, | 876 | return kvm_handle_cp_64(vcpu, |
932 | cp15_64_regs, ARRAY_SIZE(cp15_64_regs), | 877 | cp15_64_regs, ARRAY_SIZE(cp15_64_regs), |
933 | target_specific, num); | 878 | target_specific, num); |
934 | } | 879 | } |
935 | 880 | ||
936 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | 881 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) |
937 | { | 882 | { |
938 | const struct sys_reg_desc *target_specific; | 883 | const struct sys_reg_desc *target_specific; |
939 | size_t num; | 884 | size_t num; |
940 | 885 | ||
941 | target_specific = get_target_table(vcpu->arch.target, false, &num); | 886 | target_specific = get_target_table(vcpu->arch.target, false, &num); |
942 | return kvm_handle_cp_32(vcpu, | 887 | return kvm_handle_cp_32(vcpu, |
943 | cp15_regs, ARRAY_SIZE(cp15_regs), | 888 | cp15_regs, ARRAY_SIZE(cp15_regs), |
944 | target_specific, num); | 889 | target_specific, num); |
945 | } | 890 | } |
946 | 891 | ||
947 | int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) | 892 | int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) |
948 | { | 893 | { |
949 | return kvm_handle_cp_64(vcpu, | 894 | return kvm_handle_cp_64(vcpu, |
950 | cp14_64_regs, ARRAY_SIZE(cp14_64_regs), | 895 | cp14_64_regs, ARRAY_SIZE(cp14_64_regs), |
951 | NULL, 0); | 896 | NULL, 0); |
952 | } | 897 | } |
953 | 898 | ||
954 | int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | 899 | int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) |
955 | { | 900 | { |
956 | return kvm_handle_cp_32(vcpu, | 901 | return kvm_handle_cp_32(vcpu, |
957 | cp14_regs, ARRAY_SIZE(cp14_regs), | 902 | cp14_regs, ARRAY_SIZE(cp14_regs), |
958 | NULL, 0); | 903 | NULL, 0); |
959 | } | 904 | } |
960 | 905 | ||
961 | static int emulate_sys_reg(struct kvm_vcpu *vcpu, | 906 | static int emulate_sys_reg(struct kvm_vcpu *vcpu, |
962 | const struct sys_reg_params *params) | 907 | const struct sys_reg_params *params) |
963 | { | 908 | { |
964 | size_t num; | 909 | size_t num; |
965 | const struct sys_reg_desc *table, *r; | 910 | const struct sys_reg_desc *table, *r; |
966 | 911 | ||
967 | table = get_target_table(vcpu->arch.target, true, &num); | 912 | table = get_target_table(vcpu->arch.target, true, &num); |
968 | 913 | ||
969 | /* Search target-specific then generic table. */ | 914 | /* Search target-specific then generic table. */ |
970 | r = find_reg(params, table, num); | 915 | r = find_reg(params, table, num); |
971 | if (!r) | 916 | if (!r) |
972 | r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); | 917 | r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); |
973 | 918 | ||
974 | if (likely(r)) { | 919 | if (likely(r)) { |
975 | /* | 920 | /* |
976 | * Not having an accessor means that we have | 921 | * Not having an accessor means that we have |
977 | * configured a trap that we don't know how to | 922 | * configured a trap that we don't know how to |
978 | * handle. This certainly qualifies as a gross bug | 923 | * handle. This certainly qualifies as a gross bug |
979 | * that should be fixed right away. | 924 | * that should be fixed right away. |
980 | */ | 925 | */ |
981 | BUG_ON(!r->access); | 926 | BUG_ON(!r->access); |
982 | 927 | ||
983 | if (likely(r->access(vcpu, params, r))) { | 928 | if (likely(r->access(vcpu, params, r))) { |
984 | /* Skip instruction, since it was emulated */ | 929 | /* Skip instruction, since it was emulated */ |
985 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | 930 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
986 | return 1; | 931 | return 1; |
987 | } | 932 | } |
988 | /* If access function fails, it should complain. */ | 933 | /* If access function fails, it should complain. */ |
989 | } else { | 934 | } else { |
990 | kvm_err("Unsupported guest sys_reg access at: %lx\n", | 935 | kvm_err("Unsupported guest sys_reg access at: %lx\n", |
991 | *vcpu_pc(vcpu)); | 936 | *vcpu_pc(vcpu)); |
992 | print_sys_reg_instr(params); | 937 | print_sys_reg_instr(params); |
993 | } | 938 | } |
994 | kvm_inject_undefined(vcpu); | 939 | kvm_inject_undefined(vcpu); |
995 | return 1; | 940 | return 1; |
996 | } | 941 | } |

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
                                const struct sys_reg_desc *table, size_t num)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct sys_reg_params params;
        unsigned long esr = kvm_vcpu_get_hsr(vcpu);

        params.is_aarch32 = false;
        params.is_32bit = false;
        params.Op0 = (esr >> 20) & 3;
        params.Op1 = (esr >> 14) & 0x7;
        params.CRn = (esr >> 10) & 0xf;
        params.CRm = (esr >> 1) & 0xf;
        params.Op2 = (esr >> 17) & 0x7;
        params.Rt = (esr >> 5) & 0x1f;
        params.is_write = !(esr & 1);

        return emulate_sys_reg(vcpu, &params);
}
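
Reading the field layout off the shifts above: bit 0 is the direction (1 = read), bits [4:1] hold CRm, [9:5] Rt, [13:10] CRn, [16:14] Op1, [19:17] Op2, and [21:20] Op0. As a worked example derived only from those masks:

/* ISS for a guest "mrs x3, midr_el1" trap.
 * midr_el1 encodes as Op0=3, Op1=0, CRn=0, CRm=0, Op2=0; Rt=3; a read.
 */
unsigned long esr = (3UL << 20)         /* Op0 */
                  | (3UL << 5)          /* Rt = x3 */
                  | 1UL;                /* direction bit: read */
/* esr == 0x300061; the decode above yields Op0=3, Rt=3, is_write=false. */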

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U64:
                /* Any unused index bits mean it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                           | KVM_REG_ARM_COPROC_MASK
                           | KVM_REG_ARM64_SYSREG_OP0_MASK
                           | KVM_REG_ARM64_SYSREG_OP1_MASK
                           | KVM_REG_ARM64_SYSREG_CRN_MASK
                           | KVM_REG_ARM64_SYSREG_CRM_MASK
                           | KVM_REG_ARM64_SYSREG_OP2_MASK))
                        return false;
                params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
                params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
                params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
                               >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
                params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
                               >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
                return true;
        default:
                return false;
        }
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
                                                        u64 id)
{
        size_t num;
        const struct sys_reg_desc *table, *r;
        struct sys_reg_params params;

        /* We only do sys_reg for now. */
        if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
                return NULL;

        if (!index_to_params(id, &params))
                return NULL;

        table = get_target_table(vcpu->arch.target, true, &num);
        r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        /* Not saved in the sys_reg array? */
        if (r && !r->reg)
                r = NULL;

        return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)                                         \
        static void get_##reg(struct kvm_vcpu *v,                      \
                              const struct sys_reg_desc *r)            \
        {                                                               \
                u64 val;                                                \
                                                                        \
                asm volatile("mrs %0, " __stringify(reg) "\n"           \
                             : "=r" (val));                             \
                ((struct sys_reg_desc *)r)->val = val;                  \
        }

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
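
For instance, the first invocation above expands (modulo whitespace) to a helper that reads the host register and stashes it in the descriptor:

/* FUNCTION_INVARIANT(midr_el1) expands, roughly, to: */
static void get_midr_el1(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
        u64 val;

        asm volatile("mrs %0, midr_el1\n" : "=r" (val));
        ((struct sys_reg_desc *)r)->val = val;  /* cast away const to fill ->val */
}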

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
          NULL, get_midr_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
          NULL, get_revidr_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
          NULL, get_id_pfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
          NULL, get_id_pfr1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
          NULL, get_id_dfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
          NULL, get_id_afr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
          NULL, get_id_mmfr0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
          NULL, get_id_mmfr1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
          NULL, get_id_mmfr2_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
          NULL, get_id_mmfr3_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
          NULL, get_id_isar0_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
          NULL, get_id_isar1_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
          NULL, get_id_isar2_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
          NULL, get_id_isar3_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
          NULL, get_id_isar4_el1 },
        { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
          NULL, get_id_isar5_el1 },
        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
          NULL, get_clidr_el1 },
        { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
          NULL, get_aidr_el1 },
        { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
          NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;

        if (!index_to_params(id, &params))
                return -ENOENT;

        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
        struct sys_reg_params params;
        const struct sys_reg_desc *r;
        int err;
        u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

        if (!index_to_params(id, &params))
                return -ENOENT;
        r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
        if (!r)
                return -ENOENT;

        err = reg_from_user(&val, uaddr, id);
        if (err)
                return err;

        /* This is what we mean by invariant: you can't change it. */
        if (r->val != val)
                return -EINVAL;

        return 0;
}

static bool is_valid_cache(u32 val)
{
        u32 level, ctype;

        if (val >= CSSELR_MAX)
                return false;

        /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
        level = (val >> 1);
        ctype = (cache_levels >> (level * 3)) & 7;

        switch (ctype) {
        case 0: /* No cache */
                return false;
        case 1: /* Instruction cache only */
                return (val & 1);
        case 2: /* Data cache only */
        case 4: /* Unified cache */
                return !(val & 1);
        case 3: /* Separate instruction and data caches */
                return true;
        default: /* Reserved: we can't know instruction or data. */
                return false;
        }
}
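
To make the CSSELR encoding concrete: bit 0 selects instruction (1) versus data/unified (0), bits [3:1] select the level. On a CPU whose trimmed cache_levels reads 0x23 (Ctype1 = 3, separate L1 I+D; Ctype2 = 4, unified L2; an illustrative value, not from the source), the function accepts selectors 0 through 2 and rejects 3:

/* Hypothetical cache_levels = 0x23: L1 separate I+D, L2 unified. */
is_valid_cache(0);      /* L1 data:          ctype 3 -> true  */
is_valid_cache(1);      /* L1 instruction:   ctype 3 -> true  */
is_valid_cache(2);      /* L2 data/unified:  ctype 4, bit 0 clear -> true  */
is_valid_cache(3);      /* "L2 instruction": ctype 4, bit 0 set   -> false */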

static int demux_c15_get(u64 id, void __user *uaddr)
{
        u32 val;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                return put_user(get_ccsidr(val), uval);
        default:
                return -ENOENT;
        }
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
        u32 val, newval;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                if (get_user(newval, uval))
                        return -EFAULT;

                /* This is also invariant: you can't change it. */
                if (newval != get_ccsidr(val))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}
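
A demux index thus packs a demux ID (only CCSIDR is defined here) and a CSSELR selector into the low bits of a 32-bit-sized KVM_REG_ARM_DEMUX id. A sketch of how userspace might build one, using only the symbolic masks visible above (the exact bit positions live in the uapi header and are not quoted here):

/* Sketch: index for the CCSIDR of cache selector 2. */
__u64 id = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX
         | KVM_REG_ARM_DEMUX_ID_CCSIDR
         | (2 << KVM_REG_ARM_DEMUX_VAL_SHIFT);  /* CSSELR value 2 */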

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return get_invariant_sys_reg(reg->id, uaddr);

        return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct sys_reg_desc *r;
        void __user *uaddr = (void __user *)(unsigned long)reg->addr;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);

        if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
                return -ENOENT;

        r = index_to_sys_reg_desc(vcpu, reg->id);
        if (!r)
                return set_invariant_sys_reg(reg->id, uaddr);

        return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
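
These two entry points back the KVM_GET_ONE_REG and KVM_SET_ONE_REG vcpu ioctls. A minimal userspace sketch of reading one 64-bit sys_reg through this path (error handling elided; vcpu_fd and id are assumed to be a valid vcpu file descriptor and a register index):

#include <linux/kvm.h>
#include <sys/ioctl.h>

__u64 read_sys_reg(int vcpu_fd, __u64 id)
{
        __u64 val = 0;
        struct kvm_one_reg reg = {
                .id   = id,                             /* as built by sys_reg_to_index() */
                .addr = (__u64)(unsigned long)&val,     /* userspace buffer */
        };

        ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);  /* dispatched to kvm_arm_sys_reg_get_reg() */
        return val;
}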

static unsigned int num_demux_regs(void)
{
        unsigned int i, count = 0;

        for (i = 0; i < CSSELR_MAX; i++)
                if (is_valid_cache(i))
                        count++;

        return count;
}

static int write_demux_regids(u64 __user *uindices)
{
        u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
        unsigned int i;

        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
        for (i = 0; i < CSSELR_MAX; i++) {
                if (!is_valid_cache(i))
                        continue;
                if (put_user(val | i, uindices))
                        return -EFAULT;
                uindices++;
        }
        return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
        return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
                KVM_REG_ARM64_SYSREG |
                (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
                (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
                (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
                (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
                (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
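
sys_reg_to_index() is the inverse of index_to_params(): the five encoding fields are packed beneath the KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG banner bits. For the MIDR_EL1 entry above (Op0 = 3, all other fields 0), the resulting userspace index is simply:

/* Index of the MIDR_EL1 descriptor above; only Op0 is nonzero. */
u64 id = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG
       | ((u64)3 << KVM_REG_ARM64_SYSREG_OP0_SHIFT);
/* Feeding this back through index_to_params() recovers Op0 == 3. */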

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
        if (!*uind)
                return true;

        if (put_user(sys_reg_to_index(reg), *uind))
                return false;

        (*uind)++;
        return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
        const struct sys_reg_desc *i1, *i2, *end1, *end2;
        unsigned int total = 0;
        size_t num;

        /* We check for duplicates here, to allow arch-specific overrides. */
        i1 = get_target_table(vcpu->arch.target, true, &num);
        end1 = i1 + num;
        i2 = sys_reg_descs;
        end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

        BUG_ON(i1 == end1 || i2 == end2);

        /* Walk carefully, as both tables may refer to the same register. */
        while (i1 || i2) {
                int cmp = cmp_sys_reg(i1, i2);
                /* target-specific overrides generic entry. */
                if (cmp <= 0) {
                        /* Ignore registers we trap but don't save. */
                        if (i1->reg) {
                                if (!copy_reg_to_user(i1, &uind))
                                        return -EFAULT;
                                total++;
                        }
                } else {
                        /* Ignore registers we trap but don't save. */
                        if (i2->reg) {
                                if (!copy_reg_to_user(i2, &uind))
                                        return -EFAULT;
                                total++;
                        }
                }

                if (cmp <= 0 && ++i1 == end1)
                        i1 = NULL;
                if (cmp >= 0 && ++i2 == end2)
                        i2 = NULL;
        }
        return total;
}
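
The loop is a classic sorted-merge with de-duplication: on a tie (cmp == 0) the target-specific entry wins and both cursors advance, so a register present in both tables is emitted exactly once. The same pattern in miniature, over two sorted int arrays (a self-contained illustration, not kernel code):

#include <stdio.h>

/* Merge two sorted arrays, preferring 'a' on duplicates. */
static void merge_with_override(const int *a, int na, const int *b, int nb)
{
        int ia = 0, ib = 0;

        while (ia < na || ib < nb) {
                int cmp = (ia == na) ? 1 : (ib == nb) ? -1
                        : (a[ia] < b[ib]) ? -1 : (a[ia] > b[ib]) ? 1 : 0;

                printf("%d\n", cmp <= 0 ? a[ia] : b[ib]);
                if (cmp <= 0)
                        ia++;           /* consumed from a (wins ties) */
                if (cmp >= 0)
                        ib++;           /* consumed from b */
        }
}

int main(void)
{
        const int target_specific[] = { 2, 5 };
        const int generic[] = { 1, 2, 3 };

        merge_with_override(target_specific, 2, generic, 3);    /* 1 2 3 5 */
        return 0;
}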

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(invariant_sys_regs)
                + num_demux_regs()
                + walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        int err;

        /* Then give them all the invariant registers' indices. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
                if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
                        return -EFAULT;
                uindices++;
        }

        err = walk_sys_regs(vcpu, uindices);
        if (err < 0)
                return err;
        uindices += err;

        return write_demux_regids(uindices);
}
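
Note that walk_sys_regs() does double duty: called with a NULL pointer it only counts, which is how kvm_arm_num_sys_reg_descs() sizes the list. Together the pair backs the KVM_GET_REG_LIST vcpu ioctl; a hedged userspace sketch of the usual two-call pattern (error handling elided):

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Fetch all register indices for a vcpu; caller frees the result. */
struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
        struct kvm_reg_list probe = { .n = 0 }, *list;

        /* Undersized first call fails with E2BIG but fills in the count. */
        ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

        list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
        list->n = probe.n;
        ioctl(vcpu_fd, KVM_GET_REG_LIST, list); /* now copies every index */
        return list;
}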

static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
        unsigned int i;

        for (i = 1; i < n; i++) {
                if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
                        kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
                        return 1;
                }
        }

        return 0;
}

void kvm_sys_reg_table_init(void)
{
        unsigned int i;
        struct sys_reg_desc clidr;

        /* Make sure tables are unique and in order. */
        BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
        BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
        BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
        BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
        BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
        BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
                invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

        /*
         * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
         *
         *   If software reads the Cache Type fields from Ctype1
         *   upwards, once it has seen a value of 0b000, no caches
         *   exist at further-out levels of the hierarchy. So, for
         *   example, if Ctype3 is the first Cache Type field with a
         *   value of 0b000, the values of Ctype4 to Ctype7 must be
         *   ignored.
         */
        get_clidr_el1(NULL, &clidr); /* Ugly... */
        cache_levels = clidr.val;
        for (i = 0; i < 7; i++)
                if (((cache_levels >> (i*3)) & 7) == 0)
                        break;
        /* Clear all higher bits. */
        cache_levels &= (1 << (i*3))-1;
}
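
Concretely, suppose get_clidr_el1() reads a CLIDR whose relevant bits are Ctype1 = 3 (separate L1 I+D) and Ctype2 = 4 (unified L2), with everything above Ctype2 zero or stale (an illustrative value, not from the source). The loop breaks at i = 2 and the mask keeps only the first two 3-bit fields:

u64 cache_levels = 0x40000023;  /* hypothetical raw CLIDR_EL1 */
/* Ctype1 = 0x23 & 7 = 3, Ctype2 = (0x23 >> 3) & 7 = 4,
 * Ctype3 = (0x23 >> 6) & 7 = 0  ->  loop breaks at i = 2.
 */
cache_levels &= (1 << (2 * 3)) - 1;     /* mask = 0x3f, result = 0x23 */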

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct sys_reg_desc *table;

        /* Catch someone adding a register without putting in reset entry. */
        memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

        /* Generic chip reset first (so target could override). */
        reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

        table = get_target_table(vcpu->arch.target, true, &num);
        reset_sys_reg_descs(vcpu, table, num);

        for (num = 1; num < NR_SYS_REGS; num++)
                if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242