arch/x86/kvm/x86.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

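/*
 * Helpers for scaling the pause-loop-exiting (PLE) window. A modifier of 0
 * resets the window to the base value; a modifier smaller than the base is
 * treated as a multiplier (or divisor when shrinking), otherwise it is
 * applied additively. With the VMX defaults above (window 4096, grow 2),
 * each grow doubles the window: 4096 -> 8192 -> 16384, capped at max.
 */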
static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}

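/*
 * Power-on default for IA32_PAT: each byte selects the memory type for one
 * PAT entry (0x06 = WB, 0x04 = WT, 0x07 = UC-, 0x00 = UC), giving the
 * architectural WB, WT, UC-, UC pattern repeated for entries 4-7.
 */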
#define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

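/*
 * #BP and #OF are "soft" exceptions: they are raised by executing an
 * instruction (INT3/INTO) rather than by a hardware fault condition.
 */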
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return (vcpu->arch.efer & EFER_LMA) &&
		kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}

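/*
 * Exception vectors that push a hardware error code onto the stack:
 * #DF, #TS, #NP, #SS, #GP, #PF and #AC.
 */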
static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops.tlb_flush_current(vcpu);
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

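/*
 * A linear address is canonical when bits [63:vaddr_bits-1] are all equal,
 * i.e. the address is the sign extension of its low vaddr_bits bits.
 * get_canonical() reproduces that sign extension with a shift pair.
 */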
static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
}

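/*
 * Cache the last guest-virtual/guest-physical MMIO translation so that a
 * repeated access to the same page can skip the page walk. The cached info
 * is tagged with the memslot generation and ignored once it goes stale.
 */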
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

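/*
 * Register accessors that honor the current CPU mode: outside 64-bit mode
 * the upper 32 bits of a GPR are masked off on both read and write.
 */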
static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

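/*
 * A quirk is considered active unless userspace has disabled it (via the
 * KVM_CAP_DISABLE_QUIRKS capability).
 */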
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

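/*
 * INIT signals are latched (held pending) rather than acted on while the
 * vCPU is in SMM, or while the vendor module blocks them.
 */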
static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
{
	return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 supported_xcr0;
extern u64 supported_xss;

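/*
 * MPX is usable only if both of its XSAVE state components (the bound
 * registers and BNDCFGU/BNDSTATUS) can be exposed to the guest.
 */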
static inline bool kvm_mpx_supported(void)
{
	return (supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern struct static_key kvm_no_apic_vcpu;

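/*
 * Convert a span of nanoseconds to guest TSC cycles using the vCPU's
 * virtual TSC frequency (expressed as a pvclock mult/shift pair).
 */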
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
			  : "rm" (base), "0" (0), "1" ((u32) n));	\
	    n = __quot;						\
	    __rem;						\
	 })

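/*
 * Example: with n = 1 and base = 1000, do_shl32_div32(n, base) divides
 * 1 << 32 = 4294967296 by 1000, leaving n = 4294967 and returning the
 * remainder 296.
 */

/*
 * Per-VM knobs (see KVM_CAP_X86_DISABLE_EXITS): when set, the corresponding
 * instruction executes natively in the guest instead of causing a VM-exit.
 */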
static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

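/*
 * current_vcpu tracks which vCPU (if any) was running on this physical CPU
 * while an interrupt that arrived in guest mode is handled; the perf guest
 * callbacks use it to attribute PMI samples to the guest.
 */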
DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}

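/*
 * A PAT entry may be 0, 1, 4, 5, 6 or 7; 2 and 3 are reserved. The second
 * check below ORs bit 2 into every byte whose bit 1 is set: the result
 * equals data only if bit 2 was already set, which rejects 2 and 3.
 */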
static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation
 * encountered an error that should result in #GP in the guest, unless
 * userspace handles it.
 */
#define KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

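/*
 * Build the CR4 reserved-bit mask for a given feature set. The __cpu_has
 * predicate and its context argument are supplied by the caller, so the
 * same macro can check host capabilities or a guest's CPUID.
 */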
#define __cr4_reserved_bits(__cpu_has, __c)             \
({                                                      \
	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                        \
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
		__reserved_bits |= X86_CR4_OSXSAVE;     \
	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
		__reserved_bits |= X86_CR4_SMEP;        \
	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
		__reserved_bits |= X86_CR4_SMAP;        \
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
		__reserved_bits |= X86_CR4_FSGSBASE;    \
	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
		__reserved_bits |= X86_CR4_PKE;         \
	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
		__reserved_bits |= X86_CR4_LA57;        \
	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
		__reserved_bits |= X86_CR4_UMIP;        \
	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
		__reserved_bits |= X86_CR4_VMXE;        \
	__reserved_bits;                                \
})

#endif