arch/x86/kvm/x86.h

  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef ARCH_X86_KVM_X86_H
  #define ARCH_X86_KVM_X86_H
  
  #include <linux/kvm_host.h>
  #include <asm/pvclock.h>
  #include "kvm_cache_regs.h"
  #include "kvm_emulate.h"

  #define KVM_DEFAULT_PLE_GAP		128
  #define KVM_VMX_DEFAULT_PLE_WINDOW	4096
  #define KVM_DEFAULT_PLE_WINDOW_GROW	2
  #define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
  #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
  #define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
  #define KVM_SVM_DEFAULT_PLE_WINDOW	3000
  
  static inline unsigned int __grow_ple_window(unsigned int val,
  		unsigned int base, unsigned int modifier, unsigned int max)
  {
  	u64 ret = val;
  
  	if (modifier < 1)
  		return base;
  
  	if (modifier < base)
  		ret *= modifier;
  	else
  		ret += modifier;
  
  	return min(ret, (u64)max);
  }
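
  /*
   * Worked example with the VMX defaults above: growing a window of
   * 4096 with modifier 2 takes the multiplicative path (2 < 4096) and
   * yields 8192; a modifier of 0 resets the window to base, and the
   * result is always clamped to max.
   */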
  
  static inline unsigned int __shrink_ple_window(unsigned int val,
  		unsigned int base, unsigned int modifier, unsigned int min)
  {
  	if (modifier < 1)
  		return base;
  
  	if (modifier < base)
  		val /= modifier;
  	else
  		val -= modifier;
  
  	return max(val, min);
  }
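
  /*
   * Conversely, the default shrink modifier of 0
   * (KVM_DEFAULT_PLE_WINDOW_SHRINK) trips the "modifier < 1" check and
   * resets the window straight back to base.
   */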
  #define MSR_IA32_CR_PAT_DEFAULT  0x0007040600070406ULL
  static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
  {
  	vcpu->arch.exception.pending = false;
  	vcpu->arch.exception.injected = false;
  }
  static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
  	bool soft)
  {
  	vcpu->arch.interrupt.injected = true;
  	vcpu->arch.interrupt.soft = soft;
  	vcpu->arch.interrupt.nr = vector;
  }
  
  static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
  {
  	vcpu->arch.interrupt.injected = false;
  }
  static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
  {
  	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
  		vcpu->arch.nmi_injected;
  }
  
  static inline bool kvm_exception_is_soft(unsigned int nr)
  {
  	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
  }
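
  /*
   * #BP and #OF are "soft" exceptions: they are raised by executing an
   * instruction (INT3 and INTO respectively) rather than by a hardware
   * fault condition.
   */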

  static inline bool is_protmode(struct kvm_vcpu *vcpu)
  {
  	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
  }
  static inline int is_long_mode(struct kvm_vcpu *vcpu)
  {
  #ifdef CONFIG_X86_64
  	return vcpu->arch.efer & EFER_LMA;
  #else
  	return 0;
  #endif
  }
  static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
  {
  	int cs_db, cs_l;
  
  	if (!is_long_mode(vcpu))
  		return false;
  	kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
  	return cs_l;
  }
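
  /*
   * EFER.LMA alone is not enough here: a vCPU running compatibility
   * mode code is still in long mode, so CS.L is needed to tell 64-bit
   * code segments apart from 32-bit compatibility segments.
   */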
  static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
  {
  #ifdef CONFIG_X86_64
  	return (vcpu->arch.efer & EFER_LMA) &&
  		 kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
  #else
  	return 0;
  #endif
  }
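
  /*
   * CR4.LA57 selects 5-level paging, which widens virtual addresses
   * from 48 to 57 bits; as with is_long_mode(), this can only be true
   * on x86-64.
   */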
  static inline bool x86_exception_has_error_code(unsigned int vector)
  {
  	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
  			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
  			BIT(PF_VECTOR) | BIT(AC_VECTOR);
  
  	return (1U << vector) & exception_has_error_code;
  }
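
  /*
   * These vectors (#DF, #TS, #NP, #SS, #GP, #PF and #AC) are exactly
   * the architectural exceptions that push an error code when
   * delivered; every other vector is delivered without one.
   */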
  static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
  {
  	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
  }
  static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
  {
  	++vcpu->stat.tlb_flush;
  	kvm_x86_ops.tlb_flush_current(vcpu);
  }
  static inline int is_pae(struct kvm_vcpu *vcpu)
  {
  	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
  }
  
  static inline int is_pse(struct kvm_vcpu *vcpu)
  {
  	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
  }
  
  static inline int is_paging(struct kvm_vcpu *vcpu)
  {
  	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
  }
  static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
  {
  	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
  }
  static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
  {
  	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
  }
  static inline u64 get_canonical(u64 la, u8 vaddr_bits)
  {
  	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
  }
  
  static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
  {
  	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
  }
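
  /*
   * Worked example: with 48-bit virtual addresses, get_canonical()
   * sign-extends bit 47 into bits 63:48, so 0x0000800000000000 becomes
   * 0xffff800000000000. The result differs from the input, and
   * is_noncanonical_address() duly reports the address as
   * non-canonical.
   */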
  static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
  					gva_t gva, gfn_t gfn, unsigned access)
  {
  	u64 gen = kvm_memslots(vcpu->kvm)->generation;
  	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
  		return;
  	/*
  	 * If this is a shadow nested page table, the "GVA" is
  	 * actually a nGPA.
  	 */
  	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
  	vcpu->arch.mmio_access = access;
  	vcpu->arch.mmio_gfn = gfn;
  	vcpu->arch.mmio_gen = gen;
  }
  
  static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
  {
  	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
  }
  
  /*
   * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
   * clear all mmio cache info.
   */
  #define MMIO_GVA_ANY (~(gva_t)0)
  static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
  {
  	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
  		return;
  
  	vcpu->arch.mmio_gva = 0;
  }
  
  static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
  {
  	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
  	      vcpu->arch.mmio_gva == (gva & PAGE_MASK))
  		return true;
  
  	return false;
  }
  
  static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
  {
  	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
  	      vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
  		return true;
  
  	return false;
  }
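
  /*
   * The vcpu_match_mmio_gen() check ties both lookups above to the
   * memslot generation sampled in vcpu_cache_mmio_info(), so any
   * memslot update automatically invalidates stale cached MMIO info.
   */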
  static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
  {
  	unsigned long val = kvm_register_read(vcpu, reg);
  
  	return is_64_bit_mode(vcpu) ? val : (u32)val;
  }
  static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
  				       int reg, unsigned long val)
  {
  	if (!is_64_bit_mode(vcpu))
  		val = (u32)val;
  	return kvm_register_write(vcpu, reg, val);
  }
  static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
  {
  	return !(kvm->arch.disabled_quirks & quirk);
  }
  static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
  {
  	return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
  }
  void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

  void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
  u64 get_kvmclock_ns(struct kvm *kvm);

  int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
  	gva_t addr, void *val, unsigned int bytes,
  	struct x86_exception *exception);
  int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
  	gva_t addr, void *val, unsigned int bytes,
  	struct x86_exception *exception);
  int handle_ud(struct kvm_vcpu *vcpu);
  void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);
  void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
  u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
  bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
  int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
  int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
  bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
  					  int page_num);
  bool kvm_vector_hashing_enabled(void);
  void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
  int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
  			    int emulation_type, void *insn, int insn_len);
  fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

  extern u64 host_xcr0;
  extern u64 supported_xcr0;
  extern u64 supported_xss;

  static inline bool kvm_mpx_supported(void)
  {
  	return (supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
  		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
  }
  extern unsigned int min_timer_period_us;
  extern bool enable_vmware_backdoor;
  extern int pi_inject_timer;
  extern struct static_key kvm_no_apic_vcpu;

  static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
  {
  	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
  				   vcpu->arch.virtual_tsc_shift);
  }
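
  /*
   * nsec_to_cycles() scales a nanosecond interval into guest TSC
   * cycles using the vCPU's pvclock parameters, which are derived from
   * the virtual TSC frequency.
   */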
  /* Same "calling convention" as do_div:
   * - divide (n << 32) by base
   * - put result in n
   * - return remainder
   */
  #define do_shl32_div32(n, base)					\
  	({							\
  	    u32 __quot, __rem;					\
  	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
  			: "rm" (base), "0" (0), "1" ((u32) n));	\
  	    n = __quot;						\
  	    __rem;						\
  	 })
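
  /*
   * Illustrative use (the quotient must fit in 32 bits, i.e. n < base,
   * or the divide instruction faults):
   *
   *	u32 n = 1, rem;
   *
   *	rem = do_shl32_div32(n, 3);	-> n = 0x55555555, rem = 1
   */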
  static inline bool kvm_mwait_in_guest(struct kvm *kvm)
  {
  	return kvm->arch.mwait_in_guest;
  }
  static inline bool kvm_hlt_in_guest(struct kvm *kvm)
  {
  	return kvm->arch.hlt_in_guest;
  }
  static inline bool kvm_pause_in_guest(struct kvm *kvm)
  {
  	return kvm->arch.pause_in_guest;
  }
  static inline bool kvm_cstate_in_guest(struct kvm *kvm)
  {
  	return kvm->arch.cstate_in_guest;
  }
  DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);
  
  static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
  {
  	__this_cpu_write(current_vcpu, vcpu);
  }
  
  static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
  {
  	__this_cpu_write(current_vcpu, NULL);
  }
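
  /*
   * current_vcpu lets NMI-context code, e.g. the perf callbacks that
   * attribute PMIs to a running guest, look up which vCPU (if any) was
   * executing on this CPU when the interrupt arrived.
   */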
  
  static inline bool kvm_pat_valid(u64 data)
  {
  	if (data & 0xF8F8F8F8F8F8F8F8ull)
  		return false;
  	/* 0, 1, 4, 5, 6, 7 are valid values.  */
  	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
  }
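
  /*
   * How the check works: the first mask rejects any PAT byte larger
   * than 7, and the OR trick rejects 2 and 3, since setting bit 2
   * whenever bit 1 is set changes exactly those two values.
   */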
  static inline bool kvm_dr7_valid(u64 data)
  {
  	/* Bits [63:32] are reserved */
  	return !(data >> 32);
  }
  static inline bool kvm_dr6_valid(u64 data)
  {
  	/* Bits [63:32] are reserved */
  	return !(data >> 32);
  }

  void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
  void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
  int kvm_spec_ctrl_test_value(u64 value);
  int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
  bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
  int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
  			      struct x86_exception *e);
  int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
  bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

  /*
   * Internal error codes used to indicate that MSR emulation encountered
   * an error that should result in #GP in the guest, unless userspace
   * handles it.
   */
  #define  KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
  #define  KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

  #define __cr4_reserved_bits(__cpu_has, __c)             \
  ({                                                      \
  	u64 __reserved_bits = CR4_RESERVED_BITS;        \
                                                          \
  	if (!__cpu_has(__c, X86_FEATURE_XSAVE))         \
  		__reserved_bits |= X86_CR4_OSXSAVE;     \
  	if (!__cpu_has(__c, X86_FEATURE_SMEP))          \
  		__reserved_bits |= X86_CR4_SMEP;        \
  	if (!__cpu_has(__c, X86_FEATURE_SMAP))          \
  		__reserved_bits |= X86_CR4_SMAP;        \
  	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))      \
  		__reserved_bits |= X86_CR4_FSGSBASE;    \
  	if (!__cpu_has(__c, X86_FEATURE_PKU))           \
  		__reserved_bits |= X86_CR4_PKE;         \
  	if (!__cpu_has(__c, X86_FEATURE_LA57))          \
  		__reserved_bits |= X86_CR4_LA57;        \
  	if (!__cpu_has(__c, X86_FEATURE_UMIP))          \
  		__reserved_bits |= X86_CR4_UMIP;        \
  	if (!__cpu_has(__c, X86_FEATURE_VMX))           \
  		__reserved_bits |= X86_CR4_VMXE;        \
  	__reserved_bits;                                \
  })
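
  /*
   * This is instantiated with a feature-query macro and its matching
   * argument, for instance guest_cpuid_has() and a vcpu on the guest
   * side, so the same table can produce both host and guest CR4
   * reserved-bit masks.
   */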
  #endif