Commit 9962d032bbff0268f22068787831405f8468c8b4

Authored by Alexander Graf
Committed by Avi Kivity
1 parent f0b85051d0

KVM: SVM: Move EFER and MSR constants to generic x86 code

MSR_EFER_SVME_MASK, MSR_VM_CR and MSR_VM_HSAVE_PA are defined in
KVM-specific headers. Linux already has dedicated header files that
collect EFER bits and MSR IDs, so IMHO we should put them there.

While at it, I also changed the naming scheme to match that
of the other defines.

(introduced in v6)

Acked-by: Joerg Roedel <joro@8bytes.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>

Showing 5 changed files with 12 additions and 8 deletions (side-by-side diff)

arch/x86/include/asm/kvm_host.h
... ... @@ -22,6 +22,7 @@
22 22 #include <asm/pvclock-abi.h>
23 23 #include <asm/desc.h>
24 24 #include <asm/mtrr.h>
  25 +#include <asm/msr-index.h>
25 26  
26 27 #define KVM_MAX_VCPUS 16
27 28 #define KVM_MEMORY_SLOTS 32
arch/x86/include/asm/msr-index.h
... ... @@ -18,11 +18,13 @@
18 18 #define _EFER_LME 8 /* Long mode enable */
19 19 #define _EFER_LMA 10 /* Long mode active (read-only) */
20 20 #define _EFER_NX 11 /* No execute enable */
  21 +#define _EFER_SVME 12 /* Enable virtualization */
21 22  
22 23 #define EFER_SCE (1<<_EFER_SCE)
23 24 #define EFER_LME (1<<_EFER_LME)
24 25 #define EFER_LMA (1<<_EFER_LMA)
25 26 #define EFER_NX (1<<_EFER_NX)
  27 +#define EFER_SVME (1<<_EFER_SVME)
26 28  
27 29 /* Intel MSRs. Some also available on other CPUs */
28 30 #define MSR_IA32_PERFCTR0 0x000000c1
... ... @@ -359,6 +361,11 @@
359 361 #define MSR_IA32_VMX_VMCS_ENUM 0x0000048a
360 362 #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b
361 363 #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c
  364 +
  365 +/* AMD-V MSRs */
  366 +
  367 +#define MSR_VM_CR 0xc0010114
  368 +#define MSR_VM_HSAVE_PA 0xc0010117
362 369  
363 370 #endif /* _ASM_X86_MSR_INDEX_H */
arch/x86/include/asm/svm.h
... ... @@ -174,10 +174,6 @@
174 174 #define SVM_CPUID_FEATURE_SHIFT 2
175 175 #define SVM_CPUID_FUNC 0x8000000a
176 176  
177   -#define MSR_EFER_SVME_MASK (1ULL << 12)
178   -#define MSR_VM_CR 0xc0010114
179   -#define MSR_VM_HSAVE_PA 0xc0010117ULL
180   -
181 177 #define SVM_VM_CR_SVM_DISABLE 4
182 178  
183 179 #define SVM_SELECTOR_S_SHIFT 4
arch/x86/include/asm/virtext.h
... ... @@ -118,7 +118,7 @@
118 118  
119 119 wrmsrl(MSR_VM_HSAVE_PA, 0);
120 120 rdmsrl(MSR_EFER, efer);
121   - wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
  121 + wrmsrl(MSR_EFER, efer & ~EFER_SVME);
122 122 }
123 123  
124 124 /** Makes sure SVM is disabled, if it is supported on the CPU
... ... @@ -198,7 +198,7 @@
198 198 if (!npt_enabled && !(efer & EFER_LMA))
199 199 efer &= ~EFER_LME;
200 200  
201   - to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
  201 + to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
202 202 vcpu->arch.shadow_efer = efer;
203 203 }
204 204  
... ... @@ -292,7 +292,7 @@
292 292 svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
293 293  
294 294 rdmsrl(MSR_EFER, efer);
295   - wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);
  295 + wrmsrl(MSR_EFER, efer | EFER_SVME);
296 296  
297 297 wrmsrl(MSR_VM_HSAVE_PA,
298 298 page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
... ... @@ -559,7 +559,7 @@
559 559 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
560 560 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
561 561  
562   - save->efer = MSR_EFER_SVME_MASK;
  562 + save->efer = EFER_SVME;
563 563 save->dr6 = 0xffff0ff0;
564 564 save->dr7 = 0x400;
565 565 save->rflags = 2;