Commit fb3f0f51d92d1496f9628ca6f0fb06a48dc9ed2a

Authored by Rusty Russell
Committed by Avi Kivity
1 parent a2fa3e9f52

KVM: Dynamically allocate vcpus

This patch converts the vcpus array in "struct kvm" to a pointer
array, and changes the "vcpu_create" and "vcpu_setup" hooks into one
"vcpu_create" call which does the allocation and initialization of the
vcpu (calling back into the kvm_vcpu_init core helper).

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Avi Kivity <avi@qumranet.com>
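
The heart of the change is the embed-and-container_of idiom: struct vcpu_svm and struct vcpu_vmx now embed the generic struct kvm_vcpu instead of reaching it through the old _priv pointer, and the single vcpu_create hook allocates the whole wrapper and calls back into the new kvm_vcpu_init() helper. Below is a minimal standalone C sketch of that idiom, not the kernel code itself; the toy kvm_vcpu_init(), main(), and NULL-on-failure convention (the kernel returns ERR_PTR()) are illustrative stand-ins.

    /* Minimal userspace sketch of the pattern this patch introduces:
     * the arch struct embeds the generic vcpu, the merged create hook
     * allocates and initializes the whole object, and container_of()
     * recovers the wrapper from a generic pointer. */
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kvm_vcpu {
            int vcpu_id;
            /* ... generic state: run page, pio_data, mmu, ... */
    };

    struct vcpu_svm {
            struct kvm_vcpu vcpu;   /* embedded, no longer a pointer */
            unsigned long vmcb_pa;  /* arch-private state */
    };

    /* Toy stand-in for the kvm_vcpu_init() core helper the patch adds. */
    static int kvm_vcpu_init(struct kvm_vcpu *vcpu, unsigned id)
    {
            vcpu->vcpu_id = id;
            return 0;
    }

    /* The merged create hook: allocate + initialize, hand back only the
     * generic part.  The old separate vcpu_setup step is gone. */
    static struct kvm_vcpu *svm_create_vcpu(unsigned id)
    {
            struct vcpu_svm *svm = calloc(1, sizeof(*svm));

            if (!svm)
                    return NULL;
            if (kvm_vcpu_init(&svm->vcpu, id)) {
                    free(svm);
                    return NULL;
            }
            return &svm->vcpu;
    }

    static struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
    {
            return container_of(vcpu, struct vcpu_svm, vcpu);
    }

    int main(void)
    {
            struct kvm_vcpu *vcpu = svm_create_vcpu(0);

            if (!vcpu)
                    return 1;
            /* Round-trip: generic pointer back to the arch wrapper. */
            printf("svm wrapper %p, vcpu_id %d\n",
                   (void *)to_svm(vcpu), vcpu->vcpu_id);
            free(to_svm(vcpu));
            return 0;
    }

Because struct kvm now holds an array of pointers rather than embedded vcpus, unused slots stay NULL, which is why every loop over kvm->vcpus[] in the diff below gains a NULL check.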

Showing 5 changed files with 236 additions and 218 deletions

drivers/kvm/kvm.h
... ... @@ -300,10 +300,8 @@
300 300 struct kvm_io_device *dev);
301 301  
302 302 struct kvm_vcpu {
303   - int valid;
304 303 struct kvm *kvm;
305 304 int vcpu_id;
306   - void *_priv;
307 305 struct mutex mutex;
308 306 int cpu;
309 307 u64 host_tsc;
... ... @@ -404,8 +402,7 @@
404 402 struct list_head active_mmu_pages;
405 403 int n_free_mmu_pages;
406 404 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
407   - int nvcpus;
408   - struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
  405 + struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
409 406 int memory_config_version;
410 407 int busy;
411 408 unsigned long rmap_overflow;
... ... @@ -428,7 +425,8 @@
428 425 int (*hardware_setup)(void); /* __init */
429 426 void (*hardware_unsetup)(void); /* __exit */
430 427  
431   - int (*vcpu_create)(struct kvm_vcpu *vcpu);
  428 + /* Create, but do not attach this VCPU */
  429 + struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
432 430 void (*vcpu_free)(struct kvm_vcpu *vcpu);
433 431  
434 432 void (*vcpu_load)(struct kvm_vcpu *vcpu);
... ... @@ -470,7 +468,6 @@
470 468 void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);
471 469  
472 470 int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
473   - int (*vcpu_setup)(struct kvm_vcpu *vcpu);
474 471 void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
475 472 void (*patch_hypercall)(struct kvm_vcpu *vcpu,
476 473 unsigned char *hypercall_addr);
... ... @@ -480,6 +477,9 @@
480 477  
481 478 #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
482 479 #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
  480 +
  481 +int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
  482 +void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
483 483  
484 484 int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
485 485 void kvm_exit_arch(void);
drivers/kvm/kvm_main.c
... ... @@ -266,8 +266,10 @@
266 266 atomic_set(&completed, 0);
267 267 cpus_clear(cpus);
268 268 needed = 0;
269   - for (i = 0; i < kvm->nvcpus; ++i) {
270   - vcpu = &kvm->vcpus[i];
  269 + for (i = 0; i < KVM_MAX_VCPUS; ++i) {
  270 + vcpu = kvm->vcpus[i];
  271 + if (!vcpu)
  272 + continue;
271 273 if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
272 274 continue;
273 275 cpu = vcpu->cpu;
... ... @@ -291,10 +293,61 @@
291 293 }
292 294 }
293 295  
  296 +int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
  297 +{
  298 + struct page *page;
  299 + int r;
  300 +
  301 + mutex_init(&vcpu->mutex);
  302 + vcpu->cpu = -1;
  303 + vcpu->mmu.root_hpa = INVALID_PAGE;
  304 + vcpu->kvm = kvm;
  305 + vcpu->vcpu_id = id;
  306 +
  307 + page = alloc_page(GFP_KERNEL | __GFP_ZERO);
  308 + if (!page) {
  309 + r = -ENOMEM;
  310 + goto fail;
  311 + }
  312 + vcpu->run = page_address(page);
  313 +
  314 + page = alloc_page(GFP_KERNEL | __GFP_ZERO);
  315 + if (!page) {
  316 + r = -ENOMEM;
  317 + goto fail_free_run;
  318 + }
  319 + vcpu->pio_data = page_address(page);
  320 +
  321 + vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
  322 + FX_IMAGE_ALIGN);
  323 + vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
  324 +
  325 + r = kvm_mmu_create(vcpu);
  326 + if (r < 0)
  327 + goto fail_free_pio_data;
  328 +
  329 + return 0;
  330 +
  331 +fail_free_pio_data:
  332 + free_page((unsigned long)vcpu->pio_data);
  333 +fail_free_run:
  334 + free_page((unsigned long)vcpu->run);
  335 +fail:
  336 + return -ENOMEM;
  337 +}
  338 +EXPORT_SYMBOL_GPL(kvm_vcpu_init);
  339 +
  340 +void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
  341 +{
  342 + kvm_mmu_destroy(vcpu);
  343 + free_page((unsigned long)vcpu->pio_data);
  344 + free_page((unsigned long)vcpu->run);
  345 +}
  346 +EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
  347 +
294 348 static struct kvm *kvm_create_vm(void)
295 349 {
296 350 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
297   - int i;
298 351  
299 352 if (!kvm)
300 353 return ERR_PTR(-ENOMEM);
... ... @@ -303,14 +356,6 @@
303 356 spin_lock_init(&kvm->lock);
304 357 INIT_LIST_HEAD(&kvm->active_mmu_pages);
305 358 kvm_io_bus_init(&kvm->mmio_bus);
306   - for (i = 0; i < KVM_MAX_VCPUS; ++i) {
307   - struct kvm_vcpu *vcpu = &kvm->vcpus[i];
308   -
309   - mutex_init(&vcpu->mutex);
310   - vcpu->cpu = -1;
311   - vcpu->kvm = kvm;
312   - vcpu->mmu.root_hpa = INVALID_PAGE;
313   - }
314 359 spin_lock(&kvm_lock);
315 360 list_add(&kvm->vm_list, &vm_list);
316 361 spin_unlock(&kvm_lock);
... ... @@ -367,30 +412,11 @@
367 412  
368 413 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
369 414 {
370   - if (!vcpu->valid)
371   - return;
372   -
373 415 vcpu_load(vcpu);
374 416 kvm_mmu_unload(vcpu);
375 417 vcpu_put(vcpu);
376 418 }
377 419  
378   -static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
379   -{
380   - if (!vcpu->valid)
381   - return;
382   -
383   - vcpu_load(vcpu);
384   - kvm_mmu_destroy(vcpu);
385   - vcpu_put(vcpu);
386   - kvm_arch_ops->vcpu_free(vcpu);
387   - free_page((unsigned long)vcpu->run);
388   - vcpu->run = NULL;
389   - free_page((unsigned long)vcpu->pio_data);
390   - vcpu->pio_data = NULL;
391   - free_pio_guest_pages(vcpu);
392   -}
393   -
394 420 static void kvm_free_vcpus(struct kvm *kvm)
395 421 {
396 422 unsigned int i;
... ... @@ -399,9 +425,15 @@
399 425 * Unpin any mmu pages first.
400 426 */
401 427 for (i = 0; i < KVM_MAX_VCPUS; ++i)
402   - kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
403   - for (i = 0; i < KVM_MAX_VCPUS; ++i)
404   - kvm_free_vcpu(&kvm->vcpus[i]);
  428 + if (kvm->vcpus[i])
  429 + kvm_unload_vcpu_mmu(kvm->vcpus[i]);
  430 + for (i = 0; i < KVM_MAX_VCPUS; ++i) {
  431 + if (kvm->vcpus[i]) {
  432 + kvm_arch_ops->vcpu_free(kvm->vcpus[i]);
  433 + kvm->vcpus[i] = NULL;
  434 + }
  435 + }
  436 +
405 437 }
406 438  
407 439 static int kvm_dev_release(struct inode *inode, struct file *filp)
... ... @@ -2372,78 +2404,48 @@
2372 2404 {
2373 2405 int r;
2374 2406 struct kvm_vcpu *vcpu;
2375   - struct page *page;
2376 2407  
2377   - r = -EINVAL;
2378 2408 if (!valid_vcpu(n))
2379   - goto out;
  2409 + return -EINVAL;
2380 2410  
2381   - vcpu = &kvm->vcpus[n];
2382   - vcpu->vcpu_id = n;
  2411 + vcpu = kvm_arch_ops->vcpu_create(kvm, n);
  2412 + if (IS_ERR(vcpu))
  2413 + return PTR_ERR(vcpu);
2383 2414  
2384   - mutex_lock(&vcpu->mutex);
2385   -
2386   - if (vcpu->valid) {
2387   - mutex_unlock(&vcpu->mutex);
2388   - return -EEXIST;
2389   - }
2390   -
2391   - page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2392   - r = -ENOMEM;
2393   - if (!page)
2394   - goto out_unlock;
2395   - vcpu->run = page_address(page);
2396   -
2397   - page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2398   - r = -ENOMEM;
2399   - if (!page)
2400   - goto out_free_run;
2401   - vcpu->pio_data = page_address(page);
2402   -
2403   - vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
2404   - FX_IMAGE_ALIGN);
2405   - vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
2406   - vcpu->cr0 = 0x10;
2407   -
2408   - r = kvm_arch_ops->vcpu_create(vcpu);
2409   - if (r < 0)
2410   - goto out_free_vcpus;
2411   -
2412   - r = kvm_mmu_create(vcpu);
2413   - if (r < 0)
2414   - goto out_free_vcpus;
2415   -
2416   - kvm_arch_ops->vcpu_load(vcpu);
  2415 + vcpu_load(vcpu);
2417 2416 r = kvm_mmu_setup(vcpu);
2418   - if (r >= 0)
2419   - r = kvm_arch_ops->vcpu_setup(vcpu);
2420 2417 vcpu_put(vcpu);
2421   -
2422 2418 if (r < 0)
2423   - goto out_free_vcpus;
  2419 + goto free_vcpu;
2424 2420  
  2421 + spin_lock(&kvm->lock);
  2422 + if (kvm->vcpus[n]) {
  2423 + r = -EEXIST;
  2424 + spin_unlock(&kvm->lock);
  2425 + goto mmu_unload;
  2426 + }
  2427 + kvm->vcpus[n] = vcpu;
  2428 + spin_unlock(&kvm->lock);
  2429 +
  2430 + /* Now it's all set up, let userspace reach it */
2425 2431 r = create_vcpu_fd(vcpu);
2426 2432 if (r < 0)
2427   - goto out_free_vcpus;
  2433 + goto unlink;
  2434 + return r;
2428 2435  
2429   - spin_lock(&kvm_lock);
2430   - if (n >= kvm->nvcpus)
2431   - kvm->nvcpus = n + 1;
2432   - spin_unlock(&kvm_lock);
  2436 +unlink:
  2437 + spin_lock(&kvm->lock);
  2438 + kvm->vcpus[n] = NULL;
  2439 + spin_unlock(&kvm->lock);
2433 2440  
2434   - vcpu->valid = 1;
  2441 +mmu_unload:
  2442 + vcpu_load(vcpu);
  2443 + kvm_mmu_unload(vcpu);
  2444 + vcpu_put(vcpu);
2435 2445  
  2446 +free_vcpu:
  2447 + kvm_arch_ops->vcpu_free(vcpu);
2436 2448 return r;
2437   -
2438   -out_free_vcpus:
2439   - kvm_free_vcpu(vcpu);
2440   -out_free_run:
2441   - free_page((unsigned long)vcpu->run);
2442   - vcpu->run = NULL;
2443   -out_unlock:
2444   - mutex_unlock(&vcpu->mutex);
2445   -out:
2446   - return r;
2447 2449 }
2448 2450  
2449 2451 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
... ... @@ -2935,9 +2937,12 @@
2935 2937 int i;
2936 2938  
2937 2939 spin_lock(&kvm_lock);
2938   - list_for_each_entry(vm, &vm_list, vm_list)
  2940 + list_for_each_entry(vm, &vm_list, vm_list) {
  2941 + spin_lock(&vm->lock);
2939 2942 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2940   - vcpu = &vm->vcpus[i];
  2943 + vcpu = vm->vcpus[i];
  2944 + if (!vcpu)
  2945 + continue;
2941 2946 /*
2942 2947 * If the vcpu is locked, then it is running on some
2943 2948 * other cpu and therefore it is not cached on the
... ... @@ -2954,6 +2959,8 @@
2954 2959 mutex_unlock(&vcpu->mutex);
2955 2960 }
2956 2961 }
  2962 + spin_unlock(&vm->lock);
  2963 + }
2957 2964 spin_unlock(&kvm_lock);
2958 2965 }
2959 2966  
... ... @@ -3078,8 +3085,9 @@
3078 3085 spin_lock(&kvm_lock);
3079 3086 list_for_each_entry(kvm, &vm_list, vm_list)
3080 3087 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
3081   - vcpu = &kvm->vcpus[i];
3082   - total += *(u32 *)((void *)vcpu + offset);
  3088 + vcpu = kvm->vcpus[i];
  3089 + if (vcpu)
  3090 + total += *(u32 *)((void *)vcpu + offset);
3083 3091 }
3084 3092 spin_unlock(&kvm_lock);
3085 3093 return total;
drivers/kvm/kvm_svm.h
... ... @@ -23,7 +23,7 @@
23 23 struct kvm_vcpu;
24 24  
25 25 struct vcpu_svm {
26   - struct kvm_vcpu *vcpu;
  26 + struct kvm_vcpu vcpu;
27 27 struct vmcb *vmcb;
28 28 unsigned long vmcb_pa;
29 29 struct svm_cpu_data *svm_data;
drivers/kvm/svm.c
... ... @@ -51,7 +51,7 @@
51 51  
52 52 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
53 53 {
54   - return (struct vcpu_svm*)vcpu->_priv;
  54 + return container_of(vcpu, struct vcpu_svm, vcpu);
55 55 }
56 56  
57 57 unsigned long iopm_base;
... ... @@ -466,11 +466,6 @@
466 466 seg->base = 0;
467 467 }
468 468  
469   -static int svm_vcpu_setup(struct kvm_vcpu *vcpu)
470   -{
471   - return 0;
472   -}
473   -
474 469 static void init_vmcb(struct vmcb *vmcb)
475 470 {
476 471 struct vmcb_control_area *control = &vmcb->control;
... ... @@ -576,19 +571,27 @@
576 571 /* rdx = ?? */
577 572 }
578 573  
579   -static int svm_create_vcpu(struct kvm_vcpu *vcpu)
  574 +static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
580 575 {
581 576 struct vcpu_svm *svm;
582 577 struct page *page;
583   - int r;
  578 + int err;
584 579  
585   - r = -ENOMEM;
586 580 svm = kzalloc(sizeof *svm, GFP_KERNEL);
587   - if (!svm)
588   - goto out1;
  581 + if (!svm) {
  582 + err = -ENOMEM;
  583 + goto out;
  584 + }
  585 +
  586 + err = kvm_vcpu_init(&svm->vcpu, kvm, id);
  587 + if (err)
  588 + goto free_svm;
  589 +
589 590 page = alloc_page(GFP_KERNEL);
590   - if (!page)
591   - goto out2;
  591 + if (!page) {
  592 + err = -ENOMEM;
  593 + goto uninit;
  594 + }
592 595  
593 596 svm->vmcb = page_address(page);
594 597 clear_page(svm->vmcb);
... ... @@ -597,33 +600,29 @@
597 600 memset(svm->db_regs, 0, sizeof(svm->db_regs));
598 601 init_vmcb(svm->vmcb);
599 602  
600   - svm->vcpu = vcpu;
601   - vcpu->_priv = svm;
  603 + fx_init(&svm->vcpu);
  604 + svm->vcpu.fpu_active = 1;
  605 + svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
  606 + if (svm->vcpu.vcpu_id == 0)
  607 + svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
602 608  
603   - fx_init(vcpu);
604   - vcpu->fpu_active = 1;
605   - vcpu->apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
606   - if (vcpu->vcpu_id == 0)
607   - vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
  609 + return &svm->vcpu;
608 610  
609   - return 0;
610   -
611   -out2:
  611 +uninit:
  612 + kvm_vcpu_uninit(&svm->vcpu);
  613 +free_svm:
612 614 kfree(svm);
613   -out1:
614   - return r;
  615 +out:
  616 + return ERR_PTR(err);
615 617 }
616 618  
617 619 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
618 620 {
619 621 struct vcpu_svm *svm = to_svm(vcpu);
620 622  
621   - if (!svm)
622   - return;
623   - if (svm->vmcb)
624   - __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
  623 + __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
  624 + kvm_vcpu_uninit(vcpu);
625 625 kfree(svm);
626   - vcpu->_priv = NULL;
627 626 }
628 627  
629 628 static void svm_vcpu_load(struct kvm_vcpu *vcpu)
... ... @@ -1591,34 +1590,33 @@
1591 1590 #endif
1592 1591  
1593 1592 #ifdef CONFIG_X86_64
1594   - "mov %c[rbx](%[vcpu]), %%rbx \n\t"
1595   - "mov %c[rcx](%[vcpu]), %%rcx \n\t"
1596   - "mov %c[rdx](%[vcpu]), %%rdx \n\t"
1597   - "mov %c[rsi](%[vcpu]), %%rsi \n\t"
1598   - "mov %c[rdi](%[vcpu]), %%rdi \n\t"
1599   - "mov %c[rbp](%[vcpu]), %%rbp \n\t"
1600   - "mov %c[r8](%[vcpu]), %%r8 \n\t"
1601   - "mov %c[r9](%[vcpu]), %%r9 \n\t"
1602   - "mov %c[r10](%[vcpu]), %%r10 \n\t"
1603   - "mov %c[r11](%[vcpu]), %%r11 \n\t"
1604   - "mov %c[r12](%[vcpu]), %%r12 \n\t"
1605   - "mov %c[r13](%[vcpu]), %%r13 \n\t"
1606   - "mov %c[r14](%[vcpu]), %%r14 \n\t"
1607   - "mov %c[r15](%[vcpu]), %%r15 \n\t"
  1593 + "mov %c[rbx](%[svm]), %%rbx \n\t"
  1594 + "mov %c[rcx](%[svm]), %%rcx \n\t"
  1595 + "mov %c[rdx](%[svm]), %%rdx \n\t"
  1596 + "mov %c[rsi](%[svm]), %%rsi \n\t"
  1597 + "mov %c[rdi](%[svm]), %%rdi \n\t"
  1598 + "mov %c[rbp](%[svm]), %%rbp \n\t"
  1599 + "mov %c[r8](%[svm]), %%r8 \n\t"
  1600 + "mov %c[r9](%[svm]), %%r9 \n\t"
  1601 + "mov %c[r10](%[svm]), %%r10 \n\t"
  1602 + "mov %c[r11](%[svm]), %%r11 \n\t"
  1603 + "mov %c[r12](%[svm]), %%r12 \n\t"
  1604 + "mov %c[r13](%[svm]), %%r13 \n\t"
  1605 + "mov %c[r14](%[svm]), %%r14 \n\t"
  1606 + "mov %c[r15](%[svm]), %%r15 \n\t"
1608 1607 #else
1609   - "mov %c[rbx](%[vcpu]), %%ebx \n\t"
1610   - "mov %c[rcx](%[vcpu]), %%ecx \n\t"
1611   - "mov %c[rdx](%[vcpu]), %%edx \n\t"
1612   - "mov %c[rsi](%[vcpu]), %%esi \n\t"
1613   - "mov %c[rdi](%[vcpu]), %%edi \n\t"
1614   - "mov %c[rbp](%[vcpu]), %%ebp \n\t"
  1608 + "mov %c[rbx](%[svm]), %%ebx \n\t"
  1609 + "mov %c[rcx](%[svm]), %%ecx \n\t"
  1610 + "mov %c[rdx](%[svm]), %%edx \n\t"
  1611 + "mov %c[rsi](%[svm]), %%esi \n\t"
  1612 + "mov %c[rdi](%[svm]), %%edi \n\t"
  1613 + "mov %c[rbp](%[svm]), %%ebp \n\t"
1615 1614 #endif
1616 1615  
1617 1616 #ifdef CONFIG_X86_64
1618 1617 /* Enter guest mode */
1619 1618 "push %%rax \n\t"
1620   - "mov %c[svm](%[vcpu]), %%rax \n\t"
1621   - "mov %c[vmcb](%%rax), %%rax \n\t"
  1619 + "mov %c[vmcb](%[svm]), %%rax \n\t"
1622 1620 SVM_VMLOAD "\n\t"
1623 1621 SVM_VMRUN "\n\t"
1624 1622 SVM_VMSAVE "\n\t"
... ... @@ -1626,8 +1624,7 @@
1626 1624 #else
1627 1625 /* Enter guest mode */
1628 1626 "push %%eax \n\t"
1629   - "mov %c[svm](%[vcpu]), %%eax \n\t"
1630   - "mov %c[vmcb](%%eax), %%eax \n\t"
  1627 + "mov %c[vmcb](%[svm]), %%eax \n\t"
1631 1628 SVM_VMLOAD "\n\t"
1632 1629 SVM_VMRUN "\n\t"
1633 1630 SVM_VMSAVE "\n\t"
... ... @@ -1636,55 +1633,54 @@
1636 1633  
1637 1634 /* Save guest registers, load host registers */
1638 1635 #ifdef CONFIG_X86_64
1639   - "mov %%rbx, %c[rbx](%[vcpu]) \n\t"
1640   - "mov %%rcx, %c[rcx](%[vcpu]) \n\t"
1641   - "mov %%rdx, %c[rdx](%[vcpu]) \n\t"
1642   - "mov %%rsi, %c[rsi](%[vcpu]) \n\t"
1643   - "mov %%rdi, %c[rdi](%[vcpu]) \n\t"
1644   - "mov %%rbp, %c[rbp](%[vcpu]) \n\t"
1645   - "mov %%r8, %c[r8](%[vcpu]) \n\t"
1646   - "mov %%r9, %c[r9](%[vcpu]) \n\t"
1647   - "mov %%r10, %c[r10](%[vcpu]) \n\t"
1648   - "mov %%r11, %c[r11](%[vcpu]) \n\t"
1649   - "mov %%r12, %c[r12](%[vcpu]) \n\t"
1650   - "mov %%r13, %c[r13](%[vcpu]) \n\t"
1651   - "mov %%r14, %c[r14](%[vcpu]) \n\t"
1652   - "mov %%r15, %c[r15](%[vcpu]) \n\t"
  1636 + "mov %%rbx, %c[rbx](%[svm]) \n\t"
  1637 + "mov %%rcx, %c[rcx](%[svm]) \n\t"
  1638 + "mov %%rdx, %c[rdx](%[svm]) \n\t"
  1639 + "mov %%rsi, %c[rsi](%[svm]) \n\t"
  1640 + "mov %%rdi, %c[rdi](%[svm]) \n\t"
  1641 + "mov %%rbp, %c[rbp](%[svm]) \n\t"
  1642 + "mov %%r8, %c[r8](%[svm]) \n\t"
  1643 + "mov %%r9, %c[r9](%[svm]) \n\t"
  1644 + "mov %%r10, %c[r10](%[svm]) \n\t"
  1645 + "mov %%r11, %c[r11](%[svm]) \n\t"
  1646 + "mov %%r12, %c[r12](%[svm]) \n\t"
  1647 + "mov %%r13, %c[r13](%[svm]) \n\t"
  1648 + "mov %%r14, %c[r14](%[svm]) \n\t"
  1649 + "mov %%r15, %c[r15](%[svm]) \n\t"
1653 1650  
1654 1651 "pop %%r15; pop %%r14; pop %%r13; pop %%r12;"
1655 1652 "pop %%r11; pop %%r10; pop %%r9; pop %%r8;"
1656 1653 "pop %%rbp; pop %%rdi; pop %%rsi;"
1657 1654 "pop %%rdx; pop %%rcx; pop %%rbx; \n\t"
1658 1655 #else
1659   - "mov %%ebx, %c[rbx](%[vcpu]) \n\t"
1660   - "mov %%ecx, %c[rcx](%[vcpu]) \n\t"
1661   - "mov %%edx, %c[rdx](%[vcpu]) \n\t"
1662   - "mov %%esi, %c[rsi](%[vcpu]) \n\t"
1663   - "mov %%edi, %c[rdi](%[vcpu]) \n\t"
1664   - "mov %%ebp, %c[rbp](%[vcpu]) \n\t"
  1656 + "mov %%ebx, %c[rbx](%[svm]) \n\t"
  1657 + "mov %%ecx, %c[rcx](%[svm]) \n\t"
  1658 + "mov %%edx, %c[rdx](%[svm]) \n\t"
  1659 + "mov %%esi, %c[rsi](%[svm]) \n\t"
  1660 + "mov %%edi, %c[rdi](%[svm]) \n\t"
  1661 + "mov %%ebp, %c[rbp](%[svm]) \n\t"
1665 1662  
1666 1663 "pop %%ebp; pop %%edi; pop %%esi;"
1667 1664 "pop %%edx; pop %%ecx; pop %%ebx; \n\t"
1668 1665 #endif
1669 1666 :
1670   - : [vcpu]"a"(vcpu),
1671   - [svm]"i"(offsetof(struct kvm_vcpu, _priv)),
  1667 + : [svm]"a"(svm),
1672 1668 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
1673   - [rbx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBX])),
1674   - [rcx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RCX])),
1675   - [rdx]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDX])),
1676   - [rsi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RSI])),
1677   - [rdi]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RDI])),
1678   - [rbp]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_RBP]))
  1669 + [rbx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBX])),
  1670 + [rcx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RCX])),
  1671 + [rdx]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDX])),
  1672 + [rsi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RSI])),
  1673 + [rdi]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RDI])),
  1674 + [rbp]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_RBP]))
1679 1675 #ifdef CONFIG_X86_64
1680   - ,[r8 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R8 ])),
1681   - [r9 ]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R9 ])),
1682   - [r10]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R10])),
1683   - [r11]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R11])),
1684   - [r12]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R12])),
1685   - [r13]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R13])),
1686   - [r14]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R14])),
1687   - [r15]"i"(offsetof(struct kvm_vcpu, regs[VCPU_REGS_R15]))
  1676 + ,[r8 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R8])),
  1677 + [r9 ]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R9 ])),
  1678 + [r10]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R10])),
  1679 + [r11]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R11])),
  1680 + [r12]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R12])),
  1681 + [r13]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R13])),
  1682 + [r14]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R14])),
  1683 + [r15]"i"(offsetof(struct vcpu_svm,vcpu.regs[VCPU_REGS_R15]))
1688 1684 #endif
1689 1685 : "cc", "memory" );
1690 1686  
... ... @@ -1865,7 +1861,6 @@
1865 1861  
1866 1862 .run = svm_vcpu_run,
1867 1863 .skip_emulated_instruction = skip_emulated_instruction,
1868   - .vcpu_setup = svm_vcpu_setup,
1869 1864 .patch_hypercall = svm_patch_hypercall,
1870 1865 };
1871 1866  
drivers/kvm/vmx.c
... ... @@ -39,7 +39,7 @@
39 39 };
40 40  
41 41 struct vcpu_vmx {
42   - struct kvm_vcpu *vcpu;
  42 + struct kvm_vcpu vcpu;
43 43 int launched;
44 44 struct kvm_msr_entry *guest_msrs;
45 45 struct kvm_msr_entry *host_msrs;
... ... @@ -60,7 +60,7 @@
60 60  
61 61 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
62 62 {
63   - return (struct vcpu_vmx*)vcpu->_priv;
  63 + return container_of(vcpu, struct vcpu_vmx, vcpu);
64 64 }
65 65  
66 66 static int init_rmode_tss(struct kvm *kvm);
... ... @@ -2302,46 +2302,62 @@
2302 2302  
2303 2303 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
2304 2304 {
  2305 + struct vcpu_vmx *vmx = to_vmx(vcpu);
  2306 +
2305 2307 vmx_free_vmcs(vcpu);
  2308 + kfree(vmx->host_msrs);
  2309 + kfree(vmx->guest_msrs);
  2310 + kvm_vcpu_uninit(vcpu);
  2311 + kfree(vmx);
2306 2312 }
2307 2313  
2308   -static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
  2314 +static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
2309 2315 {
2310   - struct vcpu_vmx *vmx;
  2316 + int err;
  2317 + struct vcpu_vmx *vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);
2311 2318  
2312   - vmx = kzalloc(sizeof(*vmx), GFP_KERNEL);
2313 2319 if (!vmx)
2314   - return -ENOMEM;
  2320 + return ERR_PTR(-ENOMEM);
2315 2321  
  2322 + err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
  2323 + if (err)
  2324 + goto free_vcpu;
  2325 +
2316 2326 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2317   - if (!vmx->guest_msrs)
2318   - goto out_free;
  2327 + if (!vmx->guest_msrs) {
  2328 + err = -ENOMEM;
  2329 + goto uninit_vcpu;
  2330 + }
2319 2331  
2320 2332 vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
2321 2333 if (!vmx->host_msrs)
2322   - goto out_free;
  2334 + goto free_guest_msrs;
2323 2335  
2324 2336 vmx->vmcs = alloc_vmcs();
2325 2337 if (!vmx->vmcs)
2326   - goto out_free;
  2338 + goto free_msrs;
2327 2339  
2328 2340 vmcs_clear(vmx->vmcs);
2329 2341  
2330   - vmx->vcpu = vcpu;
2331   - vcpu->_priv = vmx;
  2342 + vmx_vcpu_load(&vmx->vcpu);
  2343 + err = vmx_vcpu_setup(&vmx->vcpu);
  2344 + vmx_vcpu_put(&vmx->vcpu);
  2345 + if (err)
  2346 + goto free_vmcs;
2332 2347  
2333   - return 0;
  2348 + return &vmx->vcpu;
2334 2349  
2335   -out_free:
2336   - if (vmx->host_msrs)
2337   - kfree(vmx->host_msrs);
2338   -
2339   - if (vmx->guest_msrs)
2340   - kfree(vmx->guest_msrs);
2341   -
  2350 +free_vmcs:
  2351 + free_vmcs(vmx->vmcs);
  2352 +free_msrs:
  2353 + kfree(vmx->host_msrs);
  2354 +free_guest_msrs:
  2355 + kfree(vmx->guest_msrs);
  2356 +uninit_vcpu:
  2357 + kvm_vcpu_uninit(&vmx->vcpu);
  2358 +free_vcpu:
2342 2359 kfree(vmx);
2343   -
2344   - return -ENOMEM;
  2360 + return ERR_PTR(err);
2345 2361 }
2346 2362  
2347 2363 static struct kvm_arch_ops vmx_arch_ops = {
... ... @@ -2389,7 +2405,6 @@
2389 2405  
2390 2406 .run = vmx_vcpu_run,
2391 2407 .skip_emulated_instruction = skip_emulated_instruction,
2392   - .vcpu_setup = vmx_vcpu_setup,
2393 2408 .patch_hypercall = vmx_patch_hypercall,
2394 2409 };
2395 2410