Commit 73880c80aa9c8dc353cd0ad26579023213cd5314

Authored by Gleb Natapov
Committed by Avi Kivity
1 parent 1ed0ce000a

KVM: Break dependency between vcpu index in vcpus array and vcpu_id.

Architectures are free to use vcpu_id as they see fit. For x86 it is used
as the vcpu's APIC id. A new ioctl is added to configure the boot vcpu id,
which until now was assumed to be 0.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>

Showing 9 changed files with 55 additions and 33 deletions Side-by-side Diff

arch/ia64/include/asm/kvm_host.h
... ... @@ -465,7 +465,6 @@
465 465 unsigned long metaphysical_rr4;
466 466 unsigned long vmm_init_rr;
467 467  
468   - int online_vcpus;
469 468 int is_sn2;
470 469  
471 470 struct kvm_ioapic *vioapic;
arch/ia64/kvm/Kconfig
... ... @@ -25,6 +25,7 @@
25 25 select PREEMPT_NOTIFIERS
26 26 select ANON_INODES
27 27 select HAVE_KVM_IRQCHIP
  28 + select KVM_APIC_ARCHITECTURE
28 29 ---help---
29 30 Support hosting fully virtualized guest machines using hardware
30 31 virtualization extensions. You will need a fairly recent
arch/ia64/kvm/kvm-ia64.c
... ... @@ -338,7 +338,7 @@
338 338 union ia64_lid lid;
339 339 int i;
340 340  
341   - for (i = 0; i < kvm->arch.online_vcpus; i++) {
  341 + for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
342 342 if (kvm->vcpus[i]) {
343 343 lid.val = VCPU_LID(kvm->vcpus[i]);
344 344 if (lid.id == id && lid.eid == eid)
... ... @@ -412,7 +412,7 @@
412 412  
413 413 call_data.ptc_g_data = p->u.ptc_g_data;
414 414  
415   - for (i = 0; i < kvm->arch.online_vcpus; i++) {
  415 + for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
416 416 if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
417 417 KVM_MP_STATE_UNINITIALIZED ||
418 418 vcpu == kvm->vcpus[i])
... ... @@ -852,8 +852,6 @@
852 852  
853 853 kvm_init_vm(kvm);
854 854  
855   - kvm->arch.online_vcpus = 0;
856   -
857 855 return kvm;
858 856  
859 857 }
... ... @@ -1355,8 +1353,6 @@
1355 1353 printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
1356 1354 goto fail;
1357 1355 }
1358   -
1359   - kvm->arch.online_vcpus++;
1360 1356  
1361 1357 return vcpu;
1362 1358 fail:
arch/ia64/kvm/vcpu.c
... ... @@ -831,7 +831,7 @@
831 831 kvm = (struct kvm *)KVM_VM_BASE;
832 832  
833 833 if (kvm_vcpu_is_bsp(vcpu)) {
834   - for (i = 0; i < kvm->arch.online_vcpus; i++) {
  834 + for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
835 835 v = (struct kvm_vcpu *)((char *)vcpu +
836 836 sizeof(struct kvm_vcpu_data) * i);
837 837 VMX(v, itc_offset) = itc_offset;
arch/x86/kvm/Kconfig
... ... @@ -27,6 +27,7 @@
27 27 select ANON_INODES
28 28 select HAVE_KVM_IRQCHIP
29 29 select HAVE_KVM_EVENTFD
  30 + select KVM_APIC_ARCHITECTURE
30 31 ---help---
31 32 Support hosting fully virtualized guest machines using hardware
32 33 virtualization extensions. You will need a fairly recent
... ... @@ -430,6 +430,7 @@
430 430 #ifdef __KVM_HAVE_PIT
431 431 #define KVM_CAP_PIT2 33
432 432 #endif
  433 +#define KVM_CAP_SET_BOOT_CPU_ID 34
433 434  
434 435 #ifdef KVM_CAP_IRQ_ROUTING
435 436  
... ... @@ -537,6 +538,7 @@
537 538 #define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
538 539 #define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
539 540 #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
  541 +#define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
540 542  
541 543 /*
542 544 * ioctls for vcpu fds
include/linux/kvm_host.h
... ... @@ -131,8 +131,12 @@
131 131 int nmemslots;
132 132 struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
133 133 KVM_PRIVATE_MEM_SLOTS];
  134 +#ifdef CONFIG_KVM_APIC_ARCHITECTURE
  135 + u32 bsp_vcpu_id;
134 136 struct kvm_vcpu *bsp_vcpu;
  137 +#endif
135 138 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
  139 + atomic_t online_vcpus;
136 140 struct list_head vm_list;
137 141 struct mutex lock;
138 142 struct kvm_io_bus mmio_bus;
139 143  
... ... @@ -550,9 +554,11 @@
550 554  
551 555 #endif /* CONFIG_HAVE_KVM_EVENTFD */
552 556  
  557 +#ifdef CONFIG_KVM_APIC_ARCHITECTURE
553 558 static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
554 559 {
555 560 return vcpu->kvm->bsp_vcpu == vcpu;
556 561 }
  562 +#endif
557 563 #endif
... ... @@ -9,4 +9,7 @@
9 9 config HAVE_KVM_EVENTFD
10 10 bool
11 11 select EVENTFD
  12 +
  13 +config KVM_APIC_ARCHITECTURE
  14 + bool
... ... @@ -689,11 +689,6 @@
689 689 }
690 690 #endif
691 691  
692   -static inline int valid_vcpu(int n)
693   -{
694   - return likely(n >= 0 && n < KVM_MAX_VCPUS);
695   -}
696   -
697 692 inline int kvm_is_mmio_pfn(pfn_t pfn)
698 693 {
699 694 if (pfn_valid(pfn)) {
700 695  
701 696  
... ... @@ -1714,24 +1709,18 @@
1714 1709 */
1715 1710 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1716 1711 {
1717   - int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
1718   - if (fd < 0)
1719   - kvm_put_kvm(vcpu->kvm);
1720   - return fd;
  1712 + return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
1721 1713 }
1722 1714  
1723 1715 /*
1724 1716 * Creates some virtual cpus. Good luck creating more than one.
1725 1717 */
1726   -static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
  1718 +static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1727 1719 {
1728 1720 int r;
1729 1721 struct kvm_vcpu *vcpu;
1730 1722  
1731   - if (!valid_vcpu(n))
1732   - return -EINVAL;
1733   -
1734   - vcpu = kvm_arch_vcpu_create(kvm, n);
  1723 + vcpu = kvm_arch_vcpu_create(kvm, id);
1735 1724 if (IS_ERR(vcpu))
1736 1725 return PTR_ERR(vcpu);
1737 1726  
1738 1727  
1739 1728  
1740 1729  
1741 1730  
... ... @@ -1742,25 +1731,38 @@
1742 1731 return r;
1743 1732  
1744 1733 mutex_lock(&kvm->lock);
1745   - if (kvm->vcpus[n]) {
1746   - r = -EEXIST;
  1734 + if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
  1735 + r = -EINVAL;
1747 1736 goto vcpu_destroy;
1748 1737 }
1749   - kvm->vcpus[n] = vcpu;
1750   - if (n == 0)
1751   - kvm->bsp_vcpu = vcpu;
1752   - mutex_unlock(&kvm->lock);
1753 1738  
  1739 + for (r = 0; r < atomic_read(&kvm->online_vcpus); r++)
  1740 + if (kvm->vcpus[r]->vcpu_id == id) {
  1741 + r = -EEXIST;
  1742 + goto vcpu_destroy;
  1743 + }
  1744 +
  1745 + BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
  1746 +
1754 1747 /* Now it's all set up, let userspace reach it */
1755 1748 kvm_get_kvm(kvm);
1756 1749 r = create_vcpu_fd(vcpu);
1757   - if (r < 0)
1758   - goto unlink;
  1750 + if (r < 0) {
  1751 + kvm_put_kvm(kvm);
  1752 + goto vcpu_destroy;
  1753 + }
  1754 +
  1755 + kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
  1756 + smp_wmb();
  1757 + atomic_inc(&kvm->online_vcpus);
  1758 +
  1759 +#ifdef CONFIG_KVM_APIC_ARCHITECTURE
  1760 + if (kvm->bsp_vcpu_id == id)
  1761 + kvm->bsp_vcpu = vcpu;
  1762 +#endif
  1763 + mutex_unlock(&kvm->lock);
1759 1764 return r;
1760 1765  
1761   -unlink:
1762   - mutex_lock(&kvm->lock);
1763   - kvm->vcpus[n] = NULL;
1764 1766 vcpu_destroy:
1765 1767 mutex_unlock(&kvm->lock);
1766 1768 kvm_arch_vcpu_destroy(vcpu);
... ... @@ -2233,6 +2235,15 @@
2233 2235 r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
2234 2236 break;
2235 2237 }
  2238 +#ifdef CONFIG_KVM_APIC_ARCHITECTURE
  2239 + case KVM_SET_BOOT_CPU_ID:
  2240 + r = 0;
  2241 + if (atomic_read(&kvm->online_vcpus) != 0)
  2242 + r = -EBUSY;
  2243 + else
  2244 + kvm->bsp_vcpu_id = arg;
  2245 + break;
  2246 +#endif
2236 2247 default:
2237 2248 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
2238 2249 }
... ... @@ -2299,6 +2310,9 @@
2299 2310 case KVM_CAP_USER_MEMORY:
2300 2311 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2301 2312 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
  2313 +#ifdef CONFIG_KVM_APIC_ARCHITECTURE
  2314 + case KVM_CAP_SET_BOOT_CPU_ID:
  2315 +#endif
2302 2316 return 1;
2303 2317 #ifdef CONFIG_HAVE_KVM_IRQCHIP
2304 2318 case KVM_CAP_IRQ_ROUTING: