Commit 988a2cae6a3c0dea6df59808a935a9a697bfc28c
Committed by Avi Kivity
1 parent 73880c80aa
Exists in master and in 4 other branches
KVM: Use macro to iterate over vcpus.
[christian: remove unused variables on s390]

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Showing 9 changed files with 76 additions and 74 deletions
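The change is mechanical across all nine files: every open-coded scan of kvm->vcpus[] is replaced by the kvm_for_each_vcpu() iterator added in include/linux/kvm_host.h below. A minimal before/after sketch of the pattern (the per-vcpu action is a placeholder; everything else comes from this diff):

    /* Before: walk the whole fixed-size array and skip empty slots. */
    int i;
    struct kvm_vcpu *vcpu;

    for (i = 0; i < KVM_MAX_VCPUS; ++i) {
            vcpu = kvm->vcpus[i];
            if (!vcpu)
                    continue;
            do_per_vcpu_work(vcpu);   /* placeholder for the per-vcpu action */
    }

    /* After: visit only the vcpus that have actually been created. */
    kvm_for_each_vcpu(i, vcpu, kvm)
            do_per_vcpu_work(vcpu);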
arch/ia64/kvm/kvm-ia64.c
... | ... | @@ -337,13 +337,12 @@ |
337 | 337 | { |
338 | 338 | union ia64_lid lid; |
339 | 339 | int i; |
340 | + struct kvm_vcpu *vcpu; | |
340 | 341 | |
341 | - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) { | |
342 | - if (kvm->vcpus[i]) { | |
343 | - lid.val = VCPU_LID(kvm->vcpus[i]); | |
344 | - if (lid.id == id && lid.eid == eid) | |
345 | - return kvm->vcpus[i]; | |
346 | - } | |
342 | + kvm_for_each_vcpu(i, vcpu, kvm) { | |
343 | + lid.val = VCPU_LID(vcpu); | |
344 | + if (lid.id == id && lid.eid == eid) | |
345 | + return vcpu; | |
347 | 346 | } |
348 | 347 | |
349 | 348 | return NULL; |
350 | 349 | |
351 | 350 | |
352 | 351 | |
... | ... | @@ -409,21 +408,21 @@ |
409 | 408 | struct kvm *kvm = vcpu->kvm; |
410 | 409 | struct call_data call_data; |
411 | 410 | int i; |
411 | + struct kvm_vcpu *vcpui; | |
412 | 412 | |
413 | 413 | call_data.ptc_g_data = p->u.ptc_g_data; |
414 | 414 | |
415 | - for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) { | |
416 | - if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state == | |
417 | - KVM_MP_STATE_UNINITIALIZED || | |
418 | - vcpu == kvm->vcpus[i]) | |
415 | + kvm_for_each_vcpu(i, vcpui, kvm) { | |
416 | + if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED || | |
417 | + vcpu == vcpui) | |
419 | 418 | continue; |
420 | 419 | |
421 | - if (waitqueue_active(&kvm->vcpus[i]->wq)) | |
422 | - wake_up_interruptible(&kvm->vcpus[i]->wq); | |
420 | + if (waitqueue_active(&vcpui->wq)) | |
421 | + wake_up_interruptible(&vcpui->wq); | |
423 | 422 | |
424 | - if (kvm->vcpus[i]->cpu != -1) { | |
425 | - call_data.vcpu = kvm->vcpus[i]; | |
426 | - smp_call_function_single(kvm->vcpus[i]->cpu, | |
423 | + if (vcpui->cpu != -1) { | |
424 | + call_data.vcpu = vcpui; | |
425 | + smp_call_function_single(vcpui->cpu, | |
427 | 426 | vcpu_global_purge, &call_data, 1); |
428 | 427 | } else |
429 | 428 | printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n"); |
arch/powerpc/kvm/powerpc.c
... | ... | @@ -122,13 +122,17 @@ |
122 | 122 | static void kvmppc_free_vcpus(struct kvm *kvm) |
123 | 123 | { |
124 | 124 | unsigned int i; |
125 | + struct kvm_vcpu *vcpu; | |
125 | 126 | |
126 | - for (i = 0; i < KVM_MAX_VCPUS; ++i) { | |
127 | - if (kvm->vcpus[i]) { | |
128 | - kvm_arch_vcpu_free(kvm->vcpus[i]); | |
129 | - kvm->vcpus[i] = NULL; | |
130 | - } | |
131 | - } | |
127 | + kvm_for_each_vcpu(i, vcpu, kvm) | |
128 | + kvm_arch_vcpu_free(vcpu); | |
129 | + | |
130 | + mutex_lock(&kvm->lock); | |
131 | + for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) | |
132 | + kvm->vcpus[i] = NULL; | |
133 | + | |
134 | + atomic_set(&kvm->online_vcpus, 0); | |
135 | + mutex_unlock(&kvm->lock); | |
132 | 136 | } |
133 | 137 | |
134 | 138 | void kvm_arch_sync_events(struct kvm *kvm) |
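The same two-phase teardown appears here and in the s390 and x86 hunks below: destroy every created vcpu through the iterator, then clear the array slots and reset the online count under kvm->lock. Condensed, the sequence is (the free callback name varies per arch):

    kvm_for_each_vcpu(i, vcpu, kvm)
            kvm_arch_vcpu_free(vcpu);        /* s390 uses kvm_arch_vcpu_destroy() */

    mutex_lock(&kvm->lock);
    for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
            kvm->vcpus[i] = NULL;            /* drop the now-stale pointers */
    atomic_set(&kvm->online_vcpus, 0);       /* no vcpus remain online */
    mutex_unlock(&kvm->lock);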
arch/s390/kvm/kvm-s390.c
... | ... | @@ -211,13 +211,17 @@ |
211 | 211 | static void kvm_free_vcpus(struct kvm *kvm) |
212 | 212 | { |
213 | 213 | unsigned int i; |
214 | + struct kvm_vcpu *vcpu; | |
214 | 215 | |
215 | - for (i = 0; i < KVM_MAX_VCPUS; ++i) { | |
216 | - if (kvm->vcpus[i]) { | |
217 | - kvm_arch_vcpu_destroy(kvm->vcpus[i]); | |
218 | - kvm->vcpus[i] = NULL; | |
219 | - } | |
220 | - } | |
216 | + kvm_for_each_vcpu(i, vcpu, kvm) | |
217 | + kvm_arch_vcpu_destroy(vcpu); | |
218 | + | |
219 | + mutex_lock(&kvm->lock); | |
220 | + for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) | |
221 | + kvm->vcpus[i] = NULL; | |
222 | + | |
223 | + atomic_set(&kvm->online_vcpus, 0); | |
224 | + mutex_unlock(&kvm->lock); | |
221 | 225 | } |
222 | 226 | |
223 | 227 | void kvm_arch_sync_events(struct kvm *kvm) |
... | ... | @@ -314,8 +318,6 @@ |
314 | 318 | BUG_ON(!kvm->arch.sca); |
315 | 319 | if (!kvm->arch.sca->cpu[id].sda) |
316 | 320 | kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block; |
317 | - else | |
318 | - BUG_ON(!kvm->vcpus[id]); /* vcpu does already exist */ | |
319 | 321 | vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32); |
320 | 322 | vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; |
321 | 323 | |
... | ... | @@ -683,6 +685,7 @@ |
683 | 685 | int user_alloc) |
684 | 686 | { |
685 | 687 | int i; |
688 | + struct kvm_vcpu *vcpu; | |
686 | 689 | |
687 | 690 | /* A few sanity checks. We can have exactly one memory slot which has |
688 | 691 | to start at guest virtual zero and which has to be located at a |
... | ... | @@ -707,14 +710,10 @@ |
707 | 710 | return -EINVAL; |
708 | 711 | |
709 | 712 | /* request update of sie control block for all available vcpus */ |
710 | - for (i = 0; i < KVM_MAX_VCPUS; ++i) { | |
711 | - if (kvm->vcpus[i]) { | |
712 | - if (test_and_set_bit(KVM_REQ_MMU_RELOAD, | |
713 | - &kvm->vcpus[i]->requests)) | |
714 | - continue; | |
715 | - kvm_s390_inject_sigp_stop(kvm->vcpus[i], | |
716 | - ACTION_RELOADVCPU_ON_STOP); | |
717 | - } | |
713 | + kvm_for_each_vcpu(i, vcpu, kvm) { | |
714 | + if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) | |
715 | + continue; | |
716 | + kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP); | |
718 | 717 | } |
719 | 718 | |
720 | 719 | return 0; |
arch/x86/kvm/i8254.c
... | ... | @@ -669,11 +669,8 @@ |
669 | 669 | * VCPU0, and only if its LVT0 is in EXTINT mode. |
670 | 670 | */ |
671 | 671 | if (kvm->arch.vapics_in_nmi_mode > 0) |
672 | - for (i = 0; i < KVM_MAX_VCPUS; ++i) { | |
673 | - vcpu = kvm->vcpus[i]; | |
674 | - if (vcpu) | |
675 | - kvm_apic_nmi_wd_deliver(vcpu); | |
676 | - } | |
672 | + kvm_for_each_vcpu(i, vcpu, kvm) | |
673 | + kvm_apic_nmi_wd_deliver(vcpu); | |
677 | 674 | } |
678 | 675 | |
679 | 676 | void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu) |
arch/x86/kvm/mmu.c
... | ... | @@ -1347,10 +1347,10 @@ |
1347 | 1347 | static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm) |
1348 | 1348 | { |
1349 | 1349 | int i; |
1350 | + struct kvm_vcpu *vcpu; | |
1350 | 1351 | |
1351 | - for (i = 0; i < KVM_MAX_VCPUS; ++i) | |
1352 | - if (kvm->vcpus[i]) | |
1353 | - kvm->vcpus[i]->arch.last_pte_updated = NULL; | |
1352 | + kvm_for_each_vcpu(i, vcpu, kvm) | |
1353 | + vcpu->arch.last_pte_updated = NULL; | |
1354 | 1354 | } |
1355 | 1355 | |
1356 | 1356 | static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) |
arch/x86/kvm/x86.c
... | ... | @@ -2946,10 +2946,7 @@ |
2946 | 2946 | |
2947 | 2947 | spin_lock(&kvm_lock); |
2948 | 2948 | list_for_each_entry(kvm, &vm_list, vm_list) { |
2949 | - for (i = 0; i < KVM_MAX_VCPUS; ++i) { | |
2950 | - vcpu = kvm->vcpus[i]; | |
2951 | - if (!vcpu) | |
2952 | - continue; | |
2949 | + kvm_for_each_vcpu(i, vcpu, kvm) { | |
2953 | 2950 | if (vcpu->cpu != freq->cpu) |
2954 | 2951 | continue; |
2955 | 2952 | if (!kvm_request_guest_time_update(vcpu)) |
2956 | 2953 | |
2957 | 2954 | |
... | ... | @@ -4678,20 +4675,22 @@ |
4678 | 4675 | static void kvm_free_vcpus(struct kvm *kvm) |
4679 | 4676 | { |
4680 | 4677 | unsigned int i; |
4678 | + struct kvm_vcpu *vcpu; | |
4681 | 4679 | |
4682 | 4680 | /* |
4683 | 4681 | * Unpin any mmu pages first. |
4684 | 4682 | */ |
4685 | - for (i = 0; i < KVM_MAX_VCPUS; ++i) | |
4686 | - if (kvm->vcpus[i]) | |
4687 | - kvm_unload_vcpu_mmu(kvm->vcpus[i]); | |
4688 | - for (i = 0; i < KVM_MAX_VCPUS; ++i) { | |
4689 | - if (kvm->vcpus[i]) { | |
4690 | - kvm_arch_vcpu_free(kvm->vcpus[i]); | |
4691 | - kvm->vcpus[i] = NULL; | |
4692 | - } | |
4693 | - } | |
4683 | + kvm_for_each_vcpu(i, vcpu, kvm) | |
4684 | + kvm_unload_vcpu_mmu(vcpu); | |
4685 | + kvm_for_each_vcpu(i, vcpu, kvm) | |
4686 | + kvm_arch_vcpu_free(vcpu); | |
4694 | 4687 | |
4688 | + mutex_lock(&kvm->lock); | |
4689 | + for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) | |
4690 | + kvm->vcpus[i] = NULL; | |
4691 | + | |
4692 | + atomic_set(&kvm->online_vcpus, 0); | |
4693 | + mutex_unlock(&kvm->lock); | |
4695 | 4694 | } |
4696 | 4695 | |
4697 | 4696 | void kvm_arch_sync_events(struct kvm *kvm) |
include/linux/kvm_host.h
... | ... | @@ -179,6 +179,17 @@ |
179 | 179 | #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) |
180 | 180 | #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt) |
181 | 181 | |
182 | +static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) | |
183 | +{ | |
184 | + smp_rmb(); | |
185 | + return kvm->vcpus[i]; | |
186 | +} | |
187 | + | |
188 | +#define kvm_for_each_vcpu(idx, vcpup, kvm) \ | |
189 | + for (idx = 0, vcpup = kvm_get_vcpu(kvm, idx); \ | |
190 | + idx < atomic_read(&kvm->online_vcpus) && vcpup; \ | |
191 | + vcpup = kvm_get_vcpu(kvm, ++idx)) | |
192 | + | |
182 | 193 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); |
183 | 194 | void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); |
184 | 195 |
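The iterator bounds the walk by online_vcpus rather than KVM_MAX_VCPUS and also stops at the first NULL slot, so callers no longer need their own NULL checks. The smp_rmb() in kvm_get_vcpu() is presumably paired with a write barrier on the vcpu-creation side so a vcpu's fields are visible before its array slot is; that pairing is not shown in this diff. A hypothetical caller, for illustration only:

    int i;
    struct kvm_vcpu *vcpu;

    kvm_for_each_vcpu(i, vcpu, kvm) {
            /* vcpu is guaranteed non-NULL inside the loop body */
            kvm_vcpu_kick(vcpu);     /* example per-vcpu action, not from this commit */
    }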
virt/kvm/irq_comm.c
... | ... | @@ -68,10 +68,8 @@ |
68 | 68 | kvm_is_dm_lowest_prio(irq)) |
69 | 69 | printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n"); |
70 | 70 | |
71 | - for (i = 0; i < KVM_MAX_VCPUS; i++) { | |
72 | - vcpu = kvm->vcpus[i]; | |
73 | - | |
74 | - if (!vcpu || !kvm_apic_present(vcpu)) | |
71 | + kvm_for_each_vcpu(i, vcpu, kvm) { | |
72 | + if (!kvm_apic_present(vcpu)) | |
75 | 73 | continue; |
76 | 74 | |
77 | 75 | if (!kvm_apic_match_dest(vcpu, src, irq->shorthand, |
virt/kvm/kvm_main.c
... | ... | @@ -738,10 +738,7 @@ |
738 | 738 | |
739 | 739 | me = get_cpu(); |
740 | 740 | spin_lock(&kvm->requests_lock); |
741 | - for (i = 0; i < KVM_MAX_VCPUS; ++i) { | |
742 | - vcpu = kvm->vcpus[i]; | |
743 | - if (!vcpu) | |
744 | - continue; | |
741 | + kvm_for_each_vcpu(i, vcpu, kvm) { | |
745 | 742 | if (test_and_set_bit(req, &vcpu->requests)) |
746 | 743 | continue; |
747 | 744 | cpu = vcpu->cpu; |
... | ... | @@ -1718,7 +1715,7 @@ |
1718 | 1715 | static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) |
1719 | 1716 | { |
1720 | 1717 | int r; |
1721 | - struct kvm_vcpu *vcpu; | |
1718 | + struct kvm_vcpu *vcpu, *v; | |
1722 | 1719 | |
1723 | 1720 | vcpu = kvm_arch_vcpu_create(kvm, id); |
1724 | 1721 | if (IS_ERR(vcpu)) |
... | ... | @@ -1736,8 +1733,8 @@ |
1736 | 1733 | goto vcpu_destroy; |
1737 | 1734 | } |
1738 | 1735 | |
1739 | - for (r = 0; r < atomic_read(&kvm->online_vcpus); r++) | |
1740 | - if (kvm->vcpus[r]->vcpu_id == id) { | |
1736 | + kvm_for_each_vcpu(r, v, kvm) | |
1737 | + if (v->vcpu_id == id) { | |
1741 | 1738 | r = -EEXIST; |
1742 | 1739 | goto vcpu_destroy; |
1743 | 1740 | } |
... | ... | @@ -2526,11 +2523,9 @@ |
2526 | 2523 | *val = 0; |
2527 | 2524 | spin_lock(&kvm_lock); |
2528 | 2525 | list_for_each_entry(kvm, &vm_list, vm_list) |
2529 | - for (i = 0; i < KVM_MAX_VCPUS; ++i) { | |
2530 | - vcpu = kvm->vcpus[i]; | |
2531 | - if (vcpu) | |
2532 | - *val += *(u32 *)((void *)vcpu + offset); | |
2533 | - } | |
2526 | + kvm_for_each_vcpu(i, vcpu, kvm) | |
2527 | + *val += *(u32 *)((void *)vcpu + offset); | |
2528 | + | |
2534 | 2529 | spin_unlock(&kvm_lock); |
2535 | 2530 | return 0; |
2536 | 2531 | } |