Commit be6ba0f0962a39091c52eb9167ddea201fe80716
Committed by
Avi Kivity
1 parent
be593d6286
Exists in
master
and in
6 other branches
KVM: introduce kvm_for_each_memslot macro
Introduce kvm_for_each_memslot to walk all valid memslots. Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Showing 5 changed files with 27 additions and 26 deletions Side-by-side Diff
arch/ia64/kvm/kvm-ia64.c
... | ... | @@ -1366,14 +1366,12 @@ |
1366 | 1366 | { |
1367 | 1367 | struct kvm_memslots *slots; |
1368 | 1368 | struct kvm_memory_slot *memslot; |
1369 | - int i, j; | |
1369 | + int j; | |
1370 | 1370 | unsigned long base_gfn; |
1371 | 1371 | |
1372 | 1372 | slots = kvm_memslots(kvm); |
1373 | - for (i = 0; i < slots->nmemslots; i++) { | |
1374 | - memslot = &slots->memslots[i]; | |
1373 | + kvm_for_each_memslot(memslot, slots) { | |
1375 | 1374 | base_gfn = memslot->base_gfn; |
1376 | - | |
1377 | 1375 | for (j = 0; j < memslot->npages; j++) { |
1378 | 1376 | if (memslot->rmap[j]) |
1379 | 1377 | put_page((struct page *)memslot->rmap[j]); |
arch/x86/kvm/mmu.c
... | ... | @@ -1128,15 +1128,15 @@ |
1128 | 1128 | int (*handler)(struct kvm *kvm, unsigned long *rmapp, |
1129 | 1129 | unsigned long data)) |
1130 | 1130 | { |
1131 | - int i, j; | |
1131 | + int j; | |
1132 | 1132 | int ret; |
1133 | 1133 | int retval = 0; |
1134 | 1134 | struct kvm_memslots *slots; |
1135 | + struct kvm_memory_slot *memslot; | |
1135 | 1136 | |
1136 | 1137 | slots = kvm_memslots(kvm); |
1137 | 1138 | |
1138 | - for (i = 0; i < slots->nmemslots; i++) { | |
1139 | - struct kvm_memory_slot *memslot = &slots->memslots[i]; | |
1139 | + kvm_for_each_memslot(memslot, slots) { | |
1140 | 1140 | unsigned long start = memslot->userspace_addr; |
1141 | 1141 | unsigned long end; |
1142 | 1142 | |
1143 | 1143 | |
1144 | 1144 | |
... | ... | @@ -3985,15 +3985,15 @@ |
3985 | 3985 | */ |
3986 | 3986 | unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm) |
3987 | 3987 | { |
3988 | - int i; | |
3989 | 3988 | unsigned int nr_mmu_pages; |
3990 | 3989 | unsigned int nr_pages = 0; |
3991 | 3990 | struct kvm_memslots *slots; |
3991 | + struct kvm_memory_slot *memslot; | |
3992 | 3992 | |
3993 | 3993 | slots = kvm_memslots(kvm); |
3994 | 3994 | |
3995 | - for (i = 0; i < slots->nmemslots; i++) | |
3996 | - nr_pages += slots->memslots[i].npages; | |
3995 | + kvm_for_each_memslot(memslot, slots) | |
3996 | + nr_pages += memslot->npages; | |
3997 | 3997 | |
3998 | 3998 | nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000; |
3999 | 3999 | nr_mmu_pages = max(nr_mmu_pages, |
include/linux/kvm_host.h
... | ... | @@ -308,6 +308,10 @@ |
308 | 308 | (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ |
309 | 309 | idx++) |
310 | 310 | |
311 | +#define kvm_for_each_memslot(memslot, slots) \ | |
312 | + for (memslot = &slots->memslots[0]; \ | |
313 | + memslot < slots->memslots + (slots)->nmemslots; memslot++) | |
314 | + | |
311 | 315 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); |
312 | 316 | void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); |
313 | 317 |
virt/kvm/iommu.c
... | ... | @@ -134,14 +134,15 @@ |
134 | 134 | |
135 | 135 | static int kvm_iommu_map_memslots(struct kvm *kvm) |
136 | 136 | { |
137 | - int i, idx, r = 0; | |
137 | + int idx, r = 0; | |
138 | 138 | struct kvm_memslots *slots; |
139 | + struct kvm_memory_slot *memslot; | |
139 | 140 | |
140 | 141 | idx = srcu_read_lock(&kvm->srcu); |
141 | 142 | slots = kvm_memslots(kvm); |
142 | 143 | |
143 | - for (i = 0; i < slots->nmemslots; i++) { | |
144 | - r = kvm_iommu_map_pages(kvm, &slots->memslots[i]); | |
144 | + kvm_for_each_memslot(memslot, slots) { | |
145 | + r = kvm_iommu_map_pages(kvm, memslot); | |
145 | 146 | if (r) |
146 | 147 | break; |
147 | 148 | } |
148 | 149 | |
149 | 150 | |
... | ... | @@ -311,16 +312,16 @@ |
311 | 312 | |
312 | 313 | static int kvm_iommu_unmap_memslots(struct kvm *kvm) |
313 | 314 | { |
314 | - int i, idx; | |
315 | + int idx; | |
315 | 316 | struct kvm_memslots *slots; |
317 | + struct kvm_memory_slot *memslot; | |
316 | 318 | |
317 | 319 | idx = srcu_read_lock(&kvm->srcu); |
318 | 320 | slots = kvm_memslots(kvm); |
319 | 321 | |
320 | - for (i = 0; i < slots->nmemslots; i++) { | |
321 | - kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn, | |
322 | - slots->memslots[i].npages); | |
323 | - } | |
322 | + kvm_for_each_memslot(memslot, slots) | |
323 | + kvm_iommu_put_pages(kvm, memslot->base_gfn, memslot->npages); | |
324 | + | |
324 | 325 | srcu_read_unlock(&kvm->srcu, idx); |
325 | 326 | |
326 | 327 | return 0; |
virt/kvm/kvm_main.c
... | ... | @@ -547,11 +547,11 @@ |
547 | 547 | |
548 | 548 | void kvm_free_physmem(struct kvm *kvm) |
549 | 549 | { |
550 | - int i; | |
551 | 550 | struct kvm_memslots *slots = kvm->memslots; |
551 | + struct kvm_memory_slot *memslot; | |
552 | 552 | |
553 | - for (i = 0; i < slots->nmemslots; ++i) | |
554 | - kvm_free_physmem_slot(&slots->memslots[i], NULL); | |
553 | + kvm_for_each_memslot(memslot, slots) | |
554 | + kvm_free_physmem_slot(memslot, NULL); | |
555 | 555 | |
556 | 556 | kfree(kvm->memslots); |
557 | 557 | } |
558 | 558 | |
559 | 559 | |
... | ... | @@ -975,15 +975,13 @@ |
975 | 975 | static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots, |
976 | 976 | gfn_t gfn) |
977 | 977 | { |
978 | - int i; | |
978 | + struct kvm_memory_slot *memslot; | |
979 | 979 | |
980 | - for (i = 0; i < slots->nmemslots; ++i) { | |
981 | - struct kvm_memory_slot *memslot = &slots->memslots[i]; | |
982 | - | |
980 | + kvm_for_each_memslot(memslot, slots) | |
983 | 981 | if (gfn >= memslot->base_gfn |
984 | 982 | && gfn < memslot->base_gfn + memslot->npages) |
985 | 983 | return memslot; |
986 | - } | |
984 | + | |
987 | 985 | return NULL; |
988 | 986 | } |
989 | 987 |