Commit 87bf6e7de1134f48681fd2ce4b7c1ec45458cb6d
Committed by: Avi Kivity
1 parent: 77662e0028
KVM: fix the handling of dirty bitmaps to avoid overflows
Int is not long enough to store the size of a dirty bitmap. This patch fixes the problem by introducing a wrapper function, kvm_dirty_bitmap_bytes(), to calculate the sizes of dirty bitmaps.

Note: in mark_page_dirty(), we have to consider the fact that __set_bit() takes the offset as an int, not a long.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
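To make the overflow concrete: the bitmap holds one bit per page, so a sufficiently large slot needs more than INT_MAX bytes of bitmap. Below is a minimal userspace sketch of the arithmetic (not kernel code; the slot size is hypothetical and ALIGN is re-created to match the kernel's definition, assuming a 64-bit build):

/*
 * Sketch: a slot of 2^35 pages needs 2^32 bytes of dirty bitmap,
 * which an int cannot represent.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define ALIGN(x, a)   (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long npages = 1UL << 35;              /* pages in the slot */
        unsigned long bytes = ALIGN(npages, BITS_PER_LONG) / 8;

        printf("bitmap bytes = %lu (INT_MAX = %d)\n", bytes, INT_MAX);
        printf("stored in an int: %d\n", (int)bytes);  /* silently truncated */
        return 0;
}

On a 64-bit build this prints a byte count of 4294967296, which truncates to 0 when narrowed to an int on common ABIs: exactly the class of bug the wrapper avoids.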
Showing 5 changed files with 24 additions and 13 deletions
arch/ia64/kvm/kvm-ia64.c
@@ -1802,7 +1802,8 @@
 {
        struct kvm_memory_slot *memslot;
        int r, i;
-       long n, base;
+       long base;
+       unsigned long n;
        unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
                        offsetof(struct kvm_vm_data, kvm_mem_dirty_log));

@@ -1815,7 +1816,7 @@
        if (!memslot->dirty_bitmap)
                goto out;

-       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+       n = kvm_dirty_bitmap_bytes(memslot);
        base = memslot->base_gfn / BITS_PER_LONG;

        for (i = 0; i < n/sizeof(long); ++i) {
@@ -1831,7 +1832,7 @@
                struct kvm_dirty_log *log)
 {
        int r;
-       int n;
+       unsigned long n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

@@ -1850,7 +1851,7 @@
        if (is_dirty) {
                kvm_flush_remote_tlbs(kvm);
                memslot = &kvm->memslots->memslots[log->slot];
-               n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+               n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
arch/powerpc/kvm/book3s.c
@@ -1004,7 +1004,8 @@
        struct kvm_vcpu *vcpu;
        ulong ga, ga_end;
        int is_dirty = 0;
-       int r, n;
+       int r;
+       unsigned long n;

        mutex_lock(&kvm->slots_lock);

@@ -1022,7 +1023,7 @@
                kvm_for_each_vcpu(n, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

-               n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+               n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
arch/x86/kvm/x86.c
@@ -2612,8 +2612,9 @@
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log)
 {
-       int r, n, i;
+       int r, i;
        struct kvm_memory_slot *memslot;
+       unsigned long n;
        unsigned long is_dirty = 0;
        unsigned long *dirty_bitmap = NULL;

@@ -2628,7 +2629,7 @@
        if (!memslot->dirty_bitmap)
                goto out;

-       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+       n = kvm_dirty_bitmap_bytes(memslot);

        r = -ENOMEM;
        dirty_bitmap = vmalloc(n);
include/linux/kvm_host.h
@@ -119,6 +119,11 @@
        int user_alloc;
 };

+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+       return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
 struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
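The helper is the old ALIGN expression, centralized and widened to unsigned long: the page count is rounded up to a whole number of longs (one dirty bit per page) and then converted from bits to bytes. A quick userspace re-creation with a worked value (illustrative only; the struct is reduced to the single field the helper reads, and ALIGN mirrors the kernel macro):

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define ALIGN(x, a)   (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

struct kvm_memory_slot {
        unsigned long npages;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

int main(void)
{
        struct kvm_memory_slot slot = { .npages = 100 };

        /* 100 bits round up to 128 bits (two 64-bit longs), i.e. 16 bytes. */
        printf("%lu bytes\n", kvm_dirty_bitmap_bytes(&slot));
        return 0;
}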
virt/kvm/kvm_main.c
@@ -648,7 +648,7 @@

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-               unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
+               unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
@@ -768,7 +768,7 @@
 {
        struct kvm_memory_slot *memslot;
        int r, i;
-       int n;
+       unsigned long n;
        unsigned long any = 0;

        r = -EINVAL;
@@ -780,7 +780,7 @@
        if (!memslot->dirty_bitmap)
                goto out;

-       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+       n = kvm_dirty_bitmap_bytes(memslot);

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];
@@ -1186,10 +1186,13 @@
        memslot = gfn_to_memslot_unaliased(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;
+               unsigned long *p = memslot->dirty_bitmap +
+                                       rel_gfn / BITS_PER_LONG;
+               int offset = rel_gfn % BITS_PER_LONG;

                /* avoid RMW */
-               if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
-                       generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
+               if (!generic_test_le_bit(offset, p))
+                       generic___set_le_bit(offset, p);
        }
 }
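The mark_page_dirty() hunk above addresses the second half of the commit message: the generic bit helpers take the bit number as an int, so passing rel_gfn directly would overflow once a slot grows past what an int can index. Splitting rel_gfn into a long-sized word index and a sub-word bit offset keeps the int argument in [0, BITS_PER_LONG). A small sketch of the decomposition (userspace, with a made-up rel_gfn larger than INT_MAX):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
        unsigned long rel_gfn = (1UL << 32) + 5;       /* larger than INT_MAX */
        unsigned long word = rel_gfn / BITS_PER_LONG;  /* index into dirty_bitmap[] */
        int offset = rel_gfn % BITS_PER_LONG;          /* always 0..BITS_PER_LONG-1 */

        printf("rel_gfn %lu -> word %lu, bit %d (INT_MAX = %d)\n",
               rel_gfn, word, offset, INT_MAX);
        return 0;
}

Because the offset can never exceed BITS_PER_LONG - 1, it is always representable as an int no matter how large the memory slot becomes.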