Commit fcd95807fb61e67d602610e7ff7129ed769e9fee
1 parent b146a1c9f7
kvm: Change kvm_iommu_map_pages to map large pages
This patch changes the implementation of kvm_iommu_map_pages to map the pages with the host page size into the IO virtual address space.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Acked-By: Avi Kivity <avi@redhat.com>
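The core of the patch is the page-size selection in kvm_iommu_map_pages(): start from the host page size backing a gfn, then halve it until the mapping fits inside the memslot and the gfn is aligned for that size. Below is a standalone userspace sketch of just that clamping logic, not the kernel code itself; PAGE_SHIFT, the gfn values, and main() are illustrative assumptions:

#include <stdio.h>

#define PAGE_SHIFT 12

typedef unsigned long gfn_t;

/* Same two shrink loops as in kvm_iommu_map_pages(): halve the candidate
 * page size until the mapping stays inside [gfn, end_gfn) and gfn is
 * aligned for that size. */
static unsigned long clamp_page_size(gfn_t gfn, gfn_t end_gfn,
                                     unsigned long page_size)
{
        /* Make sure the page_size does not exceed the memslot */
        while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
                page_size >>= 1;

        /* Make sure gfn is aligned to the page size we want to map */
        while ((gfn << PAGE_SHIFT) & (page_size - 1))
                page_size >>= 1;

        return page_size;
}

int main(void)
{
        /* Misaligned gfn 0x201: a 2 MiB host page collapses to 4 KiB. */
        printf("%lu\n", clamp_page_size(0x201, 0x400, 2UL << 20));
        /* Aligned gfn 0x200 with room to spare keeps the full 2 MiB. */
        printf("%lu\n", clamp_page_size(0x200, 0x400, 2UL << 20));
        return 0;
}

In the first call the misaligned start forces the mapping back down to a single 4 KiB page, which is why well-aligned guest memory benefits most from this change.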
Showing 1 changed file with 91 additions and 22 deletions
virt/kvm/iommu.c
@@ -32,12 +32,30 @@
 static void kvm_iommu_put_pages(struct kvm *kvm,
                                 gfn_t base_gfn, unsigned long npages);
 
+static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           gfn_t gfn, unsigned long size)
+{
+        gfn_t end_gfn;
+        pfn_t pfn;
+
+        pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
+        end_gfn = gfn + (size >> PAGE_SHIFT);
+        gfn    += 1;
+
+        if (is_error_pfn(pfn))
+                return pfn;
+
+        while (gfn < end_gfn)
+                gfn_to_pfn_memslot(kvm, slot, gfn++);
+
+        return pfn;
+}
+
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
-        gfn_t gfn = slot->base_gfn;
-        unsigned long npages = slot->npages;
+        gfn_t gfn, end_gfn;
         pfn_t pfn;
-        int i, r = 0;
+        int r = 0;
         struct iommu_domain *domain = kvm->arch.iommu_domain;
         int flags;
 
@@ -45,31 +63,62 @@
         if (!domain)
                 return 0;
 
+        gfn     = slot->base_gfn;
+        end_gfn = gfn + slot->npages;
+
         flags = IOMMU_READ | IOMMU_WRITE;
         if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
                 flags |= IOMMU_CACHE;
 
-        for (i = 0; i < npages; i++) {
-                /* check if already mapped */
-                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
+
+        while (gfn < end_gfn) {
+                unsigned long page_size;
+
+                /* Check if already mapped */
+                if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
+                        gfn += 1;
                         continue;
+                }
 
-                pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
-                r = iommu_map_range(domain,
-                                    gfn_to_gpa(gfn),
-                                    pfn_to_hpa(pfn),
-                                    PAGE_SIZE, flags);
+                /* Get the page size we could use to map */
+                page_size = kvm_host_page_size(kvm, gfn);
+
+                /* Make sure the page_size does not exceed the memslot */
+                while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
+                        page_size >>= 1;
+
+                /* Make sure gfn is aligned to the page size we want to map */
+                while ((gfn << PAGE_SHIFT) & (page_size - 1))
+                        page_size >>= 1;
+
+                /*
+                 * Pin all pages we are about to map in memory. This is
+                 * important because we unmap and unpin in 4kb steps later.
+                 */
+                pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
+                if (is_error_pfn(pfn)) {
+                        gfn += 1;
+                        continue;
+                }
+
+                /* Map into IO address space */
+                r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
+                              get_order(page_size), flags);
                 if (r) {
                         printk(KERN_ERR "kvm_iommu_map_address:"
                                "iommu failed to map pfn=%lx\n", pfn);
                         goto unmap_pages;
                 }
-                gfn++;
+
+                gfn += page_size >> PAGE_SHIFT;
+
+
         }
+
         return 0;
 
 unmap_pages:
-        kvm_iommu_put_pages(kvm, slot->base_gfn, i);
+        kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
         return r;
 }
 
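Note that iommu_map() above takes the mapping size as a page order via get_order(page_size), not in bytes. A quick userspace stand-in (order_of() is a hypothetical re-implementation for illustration, not the kernel's get_order()) shows the correspondence:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Hypothetical stand-in for the kernel's get_order(): the power of two
 * of 4 KiB pages needed to cover size bytes. */
static int order_of(unsigned long size)
{
        int order = 0;

        while ((1UL << (order + PAGE_SHIFT)) < size)
                order++;
        return order;
}

int main(void)
{
        printf("%d\n", order_of(4096));      /* 0: one 4 KiB page */
        printf("%d\n", order_of(2UL << 20)); /* 9: 512 pages, one 2 MiB mapping */
        printf("%d\n", order_of(1UL << 30)); /* 18: one 1 GiB mapping */
        return 0;
}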
@@ -189,27 +238,47 @@
         return r;
 }
 
+static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+{
+        unsigned long i;
+
+        for (i = 0; i < npages; ++i)
+                kvm_release_pfn_clean(pfn + i);
+}
+
 static void kvm_iommu_put_pages(struct kvm *kvm,
                                 gfn_t base_gfn, unsigned long npages)
 {
-        gfn_t gfn = base_gfn;
+        struct iommu_domain *domain;
+        gfn_t end_gfn, gfn;
         pfn_t pfn;
-        struct iommu_domain *domain = kvm->arch.iommu_domain;
-        unsigned long i;
         u64 phys;
 
+        domain  = kvm->arch.iommu_domain;
+        end_gfn = base_gfn + npages;
+        gfn     = base_gfn;
+
         /* check if iommu exists and in use */
         if (!domain)
                 return;
 
-        for (i = 0; i < npages; i++) {
+        while (gfn < end_gfn) {
+                unsigned long unmap_pages;
+                int order;
+
+                /* Get physical address */
                 phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
-                pfn = phys >> PAGE_SHIFT;
-                kvm_release_pfn_clean(pfn);
-                gfn++;
-        }
+                pfn  = phys >> PAGE_SHIFT;
 
-        iommu_unmap_range(domain, gfn_to_gpa(base_gfn), PAGE_SIZE * npages);
+                /* Unmap address from IO address space */
+                order       = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
+                unmap_pages = 1ULL << order;
+
+                /* Unpin all pages we just unmapped to not leak any memory */
+                kvm_unpin_pages(kvm, pfn, unmap_pages);
+
+                gfn += unmap_pages;
+        }
 }
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
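On the teardown side, kvm_iommu_put_pages() no longer assumes 4 KiB mappings: the code relies on iommu_unmap() returning the order of the mapping it actually removed, then unpins 1 << order pfns and advances gfn by the same amount. A toy model of that accounting, with a fake unmap standing in for the real IOMMU API:

#include <stdio.h>

/* Fake unmap: pretend gfn 0x200 holds one 2 MiB mapping (order 9) and
 * every other gfn a plain 4 KiB mapping (order 0). */
static int toy_unmap(unsigned long gfn)
{
        return (gfn == 0x200) ? 9 : 0;
}

int main(void)
{
        unsigned long gfn = 0x200, end_gfn = 0x404;

        /* Same walk as kvm_iommu_put_pages(): advance by however many
         * 4 KiB pages the unmap just released (the kernel unpins each
         * of those pfns here). */
        while (gfn < end_gfn) {
                int order = toy_unmap(gfn);
                unsigned long unmap_pages = 1UL << order;

                printf("gfn %#lx: released %lu page(s)\n", gfn, unmap_pages);
                gfn += unmap_pages;
        }
        return 0;
}

The large mapping is released in a single iteration, which is why the map path pins every 4 KiB page of a large mapping up front: the unpin count must match whatever granularity the IOMMU reports back.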