Commit 8f964525a121f2ff2df948dac908dcc65be21b5b

Authored by Andrew Honig
Committed by Gleb Natapov
1 parent 09a6e1f4ad

KVM: Allow cross page reads and writes from cached translations.

This patch adds support to the kvm_gfn_to_hva_cache_init function for
reads and writes that cross a page boundary.  If the range falls within
a single memslot, the operation stays fast.  If the range is split
across two memslots, the slower kvm_read_guest and kvm_write_guest
paths are used instead.

Tested: Ran against the kvm_clock unit tests.

Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
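
The heart of the change is simple page-span arithmetic: cache init now takes the
access length and computes how many guest frame numbers (gfns) the range covers,
and the fast single-hva path stays enabled only if the backing memslot has that
many contiguous pages available. A minimal userspace sketch of that arithmetic
follows; the local PAGE_SHIFT definition and the sample gpa/len values are
illustrative assumptions, not taken from the kernel headers:

#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed 4 KiB pages, for illustration only */

int main(void)
{
	/* An 8-byte access starting 4 bytes before a page boundary. */
	unsigned long long gpa = 0x1ffc;
	unsigned long len = 8;

	/* The same computation the patch adds to kvm_gfn_to_hva_cache_init(). */
	unsigned long long start_gfn = gpa >> PAGE_SHIFT;
	unsigned long long end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
	unsigned long long nr_pages_needed = end_gfn - start_gfn + 1;

	/* Prints "gfns 1..2, pages needed: 2": the access crosses a page. */
	printf("gfns %llu..%llu, pages needed: %llu\n",
	       start_gfn, end_gfn, nr_pages_needed);
	return 0;
}

With 4 KiB pages, an 8-byte access at 0x1ffc spans gfns 1 and 2; whenever the
needed pages are not contiguous within one memslot, the patch falls back to
kvm_read_guest/kvm_write_guest.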

Showing 5 changed files with 46 additions and 19 deletions

arch/x86/kvm/lapic.c
@@ -1857,7 +1857,7 @@
 	if (!pv_eoi_enabled(vcpu))
 		return 0;
 	return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
-					 addr);
+					 addr, sizeof(u8));
 }
 
 void kvm_lapic_init(void)
arch/x86/kvm/x86.c
@@ -1823,7 +1823,8 @@
 		return 0;
 	}
 
-	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
+					sizeof(u32)))
 		return 1;
 
 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -1952,12 +1953,9 @@
 
 		gpa_offset = data & ~(PAGE_MASK | 1);
 
-		/* Check that the address is 32-byte aligned. */
-		if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
-			break;
-
 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
-		     &vcpu->arch.pv_time, data & ~1ULL))
+		     &vcpu->arch.pv_time, data & ~1ULL,
+		     sizeof(struct pvclock_vcpu_time_info)))
 			vcpu->arch.pv_time_enabled = false;
 		else
 			vcpu->arch.pv_time_enabled = true;
@@ -1977,7 +1975,8 @@
 			return 1;
 
 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-						data & KVM_STEAL_VALID_BITS))
+						data & KVM_STEAL_VALID_BITS,
+						sizeof(struct kvm_steal_time)))
 			return 1;
 
 		vcpu->arch.st.msr_val = data;
include/linux/kvm_host.h
@@ -518,7 +518,7 @@
 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len);
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			      gpa_t gpa);
+			      gpa_t gpa, unsigned long len);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
include/linux/kvm_types.h
@@ -71,6 +71,7 @@
 	u64 generation;
 	gpa_t gpa;
 	unsigned long hva;
+	unsigned long len;
 	struct kvm_memory_slot *memslot;
 };
 
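
For reference, the cache structure as it reads after this hunk; the comments are
editorial annotations, not present in the source:

struct gfn_to_hva_cache {
	u64 generation;        /* memslot generation this cache was built against */
	gpa_t gpa;             /* guest physical base of the cached range */
	unsigned long hva;     /* host virtual address used by the fast path */
	unsigned long len;     /* new: length validated by kvm_gfn_to_hva_cache_init() */
	struct kvm_memory_slot *memslot; /* NULL after init => range spans memslots */
};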
virt/kvm/kvm_main.c
@@ -1541,21 +1541,38 @@
 }
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			      gpa_t gpa)
+			      gpa_t gpa, unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int offset = offset_in_page(gpa);
-	gfn_t gfn = gpa >> PAGE_SHIFT;
+	gfn_t start_gfn = gpa >> PAGE_SHIFT;
+	gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
+	gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
+	gfn_t nr_pages_avail;
 
 	ghc->gpa = gpa;
 	ghc->generation = slots->generation;
-	ghc->memslot = gfn_to_memslot(kvm, gfn);
-	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
-	if (!kvm_is_error_hva(ghc->hva))
+	ghc->len = len;
+	ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+	ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+	if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
 		ghc->hva += offset;
-	else
-		return -EFAULT;
-
+	} else {
+		/*
+		 * If the requested region crosses two memslots, we still
+		 * verify that the entire region is valid here.
+		 */
+		while (start_gfn <= end_gfn) {
+			ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+			ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
+						   &nr_pages_avail);
+			if (kvm_is_error_hva(ghc->hva))
+				return -EFAULT;
+			start_gfn += nr_pages_avail;
+		}
+		/* Use the slow path for cross page reads and writes. */
+		ghc->memslot = NULL;
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
 
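
The else branch above walks the whole range slot by slot, advancing by however
many pages each memslot can supply, and only then parks memslot at NULL as the
slow-path marker. A toy userspace model of that walk; the slot layout, helper
names, and the sample range are invented for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Toy stand-ins for KVM's memslots. */
struct toy_slot { uint64_t base_gfn; uint64_t npages; };

static struct toy_slot slots[] = {
	{ .base_gfn = 0,  .npages = 16 },   /* gfns 0..15  */
	{ .base_gfn = 16, .npages = 16 },   /* gfns 16..31 */
};

/* Mimics gfn_to_hva_many(): find the slot holding gfn, report pages left in it. */
static struct toy_slot *toy_gfn_to_slot(uint64_t gfn, uint64_t *nr_pages_avail)
{
	for (size_t i = 0; i < sizeof(slots) / sizeof(slots[0]); i++) {
		struct toy_slot *s = &slots[i];
		if (gfn >= s->base_gfn && gfn < s->base_gfn + s->npages) {
			*nr_pages_avail = s->base_gfn + s->npages - gfn;
			return s;
		}
	}
	return NULL;
}

int main(void)
{
	/* A range spanning both toy slots: gfns 14..18. */
	uint64_t start_gfn = 14, end_gfn = 18, nr_pages_avail;

	while (start_gfn <= end_gfn) {
		if (!toy_gfn_to_slot(start_gfn, &nr_pages_avail)) {
			puts("-EFAULT: a gfn in the range is unbacked");
			return 1;
		}
		printf("gfn %llu: %llu page(s) available in this slot\n",
		       (unsigned long long)start_gfn,
		       (unsigned long long)nr_pages_avail);
		start_gfn += nr_pages_avail; /* jump to the first gfn of the next slot */
	}
	puts("entire range valid; cached ops will use the slow path");
	return 0;
}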
@@ -1566,9 +1583,14 @@
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 
+	BUG_ON(len > ghc->len);
+
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
 
+	if (unlikely(!ghc->memslot))
+		return kvm_write_guest(kvm, ghc->gpa, data, len);
+
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
 
@@ -1587,8 +1609,13 @@
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	int r;
 
+	BUG_ON(len > ghc->len);
+
 	if (slots->generation != ghc->generation)
-		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+	if (unlikely(!ghc->memslot))
+		return kvm_read_guest(kvm, ghc->gpa, data, len);
 
 	if (kvm_is_error_hva(ghc->hva))
 		return -EFAULT;
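
Taken together, the cached read and write paths now follow one pattern:
re-init if the memslot generation moved, punt to the uncached helpers when
init flagged a cross-slot range, and otherwise do a single direct copy. A toy
model of the write-side dispatch; every type and helper here is an invented
stand-in, not KVM API:

#include <stdio.h>
#include <string.h>

struct toy_cache {
	unsigned long len;    /* length validated at init */
	void *memslot;        /* NULL => range crossed memslots at init */
	char *hva;            /* direct host mapping for the fast path */
};

static char guest_page[4096];

static int toy_write_guest_slow(const void *data, unsigned long len)
{
	/* Stand-in for kvm_write_guest(): per-page copy through gfn lookups. */
	memcpy(guest_page, data, len);
	return 0;
}

static int toy_write_guest_cached(struct toy_cache *ghc, const void *data,
				  unsigned long len)
{
	if (len > ghc->len)          /* the patch enforces this with BUG_ON */
		return -1;
	if (!ghc->memslot)           /* cross-slot range: take the slow path */
		return toy_write_guest_slow(data, len);
	memcpy(ghc->hva, data, len); /* fast path: one direct copy */
	return 0;
}

int main(void)
{
	struct toy_cache fast = { .len = 8, .memslot = guest_page, .hva = guest_page };
	struct toy_cache slow = { .len = 8, .memslot = NULL };

	printf("fast: %d\n", toy_write_guest_cached(&fast, "abcdefg", 8));
	printf("slow: %d\n", toy_write_guest_cached(&slow, "abcdefg", 8));
	return 0;
}

The NULL-memslot sentinel keeps the common case branch-light: one pointer test
decides between the cached copy and the per-page kvm_write_guest() walk.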