Commit edba23e51578f7cb6781461568489fc1825db4ac

Authored by Gleb Natapov
Committed by Avi Kivity
1 parent fa7bff8f8a

KVM: Return EFAULT from kvm ioctl when guest accesses bad area

Currently, if the guest accesses an address that belongs to a memory slot
but is not backed by a page, or the page is read-only, KVM treats it like
an MMIO access. Remove that capability. It was never part of the interface
and should not be relied upon.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>

Showing 3 changed files with 28 additions and 5 deletions Side-by-side Diff

arch/x86/kvm/mmu.c
... ... @@ -2078,7 +2078,9 @@
2078 2078 if (is_hwpoison_pfn(pfn)) {
2079 2079 kvm_send_hwpoison_signal(kvm, gfn);
2080 2080 return 0;
2081   - }
  2081 + } else if (is_fault_pfn(pfn))
  2082 + return -EFAULT;
  2083 +
2082 2084 return 1;
2083 2085 }
2084 2086  
include/linux/kvm_host.h
... ... @@ -269,6 +269,7 @@
269 269 int is_error_page(struct page *page);
270 270 int is_error_pfn(pfn_t pfn);
271 271 int is_hwpoison_pfn(pfn_t pfn);
  272 +int is_fault_pfn(pfn_t pfn);
272 273 int kvm_is_error_hva(unsigned long addr);
273 274 int kvm_set_memory_region(struct kvm *kvm,
274 275 struct kvm_userspace_memory_region *mem,
virt/kvm/kvm_main.c
... ... @@ -96,6 +96,9 @@
96 96 static struct page *hwpoison_page;
97 97 static pfn_t hwpoison_pfn;
98 98  
  99 +static struct page *fault_page;
  100 +static pfn_t fault_pfn;
  101 +
99 102 inline int kvm_is_mmio_pfn(pfn_t pfn)
100 103 {
101 104 if (pfn_valid(pfn)) {
102 105  
... ... @@ -815,13 +818,13 @@
815 818  
816 819 int is_error_page(struct page *page)
817 820 {
818   - return page == bad_page || page == hwpoison_page;
  821 + return page == bad_page || page == hwpoison_page || page == fault_page;
819 822 }
820 823 EXPORT_SYMBOL_GPL(is_error_page);
821 824  
822 825 int is_error_pfn(pfn_t pfn)
823 826 {
824   - return pfn == bad_pfn || pfn == hwpoison_pfn;
  827 + return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
825 828 }
826 829 EXPORT_SYMBOL_GPL(is_error_pfn);
827 830  
... ... @@ -831,6 +834,12 @@
831 834 }
832 835 EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
833 836  
  837 +int is_fault_pfn(pfn_t pfn)
  838 +{
  839 + return pfn == fault_pfn;
  840 +}
  841 +EXPORT_SYMBOL_GPL(is_fault_pfn);
  842 +
834 843 static inline unsigned long bad_hva(void)
835 844 {
836 845 return PAGE_OFFSET;
... ... @@ -959,8 +968,8 @@
959 968 if (vma == NULL || addr < vma->vm_start ||
960 969 !(vma->vm_flags & VM_PFNMAP)) {
961 970 up_read(&current->mm->mmap_sem);
962   - get_page(bad_page);
963   - return page_to_pfn(bad_page);
  971 + get_page(fault_page);
  972 + return page_to_pfn(fault_page);
964 973 }
965 974  
966 975 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
... ... @@ -2226,6 +2235,15 @@
2226 2235  
2227 2236 hwpoison_pfn = page_to_pfn(hwpoison_page);
2228 2237  
  2238 + fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
  2239 +
  2240 + if (fault_page == NULL) {
  2241 + r = -ENOMEM;
  2242 + goto out_free_0;
  2243 + }
  2244 +
  2245 + fault_pfn = page_to_pfn(fault_page);
  2246 +
2229 2247 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
2230 2248 r = -ENOMEM;
2231 2249 goto out_free_0;
... ... @@ -2298,6 +2316,8 @@
2298 2316 out_free_0a:
2299 2317 free_cpumask_var(cpus_hardware_enabled);
2300 2318 out_free_0:
  2319 + if (fault_page)
  2320 + __free_page(fault_page);
2301 2321 if (hwpoison_page)
2302 2322 __free_page(hwpoison_page);
2303 2323 __free_page(bad_page);