Commit 2ab640379a0ab4cef746ced1d7e04a0941774bcb

Authored by Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Committed by H. Peter Anvin <hpa@zytor.com>
1 parent e121e41844

x86: PAT: hooks in generic vm code to help archs to track pfnmap regions - v3

Impact: Introduces new hooks, which are currently no-ops.

Introduce generic hooks in remap_pfn_range and vm_insert_pfn, and in the
corresponding copy (copy_page_range) and free (unmap_vmas) routines, with
reserve and free tracking of the mapped pfn range.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>

Showing 2 changed files with 81 additions and 1 deletion

include/linux/mm.h

@@ -155,6 +155,12 @@
 	return (vma->vm_flags & VM_PFNMAP);
 }
 
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+				unsigned long pfn, unsigned long size);
+extern int track_pfn_vma_copy(struct vm_area_struct *vma);
+extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+				unsigned long size);
+
 /*
  * vm_fault is filled by the the pagefault handler and passed to the vma's
  * ->fault function. The vma's ->fault is responsible for returning a bitmask
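Note: the stubs added to mm/memory.c below are guarded with #ifndef on the
hook names, so an architecture opts in by defining those names as macros in
a header visible to mm/memory.c and supplying its own definitions. A minimal
sketch of that opt-in, assuming a hypothetical arch pgtable header; this
fragment is not part of the patch:

/*
 * Hypothetical arch header fragment (not in this patch).  Defining the
 * hook names as macros suppresses the generic no-op stubs in
 * mm/memory.c; the architecture then provides its own out-of-line
 * definitions of the three functions declared above.
 */
#define track_pfn_vma_new	track_pfn_vma_new
#define track_pfn_vma_copy	track_pfn_vma_copy
#define untrack_pfn_vma		untrack_pfn_vma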
mm/memory.c

@@ -99,6 +99,50 @@
 			2;
 #endif
 
+#ifndef track_pfn_vma_new
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_new is called when a _new_ pfn mapping is being established
+ * for physical range indicated by pfn and size.
+ */
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+			unsigned long pfn, unsigned long size)
+{
+	return 0;
+}
+#endif
+
+#ifndef track_pfn_vma_copy
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+	return 0;
+}
+#endif
+
+#ifndef untrack_pfn_vma
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case size can be zero).
+ */
+void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+			unsigned long size)
+{
+}
+#endif
+
 static int __init disable_randmaps(char *s)
 {
 	randomize_va_space = 0;
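To make the intended use concrete, here is a rough sketch of the shape an
architecture-side implementation could take. Everything in it (the range
list, the locking, and recovering the base pfn from vm_pgoff in the
whole-vma case) is an illustrative assumption, not the x86 PAT code that
later builds on these hooks:

/*
 * Hypothetical architecture-side implementation (illustrative only).
 * It simply remembers each reserved pfn range so it can be released
 * on untrack; a real implementation (e.g. x86 PAT) would reserve a
 * memory type for the physical range instead.
 */
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct pfn_track_range {
	struct list_head list;
	unsigned long pfn;
	unsigned long size;
};

static DEFINE_SPINLOCK(pfn_track_lock);
static LIST_HEAD(pfn_track_list);

int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
			unsigned long pfn, unsigned long size)
{
	struct pfn_track_range *r = kmalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return -ENOMEM;
	r->pfn = pfn;
	r->size = size;
	spin_lock(&pfn_track_lock);
	list_add(&r->list, &pfn_track_list);
	spin_unlock(&pfn_track_lock);
	return 0;
}

int track_pfn_vma_copy(struct vm_area_struct *vma)
{
	/* fork duplicated the vma: take a reservation for the copy too */
	return track_pfn_vma_new(vma, vma->vm_page_prot, vma->vm_pgoff,
				 vma->vm_end - vma->vm_start);
}

void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
			unsigned long size)
{
	struct pfn_track_range *r, *tmp;

	/*
	 * size == 0 means "the whole vma"; recover the range from the
	 * vma (remap_pfn_range stores the base pfn in vm_pgoff for
	 * linear VM_PFNMAP mappings).
	 */
	if (!size) {
		pfn = vma->vm_pgoff;
		size = vma->vm_end - vma->vm_start;
	}

	spin_lock(&pfn_track_lock);
	list_for_each_entry_safe(r, tmp, &pfn_track_list, list) {
		if (r->pfn == pfn && r->size == size) {
			list_del(&r->list);
			kfree(r);
			break;
		}
	}
	spin_unlock(&pfn_track_lock);
}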
@@ -669,6 +713,16 @@
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+	if (is_pfn_mapping(vma)) {
+		/*
+		 * We do not free on error cases below as remove_vma
+		 * gets called on error from higher level routine
+		 */
+		ret = track_pfn_vma_copy(vma);
+		if (ret)
+			return ret;
+	}
+
 	/*
 	 * We need to invalidate the secondary MMU mappings only when
 	 * there could be a permission downgrade on the ptes of the
@@ -915,6 +969,9 @@
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
+		if (is_pfn_mapping(vma))
+			untrack_pfn_vma(vma, 0, 0);
+
 		while (start != end) {
 			if (!tlb_start_valid) {
 				tlb_start = start;
@@ -1473,6 +1530,7 @@
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	int ret;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1487,7 +1545,15 @@
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+		return -EINVAL;
+
+	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+	if (ret)
+		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+	return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
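For context, a hypothetical driver fault handler shows the vm_insert_pfn()
path that now runs through the hooks; the handler and mydev_base_pfn are
assumptions for illustration, not code from this patch:

/*
 * Hypothetical driver fault handler (not part of this patch);
 * mydev_base_pfn is an assumed driver-private base frame number.
 */
#include <linux/mm.h>

static unsigned long mydev_base_pfn;

static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->virtual_address;

	/*
	 * vm_insert_pfn() now calls track_pfn_vma_new() for the single
	 * page, and untrack_pfn_vma() itself if insert_pfn() fails.
	 */
	if (vm_insert_pfn(vma, addr, mydev_base_pfn + vmf->pgoff))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}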
@@ -1625,6 +1691,10 @@
 
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	if (err)
+		return -EINVAL;
+
 	BUG_ON(addr >= end);
 	pfn -= addr >> PAGE_SHIFT;
 	pgd = pgd_offset(mm, addr);
@@ -1636,6 +1706,10 @@
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
+
+	if (err)
+		untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
 	return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
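Likewise on the remap_pfn_range() side, a minimal hypothetical mmap method
(mydev_phys is an assumed physical base of device memory); a single call
now reserves the whole range up front and releases it again on failure:

/*
 * Hypothetical driver mmap method (not part of this patch).
 */
#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long mydev_phys;

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * remap_pfn_range() now reserves the whole physical range up
	 * front via track_pfn_vma_new() and calls untrack_pfn_vma()
	 * if page table setup fails part-way through.
	 */
	if (remap_pfn_range(vma, vma->vm_start,
			    (mydev_phys >> PAGE_SHIFT) + vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}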