Commit e121e418441525b5636321fe03d16f0193ad218e

Authored by venkatesh.pallipadi@intel.com
Committed by H. Peter Anvin
1 parent 3c8bb73ace

x86: PAT: add follow_pfnmap_pte routine to help tracking pfnmap pages - v3

Impact: New, currently unused interface.

Add a generic interface to follow the pfn in a pfnmap vma range. This is used by
one of the subsequent x86 PAT related patches to keep track of memory types
for vma regions across vma copy and free.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
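
For context, a minimal sketch of how a PAT-tracking caller might use the new
helper to recover the page frame number and protection bits behind a single
address in a pfnmap vma. The wrapper name sample_pfnmap_prot() is this
editor's illustration, not part of this patch; pte_pfn() and pte_pgprot()
are the usual x86 pte accessors.

    #include <linux/mm.h>

    /*
     * Illustrative caller (not from this patch): fetch the pfn and the
     * protection bits (which carry the PAT memory type on x86) for one
     * address inside a VM_PFNMAP mapping.
     */
    static int sample_pfnmap_prot(struct vm_area_struct *vma,
    			      unsigned long addr,
    			      unsigned long *pfn, pgprot_t *prot)
    {
    	pte_t pte;

    	if (follow_pfnmap_pte(vma, addr, &pte))
    		return -EINVAL;		/* not a pfnmap, or no present pte */

    	*pfn = pte_pfn(pte);		/* page frame number from the pte */
    	*prot = pte_pgprot(pte);	/* includes the PAT memory-type bits */
    	return 0;
    }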

Showing 2 changed files with 46 additions and 0 deletions

include/linux/mm.h
@@ -1223,6 +1223,9 @@
 #define FOLL_GET	0x04	/* do get_page on page */
 #define FOLL_ANON	0x08	/* give ZERO_PAGE if no pgtable */
 
+int follow_pfnmap_pte(struct vm_area_struct *vma,
+			unsigned long address, pte_t *ret_ptep);
+
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
 extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
mm/memory.c
@@ -1111,6 +1111,49 @@
 	return page;
 }
 
+int follow_pfnmap_pte(struct vm_area_struct *vma, unsigned long address,
+			pte_t *ret_ptep)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+	spinlock_t *ptl;
+	struct page *page;
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (!is_pfn_mapping(vma))
+		goto err;
+
+	page = NULL;
+	pgd = pgd_offset(mm, address);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto err;
+
+	pud = pud_offset(pgd, address);
+	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+		goto err;
+
+	pmd = pmd_offset(pud, address);
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		goto err;
+
+	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+
+	pte = *ptep;
+	if (!pte_present(pte))
+		goto err_unlock;
+
+	*ret_ptep = pte;
+	pte_unmap_unlock(ptep, ptl);
+	return 0;
+
+err_unlock:
+	pte_unmap_unlock(ptep, ptl);
+err:
+	return -EINVAL;
+}
+
 /* Can we do the FOLL_ANON optimization? */
 static inline int use_zero_page(struct vm_area_struct *vma)
 {
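
The commit message notes that a follow-up patch uses this interface to keep
memory types consistent across vma copy and free. As a hedged sketch of that
intended use: the function below is this editor's illustration, not the
follow-up patch itself, and it assumes the caller already holds mmap_sem,
since the walk above reads live page tables.

    /*
     * Hedged sketch of the intended consumer: on vma copy (fork), look up
     * the pfn backing the start of a remapped range so its memory type
     * can be carried over to the child. Assumes mmap_sem is held.
     */
    static int track_pfn_vma_copy(struct vm_area_struct *vma)
    {
    	pte_t pte;
    	unsigned long pfn, npages;

    	if (follow_pfnmap_pte(vma, vma->vm_start, &pte))
    		return -EINVAL;

    	pfn = pte_pfn(pte);
    	npages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

    	/*
    	 * Here the real code would re-reserve the memory type already
    	 * recorded for [pfn, pfn + npages); the reservation API is out
    	 * of scope for this patch.
    	 */
    	return 0;
    }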