mm/page_vma_mapped.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
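
/*
 * Note (added commentary): struct page_vma_mapped_walk is declared in
 * include/linux/rmap.h. It bundles the target page, the vma and address
 * to check, the pmd/pte/ptl cursor state filled in by the walk, and the
 * PVMW_* behaviour flags.
 */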

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}
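
/*
 * Note (added commentary): the PVMW_* flags tested by map_pte() and
 * check_pte() are defined in include/linux/rmap.h. PVMW_SYNC tells the
 * walk to avoid racy lockless pre-checks and always take the PTE lock;
 * PVMW_MIGRATION tells it to look for migration entries rather than
 * present PTEs.
 */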

/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to empty PTE, swap PTE or PTE pointing to arbitrary
 * page.
 *
 * If PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains migration
 * entry that points to @pvmw->page or any subpage in case of THP.
 *
 * If PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page or any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	if (pfn < page_to_pfn(pvmw->page))
		return false;

	/* THP can be referenced by any subpage */
	if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page))
		return false;

	return true;
}
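
/*
 * Worked example (added commentary): the two range checks at the end of
 * check_pte() accept any pfn in the half-open range
 * [page_to_pfn(page), page_to_pfn(page) + hpage_nr_pages(page)). For an
 * x86-64 2MB THP whose head page has pfn 0x1000, the 512 pfns
 * 0x1000..0x11ff all count as hits.
 */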

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
 * is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually THP). For PTE-mapped THP, you should run page_vma_mapped_walk() in
 * a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
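	/*
	 * Note (added commentary): the walk descends the full five-level
	 * layout, pgd -> p4d -> pud -> pmd. On architectures with fewer
	 * paging levels the intermediate p4d/pud helpers are folded at
	 * compile time, so the extra steps cost nothing.
	 */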
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	/*
	 * Make sure the pmd value isn't cached in a register by the
	 * compiler and used as a stale value after we've observed a
	 * subsequent update.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else if (!pmd_present(*pvmw->pmd)) {
			if (thp_migration_supported()) {
				if (!(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

					if (migration_entry_to_page(entry) != page)
						return not_found(pvmw);
					return true;
				}
			}
			return not_found(pvmw);
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else if (!pmd_present(pmde)) {
		return false;
	}

	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
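
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * how a caller is expected to drive page_vma_mapped_walk() in a loop, as
 * the kernel-doc above describes for PTE-mapped THPs. The helper name is
 * hypothetical; real callers live in mm/rmap.c.
 */
static unsigned int __maybe_unused
example_count_mappings_in_vma(struct page *page, struct vm_area_struct *vma,
			      unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	unsigned int nr_mapped = 0;

	/*
	 * Each true return leaves pvmw.ptl locked with pvmw.pte and/or
	 * pvmw.pmd pointing at a mapping of the page; the next call drops
	 * the lock before moving on.
	 */
	while (page_vma_mapped_walk(&pvmw)) {
		if (!pvmw.pte)
			nr_mapped += hpage_nr_pages(page); /* PMD-mapped THP */
		else
			nr_mapped++; /* single PTE (or hugetlb entry) */
	}
	/* On the false return the walk has already unlocked and unmapped. */
	return nr_mapped;
}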

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
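
/*
 * Usage note (added commentary): page_mapped_in_vma() is a plain yes/no
 * test. For example, the hwpoison code in mm/memory-failure.c skips any
 * vma for which it returns 0 while collecting the processes to kill.
 */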