mm/page_vma_mapped.c

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/mm.h>
  #include <linux/rmap.h>
  #include <linux/hugetlb.h>
  #include <linux/swap.h>
  #include <linux/swapops.h>
  
  #include "internal.h"
  static inline bool not_found(struct page_vma_mapped_walk *pvmw)
  {
  	page_vma_mapped_walk_done(pvmw);
  	return false;
  }
  
  static bool map_pte(struct page_vma_mapped_walk *pvmw)
  {
  	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
  	if (!(pvmw->flags & PVMW_SYNC)) {
  		if (pvmw->flags & PVMW_MIGRATION) {
  			if (!is_swap_pte(*pvmw->pte))
  				return false;
  		} else {
  			/*
  			 * We get here when we are trying to unmap a private
  			 * device page from the process address space. Such a
  			 * page is not CPU accessible and is therefore mapped
  			 * as a special swap entry; nonetheless it still counts
  			 * as a valid regular mapping for the page (and is
  			 * accounted as such in the page map counts).
  			 *
  			 * So handle this special case as if it were a normal
  			 * page mapping, i.e. lock the CPU page table and
  			 * return true.
  			 *
  			 * For more details on device private memory see HMM
  			 * (include/linux/hmm.h or mm/hmm.c).
  			 */
  			if (is_swap_pte(*pvmw->pte)) {
  				swp_entry_t entry;
  
  				/* Handle un-addressable ZONE_DEVICE memory */
  				entry = pte_to_swp_entry(*pvmw->pte);
  				if (!is_device_private_entry(entry))
  					return false;
  			} else if (!pte_present(*pvmw->pte))
  				return false;
  		}
  	}
  	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
  	spin_lock(pvmw->ptl);
  	return true;
  }
  /**
   * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
   * @pvmw: page_vma_mapped_walk struct, includes the pte and page to check
   *
   * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
   * mapped. check_pte() has to validate this.
   *
   * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
   * arbitrary page.
   *
   * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
   * migration entry that points to @pvmw->page or any subpage in case of THP.
   *
   * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
   * to @pvmw->page or any subpage in case of THP.
   *
   * Otherwise, returns false.
   */
  static bool check_pte(struct page_vma_mapped_walk *pvmw)
  {
  	unsigned long pfn;
  	if (pvmw->flags & PVMW_MIGRATION) {
  		swp_entry_t entry;
  		if (!is_swap_pte(*pvmw->pte))
  			return false;
  		entry = pte_to_swp_entry(*pvmw->pte);

  		if (!is_migration_entry(entry))
  			return false;

  		pfn = migration_entry_to_pfn(entry);
  	} else if (is_swap_pte(*pvmw->pte)) {
  		swp_entry_t entry;

  		/* Handle un-addressable ZONE_DEVICE memory */
  		entry = pte_to_swp_entry(*pvmw->pte);
  		if (!is_device_private_entry(entry))
  			return false;
  		pfn = device_private_entry_to_pfn(entry);
  	} else {
  		if (!pte_present(*pvmw->pte))
  			return false;
  
  		pfn = pte_pfn(*pvmw->pte);
  	}
  	if (pfn < page_to_pfn(pvmw->page))
  		return false;
  
  	/* THP can be referenced by any subpage */
  	if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page))
  		return false;
  	return true;
  }
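
  /*
   * Illustrative sketch (not part of the kernel sources): the subpage test
   * that check_pte() performs above, pulled out as a standalone helper.  For
   * a compound page whose head has pfn H and which spans N subpages, any pfn
   * in [H, H + N) counts as mapping the page.  The "example_" name is made up
   * for this sketch.
   */
  static inline bool example_pfn_maps_page(unsigned long pfn,
  					 unsigned long head_pfn,
  					 unsigned long nr_pages)
  {
  	/* Reject pfns below the head page... */
  	if (pfn < head_pfn)
  		return false;
  	/* ...and pfns past the last subpage of the compound page. */
  	return pfn - head_pfn < nr_pages;
  }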
  
  /**
   * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
   * @pvmw->address
   * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
   * must be set. pmd, pte and ptl must be NULL.
   *
   * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
   * point to relevant page table entries. @pvmw->ptl is locked. @pvmw->address
   * is adjusted if needed (for PTE-mapped THPs).
   *
   * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
   * page (usually THP). For PTE-mapped THP, you should run
   * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
   *
   * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
   * regardless of which page table level the page is mapped at. @pvmw->pmd is
   * NULL.
   *
   * Returns false if there are no more page table entries for the page in
   * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
   *
   * If you need to stop the walk before page_vma_mapped_walk() returns false,
   * use page_vma_mapped_walk_done(). It will do the housekeeping.
   */
  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
  {
  	struct mm_struct *mm = pvmw->vma->vm_mm;
  	struct page *page = pvmw->page;
  	pgd_t *pgd;
  	p4d_t *p4d;
  	pud_t *pud;
  	pmd_t pmde;
  
  	/* The only possible pmd mapping has been handled on the last iteration */
  	if (pvmw->pmd && !pvmw->pte)
  		return not_found(pvmw);
  	if (pvmw->pte)
  		goto next_pte;
  
  	if (unlikely(PageHuge(pvmw->page))) {
  		/* when pud is not present, pte will be NULL */
  		pvmw->pte = huge_pte_offset(mm, pvmw->address,
  					    PAGE_SIZE << compound_order(page));
  		if (!pvmw->pte)
  			return false;
  
  		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
  		spin_lock(pvmw->ptl);
  		if (!check_pte(pvmw))
  			return not_found(pvmw);
  		return true;
  	}
  restart:
  	pgd = pgd_offset(mm, pvmw->address);
  	if (!pgd_present(*pgd))
  		return false;
  	p4d = p4d_offset(pgd, pvmw->address);
  	if (!p4d_present(*p4d))
  		return false;
  	pud = pud_offset(p4d, pvmw->address);
  	if (!pud_present(*pud))
  		return false;
  	pvmw->pmd = pmd_offset(pud, pvmw->address);
  	/*
  	 * Make sure the pmd value isn't cached in a register by the
  	 * compiler and used as a stale value after we've observed a
  	 * subsequent update.
  	 */
  	pmde = READ_ONCE(*pvmw->pmd);
  	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
  		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
  		if (likely(pmd_trans_huge(*pvmw->pmd))) {
  			if (pvmw->flags & PVMW_MIGRATION)
  				return not_found(pvmw);
  			if (pmd_page(*pvmw->pmd) != page)
  				return not_found(pvmw);
  			return true;
  		} else if (!pmd_present(*pvmw->pmd)) {
  			if (thp_migration_supported()) {
  				if (!(pvmw->flags & PVMW_MIGRATION))
  					return not_found(pvmw);
  				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
  					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
  
  					if (migration_entry_to_page(entry) != page)
  						return not_found(pvmw);
  					return true;
  				}
  			}
  			return not_found(pvmw);
  		} else {
  			/* THP pmd was split under us: handle on pte level */
  			spin_unlock(pvmw->ptl);
  			pvmw->ptl = NULL;
  		}
  	} else if (!pmd_present(pmde)) {
  		return false;
  	}
  	if (!map_pte(pvmw))
  		goto next_pte;
  	while (1) {
  		if (check_pte(pvmw))
  			return true;
  next_pte:
  		/* Seeking to the next pte only makes sense for THP */
  		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
  			return not_found(pvmw);
  		do {
  			pvmw->address += PAGE_SIZE;
  			if (pvmw->address >= pvmw->vma->vm_end ||
  			    pvmw->address >=
  					__vma_address(pvmw->page, pvmw->vma) +
  					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
  				return not_found(pvmw);
  			/* Did we cross page table boundary? */
  			if (pvmw->address % PMD_SIZE == 0) {
  				pte_unmap(pvmw->pte);
  				if (pvmw->ptl) {
  					spin_unlock(pvmw->ptl);
  					pvmw->ptl = NULL;
  				}
  				goto restart;
  			} else {
  				pvmw->pte++;
  			}
  		} while (pte_none(*pvmw->pte));
  
  		if (!pvmw->ptl) {
  			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
  			spin_lock(pvmw->ptl);
  		}
  	}
  }
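
  /*
   * Illustrative sketch (not part of the kernel sources): a hypothetical rmap
   * helper driving page_vma_mapped_walk() as described in the kernel-doc
   * above.  Every successful return leaves pvmw.ptl held for the entry that
   * was found; for a PTE-mapped THP the loop keeps iterating until every
   * mapping PTE has been visited.  The "example_" name is made up for this
   * sketch.
   */
  static bool example_visit_mappings(struct page *page,
  				   struct vm_area_struct *vma,
  				   unsigned long address)
  {
  	struct page_vma_mapped_walk pvmw = {
  		.page = page,
  		.vma = vma,
  		.address = address,
  		.flags = 0,
  	};
  	bool found = false;
  
  	while (page_vma_mapped_walk(&pvmw)) {
  		found = true;
  		if (!pvmw.pte) {
  			/* pmd set, pte NULL: PMD-mapped THP; stop early. */
  			page_vma_mapped_walk_done(&pvmw);
  			break;
  		}
  		/*
  		 * PTE-level mapping (possibly one subpage of a THP): the PTE
  		 * can be inspected here under pvmw.ptl.  The next iteration
  		 * advances to the following mapping PTE, if any.
  		 */
  	}
  	/* When the walk returns false it has already dropped its locks. */
  	return found;
  }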
  
  /**
   * page_mapped_in_vma - check whether a page is really mapped in a VMA
   * @page: the page to test
   * @vma: the VMA to test
   *
   * Returns 1 if the page is mapped into the page tables of the VMA, 0 if it
   * is not. Only valid for normal file or anonymous VMAs.
   */
  int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
  {
  	struct page_vma_mapped_walk pvmw = {
  		.page = page,
  		.vma = vma,
  		.flags = PVMW_SYNC,
  	};
  	unsigned long start, end;
  
  	start = __vma_address(page, vma);
  	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
  
  	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
  		return 0;
  	pvmw.address = max(start, vma->vm_start);
  	if (!page_vma_mapped_walk(&pvmw))
  		return 0;
  	page_vma_mapped_walk_done(&pvmw);
  	return 1;
  }
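
  /*
   * Illustrative sketch (not part of the kernel sources): a hypothetical
   * caller that only needs a yes/no answer for a single VMA can use
   * page_mapped_in_vma() directly instead of driving page_vma_mapped_walk()
   * itself.  Locking (e.g. holding the page lock) remains the caller's
   * responsibility; the "example_" name is made up for this sketch.
   */
  static bool example_page_is_mapped_here(struct page *page,
  					struct vm_area_struct *vma)
  {
  	/* 1 means at least one mapping of @page exists in @vma, 0 means none. */
  	return page_mapped_in_vma(page, vma) != 0;
  }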