mm/page_vma_mapped.c

  // SPDX-License-Identifier: GPL-2.0
  #include <linux/mm.h>
  #include <linux/rmap.h>
  #include <linux/hugetlb.h>
  #include <linux/swap.h>
  #include <linux/swapops.h>
  
  #include "internal.h"
  static inline bool not_found(struct page_vma_mapped_walk *pvmw)
  {
  	page_vma_mapped_walk_done(pvmw);
  	return false;
  }
  
  static bool map_pte(struct page_vma_mapped_walk *pvmw)
  {
  	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
  	if (!(pvmw->flags & PVMW_SYNC)) {
  		if (pvmw->flags & PVMW_MIGRATION) {
  			if (!is_swap_pte(*pvmw->pte))
  				return false;
  		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space. Such a
			 * page is not CPU accessible and thus is mapped as
			 * a special swap entry; nonetheless it still counts
			 * as a valid regular mapping for the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
  			if (is_swap_pte(*pvmw->pte)) {
  				swp_entry_t entry;
  
  				/* Handle un-addressable ZONE_DEVICE memory */
  				entry = pte_to_swp_entry(*pvmw->pte);
  				if (!is_device_private_entry(entry))
  					return false;
  			} else if (!pte_present(*pvmw->pte))
  				return false;
  		}
  	}
  	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
  	spin_lock(pvmw->ptl);
  	return true;
  }
  static inline bool pfn_is_match(struct page *page, unsigned long pfn)
  {
  	unsigned long page_pfn = page_to_pfn(page);
  
  	/* normal page and hugetlbfs page */
  	if (!PageTransCompound(page) || PageHuge(page))
  		return page_pfn == pfn;
  
  	/* THP can be referenced by any subpage */
  	return pfn >= page_pfn && pfn - page_pfn < thp_nr_pages(page);
  }
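
/*
 * Worked example (illustrative, not from the original source): for an
 * order-9 THP whose head page sits at pfn 0x1000, thp_nr_pages() is 512,
 * so pfn_is_match() accepts any pfn in [0x1000, 0x11ff]; a pfn of 0x1200
 * fails the "pfn - page_pfn < thp_nr_pages(page)" check and is rejected.
 */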
/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any subpage in case of
 * THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * to @pvmw->page, or to any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
  static bool check_pte(struct page_vma_mapped_walk *pvmw)
  {
  	unsigned long pfn;
  	if (pvmw->flags & PVMW_MIGRATION) {
  		swp_entry_t entry;
  		if (!is_swap_pte(*pvmw->pte))
  			return false;
  		entry = pte_to_swp_entry(*pvmw->pte);

  		if (!is_migration_entry(entry))
  			return false;

  		pfn = migration_entry_to_pfn(entry);
  	} else if (is_swap_pte(*pvmw->pte)) {
  		swp_entry_t entry;

  		/* Handle un-addressable ZONE_DEVICE memory */
  		entry = pte_to_swp_entry(*pvmw->pte);
  		if (!is_device_private_entry(entry))
  			return false;
  		pfn = device_private_entry_to_pfn(entry);
  	} else {
  		if (!pte_present(*pvmw->pte))
  			return false;
  
  		pfn = pte_pfn(*pvmw->pte);
  	}
  	return pfn_is_match(pvmw->page, pfn);
  }
  
  /**
   * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
   * @pvmw->address
   * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
   * must be set. pmd, pte and ptl must be NULL.
   *
   * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
   * to relevant page table entries. @pvmw->ptl is locked. @pvmw->address is
   * adjusted if needed (for PTE-mapped THPs).
   *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually a THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP.
   *
   * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
   * regardless of which page table level the page is mapped at. @pvmw->pmd is
   * NULL.
   *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
   *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
   */
  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
  {
  	struct mm_struct *mm = pvmw->vma->vm_mm;
  	struct page *page = pvmw->page;
  	pgd_t *pgd;
  	p4d_t *p4d;
  	pud_t *pud;
  	pmd_t pmde;
  
  	/* The only possible pmd mapping has been handled on last iteration */
  	if (pvmw->pmd && !pvmw->pte)
  		return not_found(pvmw);
  	if (pvmw->pte)
  		goto next_pte;
  
  	if (unlikely(PageHuge(pvmw->page))) {
  		/* when pud is not present, pte will be NULL */
  		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
  		if (!pvmw->pte)
  			return false;
  
  		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
  		spin_lock(pvmw->ptl);
  		if (!check_pte(pvmw))
  			return not_found(pvmw);
  		return true;
  	}
  restart:
  	pgd = pgd_offset(mm, pvmw->address);
  	if (!pgd_present(*pgd))
  		return false;
  	p4d = p4d_offset(pgd, pvmw->address);
  	if (!p4d_present(*p4d))
  		return false;
  	pud = pud_offset(p4d, pvmw->address);
  	if (!pud_present(*pud))
  		return false;
  	pvmw->pmd = pmd_offset(pud, pvmw->address);
  	/*
  	 * Make sure the pmd value isn't cached in a register by the
  	 * compiler and used as a stale value after we've observed a
  	 * subsequent update.
  	 */
  	pmde = READ_ONCE(*pvmw->pmd);
  	if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
  		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
  		if (likely(pmd_trans_huge(*pvmw->pmd))) {
  			if (pvmw->flags & PVMW_MIGRATION)
  				return not_found(pvmw);
  			if (pmd_page(*pvmw->pmd) != page)
  				return not_found(pvmw);
  			return true;
  		} else if (!pmd_present(*pvmw->pmd)) {
  			if (thp_migration_supported()) {
  				if (!(pvmw->flags & PVMW_MIGRATION))
  					return not_found(pvmw);
  				if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
  					swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
  
  					if (migration_entry_to_page(entry) != page)
  						return not_found(pvmw);
  					return true;
  				}
  			}
  			return not_found(pvmw);
  		} else {
  			/* THP pmd was split under us: handle on pte level */
  			spin_unlock(pvmw->ptl);
  			pvmw->ptl = NULL;
  		}
  	} else if (!pmd_present(pmde)) {
  		return false;
  	}
  	if (!map_pte(pvmw))
  		goto next_pte;
  	while (1) {
  		if (check_pte(pvmw))
  			return true;
  next_pte:
  		/* Seek to next pte only makes sense for THP */
  		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
  			return not_found(pvmw);
  		do {
  			pvmw->address += PAGE_SIZE;
  			if (pvmw->address >= pvmw->vma->vm_end ||
  			    pvmw->address >=
  					__vma_address(pvmw->page, pvmw->vma) +
  					thp_size(pvmw->page))
  				return not_found(pvmw);
  			/* Did we cross page table boundary? */
  			if (pvmw->address % PMD_SIZE == 0) {
  				pte_unmap(pvmw->pte);
  				if (pvmw->ptl) {
  					spin_unlock(pvmw->ptl);
  					pvmw->ptl = NULL;
  				}
  				goto restart;
  			} else {
  				pvmw->pte++;
  			}
  		} while (pte_none(*pvmw->pte));
  
  		if (!pvmw->ptl) {
  			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
  			spin_lock(pvmw->ptl);
  		}
  	}
  }
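
/*
 * Hedged usage sketch (not part of the original file): the kernel-doc above
 * says a PTE-mapped THP needs page_vma_mapped_walk() to be driven in a loop.
 * The function and variable names below (example_count_mappings_in_vma, nr)
 * are hypothetical; real callers live in mm/rmap.c.
 */
#if 0
static int example_count_mappings_in_vma(struct page *page,
					 struct vm_area_struct *vma,
					 unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int nr = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * Either pvmw.pte points to a mapped, locked PTE, or pvmw.pte
		 * is NULL and pvmw.pmd points to a PMD-mapped THP; pvmw.ptl
		 * is held in both cases.
		 */
		nr++;
		/*
		 * To bail out early, call page_vma_mapped_walk_done(&pvmw)
		 * and break; otherwise the next iteration unlocks, unmaps
		 * and advances to the next mapping PTE.
		 */
	}
	return nr;
}
#endif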
  
  /**
   * page_mapped_in_vma - check whether a page is really mapped in a VMA
   * @page: the page to test
   * @vma: the VMA to test
   *
   * Returns 1 if the page is mapped into the page tables of the VMA, 0
   * if the page is not mapped into the page tables of this VMA.  Only
   * valid for normal file or anonymous VMAs.
   */
  int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
  {
  	struct page_vma_mapped_walk pvmw = {
  		.page = page,
  		.vma = vma,
  		.flags = PVMW_SYNC,
  	};
  	unsigned long start, end;
  
  	start = __vma_address(page, vma);
  	end = start + thp_size(page) - PAGE_SIZE;
  
  	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
  		return 0;
  	pvmw.address = max(start, vma->vm_start);
  	if (!page_vma_mapped_walk(&pvmw))
  		return 0;
  	page_vma_mapped_walk_done(&pvmw);
  	return 1;
  }
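
/*
 * Hedged usage sketch (not part of the original file): page_mapped_in_vma()
 * is a one-shot, synchronous check (PVMW_SYNC), e.g. for deciding whether a
 * particular VMA is relevant when walking the rmap of a page. The helper
 * name below is hypothetical.
 */
#if 0
static bool example_vma_is_relevant(struct page *page,
				    struct vm_area_struct *vma)
{
	/* Returns 1 when the page is currently mapped by this VMA, else 0 */
	return page_mapped_in_vma(page, vma) != 0;
}
#endif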