mm/hmm.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#include "internal.h"

struct hmm_vma_walk {
        struct hmm_range        *range;
        unsigned long           last;
};

enum {
        HMM_NEED_FAULT = 1 << 0,
        HMM_NEED_WRITE_FAULT = 1 << 1,
        HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
                         struct hmm_range *range, unsigned long cpu_flags)
{
        unsigned long i = (addr - range->start) >> PAGE_SHIFT;

        for (; addr < end; addr += PAGE_SIZE, i++)
                range->hmm_pfns[i] = cpu_flags;
        return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
                         unsigned int required_fault, struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct vm_area_struct *vma = walk->vma;
        unsigned int fault_flags = FAULT_FLAG_REMOTE;

        WARN_ON_ONCE(!required_fault);
        hmm_vma_walk->last = addr;

        if (required_fault & HMM_NEED_WRITE_FAULT) {
                if (!(vma->vm_flags & VM_WRITE))
                        return -EPERM;
                fault_flags |= FAULT_FLAG_WRITE;
        }

        for (; addr < end; addr += PAGE_SIZE)
                if (handle_mm_fault(vma, addr, fault_flags, NULL) &
                    VM_FAULT_ERROR)
                        return -EFAULT;
        return -EBUSY;
}
static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                       unsigned long pfn_req_flags,
                                       unsigned long cpu_flags)
{
        struct hmm_range *range = hmm_vma_walk->range;

        /*
         * We not only consider the individual per-page request, we also
         * consider the default flags requested for the range. The API can
         * be used two ways. The first is where the HMM user coalesces
         * multiple page faults into one request and sets flags per pfn for
         * those faults. The second is where the HMM user wants to pre-
         * fault a range with specific flags. For the latter one it is a
         * waste to have the user pre-fill the pfn arrays with a default
         * flags value.
         */
        pfn_req_flags &= range->pfn_flags_mask;
        pfn_req_flags |= range->default_flags;

        /* We aren't asked to do anything ... */
        if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
                return 0;

        /* Need to write fault? */
        if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
            !(cpu_flags & HMM_PFN_WRITE))
                return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

        /* If CPU page table is not valid then we need to fault */
        if (!(cpu_flags & HMM_PFN_VALID))
                return HMM_NEED_FAULT;
        return 0;
}
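/*
 * Illustrative sketch (not part of mm/hmm.c): the comment above describes the
 * two ways a caller can drive faulting, assuming a caller-owned "range" and
 * pfn array "pfns":
 *
 *   Per-page requests:   range.default_flags = 0;
 *                        range.pfn_flags_mask = ~0UL;
 *                        pfns[i] = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *
 *   Pre-fault a range:   range.default_flags = HMM_PFN_REQ_FAULT;
 *                        range.pfn_flags_mask = 0;
 *
 * In the first pattern the per-pfn input flags pass through the mask; in the
 * second they are ignored and the whole range is faulted with one setting.
 */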
static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                     const unsigned long hmm_pfns[], unsigned long npages,
                     unsigned long cpu_flags)
{
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault = 0;
        unsigned long i;

        /*
         * If the default flags do not request to fault pages, and the mask does
         * not allow for individual pages to be faulted, then
         * hmm_pte_need_fault() will always return 0.
         */
        if (!((range->default_flags | range->pfn_flags_mask) &
              HMM_PFN_REQ_FAULT))
                return 0;

        for (i = 0; i < npages; ++i) {
                required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
                                                     cpu_flags);
                if (required_fault == HMM_NEED_ALL_BITS)
                        return required_fault;
        }
        return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                             __always_unused int depth, struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault;
        unsigned long i, npages;
        unsigned long *hmm_pfns;

        i = (addr - range->start) >> PAGE_SHIFT;
        npages = (end - addr) >> PAGE_SHIFT;
        hmm_pfns = &range->hmm_pfns[i];
        required_fault =
                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
        if (!walk->vma) {
                if (required_fault)
                        return -EFAULT;
                return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
        }
        if (required_fault)
                return hmm_vma_fault(addr, end, required_fault, walk);
        return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
        return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pmd_t pmd)
{
        if (pmd_protnone(pmd))
                return 0;
        return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                 HMM_PFN_VALID) |
               hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, unsigned long hmm_pfns[],
                              pmd_t pmd)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long pfn, npages, i;
        unsigned int required_fault;
        unsigned long cpu_flags;

        npages = (end - addr) >> PAGE_SHIFT;
        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
        required_fault =
                hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
        if (required_fault)
                return hmm_vma_fault(addr, end, required_fault, walk);

        pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
                hmm_pfns[i] = pfn | cpu_flags;
        return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
                unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pte_t pte)
{
        if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
                return 0;
        return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}
static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
                              unsigned long *hmm_pfn)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned int required_fault;
        unsigned long cpu_flags;
        pte_t pte = *ptep;
        uint64_t pfn_req_flags = *hmm_pfn;

        if (pte_none(pte)) {
                required_fault =
                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                if (required_fault)
                        goto fault;
                *hmm_pfn = 0;
                return 0;
        }

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                /*
                 * Don't fault in device private pages owned by the caller,
                 * just report the PFN.
                 */
                if (is_device_private_entry(entry) &&
                    pfn_swap_entry_to_page(entry)->pgmap->owner ==
                    range->dev_private_owner) {
                        cpu_flags = HMM_PFN_VALID;
                        if (is_writable_device_private_entry(entry))
                                cpu_flags |= HMM_PFN_WRITE;
                        *hmm_pfn = swp_offset(entry) | cpu_flags;
                        return 0;
                }

                required_fault =
                        hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
                if (!required_fault) {
                        *hmm_pfn = 0;
                        return 0;
                }

                if (!non_swap_entry(entry))
                        goto fault;

                if (is_device_private_entry(entry))
                        goto fault;

                if (is_device_exclusive_entry(entry))
                        goto fault;

                if (is_migration_entry(entry)) {
                        pte_unmap(ptep);
                        hmm_vma_walk->last = addr;
                        migration_entry_wait(walk->mm, pmdp, addr);
                        return -EBUSY;
                }

                /* Report error for everything else */
                pte_unmap(ptep);
                return -EFAULT;
        }

        cpu_flags = pte_to_hmm_pfn_flags(range, pte);
        required_fault =
                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
        if (required_fault)
                goto fault;

        /*
         * Bypass devmap pte such as DAX page when all pfn requested
         * flags (pfn_req_flags) are fulfilled.
         * Since each architecture defines a struct page for the zero page, just
         * fall through and treat it like a normal page.
         */
        if (!vm_normal_page(walk->vma, addr, pte) &&
            !pte_devmap(pte) &&
            !is_zero_pfn(pte_pfn(pte))) {
                if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
                        pte_unmap(ptep);
                        return -EFAULT;
                }
                *hmm_pfn = HMM_PFN_ERROR;
                return 0;
        }

        *hmm_pfn = pte_pfn(pte) | cpu_flags;
        return 0;

fault:
        pte_unmap(ptep);
        /* Fault any virtual address we were asked to fault */
        return hmm_vma_fault(addr, end, required_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long *hmm_pfns =
                &range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
        unsigned long npages = (end - start) >> PAGE_SHIFT;
        unsigned long addr = start;
        pte_t *ptep;
        pmd_t pmd;

again:
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return hmm_vma_walk_hole(start, end, -1, walk);

        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
                        hmm_vma_walk->last = addr;
                        pmd_migration_entry_wait(walk->mm, pmdp);
                        return -EBUSY;
                }
                return hmm_pfns_fill(start, end, range, 0);
        }

        if (!pmd_present(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                        return -EFAULT;
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }

        if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
                /*
                 * No need to take pmd_lock here, even if some other thread
                 * is splitting the huge pmd we will get that event through
                 * mmu_notifier callback.
                 *
                 * So just read pmd value and check again it's a transparent
                 * huge or device mapping one and compute corresponding pfn
                 * values.
                 */
                pmd = pmd_read_atomic(pmdp);
                barrier();
                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                        goto again;

                return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
        }

        /*
         * We have handled all the valid cases above ie either none, migration,
         * huge or transparent huge. At this point either it is a valid pmd
         * entry pointing to pte directory or it is a bad pmd that will not
         * recover.
         */
        if (pmd_bad(pmd)) {
                if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
                        return -EFAULT;
                return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
        }

        ptep = pte_offset_map(pmdp, addr);
        for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
                int r;

                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
                if (r) {
                        /* hmm_vma_handle_pte() did pte_unmap() */
                        return r;
                }
        }
        pte_unmap(ptep - 1);
        return 0;
}
#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
                                                 pud_t pud)
{
        if (!pud_present(pud))
                return 0;
        return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
                                 HMM_PFN_VALID) |
               hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
                struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long addr = start;
        pud_t pud;
        int ret = 0;
        spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

        if (!ptl)
                return 0;

        /* Normally we don't want to split the huge page */
        walk->action = ACTION_CONTINUE;

        pud = READ_ONCE(*pudp);
        if (pud_none(pud)) {
                spin_unlock(ptl);
                return hmm_vma_walk_hole(start, end, -1, walk);
        }

        if (pud_huge(pud) && pud_devmap(pud)) {
                unsigned long i, npages, pfn;
                unsigned int required_fault;
                unsigned long *hmm_pfns;
                unsigned long cpu_flags;

                if (!pud_present(pud)) {
                        spin_unlock(ptl);
                        return hmm_vma_walk_hole(start, end, -1, walk);
                }

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                hmm_pfns = &range->hmm_pfns[i];

                cpu_flags = pud_to_hmm_pfn_flags(range, pud);
                required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
                                                      npages, cpu_flags);
                if (required_fault) {
                        spin_unlock(ptl);
                        return hmm_vma_fault(addr, end, required_fault, walk);
                }

                pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
                for (i = 0; i < npages; ++i, ++pfn)
                        hmm_pfns[i] = pfn | cpu_flags;
                goto out_unlock;
        }

        /* Ask for the PUD to be split */
        walk->action = ACTION_SUBTREE;

out_unlock:
        spin_unlock(ptl);
        return ret;
}
#else
#define hmm_vma_walk_pud        NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                      unsigned long start, unsigned long end,
                                      struct mm_walk *walk)
{
        unsigned long addr = start, i, pfn;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        unsigned int required_fault;
        unsigned long pfn_req_flags;
        unsigned long cpu_flags;
        spinlock_t *ptl;
        pte_t entry;

        ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
        entry = huge_ptep_get(pte);

        i = (start - range->start) >> PAGE_SHIFT;
        pfn_req_flags = range->hmm_pfns[i];
        cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
                    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
        required_fault =
                hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
        if (required_fault) {
                spin_unlock(ptl);
                return hmm_vma_fault(addr, end, required_fault, walk);
        }

        pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
        for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
                range->hmm_pfns[i] = pfn | cpu_flags;
        spin_unlock(ptl);
        return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;

        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
            vma->vm_flags & VM_READ)
                return 0;

        /*
         * vma ranges that don't have struct page backing them or map I/O
         * devices directly cannot be handled by hmm_range_fault().
         *
         * If the vma does not allow read access, then assume that it does not
         * allow write access either. HMM does not support architectures that
         * allow write without read.
         *
         * If a fault is requested for an unsupported range then it is a hard
         * failure.
         */
        if (hmm_range_need_fault(hmm_vma_walk,
                                 range->hmm_pfns +
                                         ((start - range->start) >> PAGE_SHIFT),
                                 (end - start) >> PAGE_SHIFT, 0))
                return -EFAULT;

        hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

        /* Skip this vma and continue processing the next vma. */
        return 1;
}
static const struct mm_walk_ops hmm_walk_ops = {
        .pud_entry      = hmm_vma_walk_pud,
        .pmd_entry      = hmm_vma_walk_pmd,
        .pte_hole       = hmm_vma_walk_hole,
        .hugetlb_entry  = hmm_vma_walk_hugetlb_entry,
        .test_walk      = hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range:      argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:     Invalid arguments or mm or virtual address is in an
 *              invalid vma (e.g., device file vma).
 * -ENOMEM:     Out of memory.
 * -EPERM:      Invalid permission (e.g., asking for write and range is read
 *              only).
 * -EBUSY:      The range has been invalidated and the caller needs to wait for
 *              the invalidation to finish.
 * -EFAULT:     A page was requested to be valid and could not be made valid
 *              ie it has no backing VMA or it is illegal to access
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (ie causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
        struct hmm_vma_walk hmm_vma_walk = {
                .range = range,
                .last = range->start,
        };
        struct mm_struct *mm = range->notifier->mm;
        int ret;

        mmap_assert_locked(mm);

        do {
                /* If range is no longer valid force retry. */
                if (mmu_interval_check_retry(range->notifier,
                                             range->notifier_seq))
                        return -EBUSY;
                ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
                                      &hmm_walk_ops, &hmm_vma_walk);
                /*
                 * When -EBUSY is returned the loop restarts with
                 * hmm_vma_walk.last set to an address that has not been stored
                 * in pfns. All entries < last in the pfn array are set to their
                 * output, and all >= are still at their input values.
                 */
        } while (ret == -EBUSY);
        return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
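
A note on usage: hmm_range_fault() only snapshots the CPU page tables, so the documented calling convention (see include/linux/hmm.h and Documentation/vm/hmm.rst) wraps it in an mmu_interval_notifier begin/retry loop, which keeps the snapshot coherent with concurrent invalidations until the device tables are programmed. The sketch below illustrates that loop under stated assumptions: the notifier is already registered with mmu_interval_notifier_insert(), the pfn array is caller-owned, and example_snapshot_range and driver_lock are placeholder names, not kernel APIs.

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

/* Illustrative sketch only: names and locking are placeholders. */
static int example_snapshot_range(struct mmu_interval_notifier *interval_sub,
                                  struct mm_struct *mm,
                                  unsigned long start, unsigned long end,
                                  unsigned long *pfns, struct mutex *driver_lock)
{
        struct hmm_range range = {
                .notifier = interval_sub,
                .start = start,
                .end = end,
                .hmm_pfns = pfns,
                /* Pre-fault mode: ignore per-pfn input flags, fault everything writable. */
                .default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
                .pfn_flags_mask = 0,
        };
        int ret;

again:
        range.notifier_seq = mmu_interval_read_begin(interval_sub);
        mmap_read_lock(mm);
        ret = hmm_range_fault(&range);
        mmap_read_unlock(mm);
        if (ret) {
                if (ret == -EBUSY)
                        goto again;     /* collided with an invalidation, retry */
                return ret;
        }

        mutex_lock(driver_lock);
        if (mmu_interval_read_retry(interval_sub, range.notifier_seq)) {
                /* Invalidated after the walk; the snapshot is stale. */
                mutex_unlock(driver_lock);
                goto again;
        }
        /* range.hmm_pfns[] is a coherent snapshot here; program the device. */
        mutex_unlock(driver_lock);
        return 0;
}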