mm/gup.c
#include &lt;linux/kernel.h&gt;
#include &lt;linux/errno.h&gt;
#include &lt;linux/err.h&gt;
#include &lt;linux/spinlock.h&gt;
#include &lt;linux/mm.h&gt;
#include &lt;linux/memremap.h&gt;
#include &lt;linux/pagemap.h&gt;
#include &lt;linux/rmap.h&gt;
#include &lt;linux/swap.h&gt;
#include &lt;linux/swapops.h&gt;
#include &lt;linux/sched/signal.h&gt;
#include &lt;linux/rwsem.h&gt;
#include &lt;linux/hugetlb.h&gt;

#include &lt;asm/mmu_context.h&gt;
#include &lt;asm/pgtable.h&gt;
#include &lt;asm/tlbflush.h&gt;

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables. Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct dev_pagemap *pgmap = NULL;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
		/*
		 * Only return device mapping pages in the FOLL_GET case since
		 * they are only valid while holding the pgmap reference.
		 */
		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
		if (pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			int ret;

			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		int ret;

		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	if (flags & FOLL_GET) {
		get_page(page);

		/* drop the pgmap reference now that we hold the page */
		if (pgmap) {
			put_dev_pagemap(pgmap);
			pgmap = NULL;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags, unsigned int *page_mask)
{
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(*pmd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(*pmd)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(*pmd)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
			  !is_pmd_migration_entry(*pmd));
		if (is_pmd_migration_entry(*pmd))
			pmd_migration_entry_wait(mm, pmd);
		goto retry;
	}
	if (pmd_devmap(*pmd)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(*pmd)))
		return follow_page_pte(vma, address, pmd, flags);

	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags);
	}
	if (flags & FOLL_SPLIT) {
		int ret;

		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else {
			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	*page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags, unsigned int *page_mask)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, page_mask);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags, unsigned int *page_mask)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, page_mask);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, page_mask);
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, address);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);

		/*
		 * This should never happen (a device public page in the gate
		 * area).
		 */
		if (is_device_public_page(*page))
			goto unmap;
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	unsigned int fault_flags = 0;
	int ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags, nonblocking);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		} else if (PTR_ERR(page) == -EEXIST) {
			/*
			 * Proper page table entry exists, but no corresponding
			 * struct page.
			 */
			goto next_page;
		} else if (IS_ERR(page)) {
			return i ? i : PTR_ERR(page);
		}
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}

static bool vma_permits_fault(struct vm_area_struct *vma,
			      unsigned int fault_flags)
{
	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;

	if (!(vm_flags & vma->vm_flags))
		return false;

	/*
	 * The architecture might have a hardware protection
	 * mechanism other than read/write that can deny access.
	 *
	 * gup always represents data access, not instruction
	 * fetches, so execute=false here:
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return false;

	return true;
}

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
 *		does not allow retry
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This function will not return with an unlocked mmap_sem. So it has not the
 * same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags,
		     bool *unlocked)
{
	struct vm_area_struct *vma;
	int ret, major = 0;

	if (unlocked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

retry:
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	if (!vma_permits_fault(vma, fault_flags))
		return -EFAULT;

	ret = handle_mm_fault(vma, address, fault_flags);
	major |= ret & VM_FAULT_MAJOR;
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, 0);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		down_read(&mm->mmap_sem);
		if (!(fault_flags & FAULT_FLAG_TRIED)) {
			*unlocked = true;
			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
			fault_flags |= FAULT_FLAG_TRIED;
			goto retry;
		}
	}

	if (tsk) {
		if (major)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fixup_user_fault);

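/*
 * Editor's note -- illustrative sketch, not part of mm/gup.c.  This is the
 * usage pattern the comment above describes: a caller (the futex code is the
 * typical example) fails a user access done under pagefault_disable(), then
 * resolves the fault with fixup_user_fault() and retries the access.  The
 * helper name and the FAULT_FLAG_WRITE choice below are assumptions made for
 * the example.
 */
static int example_fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	/* unlocked == NULL: no retry, so mmap_sem stays held across the call */
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}
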
static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
						struct mm_struct *mm,
						unsigned long start,
						unsigned long nr_pages,
						struct page **pages,
						struct vm_area_struct **vmas,
						int *locked, bool notify_drop,
						unsigned int flags)
{
	long ret, pages_done;
	bool lock_dropped;

	if (locked) {
		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
		BUG_ON(vmas);
		/* check caller initialized locked */
		BUG_ON(*locked != 1);
	}

	if (pages)
		flags |= FOLL_GET;

	pages_done = 0;
	lock_dropped = false;
	for (;;) {
		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
				       vmas, locked);
		if (!locked)
			/* VM_FAULT_RETRY couldn't trigger, bypass */
			return ret;

		/* VM_FAULT_RETRY cannot return errors */
		if (!*locked) {
			BUG_ON(ret < 0);
			BUG_ON(ret >= nr_pages);
		}

		if (!pages)
			/* If it's a prefault don't insist harder */
			return ret;

		if (ret > 0) {
			nr_pages -= ret;
			pages_done += ret;
			if (!nr_pages)
				break;
		}
		if (*locked) {
			/* VM_FAULT_RETRY didn't trigger */
			if (!pages_done)
				pages_done = ret;
			break;
		}
		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
		pages += ret;
		start += ret << PAGE_SHIFT;

		/*
		 * Repeat on the address that fired VM_FAULT_RETRY
		 * without FAULT_FLAG_ALLOW_RETRY but with
		 * FAULT_FLAG_TRIED.
		 */
		*locked = 1;
		lock_dropped = true;
		down_read(&mm->mmap_sem);
		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
				       pages, NULL, NULL);
		if (ret != 1) {
			BUG_ON(ret > 1);
			if (!pages_done)
				pages_done = ret;
			break;
		}
		nr_pages--;
		pages_done++;
		if (!nr_pages)
			break;
		pages++;
		start += PAGE_SIZE;
	}
	if (notify_drop && lock_dropped && *locked) {
		/*
		 * We must let the caller know we temporarily dropped the lock
		 * and so the critical section protected by it was lost.
		 */
		up_read(&mm->mmap_sem);
		*locked = 0;
	}
	return pages_done;
}

/*
 * We can leverage the VM_FAULT_RETRY functionality in the page fault
 * paths better by using either get_user_pages_locked() or
 * get_user_pages_unlocked().
 *
 * get_user_pages_locked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  to:
 *
 *      int locked = 1;
 *      down_read(&mm->mmap_sem);
 *      do_something()
 *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
 *      if (locked)
 *          up_read(&mm->mmap_sem);
 */
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   unsigned int gup_flags, struct page **pages,
			   int *locked)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, NULL, locked, true,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);

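/*
 * Editor's note -- illustrative sketch, not part of mm/gup.c.  The comment
 * above still shows the older (tsk, mm, ...) calling convention; with the
 * current prototype the same pattern looks like this.  "locked" reports
 * whether mmap_sem is still held on return, because the fault path may drop
 * it while sleeping (VM_FAULT_RETRY).
 */
static long example_pin_with_locked(unsigned long start, unsigned long nr_pages,
				    struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages_locked(start, nr_pages, FOLL_WRITE, pages,
				    &locked);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}
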
/*
 * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows for
 * tsk, mm to be specified.
 *
 * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
 * caller if required (just like with __get_user_pages). "FOLL_GET"
 * is set implicitly if "pages" is non-NULL.
 */
static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long start,
		unsigned long nr_pages, struct page **pages,
		unsigned int gup_flags)
{
	long ret;
	int locked = 1;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
				      &locked, false, gup_flags);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}

/*
 * get_user_pages_unlocked() is suitable to replace the form:
 *
 *      down_read(&mm->mmap_sem);
 *      get_user_pages(tsk, mm, ..., pages, NULL);
 *      up_read(&mm->mmap_sem);
 *
 *  with:
 *
 *      get_user_pages_unlocked(tsk, mm, ..., pages);
 *
 * It is functionally equivalent to get_user_pages_fast so
 * get_user_pages_fast should be used instead if specific gup_flags
 * (e.g. FOLL_FORCE) are not required.
 */
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 pages, gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);

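/*
 * Editor's note -- illustrative sketch, not part of mm/gup.c.  With the
 * current prototype the pattern shown in the comment above reduces to a
 * single call; mmap_sem is taken and dropped internally.  The helper name
 * and the FOLL_WRITE choice are assumptions made for the example.
 */
static long example_pin_unlocked(unsigned long start, unsigned long nr_pages,
				 struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages, FOLL_WRITE);
}
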
/*
 * get_user_pages_remote() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying lookup behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	pointer to lock flag indicating whether lock is held and
 *		subsequently whether VM_FAULT_RETRY functionality can be
 *		utilised. Lock must initially be held.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
 * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
 * be called after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 *
 * get_user_pages should be phased out in favor of
 * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
 * should use get_user_pages because it cannot pass
 * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
 */
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *locked)
{
	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
				       locked, true,
				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);

/*
 * This is the same as get_user_pages_remote(), just with a
 * less-flexible calling convention where we assume that the task
 * and mm being operated on are the current task's and don't allow
 * passing of a locked parameter.  We also obviously don't pass
 * FOLL_REMOTE in here.
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas)
{
	return __get_user_pages_locked(current, current->mm, start, nr_pages,
				       pages, vmas, NULL, false,
				       gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);

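/*
 * Editor's note -- illustrative sketch, not part of mm/gup.c.  A minimal
 * caller of get_user_pages() following the rules in the comment above:
 * mmap_sem is held across the call, pages that were written to are dirtied
 * with set_page_dirty_lock(), and every pinned page is released with
 * put_page().  The function name and the FOLL_WRITE choice are assumptions
 * made for the example.
 */
static long example_pin_and_release(unsigned long start, unsigned long nr_pages,
				    struct page **pages, bool wrote_to_pages)
{
	long pinned, i;

	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (pinned <= 0)
		return pinned;

	/* ... access pages[0..pinned-1] here, e.g. via kmap() or DMA ... */

	for (i = 0; i < pinned; i++) {
		if (wrote_to_pages)
			set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	return pinned;
}
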
#ifdef CONFIG_FS_DAX
/*
 * This is the same as get_user_pages() in that it assumes we are
 * operating on the current task's mm, but it goes further to validate
 * that the vmas associated with the address range are suitable for
 * longterm elevated page reference counts. For example, filesystem-dax
 * mappings are subject to the lifetime enforced by the filesystem and
 * we need guarantees that longterm users like RDMA and V4L2 only
 * establish mappings that have a kernel enforced revocation mechanism.
 *
 * "longterm" == userspace controlled elevated page count lifetime.
 * Contrast this to iov_iter_get_pages() usages which are transient.
 */
long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas_arg)
{
	struct vm_area_struct **vmas = vmas_arg;
	struct vm_area_struct *vma_prev = NULL;
	long rc, i;

	if (!pages)
		return -EINVAL;

	if (!vmas) {
		vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
			       GFP_KERNEL);
		if (!vmas)
			return -ENOMEM;
	}

	rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);

	for (i = 0; i < rc; i++) {
		struct vm_area_struct *vma = vmas[i];

		if (vma == vma_prev)
			continue;

		vma_prev = vma;

		if (vma_is_fsdax(vma))
			break;
	}

	/*
	 * Either get_user_pages() failed, or the vma validation
	 * succeeded, in either case we don't need to put_page() before
	 * returning.
	 */
	if (i >= rc)
		goto out;

	for (i = 0; i < rc; i++)
		put_page(pages[i]);
	rc = -EOPNOTSUPP;
out:
	if (vmas != vmas_arg)
		kfree(vmas);
	return rc;
}
EXPORT_SYMBOL(get_user_pages_longterm);
#endif /* CONFIG_FS_DAX */

/**
 * populate_vma_page_range() -  populate a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @nonblocking:
 *
 * This takes care of mlocking the pages too if VM_LOCKED is set.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held.
 *
 * If @nonblocking is NULL, it may be held for read or write and will
 * be unperturbed.
 *
 * If @nonblocking is non-NULL, it must be held for read only and may be
 * released.  If it's released, *@nonblocking will be set to 0.
 */
long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_pages = (end - start) / PAGE_SIZE;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON_VMA(start < vma->vm_start, vma);
	VM_BUG_ON_VMA(end > vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
	if (vma->vm_flags & VM_LOCKONFAULT)
		gup_flags &= ~FOLL_POPULATE;
	/*
	 * We want to touch writable mappings with a write fault in order
	 * to break COW, except for shared mappings because these don't COW
	 * and we would not want to dirty them for nothing.
	 */
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/*
	 * We want mlock to succeed for regions that have any permissions
	 * other than PROT_NONE.
	 */
	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	/*
	 * We made sure addr is within a VMA, so the following will
	 * not result in a stack expansion that recurses back here.
	 */
	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
				NULL, NULL, nonblocking);
}

/*
 * __mm_populate - populate and/or mlock pages within a range of address space.
 *
 * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
 * flags. VMAs must be already marked with the desired vm_flags, and
 * mmap_sem must not be held.
 */
int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
{
	struct mm_struct *mm = current->mm;
	unsigned long end, nstart, nend;
	struct vm_area_struct *vma = NULL;
	int locked = 0;
	long ret = 0;

	end = start + len;

	for (nstart = start; nstart < end; nstart = nend) {
		/*
		 * We want to fault in pages for [nstart; end) address range.
		 * Find first corresponding VMA.
		 */
		if (!locked) {
			locked = 1;
			down_read(&mm->mmap_sem);
			vma = find_vma(mm, nstart);
		} else if (nstart >= vma->vm_end)
			vma = vma->vm_next;
		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * Set [nstart; nend) to intersection of desired address
		 * range with the first VMA. Also, skip undesirable VMA types.
		 */
		nend = min(end, vma->vm_end);
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			continue;
		if (nstart < vma->vm_start)
			nstart = vma->vm_start;
		/*
		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
		ret = populate_vma_page_range(vma, nstart, nend, &locked);
		if (ret < 0) {
			if (ignore_errors) {
				ret = 0;
				continue;	/* continue at next VMA */
			}
			break;
		}
		nend = nstart + ret * PAGE_SIZE;
		ret = 0;
	}
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;	/* 0 or negative error code */
}

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
 *     free pages containing page tables or TLB flushing requires IPI broadcast.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */

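/*
 * Editor's note -- illustrative sketch, not part of mm/gup.c.  From the
 * caller's side, the lockless walk described above is reached through
 * get_user_pages_fast() (declared in <linux/mm.h> and implemented further
 * down this file for CONFIG_HAVE_GENERIC_GUP): no mmap_sem is taken by the
 * caller, and anything the fast walk cannot handle falls back to the slow
 * path internally.  The helper below is a hypothetical example.
 */
static int example_fast_pin_and_release(unsigned long start, int nr_pages,
					struct page **pages)
{
	int pinned = get_user_pages_fast(start, nr_pages, 1, pages);

	if (pinned < 0)
		return pinned;		/* -errno, nothing was pinned */

	/* ... use pages[0..pinned-1]; partial pins are possible ... */

	while (pinned)
		put_page(pages[--pinned]);
	return 0;
}
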
#ifdef CONFIG_HAVE_GENERIC_GUP

#ifndef gup_get_pte
/*
 * We assume that the PTE can be read atomically. If this is not the case for
 * your architecture, please provide the helper.
 */
static inline pte_t gup_get_pte(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif

static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
	while ((*nr) - nr_start) {
		struct page *page = pages[--(*nr)];

		ClearPageReferenced(page);
		put_page(page);
	}
}

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr, ret = 0;
	pte_t *ptep, *ptem;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *head, *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path using the pte_protnone check.
		 */
		if (pte_protnone(pte))
			goto pte_unmap;

		if (!pte_access_permitted(pte, write))
			goto pte_unmap;

		if (pte_devmap(pte)) {
			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				undo_dev_pagemap(nr, nr_start, pages);
				goto pte_unmap;
			}
		} else if (pte_special(pte))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);

		if (!page_cache_get_speculative(head))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			goto pte_unmap;
		}

		VM_BUG_ON_PAGE(compound_head(page) != head, page);

		put_dev_pagemap(pgmap);
		SetPageReferenced(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

#if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static int __gup_device_huge(unsigned long pfn, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	int nr_start = *nr;
	struct dev_pagemap *pgmap = NULL;

	do {
		struct page *page = pfn_to_page(pfn);

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			undo_dev_pagemap(nr, nr_start, pages);
			return 0;
		}
		SetPageReferenced(page);
		pages[*nr] = page;
		get_page(page);
		put_dev_pagemap(pgmap);
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
	return 1;
}

static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
		return 0;

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		undo_dev_pagemap(nr, nr_start, pages);
		return 0;
	}
	return 1;
}

static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	unsigned long fault_pfn;
	int nr_start = *nr;

	fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
		return 0;

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		undo_dev_pagemap(nr, nr_start, pages);
		return 0;
	}
	return 1;
}
#else
static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}

static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	BUILD_BUG();
	return 0;
}
#endif

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pmd_access_permitted(orig, write))
		return 0;

	if (pmd_devmap(orig))
		return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);

	refs = 0;
	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pmd_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pud_access_permitted(orig, write))
		return 0;

	if (pud_devmap(orig))
		return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);

	refs = 0;
	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pud_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}
static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page;

	if (!pgd_access_permitted(orig, write))
		return 0;

	BUILD_BUG_ON(pgd_devmap(orig));

	refs = 0;
	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	do {
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	head = compound_head(pgd_page(orig));
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	SetPageReferenced(head);
	return 1;
}
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (!pmd_present(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
				pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures can use a different format for
			 * hugetlbfs pmds than for THP pmds.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&p4d, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}
static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp;

	p4dp = p4d_offset(&pgd, addr);
	do {
		p4d_t p4d = READ_ONCE(*p4dp);

		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		BUILD_BUG_ON(p4d_huge(p4d));
		if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
			if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
					 P4D_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pud_range(p4d, addr, next, write, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}
static void gup_pgd_range(unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset(current->mm, addr);
	do {
		pgd_t pgd = READ_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			return;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, nr))
				return;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, nr))
				return;
		} else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
			return;
	} while (pgdp++, addr = next, addr != end);
}

#ifndef gup_fast_permitted
/*
 * Check if it's allowed to use __get_user_pages_fast() for the range, or
 * we need to fall back to the slow version:
 */
bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
{
	unsigned long len, end;

	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	return end >= start;
}
#endif
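/*
 * [Editor's illustrative sketch, not part of mm/gup.c.]
 *
 * The generic gup_fast_permitted() above is only compiled when an
 * architecture has not provided its own; an arch can supply a stricter
 * check (and define the macro of the same name) so the fast-GUP callers
 * fall back to the slow path for ranges it cannot walk locklessly. A
 * minimal, hypothetical override might only accept ranges that stay
 * inside the user address space; the use of TASK_SIZE_MAX here is an
 * assumption for illustration, not any particular architecture's code.
 */
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
				      int write)
{
	unsigned long len = (unsigned long)nr_pages << PAGE_SHIFT;
	unsigned long end = start + len;

	/* Reject overflow and anything reaching past the user address space. */
	return end >= start && end <= TASK_SIZE_MAX;
}
#define gup_fast_permitted gup_fast_permitted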
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
 * the regular GUP. It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	unsigned long addr, len, end;
	unsigned long flags;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return 0;

	/*
	 * Disable interrupts. We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */

	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_save(flags);
		gup_pgd_range(addr, end, write, pages, &nr);
		local_irq_restore(flags);
	}

	return nr;
}
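/*
 * [Editor's illustrative sketch, not part of mm/gup.c.]
 *
 * __get_user_pages_fast() is intended for callers that cannot sleep, fault,
 * or take mmap_sem (e.g. code already running with interrupts disabled). A
 * hypothetical helper that opportunistically pins a single page and simply
 * reports failure rather than faulting could look like this;
 * example_peek_user_page is an invented name.
 */
static struct page *example_peek_user_page(unsigned long addr, int write)
{
	struct page *page;

	/* Returns the number of pages pinned (0 or 1 here), never -errno. */
	if (__get_user_pages_fast(addr, 1, write, &page) != 1)
		return NULL;	/* caller must fall back or give up */

	return page;		/* caller drops the reference with put_page() */
}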
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	unsigned long addr, len, end;
	int nr = 0, ret = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (nr_pages <= 0)
		return 0;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return -EFAULT;

	if (gup_fast_permitted(start, nr_pages, write)) {
		local_irq_disable();
		gup_pgd_range(addr, end, write, pages, &nr);
		local_irq_enable();
		ret = nr;
	}

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
				write ? FOLL_WRITE : 0);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}
#endif /* CONFIG_HAVE_GENERIC_GUP */
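/*
 * [Editor's illustrative sketch, not part of mm/gup.c.]
 *
 * Typical use of get_user_pages_fast() at this point in its history: pin a
 * user buffer before accessing it (for DMA or kmap-style access), then drop
 * every reference with put_page(). The helper and its names below are
 * invented for illustration and error handling is reduced to the essentials.
 */
static int example_pin_user_buffer(unsigned long uaddr, size_t len, int write,
				   struct page ***pagesp, int *npagesp)
{
	unsigned long first = uaddr >> PAGE_SHIFT;
	unsigned long last = (uaddr + len - 1) >> PAGE_SHIFT;
	int npages = last - first + 1;
	struct page **pages;
	int pinned;

	pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pinned = get_user_pages_fast(uaddr, npages, write, pages);
	if (pinned < 0) {
		/* Nothing was pinned; propagate the error. */
		kfree(pages);
		return pinned;
	}
	if (pinned < npages) {
		/* Partial pin: release what we got and report the shortfall. */
		while (pinned--)
			put_page(pages[pinned]);
		kfree(pages);
		return -EFAULT;
	}

	*pagesp = pages;
	*npagesp = npages;
	return 0;	/* caller later put_page()s each page and kfree()s the array */
}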