mm/gup.c

  #include <linux/kernel.h>
  #include <linux/errno.h>
  #include <linux/err.h>
  #include <linux/spinlock.h>
  #include <linux/mm.h>
  #include <linux/memremap.h>
  #include <linux/pagemap.h>
  #include <linux/rmap.h>
  #include <linux/swap.h>
  #include <linux/swapops.h>
  #include <linux/sched.h>
  #include <linux/rwsem.h>
  #include <linux/hugetlb.h>

  #include <asm/mmu_context.h>
  #include <asm/pgtable.h>
  #include <asm/tlbflush.h>

  #include "internal.h"
  static struct page *no_page_table(struct vm_area_struct *vma,
  		unsigned int flags)
  {
  	/*
  	 * When core dumping an enormous anonymous area that nobody
  	 * has touched so far, we don't want to allocate unnecessary pages or
  	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
  	 * then get_dump_page() will return NULL to leave a hole in the dump.
  	 * But we can only make this optimization where a hole would surely
  	 * be zero-filled if handle_mm_fault() actually did handle it.
  	 */
  	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
  		return ERR_PTR(-EFAULT);
  	return NULL;
  }

  static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  		pte_t *pte, unsigned int flags)
  {
  	/* No page to get reference */
  	if (flags & FOLL_GET)
  		return -EFAULT;
  
  	if (flags & FOLL_TOUCH) {
  		pte_t entry = *pte;
  
  		if (flags & FOLL_WRITE)
  			entry = pte_mkdirty(entry);
  		entry = pte_mkyoung(entry);
  
  		if (!pte_same(*pte, entry)) {
  			set_pte_at(vma->vm_mm, address, pte, entry);
  			update_mmu_cache(vma, address, pte);
  		}
  	}
  
  	/* Proper page table entry exists, but no corresponding struct page */
  	return -EEXIST;
  }
  /*
   * FOLL_FORCE can write to even unwritable pte's, but only
   * after we've gone through a COW cycle and they are dirty.
   */
  static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
  {
  	return pte_write(pte) ||
  		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
  }
  static struct page *follow_page_pte(struct vm_area_struct *vma,
  		unsigned long address, pmd_t *pmd, unsigned int flags)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	struct dev_pagemap *pgmap = NULL;
  	struct page *page;
  	spinlock_t *ptl;
  	pte_t *ptep, pte;

  retry:
  	if (unlikely(pmd_bad(*pmd)))
  		return no_page_table(vma, flags);
  
  	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
  	pte = *ptep;
  	if (!pte_present(pte)) {
  		swp_entry_t entry;
  		/*
  		 * KSM's break_ksm() relies upon recognizing a ksm page
  		 * even while it is being migrated, so for that case we
  		 * need migration_entry_wait().
  		 */
  		if (likely(!(flags & FOLL_MIGRATION)))
  			goto no_page;
  		if (pte_none(pte))
  			goto no_page;
  		entry = pte_to_swp_entry(pte);
  		if (!is_migration_entry(entry))
  			goto no_page;
  		pte_unmap_unlock(ptep, ptl);
  		migration_entry_wait(mm, pmd, address);
  		goto retry;
  	}
  	if ((flags & FOLL_NUMA) && pte_protnone(pte))
  		goto no_page;
  	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
  		pte_unmap_unlock(ptep, ptl);
  		return NULL;
  	}
  
  	page = vm_normal_page(vma, address, pte);
  	if (!page && pte_devmap(pte) && (flags & FOLL_GET)) {
  		/*
  		 * Only return device mapping pages in the FOLL_GET case since
  		 * they are only valid while holding the pgmap reference.
  		 */
  		pgmap = get_dev_pagemap(pte_pfn(pte), NULL);
  		if (pgmap)
  			page = pte_page(pte);
  		else
  			goto no_page;
  	} else if (unlikely(!page)) {
  		if (flags & FOLL_DUMP) {
  			/* Avoid special (like zero) pages in core dumps */
  			page = ERR_PTR(-EFAULT);
  			goto out;
  		}
  
  		if (is_zero_pfn(pte_pfn(pte))) {
  			page = pte_page(pte);
  		} else {
  			int ret;
  
  			ret = follow_pfn_pte(vma, address, ptep, flags);
  			page = ERR_PTR(ret);
  			goto out;
  		}
  	}
  	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
  		int ret;
  		get_page(page);
  		pte_unmap_unlock(ptep, ptl);
  		lock_page(page);
  		ret = split_huge_page(page);
  		unlock_page(page);
  		put_page(page);
  		if (ret)
  			return ERR_PTR(ret);
  		goto retry;
  	}
  	if (flags & FOLL_GET) {
  		get_page(page);
  
  		/* drop the pgmap reference now that we hold the page */
  		if (pgmap) {
  			put_dev_pagemap(pgmap);
  			pgmap = NULL;
  		}
  	}
  	if (flags & FOLL_TOUCH) {
  		if ((flags & FOLL_WRITE) &&
  		    !pte_dirty(pte) && !PageDirty(page))
  			set_page_dirty(page);
  		/*
  		 * pte_mkyoung() would be more correct here, but atomic care
  		 * is needed to avoid losing the dirty bit: it is easier to use
  		 * mark_page_accessed().
  		 */
  		mark_page_accessed(page);
  	}
  	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
  		/* Do not mlock pte-mapped THP */
  		if (PageTransCompound(page))
  			goto out;
  		/*
  		 * The preliminary mapping check is mainly to avoid the
  		 * pointless overhead of lock_page on the ZERO_PAGE
  		 * which might bounce very badly if there is contention.
  		 *
  		 * If the page is already locked, we don't need to
  		 * handle it now - vmscan will handle it later if and
  		 * when it attempts to reclaim the page.
  		 */
  		if (page->mapping && trylock_page(page)) {
  			lru_add_drain();  /* push cached pages to LRU */
  			/*
  			 * Because we lock page here, and migration is
  			 * blocked by the pte's page reference, and we
  			 * know the page is still mapped, we don't even
  			 * need to check for file-cache page truncation.
  			 */
  			mlock_vma_page(page);
  			unlock_page(page);
  		}
  	}
  out:
  	pte_unmap_unlock(ptep, ptl);
  	return page;
  no_page:
  	pte_unmap_unlock(ptep, ptl);
  	if (!pte_none(pte))
  		return NULL;
  	return no_page_table(vma, flags);
  }
  
  /**
   * follow_page_mask - look up a page descriptor from a user-virtual address
   * @vma: vm_area_struct mapping @address
   * @address: virtual address to look up
   * @flags: flags modifying lookup behaviour
   * @page_mask: on output, *page_mask is set according to the size of the page
   *
   * @flags can have FOLL_ flags set, defined in <linux/mm.h>
   *
   * Returns the mapped (struct page *), %NULL if no mapping exists, or
   * an error pointer if there is a mapping to something not represented
   * by a page descriptor (see also vm_normal_page()).
   */
  struct page *follow_page_mask(struct vm_area_struct *vma,
  			      unsigned long address, unsigned int flags,
  			      unsigned int *page_mask)
  {
  	pgd_t *pgd;
  	pud_t *pud;
  	pmd_t *pmd;
  	spinlock_t *ptl;
  	struct page *page;
  	struct mm_struct *mm = vma->vm_mm;
  
  	*page_mask = 0;
  
  	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
  	if (!IS_ERR(page)) {
  		BUG_ON(flags & FOLL_GET);
  		return page;
  	}

  	pgd = pgd_offset(mm, address);
  	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
  		return no_page_table(vma, flags);
  
  	pud = pud_offset(pgd, address);
  	if (pud_none(*pud))
  		return no_page_table(vma, flags);
  	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
  		page = follow_huge_pud(mm, address, pud, flags);
  		if (page)
  			return page;
  		return no_page_table(vma, flags);
  	}
  	if (unlikely(pud_bad(*pud)))
  		return no_page_table(vma, flags);
  
  	pmd = pmd_offset(pud, address);
  	if (pmd_none(*pmd))
  		return no_page_table(vma, flags);
  	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
  		page = follow_huge_pmd(mm, address, pmd, flags);
  		if (page)
  			return page;
  		return no_page_table(vma, flags);
  	}
  	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
  		return no_page_table(vma, flags);
  	if (pmd_devmap(*pmd)) {
  		ptl = pmd_lock(mm, pmd);
  		page = follow_devmap_pmd(vma, address, pmd, flags);
  		spin_unlock(ptl);
  		if (page)
  			return page;
  	}
  	if (likely(!pmd_trans_huge(*pmd)))
  		return follow_page_pte(vma, address, pmd, flags);
  
  	ptl = pmd_lock(mm, pmd);
  	if (unlikely(!pmd_trans_huge(*pmd))) {
  		spin_unlock(ptl);
  		return follow_page_pte(vma, address, pmd, flags);
  	}
  	if (flags & FOLL_SPLIT) {
  		int ret;
  		page = pmd_page(*pmd);
  		if (is_huge_zero_page(page)) {
  			spin_unlock(ptl);
  			ret = 0;
  			split_huge_pmd(vma, pmd, address);
  			if (pmd_trans_unstable(pmd))
  				ret = -EBUSY;
  		} else {
  			get_page(page);
  			spin_unlock(ptl);
  			lock_page(page);
  			ret = split_huge_page(page);
  			unlock_page(page);
  			put_page(page);
  			if (pmd_none(*pmd))
  				return no_page_table(vma, flags);
  		}
  
  		return ret ? ERR_PTR(ret) :
  			follow_page_pte(vma, address, pmd, flags);
  	}
  
  	page = follow_trans_huge_pmd(vma, address, pmd, flags);
  	spin_unlock(ptl);
  	*page_mask = HPAGE_PMD_NR - 1;
  	return page;
  }
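
/*
 * Illustrative sketch, not part of mm/gup.c: a minimal caller of
 * follow_page_mask() as documented above.  The caller must hold mmap_sem,
 * and both the NULL and the ERR_PTR() returns have to be handled.  The
 * helper name is hypothetical.
 */
static int example_probe_one_page(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned int page_mask;
	struct page *page;

	page = follow_page_mask(vma, addr, FOLL_GET, &page_mask);
	if (IS_ERR(page))
		return PTR_ERR(page);	/* e.g. a mapping with no struct page */
	if (!page)
		return -ENOENT;		/* nothing mapped at addr */
	/* ... inspect or copy the page contents here ... */
	put_page(page);			/* drop the reference taken by FOLL_GET */
	return 0;
}
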
  static int get_gate_page(struct mm_struct *mm, unsigned long address,
  		unsigned int gup_flags, struct vm_area_struct **vma,
  		struct page **page)
  {
  	pgd_t *pgd;
  	pud_t *pud;
  	pmd_t *pmd;
  	pte_t *pte;
  	int ret = -EFAULT;
  
  	/* user gate pages are read-only */
  	if (gup_flags & FOLL_WRITE)
  		return -EFAULT;
  	if (address > TASK_SIZE)
  		pgd = pgd_offset_k(address);
  	else
  		pgd = pgd_offset_gate(mm, address);
  	BUG_ON(pgd_none(*pgd));
  	pud = pud_offset(pgd, address);
  	BUG_ON(pud_none(*pud));
  	pmd = pmd_offset(pud, address);
  	if (pmd_none(*pmd))
  		return -EFAULT;
  	VM_BUG_ON(pmd_trans_huge(*pmd));
  	pte = pte_offset_map(pmd, address);
  	if (pte_none(*pte))
  		goto unmap;
  	*vma = get_gate_vma(mm);
  	if (!page)
  		goto out;
  	*page = vm_normal_page(*vma, address, *pte);
  	if (!*page) {
  		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
  			goto unmap;
  		*page = pte_page(*pte);
  	}
  	get_page(*page);
  out:
  	ret = 0;
  unmap:
  	pte_unmap(pte);
  	return ret;
  }
  /*
   * mmap_sem must be held on entry.  If @nonblocking != NULL and
   * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
   * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
   */
  static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
  		unsigned long address, unsigned int *flags, int *nonblocking)
  {
  	unsigned int fault_flags = 0;
  	int ret;
  	/* mlock all present pages, but do not fault in new pages */
  	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
  		return -ENOENT;
  	/* For mm_populate(), just skip the stack guard page. */
  	if ((*flags & FOLL_POPULATE) &&
  			(stack_guard_page_start(vma, address) ||
  			 stack_guard_page_end(vma, address + PAGE_SIZE)))
  		return -ENOENT;
  	if (*flags & FOLL_WRITE)
  		fault_flags |= FAULT_FLAG_WRITE;
  	if (*flags & FOLL_REMOTE)
  		fault_flags |= FAULT_FLAG_REMOTE;
  	if (nonblocking)
  		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
  	if (*flags & FOLL_NOWAIT)
  		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
  	if (*flags & FOLL_TRIED) {
  		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
  		fault_flags |= FAULT_FLAG_TRIED;
  	}

  	ret = handle_mm_fault(vma, address, fault_flags);
  	if (ret & VM_FAULT_ERROR) {
  		if (ret & VM_FAULT_OOM)
  			return -ENOMEM;
  		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
  			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
  		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
  			return -EFAULT;
  		BUG();
  	}
  
  	if (tsk) {
  		if (ret & VM_FAULT_MAJOR)
  			tsk->maj_flt++;
  		else
  			tsk->min_flt++;
  	}
  
  	if (ret & VM_FAULT_RETRY) {
  		if (nonblocking)
  			*nonblocking = 0;
  		return -EBUSY;
  	}
  
  	/*
  	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
  	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
  	 * can thus safely do subsequent page lookups as if they were reads.
  	 * But only do so when looping for pte_write is futile: in some cases
  	 * userspace may also be wanting to write to the gotten user page,
  	 * which a read fault here might prevent (a readonly page might get
  	 * reCOWed by userspace write).
  	 */
  	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
  	        *flags |= FOLL_COW;
  	return 0;
  }
  static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
  {
  	vm_flags_t vm_flags = vma->vm_flags;
  	int write = (gup_flags & FOLL_WRITE);
  	int foreign = (gup_flags & FOLL_REMOTE);
  
  	if (vm_flags & (VM_IO | VM_PFNMAP))
  		return -EFAULT;
  	if (write) {
  		if (!(vm_flags & VM_WRITE)) {
  			if (!(gup_flags & FOLL_FORCE))
  				return -EFAULT;
  			/*
  			 * We used to let the write,force case do COW in a
  			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
  			 * set a breakpoint in a read-only mapping of an
  			 * executable, without corrupting the file (yet only
  			 * when that file had been opened for writing!).
  			 * Anon pages in shared mappings are surprising: now
  			 * just reject it.
  			 */
  			if (!is_cow_mapping(vm_flags))
  				return -EFAULT;
  		}
  	} else if (!(vm_flags & VM_READ)) {
  		if (!(gup_flags & FOLL_FORCE))
  			return -EFAULT;
  		/*
  		 * Is there actually any vma we can reach here which does not
  		 * have VM_MAYREAD set?
  		 */
  		if (!(vm_flags & VM_MAYREAD))
  			return -EFAULT;
  	}
  	/*
  	 * gups are always data accesses, not instruction
  	 * fetches, so execute=false here
  	 */
  	if (!arch_vma_access_permitted(vma, write, false, foreign))
  		return -EFAULT;
  	return 0;
  }
  /**
   * __get_user_pages() - pin user pages in memory
   * @tsk:	task_struct of target task
   * @mm:		mm_struct of target mm
   * @start:	starting user address
   * @nr_pages:	number of pages from start to pin
   * @gup_flags:	flags modifying pin behaviour
   * @pages:	array that receives pointers to the pages pinned.
   *		Should be at least nr_pages long. Or NULL, if caller
   *		only intends to ensure the pages are faulted in.
   * @vmas:	array of pointers to vmas corresponding to each page.
   *		Or NULL if the caller does not require them.
   * @nonblocking: whether waiting for disk IO or mmap_sem contention
   *
   * Returns number of pages pinned. This may be fewer than the number
   * requested. If nr_pages is 0 or negative, returns 0. If no pages
   * were pinned, returns -errno. Each page returned must be released
   * with a put_page() call when it is finished with. vmas will only
   * remain valid while mmap_sem is held.
   *
   * Must be called with mmap_sem held.  It may be released.  See below.
   *
   * __get_user_pages walks a process's page tables and takes a reference to
   * each struct page that each user address corresponds to at a given
   * instant. That is, it takes the page that would be accessed if a user
   * thread accesses the given user virtual address at that instant.
   *
   * This does not guarantee that the page exists in the user mappings when
   * __get_user_pages returns, and there may even be a completely different
   * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
   * won't be freed completely. And mostly callers simply care that the page
   * contains data that was valid *at some point in time*. Typically, an IO
   * or similar operation cannot guarantee anything stronger anyway because
   * locks can't be held over the syscall boundary.
   *
   * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
   * the page is written to, set_page_dirty (or set_page_dirty_lock, as
   * appropriate) must be called after the page is finished with, and
   * before put_page is called.
   *
   * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
   * or mmap_sem contention, and if waiting is needed to pin all pages,
   * *@nonblocking will be set to 0.  Further, if @gup_flags does not
   * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
   * this case.
   *
   * A caller using such a combination of @nonblocking and @gup_flags
   * must therefore hold the mmap_sem for reading only, and recognize
   * when it's been released.  Otherwise, it must be held for either
   * reading or writing and will not be released.
   *
   * In most cases, get_user_pages or get_user_pages_fast should be used
   * instead of __get_user_pages. __get_user_pages should be used only if
   * you need some special @gup_flags.
   */
  static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
  		unsigned long start, unsigned long nr_pages,
  		unsigned int gup_flags, struct page **pages,
  		struct vm_area_struct **vmas, int *nonblocking)
  {
  	long i = 0;
  	unsigned int page_mask;
  	struct vm_area_struct *vma = NULL;
  
  	if (!nr_pages)
  		return 0;
  
  	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
  
  	/*
  	 * If FOLL_FORCE is set then do not force a full fault as the hinting
  	 * fault information is unrelated to the reference behaviour of a task
  	 * using the address space
  	 */
  	if (!(gup_flags & FOLL_FORCE))
  		gup_flags |= FOLL_NUMA;
  	do {
  		struct page *page;
  		unsigned int foll_flags = gup_flags;
  		unsigned int page_increm;
  
  		/* first iteration or cross vma bound */
  		if (!vma || start >= vma->vm_end) {
  			vma = find_extend_vma(mm, start);
  			if (!vma && in_gate_area(mm, start)) {
  				int ret;
  				ret = get_gate_page(mm, start & PAGE_MASK,
  						gup_flags, &vma,
  						pages ? &pages[i] : NULL);
  				if (ret)
  					return i ? : ret;
  				page_mask = 0;
  				goto next_page;
  			}

  			if (!vma || check_vma_flags(vma, gup_flags))
  				return i ? : -EFAULT;
  			if (is_vm_hugetlb_page(vma)) {
  				i = follow_hugetlb_page(mm, vma, pages, vmas,
  						&start, &nr_pages, i,
  						gup_flags);
  				continue;
  			}
  		}
  retry:
  		/*
  		 * If we have a pending SIGKILL, don't keep faulting pages and
  		 * potentially allocating memory.
  		 */
  		if (unlikely(fatal_signal_pending(current)))
  			return i ? i : -ERESTARTSYS;
  		cond_resched();
  		page = follow_page_mask(vma, start, foll_flags, &page_mask);
  		if (!page) {
  			int ret;
  			ret = faultin_page(tsk, vma, start, &foll_flags,
  					nonblocking);
  			switch (ret) {
  			case 0:
  				goto retry;
  			case -EFAULT:
  			case -ENOMEM:
  			case -EHWPOISON:
  				return i ? i : ret;
  			case -EBUSY:
  				return i;
  			case -ENOENT:
  				goto next_page;
  			}
  			BUG();
  		} else if (PTR_ERR(page) == -EEXIST) {
  			/*
  			 * Proper page table entry exists, but no corresponding
  			 * struct page.
  			 */
  			goto next_page;
  		} else if (IS_ERR(page)) {
  			return i ? i : PTR_ERR(page);
  		}
  		if (pages) {
  			pages[i] = page;
  			flush_anon_page(vma, page, start);
  			flush_dcache_page(page);
  			page_mask = 0;
  		}
  next_page:
  		if (vmas) {
  			vmas[i] = vma;
  			page_mask = 0;
  		}
  		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
  		if (page_increm > nr_pages)
  			page_increm = nr_pages;
  		i += page_increm;
  		start += page_increm * PAGE_SIZE;
  		nr_pages -= page_increm;
  	} while (nr_pages);
  	return i;
  }

  bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags)
  {
  	bool write   = !!(fault_flags & FAULT_FLAG_WRITE);
  	bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
  	vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
  
  	if (!(vm_flags & vma->vm_flags))
  		return false;
  	/*
  	 * The architecture might have a hardware protection
  	 * mechanism other than read/write that can deny access.
  	 *
  	 * gup always represents data access, not instruction
  	 * fetches, so execute=false here:
  	 */
  	if (!arch_vma_access_permitted(vma, write, false, foreign))
  		return false;
  	return true;
  }
  /*
   * fixup_user_fault() - manually resolve a user page fault
   * @tsk:	the task_struct to use for page fault accounting, or
   *		NULL if faults are not to be recorded.
   * @mm:		mm_struct of target mm
   * @address:	user address
   * @fault_flags:flags to pass down to handle_mm_fault()
   * @unlocked:	did we unlock the mmap_sem while retrying, maybe NULL if caller
   *		does not allow retry
   *
   * This is meant to be called in the specific scenario where for locking reasons
   * we try to access user memory in atomic context (within a pagefault_disable()
   * section), this returns -EFAULT, and we want to resolve the user fault before
   * trying again.
   *
   * Typically this is meant to be used by the futex code.
   *
   * The main difference with get_user_pages() is that this function will
   * unconditionally call handle_mm_fault() which will in turn perform all the
   * necessary SW fixup of the dirty and young bits in the PTE, while
   * get_user_pages() only guarantees to update these in the struct page.
   *
   * This is important for some architectures where those bits also gate the
   * access permission to the page because they are maintained in software.  On
   * such architectures, gup() will not be enough to make a subsequent access
   * succeed.
   *
 * This function will not return with an unlocked mmap_sem. So it does not
 * have the same semantics wrt the @mm->mmap_sem as does filemap_fault().
   */
  int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
  		     unsigned long address, unsigned int fault_flags,
  		     bool *unlocked)
  {
  	struct vm_area_struct *vma;
  	int ret, major = 0;
  
  	if (unlocked)
  		fault_flags |= FAULT_FLAG_ALLOW_RETRY;

  retry:
  	vma = find_extend_vma(mm, address);
  	if (!vma || address < vma->vm_start)
  		return -EFAULT;
  	if (!vma_permits_fault(vma, fault_flags))
  		return -EFAULT;
  	ret = handle_mm_fault(vma, address, fault_flags);
  	major |= ret & VM_FAULT_MAJOR;
  	if (ret & VM_FAULT_ERROR) {
  		if (ret & VM_FAULT_OOM)
  			return -ENOMEM;
  		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
  			return -EHWPOISON;
  		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
  			return -EFAULT;
  		BUG();
  	}
  
  	if (ret & VM_FAULT_RETRY) {
  		down_read(&mm->mmap_sem);
  		if (!(fault_flags & FAULT_FLAG_TRIED)) {
  			*unlocked = true;
  			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
  			fault_flags |= FAULT_FLAG_TRIED;
  			goto retry;
  		}
  	}
  	if (tsk) {
  		if (major)
  			tsk->maj_flt++;
  		else
  			tsk->min_flt++;
  	}
  	return 0;
  }
  EXPORT_SYMBOL_GPL(fixup_user_fault);
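
/*
 * Illustrative sketch, not part of mm/gup.c: the futex-style pattern the
 * comment above describes.  A user access attempted under
 * pagefault_disable() fails, the fault is resolved with fixup_user_fault(),
 * and the access is retried.  Assumes <linux/uaccess.h> for
 * pagefault_disable()/__get_user(); the helper name is hypothetical.
 */
static int example_read_user_u32(u32 __user *uaddr, u32 *val)
{
	struct mm_struct *mm = current->mm;
	bool unlocked = false;
	int ret;

	for (;;) {
		pagefault_disable();
		ret = __get_user(*val, uaddr);
		pagefault_enable();
		if (!ret)
			return 0;	/* the atomic access succeeded */

		down_read(&mm->mmap_sem);
		ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
				       0, &unlocked);
		up_read(&mm->mmap_sem);
		if (ret)
			return ret;	/* the fault could not be resolved */
	}
}
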

  static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
  						struct mm_struct *mm,
  						unsigned long start,
  						unsigned long nr_pages,
  						struct page **pages,
  						struct vm_area_struct **vmas,
  						int *locked, bool notify_drop,
  						unsigned int flags)
  {
  	long ret, pages_done;
  	bool lock_dropped;
  
  	if (locked) {
  		/* if VM_FAULT_RETRY can be returned, vmas become invalid */
  		BUG_ON(vmas);
  		/* check caller initialized locked */
  		BUG_ON(*locked != 1);
  	}
  
  	if (pages)
  		flags |= FOLL_GET;
  
  	pages_done = 0;
  	lock_dropped = false;
  	for (;;) {
  		ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages,
  				       vmas, locked);
  		if (!locked)
  			/* VM_FAULT_RETRY couldn't trigger, bypass */
  			return ret;
  
  		/* VM_FAULT_RETRY cannot return errors */
  		if (!*locked) {
  			BUG_ON(ret < 0);
  			BUG_ON(ret >= nr_pages);
  		}
  
  		if (!pages)
  			/* If it's a prefault don't insist harder */
  			return ret;
  
  		if (ret > 0) {
  			nr_pages -= ret;
  			pages_done += ret;
  			if (!nr_pages)
  				break;
  		}
  		if (*locked) {
  			/* VM_FAULT_RETRY didn't trigger */
  			if (!pages_done)
  				pages_done = ret;
  			break;
  		}
  		/* VM_FAULT_RETRY triggered, so seek to the faulting offset */
  		pages += ret;
  		start += ret << PAGE_SHIFT;
  
  		/*
  		 * Repeat on the address that fired VM_FAULT_RETRY
  		 * without FAULT_FLAG_ALLOW_RETRY but with
  		 * FAULT_FLAG_TRIED.
  		 */
  		*locked = 1;
  		lock_dropped = true;
  		down_read(&mm->mmap_sem);
  		ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED,
  				       pages, NULL, NULL);
  		if (ret != 1) {
  			BUG_ON(ret > 1);
  			if (!pages_done)
  				pages_done = ret;
  			break;
  		}
  		nr_pages--;
  		pages_done++;
  		if (!nr_pages)
  			break;
  		pages++;
  		start += PAGE_SIZE;
  	}
  	if (notify_drop && lock_dropped && *locked) {
  		/*
  		 * We must let the caller know we temporarily dropped the lock
  		 * and so the critical section protected by it was lost.
  		 */
  		up_read(&mm->mmap_sem);
  		*locked = 0;
  	}
  	return pages_done;
  }
  
  /*
   * We can leverage the VM_FAULT_RETRY functionality in the page fault
   * paths better by using either get_user_pages_locked() or
   * get_user_pages_unlocked().
   *
   * get_user_pages_locked() is suitable to replace the form:
   *
   *      down_read(&mm->mmap_sem);
   *      do_something()
 *      get_user_pages(..., pages, NULL);
   *      up_read(&mm->mmap_sem);
   *
   *  to:
   *
   *      int locked = 1;
   *      down_read(&mm->mmap_sem);
   *      do_something()
 *      get_user_pages_locked(..., pages, &locked);
   *      if (locked)
   *          up_read(&mm->mmap_sem);
   */
  long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
  			   unsigned int gup_flags, struct page **pages,
  			   int *locked)
  {
  	return __get_user_pages_locked(current, current->mm, start, nr_pages,
  				       pages, NULL, locked, true,
  				       gup_flags | FOLL_TOUCH);
  }
  EXPORT_SYMBOL(get_user_pages_locked);
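
/*
 * Illustrative sketch, not part of mm/gup.c: the replacement form shown in
 * the comment above, for a single page.  get_user_pages_locked() may drop
 * mmap_sem on VM_FAULT_RETRY, which is why the final up_read() is guarded
 * by "locked".  On success the caller owns a reference on *page and must
 * put_page() it.  The helper name is hypothetical.
 */
static long example_pin_one_page_locked(unsigned long start, struct page **page)
{
	int locked = 1;
	long ret;

	down_read(&current->mm->mmap_sem);
	/* do_something() that needs mmap_sem would go here */
	ret = get_user_pages_locked(start, 1, 0, page, &locked);
	if (locked)
		up_read(&current->mm->mmap_sem);
	return ret;
}
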
  
  /*
   * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
   * pass additional gup_flags as last parameter (like FOLL_HWPOISON).
   *
   * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
   * caller if required (just like with __get_user_pages). "FOLL_GET",
   * "FOLL_WRITE" and "FOLL_FORCE" are set implicitly as needed
   * according to the parameters "pages", "write", "force"
   * respectively.
   */
  __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
  					       unsigned long start, unsigned long nr_pages,
  					       struct page **pages, unsigned int gup_flags)
  {
  	long ret;
  	int locked = 1;

  	down_read(&mm->mmap_sem);
  	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
  				      &locked, false, gup_flags);
  	if (locked)
  		up_read(&mm->mmap_sem);
  	return ret;
  }
  EXPORT_SYMBOL(__get_user_pages_unlocked);
  
  /*
   * get_user_pages_unlocked() is suitable to replace the form:
   *
   *      down_read(&mm->mmap_sem);
 *      get_user_pages(..., pages, NULL);
   *      up_read(&mm->mmap_sem);
   *
   *  with:
   *
 *      get_user_pages_unlocked(..., pages);
   *
   * It is functionally equivalent to get_user_pages_fast so
   * get_user_pages_fast should be used instead, if the two parameters
   * "tsk" and "mm" are respectively equal to current and current->mm,
   * or if "force" shall be set to 1 (get_user_pages_fast misses the
   * "force" parameter).
   */
  long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
  			     struct page **pages, unsigned int gup_flags)
  {
  	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
  					 pages, gup_flags | FOLL_TOUCH);
  }
  EXPORT_SYMBOL(get_user_pages_unlocked);
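
/*
 * Illustrative sketch, not part of mm/gup.c: the simplest caller pattern,
 * with no mmap_sem held across the call.  mmap_sem is taken and released
 * internally and VM_FAULT_RETRY is handled on the caller's behalf; the
 * pinned page must later be released with put_page().  The helper name is
 * hypothetical.
 */
static long example_pin_one_page_unlocked(unsigned long start, struct page **page)
{
	return get_user_pages_unlocked(start, 1, page, FOLL_WRITE);
}
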

  /*
   * get_user_pages_remote() - pin user pages in memory
   * @tsk:	the task_struct to use for page fault accounting, or
   *		NULL if faults are not to be recorded.
   * @mm:		mm_struct of target mm
   * @start:	starting user address
   * @nr_pages:	number of pages from start to pin
   * @gup_flags:	flags modifying lookup behaviour
   * @pages:	array that receives pointers to the pages pinned.
   *		Should be at least nr_pages long. Or NULL, if caller
   *		only intends to ensure the pages are faulted in.
   * @vmas:	array of pointers to vmas corresponding to each page.
   *		Or NULL if the caller does not require them.
   *
   * Returns number of pages pinned. This may be fewer than the number
   * requested. If nr_pages is 0 or negative, returns 0. If no pages
   * were pinned, returns -errno. Each page returned must be released
   * with a put_page() call when it is finished with. vmas will only
   * remain valid while mmap_sem is held.
   *
   * Must be called with mmap_sem held for read or write.
   *
   * get_user_pages walks a process's page tables and takes a reference to
   * each struct page that each user address corresponds to at a given
   * instant. That is, it takes the page that would be accessed if a user
   * thread accesses the given user virtual address at that instant.
   *
   * This does not guarantee that the page exists in the user mappings when
   * get_user_pages returns, and there may even be a completely different
   * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
   * won't be freed completely. And mostly callers simply care that the page
   * contains data that was valid *at some point in time*. Typically, an IO
   * or similar operation cannot guarantee anything stronger anyway because
   * locks can't be held over the syscall boundary.
   *
   * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
   * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
   * be called after the page is finished with, and before put_page is called.
   *
   * get_user_pages is typically used for fewer-copy IO operations, to get a
   * handle on the memory by some means other than accesses via the user virtual
   * addresses. The pages may be submitted for DMA to devices or accessed via
   * their kernel linear mapping (via the kmap APIs). Care should be taken to
   * use the correct cache flushing APIs.
   *
   * See also get_user_pages_fast, for performance critical applications.
   *
   * get_user_pages should be phased out in favor of
   * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
   * should use get_user_pages because it cannot pass
   * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
   */
  long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
  		unsigned long start, unsigned long nr_pages,
  		unsigned int gup_flags, struct page **pages,
  		struct vm_area_struct **vmas)
  {
  	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
  				       NULL, false,
  				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
  }
  EXPORT_SYMBOL(get_user_pages_remote);
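
/*
 * Illustrative sketch, not part of mm/gup.c: reading one byte from a
 * foreign mm, in the spirit of ptrace()/access_remote_vm().  It assumes
 * the caller holds a reference on @mm (e.g. from get_task_mm()) and that
 * <linux/highmem.h> provides kmap()/kunmap().  The helper name is
 * hypothetical.
 */
static int example_read_remote_byte(struct task_struct *tsk, struct mm_struct *mm,
				    unsigned long addr, char *val)
{
	struct page *page;
	char *kaddr;
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages_remote(tsk, mm, addr & PAGE_MASK, 1,
				    0, &page, NULL);
	up_read(&mm->mmap_sem);
	if (ret < 1)
		return ret < 0 ? ret : -EFAULT;

	kaddr = kmap(page);
	*val = kaddr[addr & ~PAGE_MASK];
	kunmap(page);
	put_page(page);
	return 0;
}
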
  
  /*
   * This is the same as get_user_pages_remote(), just with a
   * less-flexible calling convention where we assume that the task
   * and mm being operated on are the current task's.  We also
   * obviously don't pass FOLL_REMOTE in here.
   */
  long get_user_pages(unsigned long start, unsigned long nr_pages,
  		unsigned int gup_flags, struct page **pages,
  		struct vm_area_struct **vmas)
  {
  	return __get_user_pages_locked(current, current->mm, start, nr_pages,
  				       pages, vmas, NULL, false,
  				       gup_flags | FOLL_TOUCH);
  }
  EXPORT_SYMBOL(get_user_pages);
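
/*
 * Illustrative sketch, not part of mm/gup.c: the pin/dirty/release protocol
 * described in the comment above get_user_pages_remote(), applied to the
 * current mm.  Pages that were written to are marked dirty with
 * set_page_dirty_lock() before the references are dropped.  The helper
 * name is hypothetical.
 */
static long example_pin_for_write(unsigned long start, unsigned long nr_pages,
				  struct page **pages)
{
	long pinned, i;

	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages(start, nr_pages, FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (pinned <= 0)
		return pinned;

	/* ... DMA into the pages or access them via their kernel mapping ... */

	for (i = 0; i < pinned; i++) {
		set_page_dirty_lock(pages[i]);
		put_page(pages[i]);
	}
	return pinned;
}
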
  
  /**
   * populate_vma_page_range() -  populate a range of pages in the vma.
   * @vma:   target vma
   * @start: start address
   * @end:   end address
   * @nonblocking:
   *
   * This takes care of mlocking the pages too if VM_LOCKED is set.
   *
   * return 0 on success, negative error code on error.
   *
   * vma->vm_mm->mmap_sem must be held.
   *
   * If @nonblocking is NULL, it may be held for read or write and will
   * be unperturbed.
   *
   * If @nonblocking is non-NULL, it must held for read only and may be
   * released.  If it's released, *@nonblocking will be set to 0.
   */
  long populate_vma_page_range(struct vm_area_struct *vma,
  		unsigned long start, unsigned long end, int *nonblocking)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long nr_pages = (end - start) / PAGE_SIZE;
  	int gup_flags;
  
  	VM_BUG_ON(start & ~PAGE_MASK);
  	VM_BUG_ON(end   & ~PAGE_MASK);
  	VM_BUG_ON_VMA(start < vma->vm_start, vma);
  	VM_BUG_ON_VMA(end   > vma->vm_end, vma);
  	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
  	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
  	if (vma->vm_flags & VM_LOCKONFAULT)
  		gup_flags &= ~FOLL_POPULATE;
  	/*
  	 * We want to touch writable mappings with a write fault in order
  	 * to break COW, except for shared mappings because these don't COW
  	 * and we would not want to dirty them for nothing.
  	 */
  	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
  		gup_flags |= FOLL_WRITE;
  
  	/*
  	 * We want mlock to succeed for regions that have any permissions
  	 * other than PROT_NONE.
  	 */
  	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
  		gup_flags |= FOLL_FORCE;
  
  	/*
  	 * We made sure addr is within a VMA, so the following will
  	 * not result in a stack expansion that recurses back here.
  	 */
  	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
  				NULL, NULL, nonblocking);
  }
  
  /*
   * __mm_populate - populate and/or mlock pages within a range of address space.
   *
   * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
   * flags. VMAs must be already marked with the desired vm_flags, and
   * mmap_sem must not be held.
   */
  int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
  {
  	struct mm_struct *mm = current->mm;
  	unsigned long end, nstart, nend;
  	struct vm_area_struct *vma = NULL;
  	int locked = 0;
  	long ret = 0;
  
  	VM_BUG_ON(start & ~PAGE_MASK);
  	VM_BUG_ON(len != PAGE_ALIGN(len));
  	end = start + len;
  
  	for (nstart = start; nstart < end; nstart = nend) {
  		/*
  		 * We want to fault in pages for [nstart; end) address range.
  		 * Find first corresponding VMA.
  		 */
  		if (!locked) {
  			locked = 1;
  			down_read(&mm->mmap_sem);
  			vma = find_vma(mm, nstart);
  		} else if (nstart >= vma->vm_end)
  			vma = vma->vm_next;
  		if (!vma || vma->vm_start >= end)
  			break;
  		/*
  		 * Set [nstart; nend) to intersection of desired address
  		 * range with the first VMA. Also, skip undesirable VMA types.
  		 */
  		nend = min(end, vma->vm_end);
  		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
  			continue;
  		if (nstart < vma->vm_start)
  			nstart = vma->vm_start;
  		/*
  		 * Now fault in a range of pages. populate_vma_page_range()
  		 * double checks the vma flags, so that it won't mlock pages
  		 * if the vma was already munlocked.
  		 */
  		ret = populate_vma_page_range(vma, nstart, nend, &locked);
  		if (ret < 0) {
  			if (ignore_errors) {
  				ret = 0;
  				continue;	/* continue at next VMA */
  			}
  			break;
  		}
  		nend = nstart + ret * PAGE_SIZE;
  		ret = 0;
  	}
  	if (locked)
  		up_read(&mm->mmap_sem);
  	return ret;	/* 0 or negative error code */
  }
  
  /**
   * get_dump_page() - pin user page in memory while writing it to core dump
   * @addr: user address
   *
   * Returns struct page pointer of user page pinned for dump,
   * to be freed afterwards by put_page().
   *
   * Returns NULL on any kind of failure - a hole must then be inserted into
   * the corefile, to preserve alignment with its headers; and also returns
   * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
   * allowing a hole to be left in the corefile to save diskspace.
   *
   * Called without mmap_sem, but after all other threads have been killed.
   */
  #ifdef CONFIG_ELF_CORE
  struct page *get_dump_page(unsigned long addr)
  {
  	struct vm_area_struct *vma;
  	struct page *page;
  
  	if (__get_user_pages(current, current->mm, addr, 1,
  			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
  			     NULL) < 1)
  		return NULL;
  	flush_cache_page(vma, addr, page_to_pfn(page));
  	return page;
  }
  #endif /* CONFIG_ELF_CORE */
  
  /*
   * Generic RCU Fast GUP
   *
   * get_user_pages_fast attempts to pin user pages by walking the page
   * tables directly and avoids taking locks. Thus the walker needs to be
   * protected from page table pages being freed from under it, and should
   * block any THP splits.
   *
   * One way to achieve this is to have the walker disable interrupts, and
   * rely on IPIs from the TLB flushing code blocking before the page table
   * pages are freed. This is unsuitable for architectures that do not need
   * to broadcast an IPI when invalidating TLBs.
   *
   * Another way to achieve this is to batch up page table containing pages
   * belonging to more than one mm_user, then rcu_sched a callback to free those
   * pages. Disabling interrupts will allow the fast_gup walker to both block
   * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
   * (which is a relatively rare event). The code below adopts this strategy.
   *
   * Before activating this code, please be aware that the following assumptions
   * are currently made:
   *
   *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
   *      pages containing page tables.
   *
   *  *) ptes can be read atomically by the architecture.
   *
   *  *) access_ok is sufficient to validate userspace address ranges.
   *
   * The last two assumptions can be relaxed by the addition of helper functions.
   *
   * This code is based heavily on the PowerPC implementation by Nick Piggin.
   */
  #ifdef CONFIG_HAVE_GENERIC_RCU_GUP
  
  #ifdef __HAVE_ARCH_PTE_SPECIAL
  static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
  			 int write, struct page **pages, int *nr)
  {
  	pte_t *ptep, *ptem;
  	int ret = 0;
  
  	ptem = ptep = pte_offset_map(&pmd, addr);
  	do {
  		/*
  		 * In the line below we are assuming that the pte can be read
  		 * atomically. If this is not the case for your architecture,
  		 * please wrap this in a helper function!
  		 *
  		 * for an example see gup_get_pte in arch/x86/mm/gup.c
  		 */
  		pte_t pte = READ_ONCE(*ptep);
  		struct page *head, *page;
  
  		/*
  		 * Similar to the PMD case below, NUMA hinting must take slow
  		 * path using the pte_protnone check.
  		 */
  		if (!pte_present(pte) || pte_special(pte) ||
  			pte_protnone(pte) || (write && !pte_write(pte)))
  			goto pte_unmap;
  		if (!arch_pte_access_permitted(pte, write))
  			goto pte_unmap;
  		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
  		page = pte_page(pte);
  		head = compound_head(page);

  		if (!page_cache_get_speculative(head))
  			goto pte_unmap;
  
  		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
  			put_page(head);
  			goto pte_unmap;
  		}
  		VM_BUG_ON_PAGE(compound_head(page) != head, page);
  		pages[*nr] = page;
  		(*nr)++;
  
  	} while (ptep++, addr += PAGE_SIZE, addr != end);
  
  	ret = 1;
  
  pte_unmap:
  	pte_unmap(ptem);
  	return ret;
  }
  #else
  
  /*
   * If we can't determine whether or not a pte is special, then fail immediately
   * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
   * to be special.
   *
   * For a futex to be placed on a THP tail page, get_futex_key requires a
   * __get_user_pages_fast implementation that can pin pages. Thus it's still
   * useful to have gup_huge_pmd even if we can't operate on ptes.
   */
  static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
  			 int write, struct page **pages, int *nr)
  {
  	return 0;
  }
  #endif /* __HAVE_ARCH_PTE_SPECIAL */
  
  static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
  		unsigned long end, int write, struct page **pages, int *nr)
  {
  	struct page *head, *page;
  	int refs;
  
  	if (write && !pmd_write(orig))
  		return 0;
  
  	refs = 0;
  	head = pmd_page(orig);
  	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
  	do {
  		VM_BUG_ON_PAGE(compound_head(page) != head, page);
  		pages[*nr] = page;
  		(*nr)++;
  		page++;
  		refs++;
  	} while (addr += PAGE_SIZE, addr != end);
  
  	if (!page_cache_add_speculative(head, refs)) {
  		*nr -= refs;
  		return 0;
  	}
  
  	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
  		*nr -= refs;
  		while (refs--)
  			put_page(head);
  		return 0;
  	}
  	return 1;
  }
  
  static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
  		unsigned long end, int write, struct page **pages, int *nr)
  {
  	struct page *head, *page;
  	int refs;
  
  	if (write && !pud_write(orig))
  		return 0;
  
  	refs = 0;
  	head = pud_page(orig);
  	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
  	do {
  		VM_BUG_ON_PAGE(compound_head(page) != head, page);
  		pages[*nr] = page;
  		(*nr)++;
  		page++;
  		refs++;
  	} while (addr += PAGE_SIZE, addr != end);
  
  	if (!page_cache_add_speculative(head, refs)) {
  		*nr -= refs;
  		return 0;
  	}
  
  	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
  		*nr -= refs;
  		while (refs--)
  			put_page(head);
  		return 0;
  	}
  	return 1;
  }
  static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
  			unsigned long end, int write,
  			struct page **pages, int *nr)
  {
  	int refs;
  	struct page *head, *page;
  
  	if (write && !pgd_write(orig))
  		return 0;
  
  	refs = 0;
  	head = pgd_page(orig);
  	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
  	do {
  		VM_BUG_ON_PAGE(compound_head(page) != head, page);
  		pages[*nr] = page;
  		(*nr)++;
  		page++;
  		refs++;
  	} while (addr += PAGE_SIZE, addr != end);
  
  	if (!page_cache_add_speculative(head, refs)) {
  		*nr -= refs;
  		return 0;
  	}
  
  	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
  		*nr -= refs;
  		while (refs--)
  			put_page(head);
  		return 0;
  	}
  	return 1;
  }
  static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
  		int write, struct page **pages, int *nr)
  {
  	unsigned long next;
  	pmd_t *pmdp;
  
  	pmdp = pmd_offset(&pud, addr);
  	do {
  		pmd_t pmd = READ_ONCE(*pmdp);
  
  		next = pmd_addr_end(addr, end);
  		if (pmd_none(pmd))
  			return 0;
  
  		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
  			/*
  			 * NUMA hinting faults need to be handled in the GUP
  			 * slowpath for accounting purposes and so that they
  			 * can be serialised against THP migration.
  			 */
  			if (pmd_protnone(pmd))
  				return 0;
  
  			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
  				pages, nr))
  				return 0;
  		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
  			/*
  			 * architecture have different format for hugetlbfs
  			 * pmd format and THP pmd format
  			 */
  			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
  					 PMD_SHIFT, next, write, pages, nr))
  				return 0;
  		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
  				return 0;
  	} while (pmdp++, addr = next, addr != end);
  
  	return 1;
  }
  static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
  			 int write, struct page **pages, int *nr)
  {
  	unsigned long next;
  	pud_t *pudp;
  	pudp = pud_offset(&pgd, addr);
  	do {
  		pud_t pud = READ_ONCE(*pudp);
  
  		next = pud_addr_end(addr, end);
  		if (pud_none(pud))
  			return 0;
  		if (unlikely(pud_huge(pud))) {
  			if (!gup_huge_pud(pud, pudp, addr, next, write,
  					  pages, nr))
  				return 0;
  		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
  			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
  					 PUD_SHIFT, next, write, pages, nr))
  				return 0;
  		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
  			return 0;
  	} while (pudp++, addr = next, addr != end);
  
  	return 1;
  }
  
  /*
   * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
   * the regular GUP. It will only return non-negative values.
   */
  int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  			  struct page **pages)
  {
  	struct mm_struct *mm = current->mm;
  	unsigned long addr, len, end;
  	unsigned long next, flags;
  	pgd_t *pgdp;
  	int nr = 0;
  
  	start &= PAGE_MASK;
  	addr = start;
  	len = (unsigned long) nr_pages << PAGE_SHIFT;
  	end = start + len;
  
  	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
  					start, len)))
  		return 0;
  
  	/*
  	 * Disable interrupts.  We use the nested form as we can already have
  	 * interrupts disabled by get_futex_key.
  	 *
  	 * With interrupts disabled, we block page table pages from being
  	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
  	 * for more details.
  	 *
  	 * We do not adopt an rcu_read_lock(.) here as we also want to
  	 * block IPIs that come from THPs splitting.
  	 */
  
  	local_irq_save(flags);
  	pgdp = pgd_offset(mm, addr);
  	do {
  		pgd_t pgd = READ_ONCE(*pgdp);

  		next = pgd_addr_end(addr, end);
  		if (pgd_none(pgd))
  			break;
  		if (unlikely(pgd_huge(pgd))) {
  			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
  					  pages, &nr))
  				break;
  		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
  			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
  					 PGDIR_SHIFT, next, write, pages, &nr))
  				break;
  		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
  			break;
  	} while (pgdp++, addr = next, addr != end);
  	local_irq_restore(flags);
  
  	return nr;
  }
  
  /**
   * get_user_pages_fast() - pin user pages in memory
   * @start:	starting user address
   * @nr_pages:	number of pages from start to pin
   * @write:	whether pages will be written to
   * @pages:	array that receives pointers to the pages pinned.
   *		Should be at least nr_pages long.
   *
   * Attempt to pin user pages in memory without taking mm->mmap_sem.
   * If not successful, it will fall back to taking the lock and
   * calling get_user_pages().
   *
   * Returns number of pages pinned. This may be fewer than the number
   * requested. If nr_pages is 0 or negative, returns 0. If no pages
   * were pinned, returns -errno.
   */
  int get_user_pages_fast(unsigned long start, int nr_pages, int write,
  			struct page **pages)
  {
  	int nr, ret;
  
  	start &= PAGE_MASK;
  	nr = __get_user_pages_fast(start, nr_pages, write, pages);
  	ret = nr;
  
  	if (nr < nr_pages) {
  		/* Try to get the remaining pages with get_user_pages */
  		start += nr << PAGE_SHIFT;
  		pages += nr;
  		ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
  				write ? FOLL_WRITE : 0);
  
  		/* Have to be a bit careful with return values */
  		if (nr > 0) {
  			if (ret < 0)
  				ret = nr;
  			else
  				ret += nr;
  		}
  	}
  
  	return ret;
  }
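   
   /*
    * Illustrative usage sketch, not part of the original file: the calling
    * convention described in the kernel-doc above. The caller checks how many
    * pages were actually pinned and drops each reference with put_page() when
    * it is done with them. The example_ function name and its variables are
    * hypothetical.
    */
   static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
   				   struct page **pages)
   {
   	int pinned, i;
   
   	/* May pin fewer pages than requested, or return -errno if none. */
   	pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
   	if (pinned <= 0)
   		return pinned;
   
   	/* ... access the pinned pages here ... */
   
   	for (i = 0; i < pinned; i++)
   		put_page(pages[i]);
   
   	return pinned;
   }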
  
  #endif /* CONFIG_HAVE_GENERIC_RCU_GUP */