  /*
   *  mm/userfaultfd.c
   *
   *  Copyright (C) 2015  Red Hat, Inc.
   *
   *  This work is licensed under the terms of the GNU GPL, version 2. See
   *  the COPYING file in the top-level directory.
   */
  
  #include <linux/mm.h>
  #include <linux/sched/signal.h>
  #include <linux/pagemap.h>
  #include <linux/rmap.h>
  #include <linux/swap.h>
  #include <linux/swapops.h>
  #include <linux/userfaultfd_k.h>
  #include <linux/mmu_notifier.h>
  #include <linux/hugetlb.h>
  #include <linux/pagemap.h>
  #include <linux/shmem_fs.h>
  #include <asm/tlbflush.h>
  #include "internal.h"
  
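/*
 * Resolve a userfault in a private (non-VM_SHARED) vma: allocate a new
 * page, copy PAGE_SIZE bytes into it from src_addr in the caller's
 * address space, charge it to the memcg and map it at dst_addr with the
 * vma's protections (write-enabled and dirty if VM_WRITE).
 *
 * Returns -ENOENT when the copy could not be completed with mmap_sem
 * held; the freshly allocated page is then handed back through *pagep so
 * the caller can redo the copy outside the lock and retry.
 */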
  static int mcopy_atomic_pte(struct mm_struct *dst_mm,
  			    pmd_t *dst_pmd,
  			    struct vm_area_struct *dst_vma,
  			    unsigned long dst_addr,
  			    unsigned long src_addr,
  			    struct page **pagep)
  {
  	struct mem_cgroup *memcg;
  	pte_t _dst_pte, *dst_pte;
  	spinlock_t *ptl;
  	void *page_kaddr;
  	int ret;
  	struct page *page;
  	pgoff_t offset, max_off;
  	struct inode *inode;

  	if (!*pagep) {
  		ret = -ENOMEM;
  		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
  		if (!page)
  			goto out;
  
  		page_kaddr = kmap_atomic(page);
  		ret = copy_from_user(page_kaddr,
  				     (const void __user *) src_addr,
  				     PAGE_SIZE);
  		kunmap_atomic(page_kaddr);
  
  		/* fallback to copy_from_user outside mmap_sem */
  		if (unlikely(ret)) {
  			ret = -ENOENT;
  			*pagep = page;
  			/* don't free the page */
  			goto out;
  		}
  	} else {
  		page = *pagep;
  		*pagep = NULL;
  	}
  
  	/*
  	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
  	 * the set_pte_at() write.
  	 */
  	__SetPageUptodate(page);
  
  	ret = -ENOMEM;
  	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
  		goto out_release;
  
  	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
  	if (dst_vma->vm_flags & VM_WRITE)
  		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
  	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
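	/*
	 * For a file-backed (MAP_PRIVATE shmem) destination, refuse to
	 * create memory beyond i_size: a regular fault there would
	 * SIGBUS, so return -EFAULT instead of filling the page.
	 */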
  	if (dst_vma->vm_file) {
  		/* the shmem MAP_PRIVATE case requires checking the i_size */
  		inode = dst_vma->vm_file->f_inode;
  		offset = linear_page_index(dst_vma, dst_addr);
  		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
  		ret = -EFAULT;
  		if (unlikely(offset >= max_off))
  			goto out_release_uncharge_unlock;
  	}
  	ret = -EEXIST;
  	if (!pte_none(*dst_pte))
  		goto out_release_uncharge_unlock;
  
  	inc_mm_counter(dst_mm, MM_ANONPAGES);
  	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
  	mem_cgroup_commit_charge(page, memcg, false, false);
  	lru_cache_add_active_or_unevictable(page, dst_vma);
  
  	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
  
  	/* No need to invalidate - it was non-present before */
  	update_mmu_cache(dst_vma, dst_addr, dst_pte);
  
  	pte_unmap_unlock(dst_pte, ptl);
  	ret = 0;
  out:
  	return ret;
  out_release_uncharge_unlock:
  	pte_unmap_unlock(dst_pte, ptl);
  	mem_cgroup_cancel_charge(page, memcg, false);
  out_release:
  	put_page(page);
  	goto out;
  }
  
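/*
 * Resolve a UFFDIO_ZEROPAGE request in a private (non-VM_SHARED) vma by
 * installing a pte_special mapping of the empty zero page at dst_addr.
 * Fails with -EEXIST if a pte is already present, or with -EFAULT if the
 * address lies beyond i_size for a MAP_PRIVATE shmem mapping.
 */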
  static int mfill_zeropage_pte(struct mm_struct *dst_mm,
  			      pmd_t *dst_pmd,
  			      struct vm_area_struct *dst_vma,
  			      unsigned long dst_addr)
  {
  	pte_t _dst_pte, *dst_pte;
  	spinlock_t *ptl;
  	int ret;
  	pgoff_t offset, max_off;
  	struct inode *inode;
  
  	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
  					 dst_vma->vm_page_prot));
  	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
  	if (dst_vma->vm_file) {
  		/* the shmem MAP_PRIVATE case requires checking the i_size */
  		inode = dst_vma->vm_file->f_inode;
  		offset = linear_page_index(dst_vma, dst_addr);
  		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
  		ret = -EFAULT;
  		if (unlikely(offset >= max_off))
  			goto out_unlock;
  	}
  	ret = -EEXIST;
  	if (!pte_none(*dst_pte))
  		goto out_unlock;
  	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
  	/* No need to invalidate - it was non-present before */
  	update_mmu_cache(dst_vma, dst_addr, dst_pte);
  	ret = 0;
  out_unlock:
  	pte_unmap_unlock(dst_pte, ptl);
  	return ret;
  }
  
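/*
 * Walk, and allocate if necessary, the page table levels above the pmd
 * that covers @address.  Returns the pmd pointer, or NULL if one of the
 * intermediate levels could not be allocated.
 */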
  static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
  {
  	pgd_t *pgd;
  	p4d_t *p4d;
  	pud_t *pud;
  
  	pgd = pgd_offset(mm, address);
  	p4d = p4d_alloc(mm, pgd, address);
  	if (!p4d)
  		return NULL;
  	pud = pud_alloc(mm, p4d, address);
  	if (!pud)
  		return NULL;
  	/*
	 * Note that this is not run only because the pmd was
	 * missing; the *pmd may already be established and in
	 * turn it may even be a trans_huge_pmd.
  	 */
  	return pmd_alloc(mm, pud, address);
  }
  #ifdef CONFIG_HUGETLB_PAGE
  /*
   * __mcopy_atomic processing for HUGETLB vmas.  Note that this routine is
 * called with mmap_sem held; it will release mmap_sem before returning.
   */
  static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
  					      struct vm_area_struct *dst_vma,
  					      unsigned long dst_start,
  					      unsigned long src_start,
  					      unsigned long len,
  					      bool zeropage)
  {
  	int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
  	int vm_shared = dst_vma->vm_flags & VM_SHARED;
  	ssize_t err;
  	pte_t *dst_pte;
  	unsigned long src_addr, dst_addr;
  	long copied;
  	struct page *page;
  	struct hstate *h;
  	unsigned long vma_hpagesize;
  	pgoff_t idx;
  	u32 hash;
  	struct address_space *mapping;
  
  	/*
  	 * There is no default zero huge page for all huge page sizes as
	 * supported by hugetlb.  A PMD_SIZE huge page may exist as used
  	 * by THP.  Since we can not reliably insert a zero page, this
  	 * feature is not supported.
  	 */
  	if (zeropage) {
  		up_read(&dst_mm->mmap_sem);
  		return -EINVAL;
  	}
  
  	src_addr = src_start;
  	dst_addr = dst_start;
  	copied = 0;
  	page = NULL;
  	vma_hpagesize = vma_kernel_pagesize(dst_vma);
  
  	/*
  	 * Validate alignment based on huge page size
  	 */
  	err = -EINVAL;
  	if (dst_start & (vma_hpagesize - 1) || len & (vma_hpagesize - 1))
  		goto out_unlock;
  
  retry:
  	/*
  	 * On routine entry dst_vma is set.  If we had to drop mmap_sem and
  	 * retry, dst_vma will be set to NULL and we must lookup again.
  	 */
  	if (!dst_vma) {
  		err = -ENOENT;
  		dst_vma = find_vma(dst_mm, dst_start);
  		if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
  			goto out_unlock;
  		/*
		 * Check the vma is registered in uffd; this is
  		 * required to enforce the VM_MAYWRITE check done at
  		 * uffd registration time.
  		 */
  		if (!dst_vma->vm_userfaultfd_ctx.ctx)
  			goto out_unlock;
  		if (dst_start < dst_vma->vm_start ||
  		    dst_start + len > dst_vma->vm_end)
  			goto out_unlock;

  		err = -EINVAL;
  		if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
  			goto out_unlock;
  		vm_shared = dst_vma->vm_flags & VM_SHARED;
  	}
  
  	if (WARN_ON(dst_addr & (vma_hpagesize - 1) ||
  		    (len - copied) & (vma_hpagesize - 1)))
  		goto out_unlock;
  
  	/*
	 * If not shared, ensure the dst_vma has an anon_vma.
  	 */
  	err = -ENOMEM;
  	if (!vm_shared) {
  		if (unlikely(anon_vma_prepare(dst_vma)))
  			goto out_unlock;
  	}
  
  	h = hstate_vma(dst_vma);
  
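	/*
	 * Copy one huge page per iteration: take the hugetlb fault mutex
	 * to serialize against faults, allocate the destination pte and
	 * only proceed if it is still huge_pte_none.
	 */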
  	while (src_addr < src_start + len) {
  		pte_t dst_pteval;
  
  		BUG_ON(dst_addr >= dst_start + len);
  		VM_BUG_ON(dst_addr & ~huge_page_mask(h));
  
  		/*
  		 * Serialize via hugetlb_fault_mutex
  		 */
  		idx = linear_page_index(dst_vma, dst_addr);
  		mapping = dst_vma->vm_file->f_mapping;
  		hash = hugetlb_fault_mutex_hash(h, dst_mm, dst_vma, mapping,
  								idx, dst_addr);
  		mutex_lock(&hugetlb_fault_mutex_table[hash]);
  
  		err = -ENOMEM;
  		dst_pte = huge_pte_alloc(dst_mm, dst_addr, huge_page_size(h));
  		if (!dst_pte) {
  			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  			goto out_unlock;
  		}
  
  		err = -EEXIST;
  		dst_pteval = huge_ptep_get(dst_pte);
  		if (!huge_pte_none(dst_pteval)) {
  			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  			goto out_unlock;
  		}
  
  		err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
  						dst_addr, src_addr, &page);
  
  		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  		vm_alloc_shared = vm_shared;
  
  		cond_resched();
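		/*
		 * -ENOENT means the source could not be copied while
		 * mmap_sem was held: drop mmap_sem, redo the copy with
		 * page faults allowed and retry, looking up the vma
		 * again once mmap_sem is re-taken.
		 */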
  		if (unlikely(err == -ENOENT)) {
  			up_read(&dst_mm->mmap_sem);
  			BUG_ON(!page);
  
  			err = copy_huge_page_from_user(page,
  						(const void __user *)src_addr,
  						pages_per_huge_page(h), true);
  			if (unlikely(err)) {
  				err = -EFAULT;
  				goto out;
  			}
  			down_read(&dst_mm->mmap_sem);
  
  			dst_vma = NULL;
  			goto retry;
  		} else
  			BUG_ON(page);
  
  		if (!err) {
  			dst_addr += vma_hpagesize;
  			src_addr += vma_hpagesize;
  			copied += vma_hpagesize;
  
  			if (fatal_signal_pending(current))
  				err = -EINTR;
  		}
  		if (err)
  			break;
  	}
  
  out_unlock:
  	up_read(&dst_mm->mmap_sem);
  out:
  	if (page) {
  		/*
  		 * We encountered an error and are about to free a newly
  		 * allocated huge page.
  		 *
  		 * Reservation handling is very subtle, and is different for
  		 * private and shared mappings.  See the routine
  		 * restore_reserve_on_error for details.  Unfortunately, we
  		 * can not call restore_reserve_on_error now as it would
  		 * require holding mmap_sem.
  		 *
  		 * If a reservation for the page existed in the reservation
  		 * map of a private mapping, the map was modified to indicate
  		 * the reservation was consumed when the page was allocated.
  		 * We clear the PagePrivate flag now so that the global
  		 * reserve count will not be incremented in free_huge_page.
  		 * The reservation map will still indicate the reservation
  		 * was consumed and possibly prevent later page allocation.
  		 * This is better than leaking a global reservation.  If no
  		 * reservation existed, it is still safe to clear PagePrivate
  		 * as no adjustments to reservation counts were made during
  		 * allocation.
  		 *
  		 * The reservation map for shared mappings indicates which
  		 * pages have reservations.  When a huge page is allocated
  		 * for an address with a reservation, no change is made to
  		 * the reserve map.  In this case PagePrivate will be set
  		 * to indicate that the global reservation count should be
  		 * incremented when the page is freed.  This is the desired
  		 * behavior.  However, when a huge page is allocated for an
  		 * address without a reservation a reservation entry is added
  		 * to the reservation map, and PagePrivate will not be set.
  		 * When the page is freed, the global reserve count will NOT
  		 * be incremented and it will appear as though we have leaked
		 * a reserved page.  In this case, set PagePrivate so that the
  		 * global reserve count will be incremented to match the
  		 * reservation map entry which was created.
  		 *
  		 * Note that vm_alloc_shared is based on the flags of the vma
  		 * for which the page was originally allocated.  dst_vma could
  		 * be different or NULL on error.
  		 */
  		if (vm_alloc_shared)
  			SetPagePrivate(page);
  		else
  			ClearPagePrivate(page);
  		put_page(page);
  	}
  	BUG_ON(copied < 0);
  	BUG_ON(err > 0);
  	BUG_ON(!copied && !err);
  	return copied ? copied : err;
  }
  #else /* !CONFIG_HUGETLB_PAGE */
  /* fail at build time if gcc attempts to use this */
  extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
  				      struct vm_area_struct *dst_vma,
  				      unsigned long dst_start,
  				      unsigned long src_start,
  				      unsigned long len,
  				      bool zeropage);
  #endif /* CONFIG_HUGETLB_PAGE */
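
/*
 * Fill a single pte on behalf of __mcopy_atomic: dispatch to the
 * anonymous or the shmem implementation depending on whether the
 * destination vma is VM_SHARED, for either the copy or the zeropage
 * variant.
 */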
  static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
  						pmd_t *dst_pmd,
  						struct vm_area_struct *dst_vma,
  						unsigned long dst_addr,
  						unsigned long src_addr,
  						struct page **page,
  						bool zeropage)
  {
  	ssize_t err;
  	/*
  	 * The normal page fault path for a shmem will invoke the
  	 * fault, fill the hole in the file and COW it right away. The
  	 * result generates plain anonymous memory. So when we are
	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
  	 * generate anonymous memory directly without actually filling
  	 * the hole. For the MAP_PRIVATE case the robustness check
  	 * only happens in the pagetable (to verify it's still none)
  	 * and not in the radix tree.
  	 */
  	if (!(dst_vma->vm_flags & VM_SHARED)) {
  		if (!zeropage)
  			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
  					       dst_addr, src_addr, page);
  		else
  			err = mfill_zeropage_pte(dst_mm, dst_pmd,
  						 dst_vma, dst_addr);
  	} else {
  		if (!zeropage)
  			err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
  						     dst_vma, dst_addr,
  						     src_addr, page);
  		else
  			err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
  						       dst_vma, dst_addr);
  	}
  
  	return err;
  }
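
/*
 * Common implementation behind mcopy_atomic() and mfill_zeropage():
 * validate the range, look up the destination vma under mmap_sem and
 * fill it one page at a time, redoing the copy outside mmap_sem
 * whenever mfill_atomic_pte() asks for it with -ENOENT.  Returns the
 * number of bytes filled, or a negative error if nothing was done.
 */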
  static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
  					      unsigned long dst_start,
  					      unsigned long src_start,
  					      unsigned long len,
  					      bool zeropage)
  {
  	struct vm_area_struct *dst_vma;
  	ssize_t err;
  	pmd_t *dst_pmd;
  	unsigned long src_addr, dst_addr;
  	long copied;
  	struct page *page;
  
  	/*
  	 * Sanitize the command parameters:
  	 */
  	BUG_ON(dst_start & ~PAGE_MASK);
  	BUG_ON(len & ~PAGE_MASK);
  
  	/* Does the address range wrap, or is the span zero-sized? */
  	BUG_ON(src_start + len <= src_start);
  	BUG_ON(dst_start + len <= dst_start);
  	src_addr = src_start;
  	dst_addr = dst_start;
  	copied = 0;
  	page = NULL;
  retry:
  	down_read(&dst_mm->mmap_sem);
  
  	/*
	 * Make sure the vma is not shared, and that the dst range is
  	 * both valid and fully within a single existing vma.
  	 */
  	err = -ENOENT;
  	dst_vma = find_vma(dst_mm, dst_start);
  	if (!dst_vma)
  		goto out_unlock;
  	/*
	 * Check the vma is registered in uffd; this is required to
  	 * enforce the VM_MAYWRITE check done at uffd registration
  	 * time.
  	 */
  	if (!dst_vma->vm_userfaultfd_ctx.ctx)
  		goto out_unlock;

  	if (dst_start < dst_vma->vm_start ||
  	    dst_start + len > dst_vma->vm_end)
  		goto out_unlock;

  	err = -EINVAL;
  	/*
  	 * shmem_zero_setup is invoked in mmap for MAP_ANONYMOUS|MAP_SHARED but
  	 * it will overwrite vm_ops, so vma_is_anonymous must return false.
  	 */
  	if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
  	    dst_vma->vm_flags & VM_SHARED))
  		goto out_unlock;
  	/*
  	 * If this is a HUGETLB vma, pass off to appropriate routine
  	 */
  	if (is_vm_hugetlb_page(dst_vma))
		return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
  						src_start, len, zeropage);
  	if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
  		goto out_unlock;
  
  	/*
	 * Ensure the dst_vma has an anon_vma or this page
  	 * would get a NULL anon_vma when moved in the
  	 * dst_vma.
  	 */
  	err = -ENOMEM;
  	if (!(dst_vma->vm_flags & VM_SHARED) &&
  	    unlikely(anon_vma_prepare(dst_vma)))
  		goto out_unlock;

  	while (src_addr < src_start + len) {
  		pmd_t dst_pmdval;

  		BUG_ON(dst_addr >= dst_start + len);

  		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
  		if (unlikely(!dst_pmd)) {
  			err = -ENOMEM;
  			break;
  		}
  
  		dst_pmdval = pmd_read_atomic(dst_pmd);
  		/*
  		 * If the dst_pmd is mapped as THP don't
  		 * override it and just be strict.
  		 */
  		if (unlikely(pmd_trans_huge(dst_pmdval))) {
  			err = -EEXIST;
  			break;
  		}
  		if (unlikely(pmd_none(dst_pmdval)) &&
  		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
  			err = -ENOMEM;
  			break;
  		}
		/* If a huge pmd materialized from under us, fail */
  		if (unlikely(pmd_trans_huge(*dst_pmd))) {
  			err = -EFAULT;
  			break;
  		}
  
  		BUG_ON(pmd_none(*dst_pmd));
  		BUG_ON(pmd_trans_huge(*dst_pmd));
  		err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
  				       src_addr, &page, zeropage);
  		cond_resched();
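		/*
		 * -ENOENT: mcopy_atomic_pte() could not copy the source
		 * page under mmap_sem (copy_from_user runs under
		 * kmap_atomic there), so drop mmap_sem, copy through a
		 * sleepable kmap() and retry, revalidating the vma.
		 */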
  		if (unlikely(err == -ENOENT)) {
  			void *page_kaddr;
  
  			up_read(&dst_mm->mmap_sem);
  			BUG_ON(!page);
  
  			page_kaddr = kmap(page);
  			err = copy_from_user(page_kaddr,
  					     (const void __user *) src_addr,
  					     PAGE_SIZE);
  			kunmap(page);
  			if (unlikely(err)) {
  				err = -EFAULT;
  				goto out;
  			}
  			goto retry;
  		} else
  			BUG_ON(page);
  		if (!err) {
  			dst_addr += PAGE_SIZE;
  			src_addr += PAGE_SIZE;
  			copied += PAGE_SIZE;
  
  			if (fatal_signal_pending(current))
  				err = -EINTR;
  		}
  		if (err)
  			break;
  	}
  out_unlock:
  	up_read(&dst_mm->mmap_sem);
  out:
  	if (page)
  		put_page(page);
  	BUG_ON(copied < 0);
  	BUG_ON(err > 0);
  	BUG_ON(!copied && !err);
  	return copied ? copied : err;
  }
  
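/* Entry point for the UFFDIO_COPY ioctl. */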
  ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
  		     unsigned long src_start, unsigned long len)
  {
  	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
  }
  
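/* Entry point for the UFFDIO_ZEROPAGE ioctl. */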
  ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
  		       unsigned long len)
  {
  	return __mcopy_atomic(dst_mm, start, 0, len, true);
  }