fs/userfaultfd.c
  // SPDX-License-Identifier: GPL-2.0-only
  /*
   *  fs/userfaultfd.c
   *
   *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
   *  Copyright (C) 2008-2009 Red Hat, Inc.
   *  Copyright (C) 2015  Red Hat, Inc.
   *
   *  Some part derived from fs/eventfd.c (anon inode setup) and
   *  mm/ksm.c (mm hashing).
   */
  #include <linux/list.h>
  #include <linux/hashtable.h>
  #include <linux/sched/signal.h>
  #include <linux/sched/mm.h>
  #include <linux/mm.h>
  #include <linux/poll.h>
  #include <linux/slab.h>
  #include <linux/seq_file.h>
  #include <linux/file.h>
  #include <linux/bug.h>
  #include <linux/anon_inodes.h>
  #include <linux/syscalls.h>
  #include <linux/userfaultfd_k.h>
  #include <linux/mempolicy.h>
  #include <linux/ioctl.h>
  #include <linux/security.h>
  #include <linux/hugetlb.h>

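  /*
   * sysctl knob (vm.unprivileged_userfaultfd): non-zero allows
   * unprivileged users to call the userfaultfd syscall.
   */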
  int sysctl_unprivileged_userfaultfd __read_mostly = 1;
  static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
  enum userfaultfd_state {
  	UFFD_STATE_WAIT_API,
  	UFFD_STATE_RUNNING,
  };
  /*
   * Start with fault_pending_wqh and fault_wqh so they're more likely
   * to be in the same cacheline.
   *
   * Locking order:
   *	fd_wqh.lock
   *		fault_pending_wqh.lock
   *			fault_wqh.lock
   *		event_wqh.lock
   *
   * To avoid deadlocks, IRQs must be disabled when taking any of the above locks,
   * since fd_wqh.lock is taken by aio_poll() while it's holding a lock that's
   * also taken in IRQ context.
   */
  struct userfaultfd_ctx {
  	/* waitqueue head for the pending (i.e. not read) userfaults */
  	wait_queue_head_t fault_pending_wqh;
  	/* waitqueue head for the userfaults */
  	wait_queue_head_t fault_wqh;
  	/* waitqueue head for the pseudo fd to wakeup poll/read */
  	wait_queue_head_t fd_wqh;
  	/* waitqueue head for events */
  	wait_queue_head_t event_wqh;
  	/* a refile sequence protected by fault_pending_wqh lock */
  	struct seqcount refile_seq;
  	/* pseudo fd refcounting */
  	refcount_t refcount;
  	/* userfaultfd syscall flags */
  	unsigned int flags;
  	/* features requested from the userspace */
  	unsigned int features;
  	/* state machine */
  	enum userfaultfd_state state;
  	/* released */
  	bool released;
  	/* memory mappings are changing because of non-cooperative event */
  	bool mmap_changing;
  	/* mm with one or more vmas attached to this userfaultfd_ctx */
  	struct mm_struct *mm;
  };
  struct userfaultfd_fork_ctx {
  	struct userfaultfd_ctx *orig;
  	struct userfaultfd_ctx *new;
  	struct list_head list;
  };
  struct userfaultfd_unmap_ctx {
  	struct userfaultfd_ctx *ctx;
  	unsigned long start;
  	unsigned long end;
  	struct list_head list;
  };
  struct userfaultfd_wait_queue {
  	struct uffd_msg msg;
  	wait_queue_entry_t wq;
  	struct userfaultfd_ctx *ctx;
  	bool waken;
  };
  
  struct userfaultfd_wake_range {
  	unsigned long start;
  	unsigned long len;
  };
  static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
  				     int wake_flags, void *key)
  {
  	struct userfaultfd_wake_range *range = key;
  	int ret;
  	struct userfaultfd_wait_queue *uwq;
  	unsigned long start, len;
  
  	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
  	ret = 0;
  	/* len == 0 means wake all */
  	start = range->start;
  	len = range->len;
  	if (len && (start > uwq->msg.arg.pagefault.address ||
  		    start + len <= uwq->msg.arg.pagefault.address))
  		goto out;
  	WRITE_ONCE(uwq->waken, true);
  	/*
  	 * The Program-Order guarantees provided by the scheduler
  	 * ensure uwq->waken is visible before the task is woken.
  	 */
  	ret = wake_up_state(wq->private, mode);
  	if (ret) {
  		/*
  		 * Wake only once, autoremove behavior.
  		 *
  		 * After the effect of list_del_init is visible to the other
  		 * CPUs, the waitqueue may disappear from under us, see the
  		 * !list_empty_careful() in handle_userfault().
  		 *
  		 * try_to_wake_up() has an implicit smp_mb(), and the
  		 * wq->private is read before calling the extern function
  		 * "wake_up_state" (which in turn calls try_to_wake_up).
  		 */
  		list_del_init(&wq->entry);
  	}
  out:
  	return ret;
  }
  
  /**
   * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
   * context.
   * @ctx: [in] Pointer to the userfaultfd context.
   */
  static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
  {
  	refcount_inc(&ctx->refcount);
  }
  
  /**
   * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
   * context.
   * @ctx: [in] Pointer to userfaultfd context.
   *
   * The userfaultfd context reference must have been previously acquired either
   * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
   */
  static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
  {
  	if (refcount_dec_and_test(&ctx->refcount)) {
  		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
  		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
  		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
  		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
  		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
  		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
  		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
  		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
  		mmdrop(ctx->mm);
  		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
  	}
  }
  static inline void msg_init(struct uffd_msg *msg)
  {
  	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
  	/*
  	 * Must use memset to zero out the padding or kernel data is
  	 * leaked to userland.
  	 */
  	memset(msg, 0, sizeof(struct uffd_msg));
  }
  
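  /*
   * Build the UFFD_EVENT_PAGEFAULT message for this fault, translating
   * the architectural fault flags into UFFD_PAGEFAULT_FLAG_*.
   */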
  static inline struct uffd_msg userfault_msg(unsigned long address,
  					    unsigned int flags,
  					    unsigned long reason,
  					    unsigned int features)
  {
  	struct uffd_msg msg;
  	msg_init(&msg);
  	msg.event = UFFD_EVENT_PAGEFAULT;
  	msg.arg.pagefault.address = address;
  	if (flags & FAULT_FLAG_WRITE)
  		/*
  		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
  		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE
  		 * was not set in a UFFD_EVENT_PAGEFAULT, it means it
  		 * was a read fault, otherwise if set it means it's
  		 * a write fault.
  		 */
  		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
  	if (reason & VM_UFFD_WP)
  		/*
  		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
  		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was
  		 * not set in a UFFD_EVENT_PAGEFAULT, it means it was
  		 * a missing fault, otherwise if set it means it's a
  		 * write protect fault.
  		 */
  		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
  	if (features & UFFD_FEATURE_THREAD_ID)
  		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
  	return msg;
  }
  #ifdef CONFIG_HUGETLB_PAGE
  /*
   * Same functionality as userfaultfd_must_wait below with modifications for
   * hugepmd ranges.
   */
  static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
  					 struct vm_area_struct *vma,
  					 unsigned long address,
  					 unsigned long flags,
  					 unsigned long reason)
  {
  	struct mm_struct *mm = ctx->mm;
  	pte_t *ptep, pte;
  	bool ret = true;
  
  	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
  	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
  
  	if (!ptep)
  		goto out;
  
  	ret = false;
  	pte = huge_ptep_get(ptep);
  
  	/*
  	 * Lockless access: we're in a wait_event so it's ok if it
  	 * changes under us.
  	 */
  	if (huge_pte_none(pte))
  		ret = true;
  	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
  		ret = true;
  out:
  	return ret;
  }
  #else
  static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
  					 struct vm_area_struct *vma,
  					 unsigned long address,
  					 unsigned long flags,
  					 unsigned long reason)
  {
  	return false;	/* should never get here */
  }
  #endif /* CONFIG_HUGETLB_PAGE */
  /*
   * Verify the pagetables are still not ok after having registered into
   * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
   * userfault that has already been resolved, if userfaultfd_read and
   * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
   * threads.
   */
  static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
  					 unsigned long address,
  					 unsigned long flags,
  					 unsigned long reason)
  {
  	struct mm_struct *mm = ctx->mm;
  	pgd_t *pgd;
  	p4d_t *p4d;
  	pud_t *pud;
  	pmd_t *pmd, _pmd;
  	pte_t *pte;
  	bool ret = true;
  
  	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
  
  	pgd = pgd_offset(mm, address);
  	if (!pgd_present(*pgd))
  		goto out;
  	p4d = p4d_offset(pgd, address);
  	if (!p4d_present(*p4d))
  		goto out;
  	pud = pud_offset(p4d, address);
  	if (!pud_present(*pud))
  		goto out;
  	pmd = pmd_offset(pud, address);
  	/*
  	 * READ_ONCE must function as a barrier with narrower scope
  	 * and it must be equivalent to:
  	 *	_pmd = *pmd; barrier();
  	 *
  	 * This is to deal with the instability (as in
  	 * pmd_trans_unstable) of the pmd.
  	 */
  	_pmd = READ_ONCE(*pmd);
  	if (pmd_none(_pmd))
  		goto out;
  
  	ret = false;
  	if (!pmd_present(_pmd))
  		goto out;
  	if (pmd_trans_huge(_pmd))
  		goto out;
  
  	/*
  	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
  	 * and use the standard pte_offset_map() instead of parsing _pmd.
  	 */
  	pte = pte_offset_map(pmd, address);
  	/*
  	 * Lockless access: we're in a wait_event so it's ok if it
  	 * changes under us.
  	 */
  	if (pte_none(*pte))
  		ret = true;
  	pte_unmap(pte);
  
  out:
  	return ret;
  }
  
  /*
   * The locking rules involved in returning VM_FAULT_RETRY depending on
   * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
   * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
   * recommendation in __lock_page_or_retry is not an understatement.
   *
   * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
   * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
   * not set.
   *
   * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
   * set, VM_FAULT_RETRY can still be returned if and only if there are
   * fatal_signal_pending()s, and the mmap_sem must be released before
   * returning it.
   */
  vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
  {
  	struct mm_struct *mm = vmf->vma->vm_mm;
  	struct userfaultfd_ctx *ctx;
  	struct userfaultfd_wait_queue uwq;
  	vm_fault_t ret = VM_FAULT_SIGBUS;
  	bool must_wait, return_to_userland;
  	long blocking_state;

  	/*
  	 * We don't do userfault handling for the final child pid update.
  	 *
  	 * We also don't do userfault handling during
  	 * coredumping. hugetlbfs has the special
  	 * follow_hugetlb_page() to skip missing pages in the
  	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
  	 * the no_page_table() helper in follow_page_mask(), but the
  	 * shmem_vm_ops->fault method is invoked even during
  	 * coredumping without mmap_sem and it ends up here.
  	 */
  	if (current->flags & (PF_EXITING|PF_DUMPCORE))
  		goto out;
  
  	/*
  	 * Coredumping runs without mmap_sem so we can only check that
  	 * the mmap_sem is held, if PF_DUMPCORE was not set.
  	 */
  	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
  	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
  	if (!ctx)
  		goto out;
  
  	BUG_ON(ctx->mm != mm);
  
  	VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
  	VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));
  	if (ctx->features & UFFD_FEATURE_SIGBUS)
  		goto out;
  	/*
  	 * If it's already released don't get it. This avoids looping
  	 * in __get_user_pages if userfaultfd_release waits on the
  	 * caller of handle_userfault to release the mmap_sem.
  	 */
  	if (unlikely(READ_ONCE(ctx->released))) {
  		/*
  		 * Don't return VM_FAULT_SIGBUS in this case, so a non
  		 * cooperative manager can close the uffd after the
  		 * last UFFDIO_COPY, without the risk of triggering an
  		 * involuntary SIGBUS if the process was starting the
  		 * userfaultfd while the userfaultfd was still armed
  		 * (but after the last UFFDIO_COPY). If the uffd
  		 * wasn't already closed when the userfault reached
  		 * this point, that would normally be solved by
  		 * userfaultfd_must_wait returning 'false'.
  		 *
  		 * If we were to return VM_FAULT_SIGBUS here, the non
  		 * cooperative manager would be instead forced to
  		 * always call UFFDIO_UNREGISTER before it can safely
  		 * close the uffd.
  		 */
  		ret = VM_FAULT_NOPAGE;
  		goto out;
  	}
  
  	/*
  	 * Check that we can return VM_FAULT_RETRY.
  	 *
  	 * NOTE: it should become possible to return VM_FAULT_RETRY
  	 * even if FAULT_FLAG_TRIED is set without leading to gup()
  	 * -EBUSY failures, if the userfaultfd is to be extended for
  	 * VM_UFFD_WP tracking and we intend to arm the userfault
  	 * without first stopping userland access to the memory. For
  	 * VM_UFFD_MISSING userfaults this is enough for now.
  	 */
  	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
  		/*
  		 * Validate the invariant that nowait must allow retry
  		 * to be sure not to return SIGBUS erroneously on
  		 * nowait invocations.
  		 */
  		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
  #ifdef CONFIG_DEBUG_VM
  		if (printk_ratelimit()) {
  			printk(KERN_WARNING
  			       "FAULT_FLAG_ALLOW_RETRY missing %x
  ",
  			       vmf->flags);
  			dump_stack();
  		}
  #endif
  		goto out;
  	}
  
  	/*
  	 * Handle nowait, not much to do other than tell it to retry
  	 * and wait.
  	 */
  	ret = VM_FAULT_RETRY;
  	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
  		goto out;
  
  	/* take the reference before dropping the mmap_sem */
  	userfaultfd_ctx_get(ctx);
  	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
  	uwq.wq.private = current;
  	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
  			ctx->features);
  	uwq.ctx = ctx;
  	uwq.waken = false;

  	return_to_userland =
  		(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
  		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
  	blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
  			 TASK_KILLABLE;

  	spin_lock_irq(&ctx->fault_pending_wqh.lock);
  	/*
  	 * After the __add_wait_queue the uwq is visible to userland
  	 * through poll/read().
  	 */
  	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
  	/*
  	 * The smp_mb() after __set_current_state prevents the reads
  	 * following the spin_unlock to happen before the list_add in
  	 * __add_wait_queue.
  	 */
  	set_current_state(blocking_state);
  	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

  	if (!is_vm_hugetlb_page(vmf->vma))
  		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
  						  reason);
  	else
  		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
  						       vmf->address,
  						       vmf->flags, reason);
  	up_read(&mm->mmap_sem);
  	if (likely(must_wait && !READ_ONCE(ctx->released) &&
  		   (return_to_userland ? !signal_pending(current) :
  		    !fatal_signal_pending(current)))) {
  		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
  		schedule();
  		ret |= VM_FAULT_MAJOR;
  
  		/*
  		 * False wakeups can originate even from rwsem before
  		 * up_read(), however userfaults will wait either for a
  		 * targeted wakeup on the specific uwq waitqueue from
  		 * wake_userfault() or for signals or for uffd
  		 * release.
  		 */
  		while (!READ_ONCE(uwq.waken)) {
  			/*
  			 * This needs the full smp_store_mb()
  			 * guarantee as the state write must be
  			 * visible to other CPUs before reading
  			 * uwq.waken from other CPUs.
  			 */
  			set_current_state(blocking_state);
  			if (READ_ONCE(uwq.waken) ||
  			    READ_ONCE(ctx->released) ||
  			    (return_to_userland ? signal_pending(current) :
  			     fatal_signal_pending(current)))
  				break;
  			schedule();
  		}
  	}

  	__set_current_state(TASK_RUNNING);

  	if (return_to_userland) {
  		if (signal_pending(current) &&
  		    !fatal_signal_pending(current)) {
  			/*
  			 * If we got a SIGSTOP or SIGCONT and this is
  			 * a normal userland page fault, just let
  			 * userland return so the signal will be
  			 * handled and gdb debugging works.  The page
  			 * fault code immediately after we return from
  			 * this function is going to release the
  			 * mmap_sem and it's not depending on it
  			 * (unlike gup would if we were not to return
  			 * VM_FAULT_RETRY).
  			 *
  			 * If a fatal signal is pending we still take
  			 * the streamlined VM_FAULT_RETRY failure path
  			 * and there's no need to retake the mmap_sem
  			 * in such case.
  			 */
  			down_read(&mm->mmap_sem);
  			ret = VM_FAULT_NOPAGE;
  		}
  	}
  	/*
  	 * Here we race with the list_del; list_add in
  	 * userfaultfd_ctx_read(), however because we don't ever run
  	 * list_del_init() to refile across the two lists, the prev
  	 * and next pointers will never point to self. list_add also
  	 * would never let either of the two pointers point to
  	 * self. So list_empty_careful won't risk seeing both pointers
  	 * pointing to self at any time during the list refile. The
  	 * only case where list_del_init() is called is the full
  	 * removal in the wake function and there we don't re-list_add
  	 * and it's fine not to block on the spinlock. The uwq on this
  	 * kernel stack can be released after the list_del_init.
  	 */
  	if (!list_empty_careful(&uwq.wq.entry)) {
  		spin_lock_irq(&ctx->fault_pending_wqh.lock);
  		/*
  		 * No need of list_del_init(), the uwq on the stack
  		 * will be freed shortly anyway.
  		 */
  		list_del(&uwq.wq.entry);
  		spin_unlock_irq(&ctx->fault_pending_wqh.lock);
  	}
  
  	/*
  	 * ctx may go away after this if the userfault pseudo fd is
  	 * already released.
  	 */
  	userfaultfd_ctx_put(ctx);
  out:
  	return ret;
  }
  static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
  					      struct userfaultfd_wait_queue *ewq)
  {
  	struct userfaultfd_ctx *release_new_ctx;
  	if (WARN_ON_ONCE(current->flags & PF_EXITING))
  		goto out;
  
  	ewq->ctx = ctx;
  	init_waitqueue_entry(&ewq->wq, current);
  	release_new_ctx = NULL;

  	spin_lock_irq(&ctx->event_wqh.lock);
  	/*
  	 * After the __add_wait_queue the uwq is visible to userland
  	 * through poll/read().
  	 */
  	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
  	for (;;) {
  		set_current_state(TASK_KILLABLE);
  		if (ewq->msg.event == 0)
  			break;
  		if (READ_ONCE(ctx->released) ||
  		    fatal_signal_pending(current)) {
  			/*
  			 * &ewq->wq may be queued in fork_event, but
  			 * __remove_wait_queue ignores the head
  			 * parameter. It would be a problem if it
  			 * didn't.
  			 */
  			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
  			if (ewq->msg.event == UFFD_EVENT_FORK) {
  				struct userfaultfd_ctx *new;
  
  				new = (struct userfaultfd_ctx *)
  					(unsigned long)
  					ewq->msg.arg.reserved.reserved1;
  				release_new_ctx = new;
  			}
  			break;
  		}
  		spin_unlock_irq(&ctx->event_wqh.lock);

  		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
  		schedule();
  		spin_lock_irq(&ctx->event_wqh.lock);
  	}
  	__set_current_state(TASK_RUNNING);
  	spin_unlock_irq(&ctx->event_wqh.lock);

  	if (release_new_ctx) {
  		struct vm_area_struct *vma;
  		struct mm_struct *mm = release_new_ctx->mm;
  
  		/* the various vma->vm_userfaultfd_ctx still points to it */
  		down_write(&mm->mmap_sem);
  		/* no task can run (and in turn coredump) yet */
  		VM_WARN_ON(!mmget_still_valid(mm));
  		for (vma = mm->mmap; vma; vma = vma->vm_next)
  			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
  				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
  				vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
  			}
  		up_write(&mm->mmap_sem);
  
  		userfaultfd_ctx_put(release_new_ctx);
  	}
  	/*
  	 * ctx may go away after this if the userfault pseudo fd is
  	 * already released.
  	 */
  out:
  	WRITE_ONCE(ctx->mmap_changing, false);
  	userfaultfd_ctx_put(ctx);
  }
  
  static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
  				       struct userfaultfd_wait_queue *ewq)
  {
  	ewq->msg.event = 0;
  	wake_up_locked(&ctx->event_wqh);
  	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
  }
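  /*
   * Called by dup_mmap() for each vma of the parent: if the vma is
   * registered with UFFD_FEATURE_EVENT_FORK, allocate (or reuse) a
   * context for the child and queue it on the fcs list so that
   * dup_userfaultfd_complete() can report the fork to the manager.
   */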
  int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
  {
  	struct userfaultfd_ctx *ctx = NULL, *octx;
  	struct userfaultfd_fork_ctx *fctx;
  
  	octx = vma->vm_userfaultfd_ctx.ctx;
  	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
  		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
  		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
  		return 0;
  	}
  
  	list_for_each_entry(fctx, fcs, list)
  		if (fctx->orig == octx) {
  			ctx = fctx->new;
  			break;
  		}
  
  	if (!ctx) {
  		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
  		if (!fctx)
  			return -ENOMEM;
  
  		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
  		if (!ctx) {
  			kfree(fctx);
  			return -ENOMEM;
  		}
  		refcount_set(&ctx->refcount, 1);
  		ctx->flags = octx->flags;
  		ctx->state = UFFD_STATE_RUNNING;
  		ctx->features = octx->features;
  		ctx->released = false;
  		ctx->mmap_changing = false;
  		ctx->mm = vma->vm_mm;
  		mmgrab(ctx->mm);
  
  		userfaultfd_ctx_get(octx);
  		WRITE_ONCE(octx->mmap_changing, true);
  		fctx->orig = octx;
  		fctx->new = ctx;
  		list_add_tail(&fctx->list, fcs);
  	}
  
  	vma->vm_userfaultfd_ctx.ctx = ctx;
  	return 0;
  }
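  /*
   * Report a single UFFD_EVENT_FORK to the parent's uffd and wait until
   * the manager has read it and resolved the new file descriptor.
   */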
  static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
  {
  	struct userfaultfd_ctx *ctx = fctx->orig;
  	struct userfaultfd_wait_queue ewq;
  
  	msg_init(&ewq.msg);
  
  	ewq.msg.event = UFFD_EVENT_FORK;
  	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;
  	userfaultfd_event_wait_completion(ctx, &ewq);
  }
  
  void dup_userfaultfd_complete(struct list_head *fcs)
  {
  	struct userfaultfd_fork_ctx *fctx, *n;
  
  	list_for_each_entry_safe(fctx, n, fcs, list) {
  		dup_fctx(fctx);
  		list_del(&fctx->list);
  		kfree(fctx);
  	}
  }
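  /*
   * Hand the uffd context over from the vma to the caller's
   * vm_userfaultfd_ctx copy, so that mremap_userfaultfd_complete() can
   * notify the manager once the pages have actually been moved.
   */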
  void mremap_userfaultfd_prep(struct vm_area_struct *vma,
  			     struct vm_userfaultfd_ctx *vm_ctx)
  {
  	struct userfaultfd_ctx *ctx;
  
  	ctx = vma->vm_userfaultfd_ctx.ctx;
  
  	if (!ctx)
  		return;
  
  	if (ctx->features & UFFD_FEATURE_EVENT_REMAP) {
  		vm_ctx->ctx = ctx;
  		userfaultfd_ctx_get(ctx);
  		WRITE_ONCE(ctx->mmap_changing, true);
  	} else {
  		/* Drop uffd context if remap feature not enabled */
  		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
  		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
  	}
  }
  void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
  				 unsigned long from, unsigned long to,
  				 unsigned long len)
  {
  	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
  	struct userfaultfd_wait_queue ewq;
  
  	if (!ctx)
  		return;
  
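  	/* an unaligned "to" is mremap()'s error return: skip the event */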
  	if (to & ~PAGE_MASK) {
  		userfaultfd_ctx_put(ctx);
  		return;
  	}
  
  	msg_init(&ewq.msg);
  
  	ewq.msg.event = UFFD_EVENT_REMAP;
  	ewq.msg.arg.remap.from = from;
  	ewq.msg.arg.remap.to = to;
  	ewq.msg.arg.remap.len = len;
  
  	userfaultfd_event_wait_completion(ctx, &ewq);
  }
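  /*
   * Notify the manager that a range is being zapped (e.g. by madvise
   * MADV_DONTNEED or MADV_REMOVE). Returns true if no event was queued
   * and the caller still holds the mmap_sem; returns false after
   * dropping the mmap_sem and waiting for the event to be read.
   */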
  bool userfaultfd_remove(struct vm_area_struct *vma,
  			unsigned long start, unsigned long end)
  {
  	struct mm_struct *mm = vma->vm_mm;
  	struct userfaultfd_ctx *ctx;
  	struct userfaultfd_wait_queue ewq;
  
  	ctx = vma->vm_userfaultfd_ctx.ctx;
  	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
  		return true;
  
  	userfaultfd_ctx_get(ctx);
  	WRITE_ONCE(ctx->mmap_changing, true);
  	up_read(&mm->mmap_sem);
  	msg_init(&ewq.msg);
  	ewq.msg.event = UFFD_EVENT_REMOVE;
  	ewq.msg.arg.remove.start = start;
  	ewq.msg.arg.remove.end = end;
  
  	userfaultfd_event_wait_completion(ctx, &ewq);
  	return false;
  }
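  /*
   * Check whether an unmap event for this range and context is already
   * queued, so that a munmap() spanning several vmas that share one
   * uffd context raises a single UFFD_EVENT_UNMAP.
   */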
  static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
  			  unsigned long start, unsigned long end)
  {
  	struct userfaultfd_unmap_ctx *unmap_ctx;
  
  	list_for_each_entry(unmap_ctx, unmaps, list)
  		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
  		    unmap_ctx->end == end)
  			return true;
  
  	return false;
  }
  
  int userfaultfd_unmap_prep(struct vm_area_struct *vma,
  			   unsigned long start, unsigned long end,
  			   struct list_head *unmaps)
  {
  	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
  		struct userfaultfd_unmap_ctx *unmap_ctx;
  		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
  
  		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
  		    has_unmap_ctx(ctx, unmaps, start, end))
  			continue;
  
  		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
  		if (!unmap_ctx)
  			return -ENOMEM;
  
  		userfaultfd_ctx_get(ctx);
  		WRITE_ONCE(ctx->mmap_changing, true);
  		unmap_ctx->ctx = ctx;
  		unmap_ctx->start = start;
  		unmap_ctx->end = end;
  		list_add_tail(&unmap_ctx->list, unmaps);
  	}
  
  	return 0;
  }
  
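  /*
   * Called once the mmap_sem has been dropped: deliver one
   * UFFD_EVENT_UNMAP per queued range and free the bookkeeping.
   */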
  void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
  {
  	struct userfaultfd_unmap_ctx *ctx, *n;
  	struct userfaultfd_wait_queue ewq;
  
  	list_for_each_entry_safe(ctx, n, uf, list) {
  		msg_init(&ewq.msg);
  
  		ewq.msg.event = UFFD_EVENT_UNMAP;
  		ewq.msg.arg.remove.start = ctx->start;
  		ewq.msg.arg.remove.end = ctx->end;
  
  		userfaultfd_event_wait_completion(ctx->ctx, &ewq);
  
  		list_del(&ctx->list);
  		kfree(ctx);
  	}
  }
  static int userfaultfd_release(struct inode *inode, struct file *file)
  {
  	struct userfaultfd_ctx *ctx = file->private_data;
  	struct mm_struct *mm = ctx->mm;
  	struct vm_area_struct *vma, *prev;
  	/* len == 0 means wake all */
  	struct userfaultfd_wake_range range = { .len = 0, };
  	unsigned long new_flags;
  	bool still_valid;

  	WRITE_ONCE(ctx->released, true);

  	if (!mmget_not_zero(mm))
  		goto wakeup;
  	/*
  	 * Flush page faults out of all CPUs. NOTE: all page faults
  	 * must be retried without returning VM_FAULT_SIGBUS if
  	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
  	 * changes while handle_userfault released the mmap_sem. So
  	 * it's critical that released is set to true (above), before
  	 * taking the mmap_sem for writing.
  	 */
  	down_write(&mm->mmap_sem);
  	still_valid = mmget_still_valid(mm);
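  	/*
  	 * Strip every vma of its uffd context and VM_UFFD_* flags,
  	 * merging neighbouring vmas back together where possible.
  	 */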
  	prev = NULL;
  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
  		cond_resched();
  		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
  		       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
  		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
  			prev = vma;
  			continue;
  		}
  		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
  		if (still_valid) {
  			prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
  					 new_flags, vma->anon_vma,
  					 vma->vm_file, vma->vm_pgoff,
  					 vma_policy(vma),
  					 NULL_VM_UFFD_CTX);
  			if (prev)
  				vma = prev;
  			else
  				prev = vma;
  		}
  		vma->vm_flags = new_flags;
  		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
  	}
  	up_write(&mm->mmap_sem);
  	mmput(mm);
  wakeup:
  	/*
  	 * After no new page faults can wait on this fault_*wqh, flush
  	 * the last page faults that may have been already waiting on
  	 * the fault_*wqh.
  	 */
  	spin_lock_irq(&ctx->fault_pending_wqh.lock);
  	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
  	__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
  	spin_unlock_irq(&ctx->fault_pending_wqh.lock);

  	/* Flush pending events that may still wait on event_wqh */
  	wake_up_all(&ctx->event_wqh);
  	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
  	userfaultfd_ctx_put(ctx);
  	return 0;
  }
  /* fault_pending_wqh.lock must be held by the caller */
  static inline struct userfaultfd_wait_queue *find_userfault_in(
  		wait_queue_head_t *wqh)
  {
  	wait_queue_entry_t *wq;
  	struct userfaultfd_wait_queue *uwq;

  	lockdep_assert_held(&wqh->lock);

  	uwq = NULL;
  	if (!waitqueue_active(wqh))
  		goto out;
  	/* walk in reverse to provide FIFO behavior to read userfaults */
  	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
  	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
  out:
  	return uwq;
  }
  
  static inline struct userfaultfd_wait_queue *find_userfault(
  		struct userfaultfd_ctx *ctx)
  {
  	return find_userfault_in(&ctx->fault_pending_wqh);
  }

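  /* event_wqh.lock must be held by the caller */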
  static inline struct userfaultfd_wait_queue *find_userfault_evt(
  		struct userfaultfd_ctx *ctx)
  {
  	return find_userfault_in(&ctx->event_wqh);
  }
  static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
  {
  	struct userfaultfd_ctx *ctx = file->private_data;
  	__poll_t ret;
  
  	poll_wait(file, &ctx->fd_wqh, wait);
  
  	switch (ctx->state) {
  	case UFFD_STATE_WAIT_API:
  		return EPOLLERR;
  	case UFFD_STATE_RUNNING:
  		/*
  		 * poll() never guarantees that read won't block.
  		 * userfaults can be woken before they're read().
  		 */
  		if (unlikely(!(file->f_flags & O_NONBLOCK)))
  			return EPOLLERR;
  		/*
  		 * Lockless access to see if there are pending faults.
  		 * __pollwait's last action is the add_wait_queue, but
  		 * the spin_unlock would allow the waitqueue_active to
  		 * pass above the actual list_add inside the
  		 * add_wait_queue critical section. So use a full
  		 * memory barrier to serialize the list_add write of
  		 * add_wait_queue() with the waitqueue_active read
  		 * below.
  		 */
  		ret = 0;
  		smp_mb();
  		if (waitqueue_active(&ctx->fault_pending_wqh))
  			ret = EPOLLIN;
  		else if (waitqueue_active(&ctx->event_wqh))
  			ret = EPOLLIN;

  		return ret;
  	default:
  		WARN_ON_ONCE(1);
  		return EPOLLERR;
  	}
  }
  static const struct file_operations userfaultfd_fops;
  
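  /*
   * Install a new file descriptor for the forked child's uffd context
   * and advertise it in the UFFD_EVENT_FORK message read by the manager.
   */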
  static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
  				  struct userfaultfd_ctx *new,
  				  struct uffd_msg *msg)
  {
  	int fd;

  	fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, new,
  			      O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS));
  	if (fd < 0)
  		return fd;
  	msg->arg.reserved.reserved1 = 0;
  	msg->arg.fork.ufd = fd;
  	return 0;
  }
  static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
  				    struct uffd_msg *msg)
  {
  	ssize_t ret;
  	DECLARE_WAITQUEUE(wait, current);
  	struct userfaultfd_wait_queue *uwq;
  	/*
  	 * Handling fork event requires sleeping operations, so
  	 * we drop the event_wqh lock, then do these ops, then
  	 * lock it back and wake up the waiter. While the lock is
  	 * dropped the ewq may go away so we keep track of it
  	 * carefully.
  	 */
  	LIST_HEAD(fork_event);
  	struct userfaultfd_ctx *fork_nctx = NULL;

  	/* always take the fd_wqh lock before the fault_pending_wqh lock */
  	spin_lock_irq(&ctx->fd_wqh.lock);
  	__add_wait_queue(&ctx->fd_wqh, &wait);
  	for (;;) {
  		set_current_state(TASK_INTERRUPTIBLE);
  		spin_lock(&ctx->fault_pending_wqh.lock);
  		uwq = find_userfault(ctx);
  		if (uwq) {
  			/*
  			 * Use a seqcount to repeat the lockless check
  			 * in wake_userfault() to avoid missing
  			 * wakeups because during the refile both
  			 * waitqueue could become empty if this is the
  			 * only userfault.
  			 */
  			write_seqcount_begin(&ctx->refile_seq);
  
  			/*
  			 * The fault_pending_wqh.lock prevents the uwq
  			 * from disappearing from under us.
  			 *
  			 * Refile this userfault from
  			 * fault_pending_wqh to fault_wqh, it's not
  			 * pending anymore after we read it.
  			 *
  			 * Use list_del() by hand (as
  			 * userfaultfd_wake_function also uses
  			 * list_del_init() by hand) to be sure nobody
  			 * changes __remove_wait_queue() to use
  			 * list_del_init() in turn breaking the
  			 * !list_empty_careful() check in
  			 * handle_userfault(). The uwq->wq.head list
  			 * must never be empty at any time during the
  			 * refile, or the waitqueue could disappear
  			 * from under us. The "wait_queue_head_t"
  			 * parameter of __remove_wait_queue() is unused
  			 * anyway.
  			 */
  			list_del(&uwq->wq.entry);
  			add_wait_queue(&ctx->fault_wqh, &uwq->wq);

  			write_seqcount_end(&ctx->refile_seq);
  			/* careful to always initialize msg if ret == 0 */
  			*msg = uwq->msg;
  			spin_unlock(&ctx->fault_pending_wqh.lock);
  			ret = 0;
  			break;
  		}
  		spin_unlock(&ctx->fault_pending_wqh.lock);
  
  		spin_lock(&ctx->event_wqh.lock);
  		uwq = find_userfault_evt(ctx);
  		if (uwq) {
  			*msg = uwq->msg;
  			if (uwq->msg.event == UFFD_EVENT_FORK) {
  				fork_nctx = (struct userfaultfd_ctx *)
  					(unsigned long)
  					uwq->msg.arg.reserved.reserved1;
  				list_move(&uwq->wq.entry, &fork_event);
  				/*
  				 * fork_nctx can be freed as soon as
  				 * we drop the lock, unless we take a
  				 * reference on it.
  				 */
  				userfaultfd_ctx_get(fork_nctx);
  				spin_unlock(&ctx->event_wqh.lock);
  				ret = 0;
  				break;
  			}
  			userfaultfd_event_complete(ctx, uwq);
  			spin_unlock(&ctx->event_wqh.lock);
  			ret = 0;
  			break;
  		}
  		spin_unlock(&ctx->event_wqh.lock);
  		if (signal_pending(current)) {
  			ret = -ERESTARTSYS;
  			break;
  		}
  		if (no_wait) {
  			ret = -EAGAIN;
  			break;
  		}
  		spin_unlock_irq(&ctx->fd_wqh.lock);
  		schedule();
  		spin_lock_irq(&ctx->fd_wqh.lock);
  	}
  	__remove_wait_queue(&ctx->fd_wqh, &wait);
  	__set_current_state(TASK_RUNNING);
  	spin_unlock_irq(&ctx->fd_wqh.lock);

  	if (!ret && msg->event == UFFD_EVENT_FORK) {
  		ret = resolve_userfault_fork(ctx, fork_nctx, msg);
  		spin_lock_irq(&ctx->event_wqh.lock);
  		if (!list_empty(&fork_event)) {
  			/*
  			 * The fork thread didn't abort, so we can
  			 * drop the temporary refcount.
  			 */
  			userfaultfd_ctx_put(fork_nctx);
  
  			uwq = list_first_entry(&fork_event,
  					       typeof(*uwq),
  					       wq.entry);
  			/*
  			 * The fork_event list wasn't empty, so the
  			 * event wasn't already released by the fork
  			 * thread (the event is allocated on the fork
  			 * kernel stack): put the event back in its
  			 * place in event_wqh. The fork_event list
  			 * head is freed as soon as we return, so the
  			 * event cannot stay queued there no matter
  			 * the current "ret" value.
  			 */
  			list_del(&uwq->wq.entry);
  			__add_wait_queue(&ctx->event_wqh, &uwq->wq);

  			/*
  			 * Leave the event in the waitqueue and report
  			 * error to userland if we failed to resolve
  			 * the userfault fork.
  			 */
  			if (likely(!ret))
  				userfaultfd_event_complete(ctx, uwq);
  		} else {
  			/*
  			 * Here the fork thread aborted and the
  			 * refcount from the fork thread on fork_nctx
  			 * has already been released. We still hold
  			 * the reference we took before releasing the
  			 * lock above. If resolve_userfault_fork
  			 * failed we have to drop it, because the
  			 * fork_nctx has to be freed in such case. If
  			 * it succeeded we'll hold it, because the new
  			 * uffd references it.
  			 */
  			if (ret)
  				userfaultfd_ctx_put(fork_nctx);
  		}
  		spin_unlock_irq(&ctx->event_wqh.lock);
  	}
  	return ret;
  }
  
  static ssize_t userfaultfd_read(struct file *file, char __user *buf,
  				size_t count, loff_t *ppos)
  {
  	struct userfaultfd_ctx *ctx = file->private_data;
  	ssize_t _ret, ret = 0;
  	struct uffd_msg msg;
  	int no_wait = file->f_flags & O_NONBLOCK;
  
  	if (ctx->state == UFFD_STATE_WAIT_API)
  		return -EINVAL;
  
  	for (;;) {
  		if (count < sizeof(msg))
  			return ret ? ret : -EINVAL;
  		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
  		if (_ret < 0)
  			return ret ? ret : _ret;
  		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
  			return ret ? ret : -EFAULT;
  		ret += sizeof(msg);
  		buf += sizeof(msg);
  		count -= sizeof(msg);
  		/*
  		 * Allow reading more than one fault at a time, but
  		 * only block when waiting for the very first one.
  		 */
  		no_wait = O_NONBLOCK;
  	}
  }
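
  /*
   * Userspace view (informal sketch, not part of this file): a fault
   * handling thread typically blocks in read(2) on the uffd and
   * services one struct uffd_msg at a time, e.g.:
   *
   *	struct uffd_msg msg;
   *	while (read(uffd, &msg, sizeof(msg)) == sizeof(msg)) {
   *		if (msg.event == UFFD_EVENT_PAGEFAULT)
   *			resolve(msg.arg.pagefault.address);
   *	}
   *
   * resolve() is a hypothetical helper (typically UFFDIO_COPY or
   * UFFDIO_ZEROPAGE, see below); short reads never happen because
   * userfaultfd_read() above only copies out whole messages.
   */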
  
  static void __wake_userfault(struct userfaultfd_ctx *ctx,
  			     struct userfaultfd_wake_range *range)
  {
  	spin_lock_irq(&ctx->fault_pending_wqh.lock);
  	/* wake all in the range and autoremove */
  	if (waitqueue_active(&ctx->fault_pending_wqh))
  		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
  				     range);
  	if (waitqueue_active(&ctx->fault_wqh))
  		__wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range);
  	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
  }
  
  static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
  					   struct userfaultfd_wake_range *range)
  {
  	unsigned seq;
  	bool need_wakeup;
  	/*
  	 * To be sure waitqueue_active() is not reordered by the CPU
  	 * before the pagetable update, use an explicit SMP memory
  	 * barrier here. PT lock release or up_read(mmap_sem) only
  	 * have release semantics, which can still allow the
  	 * waitqueue_active() to be reordered before the pte update.
  	 */
  	smp_mb();
  
  	/*
  	 * Use waitqueue_active() because it's very frequent to
  	 * change the address space even when there are no userfaults
  	 * pending yet, so only take the spinlock when we're sure
  	 * there are userfaults to wake.
  	 */
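  	/*
  	 * The retry loop below pairs with the refile_seq write side
  	 * in userfaultfd_ctx_read(): the seqcount is bumped while a
  	 * uwq is moved from fault_pending_wqh to fault_wqh, so a
  	 * concurrent refile can make both waitqueue_active() checks
  	 * momentarily report false. Retry until a stable snapshot is
  	 * read.
  	 */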
  	do {
  		seq = read_seqcount_begin(&ctx->refile_seq);
  		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
  			waitqueue_active(&ctx->fault_wqh);
  		cond_resched();
  	} while (read_seqcount_retry(&ctx->refile_seq, seq));
  	if (need_wakeup)
  		__wake_userfault(ctx, range);
  }
  
  static __always_inline int validate_range(struct mm_struct *mm,
  					  __u64 *start, __u64 len)
  {
  	__u64 task_size = mm->task_size;
  	*start = untagged_addr(*start);
  
  	if (*start & ~PAGE_MASK)
  		return -EINVAL;
  	if (len & ~PAGE_MASK)
  		return -EINVAL;
  	if (!len)
  		return -EINVAL;
  	if (*start < mmap_min_addr)
  		return -EINVAL;
  	if (*start >= task_size)
  		return -EINVAL;
  	if (len > task_size - *start)
  		return -EINVAL;
  	return 0;
  }
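
  /*
   * Note the overflow-safe form of the last check above:
   * "*start + len > task_size" could wrap around for a huge len,
   * while "len > task_size - *start" cannot, since *start < task_size
   * has already been verified at this point.
   */
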
  static inline bool vma_can_userfault(struct vm_area_struct *vma)
  {
  	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
  		vma_is_shmem(vma);
  }
  static int userfaultfd_register(struct userfaultfd_ctx *ctx,
  				unsigned long arg)
  {
  	struct mm_struct *mm = ctx->mm;
  	struct vm_area_struct *vma, *prev, *cur;
  	int ret;
  	struct uffdio_register uffdio_register;
  	struct uffdio_register __user *user_uffdio_register;
  	unsigned long vm_flags, new_flags;
  	bool found;
  	bool basic_ioctls;
  	unsigned long start, end, vma_end;
  
  	user_uffdio_register = (struct uffdio_register __user *) arg;
  
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_register, user_uffdio_register,
  			   sizeof(uffdio_register)-sizeof(__u64)))
  		goto out;
  
  	ret = -EINVAL;
  	if (!uffdio_register.mode)
  		goto out;
  	if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
  				     UFFDIO_REGISTER_MODE_WP))
  		goto out;
  	vm_flags = 0;
  	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
  		vm_flags |= VM_UFFD_MISSING;
  	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
  		vm_flags |= VM_UFFD_WP;
  		/*
  		 * FIXME: remove the below error constraint by
  		 * implementing the wprotect tracking mode.
  		 */
  		ret = -EINVAL;
  		goto out;
  	}
  	ret = validate_range(mm, &uffdio_register.range.start,
  			     uffdio_register.range.len);
  	if (ret)
  		goto out;
  
  	start = uffdio_register.range.start;
  	end = start + uffdio_register.range.len;
  	ret = -ENOMEM;
  	if (!mmget_not_zero(mm))
  		goto out;
  	down_write(&mm->mmap_sem);
  	if (!mmget_still_valid(mm))
  		goto out_unlock;
  	vma = find_vma_prev(mm, start, &prev);
  	if (!vma)
  		goto out_unlock;
  
  	/* check that there's at least one vma in the range */
  	ret = -EINVAL;
  	if (vma->vm_start >= end)
  		goto out_unlock;
  
  	/*
  	 * If the first vma contains huge pages, make sure start address
  	 * is aligned to huge page size.
  	 */
  	if (is_vm_hugetlb_page(vma)) {
  		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
  
  		if (start & (vma_hpagesize - 1))
  			goto out_unlock;
  	}
  
  	/*
  	 * Search for incompatible vmas.
  	 */
  	found = false;
  	basic_ioctls = false;
  	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
  		cond_resched();
  
  		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
  		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
  
  		/* check for incompatible vmas */
  		ret = -EINVAL;
  		if (!vma_can_userfault(cur))
  			goto out_unlock;
  
  		/*
  		 * UFFDIO_COPY will fill file holes even without
  		 * PROT_WRITE. This check enforces that if this is a
  		 * MAP_SHARED mapping, the process has write permission
  		 * to the backing file. If VM_MAYWRITE is set on a
  		 * MAP_SHARED vma, it also guarantees that no
  		 * F_SEAL_WRITE is in place and that none can be taken
  		 * until the vma is destroyed.
  		 */
  		ret = -EPERM;
  		if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
  			goto out_unlock;
  		/*
  		 * If this vma contains ending address, and huge pages
  		 * check alignment.
  		 */
  		if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
  		    end > cur->vm_start) {
  			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);
  
  			ret = -EINVAL;
  
  			if (end & (vma_hpagesize - 1))
  				goto out_unlock;
  		}
  
  		/*
  		 * Check that this vma isn't already owned by a
  		 * different userfaultfd. We can't allow more than one
  		 * userfaultfd to own a single vma simultaneously or we
  		 * wouldn't know which one to deliver the userfaults to.
  		 */
  		ret = -EBUSY;
  		if (cur->vm_userfaultfd_ctx.ctx &&
  		    cur->vm_userfaultfd_ctx.ctx != ctx)
  			goto out_unlock;
  		/*
  		 * Note vmas containing huge pages
  		 */
  		if (is_vm_hugetlb_page(cur))
  			basic_ioctls = true;

  		found = true;
  	}
  	BUG_ON(!found);
  
  	if (vma->vm_start < start)
  		prev = vma;
  
  	ret = 0;
  	do {
  		cond_resched();
  		BUG_ON(!vma_can_userfault(vma));
  		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
  		       vma->vm_userfaultfd_ctx.ctx != ctx);
  		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
  
  		/*
  		 * Nothing to do: this vma is already registered into this
  		 * userfaultfd and with the right tracking mode too.
  		 */
  		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
  		    (vma->vm_flags & vm_flags) == vm_flags)
  			goto skip;
  
  		if (vma->vm_start > start)
  			start = vma->vm_start;
  		vma_end = min(end, vma->vm_end);
  
  		new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
  		prev = vma_merge(mm, prev, start, vma_end, new_flags,
  				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
  				 vma_policy(vma),
  				 ((struct vm_userfaultfd_ctx){ ctx }));
  		if (prev) {
  			vma = prev;
  			goto next;
  		}
  		if (vma->vm_start < start) {
  			ret = split_vma(mm, vma, start, 1);
  			if (ret)
  				break;
  		}
  		if (vma->vm_end > end) {
  			ret = split_vma(mm, vma, end, 0);
  			if (ret)
  				break;
  		}
  	next:
  		/*
  		 * In the vma_merge() successful mprotect-like case 8:
  		 * the next vma was merged into the current one and
  		 * the current one has not been updated yet.
  		 */
  		vma->vm_flags = new_flags;
  		vma->vm_userfaultfd_ctx.ctx = ctx;
  
  	skip:
  		prev = vma;
  		start = vma->vm_end;
  		vma = vma->vm_next;
  	} while (vma && vma->vm_start < end);
  out_unlock:
  	up_write(&mm->mmap_sem);
  	mmput(mm);
  	if (!ret) {
  		/*
  		 * Now that we scanned all vmas we can already tell
  		 * userland which ioctls methods are guaranteed to
  		 * succeed on this range.
  		 */
  		if (put_user(basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
  			     UFFD_API_RANGE_IOCTLS,
  			     &user_uffdio_register->ioctls))
  			ret = -EFAULT;
  	}
  out:
  	return ret;
  }
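
  /*
   * Userspace view (informal sketch): registration is driven by the
   * UFFDIO_REGISTER ioctl on an uffd that already completed the
   * UFFDIO_API handshake, e.g.:
   *
   *	struct uffdio_register reg = {
   *		.range = { .start = (unsigned long)addr, .len = len },
   *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
   *	};
   *	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == 0)
   *		check(reg.ioctls);	// which range ioctls will work
   *
   * addr, len and check() are placeholders; the range must be page
   * aligned (huge page aligned for hugetlbfs vmas, as enforced above).
   */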
  
  static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
  				  unsigned long arg)
  {
  	struct mm_struct *mm = ctx->mm;
  	struct vm_area_struct *vma, *prev, *cur;
  	int ret;
  	struct uffdio_range uffdio_unregister;
  	unsigned long new_flags;
  	bool found;
  	unsigned long start, end, vma_end;
  	const void __user *buf = (void __user *)arg;
  
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
  		goto out;
  	ret = validate_range(mm, &uffdio_unregister.start,
  			     uffdio_unregister.len);
  	if (ret)
  		goto out;
  
  	start = uffdio_unregister.start;
  	end = start + uffdio_unregister.len;
  	ret = -ENOMEM;
  	if (!mmget_not_zero(mm))
  		goto out;
  	down_write(&mm->mmap_sem);
  	if (!mmget_still_valid(mm))
  		goto out_unlock;
  	vma = find_vma_prev(mm, start, &prev);
  	if (!vma)
  		goto out_unlock;
  
  	/* check that there's at least one vma in the range */
  	ret = -EINVAL;
  	if (vma->vm_start >= end)
  		goto out_unlock;
  
  	/*
  	 * If the first vma contains huge pages, make sure start address
  	 * is aligned to huge page size.
  	 */
  	if (is_vm_hugetlb_page(vma)) {
  		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);
  
  		if (start & (vma_hpagesize - 1))
  			goto out_unlock;
  	}
  
  	/*
  	 * Search for incompatible vmas.
  	 */
  	found = false;
  	ret = -EINVAL;
  	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
  		cond_resched();
  
  		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
  		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
  
  		/*
  		 * Check for incompatible vmas. This is not strictly
  		 * required here, as incompatible vmas cannot have a
  		 * userfaultfd_ctx registered on them, but it provides
  		 * stricter behavior so unregistration errors are
  		 * noticed.
  		 */
  		if (!vma_can_userfault(cur))
  			goto out_unlock;
  
  		found = true;
  	}
  	BUG_ON(!found);
  
  	if (vma->vm_start < start)
  		prev = vma;
  
  	ret = 0;
  	do {
  		cond_resched();
  		BUG_ON(!vma_can_userfault(vma));
  
  		/*
  		 * Nothing to do: this vma is not registered with any
  		 * userfaultfd, so there is nothing to unregister.
  		 */
  		if (!vma->vm_userfaultfd_ctx.ctx)
  			goto skip;
  		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
  		if (vma->vm_start > start)
  			start = vma->vm_start;
  		vma_end = min(end, vma->vm_end);
  		if (userfaultfd_missing(vma)) {
  			/*
  			 * Wake any concurrent pending userfault while
  			 * we unregister, so they will not hang
  			 * permanently, and it spares userland from
  			 * having to call UFFDIO_WAKE explicitly.
  			 */
  			struct userfaultfd_wake_range range;
  			range.start = start;
  			range.len = vma_end - start;
  			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
  		}
  		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
  		prev = vma_merge(mm, prev, start, vma_end, new_flags,
  				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
  				 vma_policy(vma),
  				 NULL_VM_UFFD_CTX);
  		if (prev) {
  			vma = prev;
  			goto next;
  		}
  		if (vma->vm_start < start) {
  			ret = split_vma(mm, vma, start, 1);
  			if (ret)
  				break;
  		}
  		if (vma->vm_end > end) {
  			ret = split_vma(mm, vma, end, 0);
  			if (ret)
  				break;
  		}
  	next:
  		/*
  		 * In the vma_merge() successful mprotect-like case 8:
  		 * the next vma was merged into the current one and
  		 * the current one has not been updated yet.
  		 */
  		vma->vm_flags = new_flags;
  		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
  
  	skip:
  		prev = vma;
  		start = vma->vm_end;
  		vma = vma->vm_next;
  	} while (vma && vma->vm_start < end);
  out_unlock:
  	up_write(&mm->mmap_sem);
  	mmput(mm);
  out:
  	return ret;
  }
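
  /*
   * Userspace view (informal sketch): a registration is undone with
   *
   *	struct uffdio_range range = {
   *		.start = (unsigned long)addr,
   *		.len   = len,
   *	};
   *	ioctl(uffd, UFFDIO_UNREGISTER, &range);
   *
   * addr/len are placeholders for a previously registered range.
   */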
  
  /*
   * userfaultfd_wake may be used in combination with the
   * UFFDIO_*_MODE_DONTWAKE flags to wake up userfaults in batches.
   */
  static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
  			    unsigned long arg)
  {
  	int ret;
  	struct uffdio_range uffdio_wake;
  	struct userfaultfd_wake_range range;
  	const void __user *buf = (void __user *)arg;
  
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
  		goto out;
  	ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len);
  	if (ret)
  		goto out;
  
  	range.start = uffdio_wake.start;
  	range.len = uffdio_wake.len;
  
  	/*
  	 * len == 0 means wake all and we don't want to wake all here,
  	 * so check it again to be sure.
  	 */
  	VM_BUG_ON(!range.len);
  
  	wake_userfault(ctx, &range);
  	ret = 0;
  
  out:
  	return ret;
  }
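
  /*
   * Userspace view (informal sketch): batch several UFFDIO_COPY calls
   * with UFFDIO_COPY_MODE_DONTWAKE set, then wake the whole range in
   * one go:
   *
   *	struct uffdio_range range = { .start = base, .len = total };
   *	ioctl(uffd, UFFDIO_WAKE, &range);
   *
   * base/total are placeholders covering the batched copies.
   */
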
  static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
  			    unsigned long arg)
  {
  	__s64 ret;
  	struct uffdio_copy uffdio_copy;
  	struct uffdio_copy __user *user_uffdio_copy;
  	struct userfaultfd_wake_range range;
  
  	user_uffdio_copy = (struct uffdio_copy __user *) arg;
  	ret = -EAGAIN;
  	if (READ_ONCE(ctx->mmap_changing))
  		goto out;
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
  			   /* don't copy "copy" last field */
  			   sizeof(uffdio_copy)-sizeof(__s64)))
  		goto out;
  	ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len);
  	if (ret)
  		goto out;
  	/*
  	 * double check for wraparound just in case. copy_from_user()
  	 * will later check that uffdio_copy.src + uffdio_copy.len
  	 * fits in the userland range.
  	 */
  	ret = -EINVAL;
  	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
  		goto out;
  	if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE)
  		goto out;
  	if (mmget_not_zero(ctx->mm)) {
  		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
  				   uffdio_copy.len, &ctx->mmap_changing);
  		mmput(ctx->mm);
  	} else {
  		return -ESRCH;
  	}
  	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
  		return -EFAULT;
  	if (ret < 0)
  		goto out;
  	BUG_ON(!ret);
  	/* len == 0 would wake all */
  	range.len = ret;
  	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
  		range.start = uffdio_copy.dst;
  		wake_userfault(ctx, &range);
  	}
  	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
  out:
  	return ret;
  }
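
  /*
   * Userspace view (informal sketch): resolve a missing-page fault by
   * copying a prepared page into the faulting range:
   *
   *	struct uffdio_copy copy = {
   *		.dst  = fault_addr & ~(page_size - 1),
   *		.src  = (unsigned long)page_buf,
   *		.len  = page_size,
   *		.mode = 0,
   *	};
   *	ioctl(uffd, UFFDIO_COPY, &copy);
   *
   * fault_addr, page_buf and page_size are placeholders; copy.copy is
   * filled with the number of bytes actually copied (or a negative
   * error), mirroring the put_user(ret, ...) above.
   */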
  
  static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
  				unsigned long arg)
  {
  	__s64 ret;
  	struct uffdio_zeropage uffdio_zeropage;
  	struct uffdio_zeropage __user *user_uffdio_zeropage;
  	struct userfaultfd_wake_range range;
  
  	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
  	ret = -EAGAIN;
  	if (READ_ONCE(ctx->mmap_changing))
  		goto out;
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
  			   /* don't copy "zeropage" last field */
  			   sizeof(uffdio_zeropage)-sizeof(__s64)))
  		goto out;
  	ret = validate_range(ctx->mm, &uffdio_zeropage.range.start,
  			     uffdio_zeropage.range.len);
  	if (ret)
  		goto out;
  	ret = -EINVAL;
  	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
  		goto out;
  	if (mmget_not_zero(ctx->mm)) {
  		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
  				     uffdio_zeropage.range.len,
  				     &ctx->mmap_changing);
  		mmput(ctx->mm);
  	} else {
  		return -ESRCH;
  	}
  	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
  		return -EFAULT;
  	if (ret < 0)
  		goto out;
  	/* len == 0 would wake all */
  	BUG_ON(!ret);
  	range.len = ret;
  	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
  		range.start = uffdio_zeropage.range.start;
  		wake_userfault(ctx, &range);
  	}
  	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
  out:
  	return ret;
  }
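
  /*
   * Userspace view (informal sketch): map the shared zeropage instead
   * of copying data:
   *
   *	struct uffdio_zeropage zp = {
   *		.range = { .start = fault_addr & ~(page_size - 1),
   *			   .len   = page_size },
   *		.mode  = 0,
   *	};
   *	ioctl(uffd, UFFDIO_ZEROPAGE, &zp);
   *
   * fault_addr/page_size are placeholders.
   */
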
  static inline unsigned int uffd_ctx_features(__u64 user_features)
  {
  	/*
  	 * For the current set of features the bits just coincide
  	 */
  	return (unsigned int)user_features;
  }
  /*
   * userland asks for a certain API version and we return which bits
   * and ioctl commands are implemented in this kernel for such API
   * version or -EINVAL if unknown.
   */
  static int userfaultfd_api(struct userfaultfd_ctx *ctx,
  			   unsigned long arg)
  {
  	struct uffdio_api uffdio_api;
  	void __user *buf = (void __user *)arg;
  	int ret;
  	__u64 features;
  
  	ret = -EINVAL;
  	if (ctx->state != UFFD_STATE_WAIT_API)
  		goto out;
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
  		goto out;
  	features = uffdio_api.features;
  	ret = -EINVAL;
  	if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
  		goto err_out;
  	ret = -EPERM;
  	if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
  		goto err_out;
  	/* report all available features and ioctls to userland */
  	uffdio_api.features = UFFD_API_FEATURES;
  	uffdio_api.ioctls = UFFD_API_IOCTLS;
  	ret = -EFAULT;
  	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
  		goto out;
  	ctx->state = UFFD_STATE_RUNNING;
  	/* only enable the requested features for this uffd context */
  	ctx->features = uffd_ctx_features(features);
  	ret = 0;
  out:
  	return ret;
  err_out:
  	memset(&uffdio_api, 0, sizeof(uffdio_api));
  	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
  		ret = -EFAULT;
  	goto out;
  }
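
  /*
   * Userspace view (informal sketch): the handshake must happen before
   * any other ioctl on the fd:
   *
   *	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
   *	if (ioctl(uffd, UFFDIO_API, &api) == -1)
   *		return -1;	// unsupported kernel or API mismatch
   *
   * On success api.features and api.ioctls report what this kernel
   * supports, as filled in above.
   */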
  
  static long userfaultfd_ioctl(struct file *file, unsigned cmd,
  			      unsigned long arg)
  {
  	int ret = -EINVAL;
  	struct userfaultfd_ctx *ctx = file->private_data;
  	if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
  		return -EINVAL;
  	switch(cmd) {
  	case UFFDIO_API:
  		ret = userfaultfd_api(ctx, arg);
  		break;
  	case UFFDIO_REGISTER:
  		ret = userfaultfd_register(ctx, arg);
  		break;
  	case UFFDIO_UNREGISTER:
  		ret = userfaultfd_unregister(ctx, arg);
  		break;
  	case UFFDIO_WAKE:
  		ret = userfaultfd_wake(ctx, arg);
  		break;
  	case UFFDIO_COPY:
  		ret = userfaultfd_copy(ctx, arg);
  		break;
  	case UFFDIO_ZEROPAGE:
  		ret = userfaultfd_zeropage(ctx, arg);
  		break;
  	}
  	return ret;
  }
  
  #ifdef CONFIG_PROC_FS
  static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
  {
  	struct userfaultfd_ctx *ctx = f->private_data;
  	wait_queue_entry_t *wq;
  	unsigned long pending = 0, total = 0;
  	spin_lock_irq(&ctx->fault_pending_wqh.lock);
  	list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
  		pending++;
  		total++;
  	}
  	list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
  		total++;
  	}
  	spin_unlock_irq(&ctx->fault_pending_wqh.lock);
  
  	/*
  	 * If more protocols are added, they will all be shown
  	 * separated by a space, like this:
  	 *	protocols: aa:... bb:...
  	 */
  	seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
  		   pending, total, UFFD_API, ctx->features,
  		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
  }
  #endif
  
  static const struct file_operations userfaultfd_fops = {
  #ifdef CONFIG_PROC_FS
  	.show_fdinfo	= userfaultfd_show_fdinfo,
  #endif
  	.release	= userfaultfd_release,
  	.poll		= userfaultfd_poll,
  	.read		= userfaultfd_read,
  	.unlocked_ioctl = userfaultfd_ioctl,
  	.compat_ioctl	= userfaultfd_ioctl,
  	.llseek		= noop_llseek,
  };
  static void init_once_userfaultfd_ctx(void *mem)
  {
  	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
  
  	init_waitqueue_head(&ctx->fault_pending_wqh);
  	init_waitqueue_head(&ctx->fault_wqh);
  	init_waitqueue_head(&ctx->event_wqh);
  	init_waitqueue_head(&ctx->fd_wqh);
  	seqcount_init(&ctx->refile_seq);
  }
  SYSCALL_DEFINE1(userfaultfd, int, flags)
  {
  	struct userfaultfd_ctx *ctx;
  	int fd;

  	if (!sysctl_unprivileged_userfaultfd && !capable(CAP_SYS_PTRACE))
  		return -EPERM;
  	BUG_ON(!current->mm);
  
  	/* Check the UFFD_* constants for consistency.  */
  	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
  	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
  	if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
  		return -EINVAL;

  	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
  	if (!ctx)
  		return -ENOMEM;

  	refcount_set(&ctx->refcount, 1);
  	ctx->flags = flags;
  	ctx->features = 0;
  	ctx->state = UFFD_STATE_WAIT_API;
  	ctx->released = false;
  	ctx->mmap_changing = false;
  	ctx->mm = current->mm;
  	/* prevent the mm struct from being freed */
  	mmgrab(ctx->mm);

  	fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
  			      O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
  	if (fd < 0) {
  		mmdrop(ctx->mm);
  		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
  	}
  	return fd;
  }
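
  /*
   * Userspace view (informal sketch): the fd is commonly obtained via
   * syscall(2), since libc wrappers are not always available:
   *
   *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
   *
   * followed by the UFFDIO_API handshake shown earlier.
   */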
  
  static int __init userfaultfd_init(void)
  {
  	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
  						sizeof(struct userfaultfd_ctx),
  						0,
  						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
  						init_once_userfaultfd_ctx);
  	return 0;
  }
  __initcall(userfaultfd_init);