  /*
   *  fs/userfaultfd.c
   *
   *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
   *  Copyright (C) 2008-2009 Red Hat, Inc.
   *  Copyright (C) 2015  Red Hat, Inc.
   *
   *  This work is licensed under the terms of the GNU GPL, version 2. See
   *  the COPYING file in the top-level directory.
   *
   *  Some part derived from fs/eventfd.c (anon inode setup) and
   *  mm/ksm.c (mm hashing).
   */
  
  #include <linux/hashtable.h>
  #include <linux/sched.h>
  #include <linux/mm.h>
  #include <linux/poll.h>
  #include <linux/slab.h>
  #include <linux/seq_file.h>
  #include <linux/file.h>
  #include <linux/bug.h>
  #include <linux/anon_inodes.h>
  #include <linux/syscalls.h>
  #include <linux/userfaultfd_k.h>
  #include <linux/mempolicy.h>
  #include <linux/ioctl.h>
  #include <linux/security.h>
  static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;
  enum userfaultfd_state {
  	UFFD_STATE_WAIT_API,
  	UFFD_STATE_RUNNING,
  };
  /*
   * Start with fault_pending_wqh and fault_wqh so they're more likely
   * to be in the same cacheline.
   */
  struct userfaultfd_ctx {
  	/* waitqueue head for the pending (i.e. not read) userfaults */
  	wait_queue_head_t fault_pending_wqh;
  	/* waitqueue head for the userfaults */
  	wait_queue_head_t fault_wqh;
  	/* waitqueue head for the pseudo fd to wakeup poll/read */
  	wait_queue_head_t fd_wqh;
  	/* a refile sequence protected by fault_pending_wqh lock */
  	struct seqcount refile_seq;
  	/* pseudo fd refcounting */
  	atomic_t refcount;
  	/* userfaultfd syscall flags */
  	unsigned int flags;
  	/* state machine */
  	enum userfaultfd_state state;
  	/* released */
  	bool released;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
  	struct mm_struct *mm;
  };
  
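/*
 * One queued userfault. The structure lives on the faulting task's
 * kernel stack (see handle_userfault()) and sits in fault_pending_wqh
 * until userfaultfd_ctx_read() refiles it to fault_wqh.
 */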
  struct userfaultfd_wait_queue {
  	struct uffd_msg msg;
  	wait_queue_t wq;
  	struct userfaultfd_ctx *ctx;
  };
  
  struct userfaultfd_wake_range {
  	unsigned long start;
  	unsigned long len;
  };
  
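/*
 * Wake callback for the fault_*wqh waitqueues: wake a waiter only if
 * its faulting address falls in [range->start, range->start + range->len)
 * (len == 0 means wake all), and autoremove it from the waitqueue if
 * the wakeup succeeded.
 */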
  static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
  				     int wake_flags, void *key)
  {
  	struct userfaultfd_wake_range *range = key;
  	int ret;
  	struct userfaultfd_wait_queue *uwq;
  	unsigned long start, len;
  
  	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
  	ret = 0;
  	/* len == 0 means wake all */
  	start = range->start;
  	len = range->len;
  	if (len && (start > uwq->msg.arg.pagefault.address ||
  		    start + len <= uwq->msg.arg.pagefault.address))
  		goto out;
  	ret = wake_up_state(wq->private, mode);
  	if (ret)
  		/*
  		 * Wake only once, autoremove behavior.
  		 *
  		 * After the effect of list_del_init is visible to the
  		 * other CPUs, the waitqueue may disappear from under
  		 * us, see the !list_empty_careful() in
  		 * handle_userfault(). try_to_wake_up() has an
  		 * implicit smp_mb__before_spinlock, and the
  		 * wq->private is read before calling the extern
  		 * function "wake_up_state" (which in turns calls
  		 * try_to_wake_up). While the spin_lock;spin_unlock;
  		 * wouldn't be enough, the smp_mb__before_spinlock is
  		 * enough to avoid an explicit smp_mb() here.
  		 */
  		list_del_init(&wq->task_list);
  out:
  	return ret;
  }
  
  /**
   * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
   * context.
   * @ctx: [in] Pointer to the userfaultfd context.
   *
 * The refcount of @ctx must be non-zero when this is called; BUG()s otherwise.
   */
  static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
  {
  	if (!atomic_inc_not_zero(&ctx->refcount))
  		BUG();
  }
  
  /**
   * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
   * context.
   * @ctx: [in] Pointer to userfaultfd context.
   *
   * The userfaultfd context reference must have been previously acquired either
   * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
   */
  static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
  {
  	if (atomic_dec_and_test(&ctx->refcount)) {
  		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
  		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
  		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
  		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
  		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
  		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
  		mmdrop(ctx->mm);
  		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
  	}
  }
  static inline void msg_init(struct uffd_msg *msg)
  {
  	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
  	/*
  	 * Must use memset to zero out the paddings or kernel data is
  	 * leaked to userland.
  	 */
  	memset(msg, 0, sizeof(struct uffd_msg));
  }
  
  static inline struct uffd_msg userfault_msg(unsigned long address,
  					    unsigned int flags,
  					    unsigned long reason)
  {
  	struct uffd_msg msg;
  	msg_init(&msg);
  	msg.event = UFFD_EVENT_PAGEFAULT;
  	msg.arg.pagefault.address = address;
  	if (flags & FAULT_FLAG_WRITE)
  		/*
  		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WRITE was set in the
  		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE
  		 * was not set in a UFFD_EVENT_PAGEFAULT, it means it
  		 * was a read fault, otherwise if set it means it's
  		 * a write fault.
  		 */
  		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
  	if (reason & VM_UFFD_WP)
  		/*
  		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
  		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was
  		 * not set in a UFFD_EVENT_PAGEFAULT, it means it was
  		 * a missing fault, otherwise if set it means it's a
  		 * write protect fault.
  		 */
  		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
  	return msg;
  }
  
  /*
 * Verify the pagetables are still not ok after having registered into
   * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
   * userfault that has already been resolved, if userfaultfd_read and
   * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
   * threads.
   */
  static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
  					 unsigned long address,
  					 unsigned long flags,
  					 unsigned long reason)
  {
  	struct mm_struct *mm = ctx->mm;
  	pgd_t *pgd;
  	pud_t *pud;
  	pmd_t *pmd, _pmd;
  	pte_t *pte;
  	bool ret = true;
  
  	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
  
  	pgd = pgd_offset(mm, address);
  	if (!pgd_present(*pgd))
  		goto out;
  	pud = pud_offset(pgd, address);
  	if (!pud_present(*pud))
  		goto out;
  	pmd = pmd_offset(pud, address);
  	/*
  	 * READ_ONCE must function as a barrier with narrower scope
  	 * and it must be equivalent to:
  	 *	_pmd = *pmd; barrier();
  	 *
  	 * This is to deal with the instability (as in
  	 * pmd_trans_unstable) of the pmd.
  	 */
  	_pmd = READ_ONCE(*pmd);
  	if (!pmd_present(_pmd))
  		goto out;
  
  	ret = false;
  	if (pmd_trans_huge(_pmd))
  		goto out;
  
  	/*
  	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
  	 * and use the standard pte_offset_map() instead of parsing _pmd.
  	 */
  	pte = pte_offset_map(pmd, address);
  	/*
  	 * Lockless access: we're in a wait_event so it's ok if it
  	 * changes under us.
  	 */
  	if (pte_none(*pte))
  		ret = true;
  	pte_unmap(pte);
  
  out:
  	return ret;
  }
  
  /*
   * The locking rules involved in returning VM_FAULT_RETRY depending on
   * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
   * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
   * recommendation in __lock_page_or_retry is not an understatement.
   *
   * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
   * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
   * not set.
   *
   * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
   * set, VM_FAULT_RETRY can still be returned if and only if there are
   * fatal_signal_pending()s, and the mmap_sem must be released before
   * returning it.
   */
  int handle_userfault(struct fault_env *fe, unsigned long reason)
  {
  	struct mm_struct *mm = fe->vma->vm_mm;
  	struct userfaultfd_ctx *ctx;
  	struct userfaultfd_wait_queue uwq;
  	int ret;
  	bool must_wait, return_to_userland;
  
  	BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
  	ret = VM_FAULT_SIGBUS;
  	ctx = fe->vma->vm_userfaultfd_ctx.ctx;
  	if (!ctx)
  		goto out;
  
  	BUG_ON(ctx->mm != mm);
  
  	VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
  	VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));
  
  	/*
	 * If it's already released don't get it. This avoids looping
  	 * in __get_user_pages if userfaultfd_release waits on the
  	 * caller of handle_userfault to release the mmap_sem.
  	 */
  	if (unlikely(ACCESS_ONCE(ctx->released)))
  		goto out;
  
  	/*
  	 * We don't do userfault handling for the final child pid update.
  	 */
  	if (current->flags & PF_EXITING)
  		goto out;
  
  	/*
  	 * Check that we can return VM_FAULT_RETRY.
  	 *
  	 * NOTE: it should become possible to return VM_FAULT_RETRY
  	 * even if FAULT_FLAG_TRIED is set without leading to gup()
  	 * -EBUSY failures, if the userfaultfd is to be extended for
  	 * VM_UFFD_WP tracking and we intend to arm the userfault
  	 * without first stopping userland access to the memory. For
  	 * VM_UFFD_MISSING userfaults this is enough for now.
  	 */
  	if (unlikely(!(fe->flags & FAULT_FLAG_ALLOW_RETRY))) {
  		/*
  		 * Validate the invariant that nowait must allow retry
  		 * to be sure not to return SIGBUS erroneously on
  		 * nowait invocations.
  		 */
  		BUG_ON(fe->flags & FAULT_FLAG_RETRY_NOWAIT);
  #ifdef CONFIG_DEBUG_VM
  		if (printk_ratelimit()) {
  			printk(KERN_WARNING
  			       "FAULT_FLAG_ALLOW_RETRY missing %x
  ", fe->flags);
  			dump_stack();
  		}
  #endif
  		goto out;
  	}
  
  	/*
  	 * Handle nowait, not much to do other than tell it to retry
  	 * and wait.
  	 */
  	ret = VM_FAULT_RETRY;
  	if (fe->flags & FAULT_FLAG_RETRY_NOWAIT)
  		goto out;
  
  	/* take the reference before dropping the mmap_sem */
  	userfaultfd_ctx_get(ctx);
  	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
  	uwq.wq.private = current;
  	uwq.msg = userfault_msg(fe->address, fe->flags, reason);
  	uwq.ctx = ctx;
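
	/*
	 * Faults raised directly from userland (FAULT_FLAG_USER plus
	 * FAULT_FLAG_KILLABLE) may sleep in TASK_INTERRUPTIBLE and
	 * return to userland to retry the access; in-kernel faults
	 * (e.g. gup) instead wait in TASK_KILLABLE until resolved.
	 */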
  	return_to_userland =
  		(fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
  		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
  	spin_lock(&ctx->fault_pending_wqh.lock);
  	/*
  	 * After the __add_wait_queue the uwq is visible to userland
  	 * through poll/read().
  	 */
  	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
  	/*
  	 * The smp_mb() after __set_current_state prevents the reads
  	 * following the spin_unlock to happen before the list_add in
  	 * __add_wait_queue.
  	 */
  	set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
  			  TASK_KILLABLE);
  	spin_unlock(&ctx->fault_pending_wqh.lock);

  	must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason);
  	up_read(&mm->mmap_sem);
  
  	if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
  		   (return_to_userland ? !signal_pending(current) :
  		    !fatal_signal_pending(current)))) {
  		wake_up_poll(&ctx->fd_wqh, POLLIN);
  		schedule();
  		ret |= VM_FAULT_MAJOR;
  	}

  	__set_current_state(TASK_RUNNING);

  	if (return_to_userland) {
  		if (signal_pending(current) &&
  		    !fatal_signal_pending(current)) {
  			/*
  			 * If we got a SIGSTOP or SIGCONT and this is
  			 * a normal userland page fault, just let
  			 * userland return so the signal will be
  			 * handled and gdb debugging works.  The page
  			 * fault code immediately after we return from
  			 * this function is going to release the
  			 * mmap_sem and it's not depending on it
  			 * (unlike gup would if we were not to return
  			 * VM_FAULT_RETRY).
  			 *
  			 * If a fatal signal is pending we still take
  			 * the streamlined VM_FAULT_RETRY failure path
  			 * and there's no need to retake the mmap_sem
  			 * in such case.
  			 */
  			down_read(&mm->mmap_sem);
  			ret = 0;
  		}
  	}
  	/*
  	 * Here we race with the list_del; list_add in
  	 * userfaultfd_ctx_read(), however because we don't ever run
  	 * list_del_init() to refile across the two lists, the prev
  	 * and next pointers will never point to self. list_add also
  	 * would never let any of the two pointers to point to
  	 * self. So list_empty_careful won't risk to see both pointers
  	 * pointing to self at any time during the list refile. The
  	 * only case where list_del_init() is called is the full
  	 * removal in the wake function and there we don't re-list_add
  	 * and it's fine not to block on the spinlock. The uwq on this
  	 * kernel stack can be released after the list_del_init.
  	 */
  	if (!list_empty_careful(&uwq.wq.task_list)) {
  		spin_lock(&ctx->fault_pending_wqh.lock);
  		/*
  		 * No need of list_del_init(), the uwq on the stack
  		 * will be freed shortly anyway.
  		 */
  		list_del(&uwq.wq.task_list);
  		spin_unlock(&ctx->fault_pending_wqh.lock);
  	}
  
  	/*
  	 * ctx may go away after this if the userfault pseudo fd is
  	 * already released.
  	 */
  	userfaultfd_ctx_put(ctx);
  out:
  	return ret;
  }
  
  static int userfaultfd_release(struct inode *inode, struct file *file)
  {
  	struct userfaultfd_ctx *ctx = file->private_data;
  	struct mm_struct *mm = ctx->mm;
  	struct vm_area_struct *vma, *prev;
  	/* len == 0 means wake all */
  	struct userfaultfd_wake_range range = { .len = 0, };
  	unsigned long new_flags;
  
  	ACCESS_ONCE(ctx->released) = true;
  	if (!mmget_not_zero(mm))
  		goto wakeup;
  	/*
  	 * Flush page faults out of all CPUs. NOTE: all page faults
  	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vm_userfaultfd_ctx
  	 * changes while handle_userfault released the mmap_sem. So
  	 * it's critical that released is set to true (above), before
  	 * taking the mmap_sem for writing.
  	 */
  	down_write(&mm->mmap_sem);
  	prev = NULL;
  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
  		cond_resched();
  		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
  		       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
  		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
  			prev = vma;
  			continue;
  		}
  		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
  		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
  				 new_flags, vma->anon_vma,
  				 vma->vm_file, vma->vm_pgoff,
  				 vma_policy(vma),
  				 NULL_VM_UFFD_CTX);
  		if (prev)
  			vma = prev;
  		else
  			prev = vma;
  		vma->vm_flags = new_flags;
  		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
  	}
  	up_write(&mm->mmap_sem);
  	mmput(mm);
  wakeup:
  	/*
  	 * After no new page faults can wait on this fault_*wqh, flush
  	 * the last page faults that may have been already waiting on
  	 * the fault_*wqh.
  	 */
  	spin_lock(&ctx->fault_pending_wqh.lock);
  	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
  	__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
  	spin_unlock(&ctx->fault_pending_wqh.lock);
  
  	wake_up_poll(&ctx->fd_wqh, POLLHUP);
  	userfaultfd_ctx_put(ctx);
  	return 0;
  }
/* fault_pending_wqh.lock must be held by the caller */
  static inline struct userfaultfd_wait_queue *find_userfault(
  	struct userfaultfd_ctx *ctx)
  {
  	wait_queue_t *wq;
  	struct userfaultfd_wait_queue *uwq;

  	VM_BUG_ON(!spin_is_locked(&ctx->fault_pending_wqh.lock));

  	uwq = NULL;
  	if (!waitqueue_active(&ctx->fault_pending_wqh))
  		goto out;
  	/* walk in reverse to provide FIFO behavior to read userfaults */
  	wq = list_last_entry(&ctx->fault_pending_wqh.task_list,
  			     typeof(*wq), task_list);
  	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
  out:
  	return uwq;
  }
  
  static unsigned int userfaultfd_poll(struct file *file, poll_table *wait)
  {
  	struct userfaultfd_ctx *ctx = file->private_data;
  	unsigned int ret;
  
  	poll_wait(file, &ctx->fd_wqh, wait);
  
  	switch (ctx->state) {
  	case UFFD_STATE_WAIT_API:
  		return POLLERR;
  	case UFFD_STATE_RUNNING:
  		/*
  		 * poll() never guarantees that read won't block.
		 * userfaults can be woken before they're read().
  		 */
  		if (unlikely(!(file->f_flags & O_NONBLOCK)))
  			return POLLERR;
  		/*
  		 * lockless access to see if there are pending faults
  		 * __pollwait last action is the add_wait_queue but
  		 * the spin_unlock would allow the waitqueue_active to
  		 * pass above the actual list_add inside
  		 * add_wait_queue critical section. So use a full
  		 * memory barrier to serialize the list_add write of
  		 * add_wait_queue() with the waitqueue_active read
  		 * below.
  		 */
  		ret = 0;
  		smp_mb();
  		if (waitqueue_active(&ctx->fault_pending_wqh))
  			ret = POLLIN;
  		return ret;
  	default:
  		BUG();
  	}
  }
  
  static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
  				    struct uffd_msg *msg)
  {
  	ssize_t ret;
  	DECLARE_WAITQUEUE(wait, current);
  	struct userfaultfd_wait_queue *uwq;

  	/* always take the fd_wqh lock before the fault_pending_wqh lock */
  	spin_lock(&ctx->fd_wqh.lock);
  	__add_wait_queue(&ctx->fd_wqh, &wait);
  	for (;;) {
  		set_current_state(TASK_INTERRUPTIBLE);
  		spin_lock(&ctx->fault_pending_wqh.lock);
  		uwq = find_userfault(ctx);
  		if (uwq) {
  			/*
  			 * Use a seqcount to repeat the lockless check
  			 * in wake_userfault() to avoid missing
  			 * wakeups because during the refile both
  			 * waitqueue could become empty if this is the
  			 * only userfault.
  			 */
  			write_seqcount_begin(&ctx->refile_seq);
  
  			/*
  			 * The fault_pending_wqh.lock prevents the uwq
			 * from disappearing from under us.
  			 *
  			 * Refile this userfault from
  			 * fault_pending_wqh to fault_wqh, it's not
  			 * pending anymore after we read it.
  			 *
  			 * Use list_del() by hand (as
  			 * userfaultfd_wake_function also uses
  			 * list_del_init() by hand) to be sure nobody
  			 * changes __remove_wait_queue() to use
  			 * list_del_init() in turn breaking the
  			 * !list_empty_careful() check in
  			 * handle_userfault(). The uwq->wq.task_list
  			 * must never be empty at any time during the
  			 * refile, or the waitqueue could disappear
  			 * from under us. The "wait_queue_head_t"
  			 * parameter of __remove_wait_queue() is unused
  			 * anyway.
  			 */
  			list_del(&uwq->wq.task_list);
  			__add_wait_queue(&ctx->fault_wqh, &uwq->wq);
  			write_seqcount_end(&ctx->refile_seq);
  			/* careful to always initialize msg if ret == 0 */
  			*msg = uwq->msg;
  			spin_unlock(&ctx->fault_pending_wqh.lock);
  			ret = 0;
  			break;
  		}
  		spin_unlock(&ctx->fault_pending_wqh.lock);
  		if (signal_pending(current)) {
  			ret = -ERESTARTSYS;
  			break;
  		}
  		if (no_wait) {
  			ret = -EAGAIN;
  			break;
  		}
  		spin_unlock(&ctx->fd_wqh.lock);
  		schedule();
  		spin_lock(&ctx->fd_wqh.lock);
  	}
  	__remove_wait_queue(&ctx->fd_wqh, &wait);
  	__set_current_state(TASK_RUNNING);
  	spin_unlock(&ctx->fd_wqh.lock);
  
  	return ret;
  }
  
  static ssize_t userfaultfd_read(struct file *file, char __user *buf,
  				size_t count, loff_t *ppos)
  {
  	struct userfaultfd_ctx *ctx = file->private_data;
  	ssize_t _ret, ret = 0;
  	struct uffd_msg msg;
  	int no_wait = file->f_flags & O_NONBLOCK;
  
  	if (ctx->state == UFFD_STATE_WAIT_API)
  		return -EINVAL;
  
  	for (;;) {
  		if (count < sizeof(msg))
  			return ret ? ret : -EINVAL;
  		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
  		if (_ret < 0)
  			return ret ? ret : _ret;
  		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
  			return ret ? ret : -EFAULT;
  		ret += sizeof(msg);
  		buf += sizeof(msg);
  		count -= sizeof(msg);
  		/*
		 * Allow reading more than one fault at a time but only
  		 * block if waiting for the very first one.
  		 */
  		no_wait = O_NONBLOCK;
  	}
  }
  
  static void __wake_userfault(struct userfaultfd_ctx *ctx,
  			     struct userfaultfd_wake_range *range)
  {
  	unsigned long start, end;
  
  	start = range->start;
  	end = range->start + range->len;
  	spin_lock(&ctx->fault_pending_wqh.lock);
  	/* wake all in the range and autoremove */
  	if (waitqueue_active(&ctx->fault_pending_wqh))
  		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
  				     range);
  	if (waitqueue_active(&ctx->fault_wqh))
  		__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
  	spin_unlock(&ctx->fault_pending_wqh.lock);
  }
  
  static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
  					   struct userfaultfd_wake_range *range)
  {
  	unsigned seq;
  	bool need_wakeup;
  	/*
  	 * To be sure waitqueue_active() is not reordered by the CPU
  	 * before the pagetable update, use an explicit SMP memory
  	 * barrier here. PT lock release or up_read(mmap_sem) still
  	 * have release semantics that can allow the
  	 * waitqueue_active() to be reordered before the pte update.
  	 */
  	smp_mb();
  
  	/*
  	 * Use waitqueue_active because it's very frequent to
  	 * change the address space atomically even if there are no
  	 * userfaults yet. So we take the spinlock only when we're
	 * sure we have userfaults to wake.
  	 */
  	do {
  		seq = read_seqcount_begin(&ctx->refile_seq);
  		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
  			waitqueue_active(&ctx->fault_wqh);
  		cond_resched();
  	} while (read_seqcount_retry(&ctx->refile_seq, seq));
  	if (need_wakeup)
  		__wake_userfault(ctx, range);
  }
  
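/*
 * Sanity check a userland supplied range: it must be page aligned, non
 * empty and entirely below the task size (and above mmap_min_addr).
 */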
  static __always_inline int validate_range(struct mm_struct *mm,
  					  __u64 start, __u64 len)
  {
  	__u64 task_size = mm->task_size;
  
  	if (start & ~PAGE_MASK)
  		return -EINVAL;
  	if (len & ~PAGE_MASK)
  		return -EINVAL;
  	if (!len)
  		return -EINVAL;
  	if (start < mmap_min_addr)
  		return -EINVAL;
  	if (start >= task_size)
  		return -EINVAL;
  	if (len > task_size - start)
  		return -EINVAL;
  	return 0;
  }
  
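/*
 * UFFDIO_REGISTER: after validating the userland supplied range,
 * split/merge the vmas so that the whole range is covered by vmas
 * carrying the requested VM_UFFD_* flags, with vm_userfaultfd_ctx
 * pointing to this ctx.
 */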
  static int userfaultfd_register(struct userfaultfd_ctx *ctx,
  				unsigned long arg)
  {
  	struct mm_struct *mm = ctx->mm;
  	struct vm_area_struct *vma, *prev, *cur;
  	int ret;
  	struct uffdio_register uffdio_register;
  	struct uffdio_register __user *user_uffdio_register;
  	unsigned long vm_flags, new_flags;
  	bool found;
  	unsigned long start, end, vma_end;
  
  	user_uffdio_register = (struct uffdio_register __user *) arg;
  
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_register, user_uffdio_register,
  			   sizeof(uffdio_register)-sizeof(__u64)))
  		goto out;
  
  	ret = -EINVAL;
  	if (!uffdio_register.mode)
  		goto out;
  	if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
  				     UFFDIO_REGISTER_MODE_WP))
  		goto out;
  	vm_flags = 0;
  	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
  		vm_flags |= VM_UFFD_MISSING;
  	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
  		vm_flags |= VM_UFFD_WP;
  		/*
  		 * FIXME: remove the below error constraint by
  		 * implementing the wprotect tracking mode.
  		 */
  		ret = -EINVAL;
  		goto out;
  	}
  
  	ret = validate_range(mm, uffdio_register.range.start,
  			     uffdio_register.range.len);
  	if (ret)
  		goto out;
  
  	start = uffdio_register.range.start;
  	end = start + uffdio_register.range.len;
  	ret = -ENOMEM;
  	if (!mmget_not_zero(mm))
  		goto out;
  	down_write(&mm->mmap_sem);
  	vma = find_vma_prev(mm, start, &prev);
  	if (!vma)
  		goto out_unlock;
  
  	/* check that there's at least one vma in the range */
  	ret = -EINVAL;
  	if (vma->vm_start >= end)
  		goto out_unlock;
  
  	/*
  	 * Search for not compatible vmas.
  	 *
  	 * FIXME: this shall be relaxed later so that it doesn't fail
  	 * on tmpfs backed vmas (in addition to the current allowance
  	 * on anonymous vmas).
  	 */
  	found = false;
  	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
  		cond_resched();
  
  		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
  		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
  
  		/* check not compatible vmas */
  		ret = -EINVAL;
  		if (cur->vm_ops)
  			goto out_unlock;
  
  		/*
  		 * Check that this vma isn't already owned by a
  		 * different userfaultfd. We can't allow more than one
  		 * userfaultfd to own a single vma simultaneously or we
  		 * wouldn't know which one to deliver the userfaults to.
  		 */
  		ret = -EBUSY;
  		if (cur->vm_userfaultfd_ctx.ctx &&
  		    cur->vm_userfaultfd_ctx.ctx != ctx)
  			goto out_unlock;
  
  		found = true;
  	}
  	BUG_ON(!found);
  
  	if (vma->vm_start < start)
  		prev = vma;
  
  	ret = 0;
  	do {
  		cond_resched();
  
  		BUG_ON(vma->vm_ops);
  		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
  		       vma->vm_userfaultfd_ctx.ctx != ctx);
  
  		/*
  		 * Nothing to do: this vma is already registered into this
  		 * userfaultfd and with the right tracking mode too.
  		 */
  		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
  		    (vma->vm_flags & vm_flags) == vm_flags)
  			goto skip;
  
  		if (vma->vm_start > start)
  			start = vma->vm_start;
  		vma_end = min(end, vma->vm_end);
  
  		new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
  		prev = vma_merge(mm, prev, start, vma_end, new_flags,
  				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
  				 vma_policy(vma),
  				 ((struct vm_userfaultfd_ctx){ ctx }));
  		if (prev) {
  			vma = prev;
  			goto next;
  		}
  		if (vma->vm_start < start) {
  			ret = split_vma(mm, vma, start, 1);
  			if (ret)
  				break;
  		}
  		if (vma->vm_end > end) {
  			ret = split_vma(mm, vma, end, 0);
  			if (ret)
  				break;
  		}
  	next:
  		/*
  		 * In the vma_merge() successful mprotect-like case 8:
  		 * the next vma was merged into the current one and
  		 * the current one has not been updated yet.
  		 */
  		vma->vm_flags = new_flags;
  		vma->vm_userfaultfd_ctx.ctx = ctx;
  
  	skip:
  		prev = vma;
  		start = vma->vm_end;
  		vma = vma->vm_next;
  	} while (vma && vma->vm_start < end);
  out_unlock:
  	up_write(&mm->mmap_sem);
  	mmput(mm);
  	if (!ret) {
  		/*
  		 * Now that we scanned all vmas we can already tell
  		 * userland which ioctls methods are guaranteed to
  		 * succeed on this range.
  		 */
  		if (put_user(UFFD_API_RANGE_IOCTLS,
  			     &user_uffdio_register->ioctls))
  			ret = -EFAULT;
  	}
  out:
  	return ret;
  }
  
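/*
 * UFFDIO_UNREGISTER: the mirror image of userfaultfd_register(); it
 * clears the VM_UFFD_* flags and the vm_userfaultfd_ctx from every vma
 * in the range, merging the vmas back together where possible.
 */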
  static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
  				  unsigned long arg)
  {
  	struct mm_struct *mm = ctx->mm;
  	struct vm_area_struct *vma, *prev, *cur;
  	int ret;
  	struct uffdio_range uffdio_unregister;
  	unsigned long new_flags;
  	bool found;
  	unsigned long start, end, vma_end;
  	const void __user *buf = (void __user *)arg;
  
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
  		goto out;
  
  	ret = validate_range(mm, uffdio_unregister.start,
  			     uffdio_unregister.len);
  	if (ret)
  		goto out;
  
  	start = uffdio_unregister.start;
  	end = start + uffdio_unregister.len;
  	ret = -ENOMEM;
  	if (!mmget_not_zero(mm))
  		goto out;
  	down_write(&mm->mmap_sem);
  	vma = find_vma_prev(mm, start, &prev);
  	if (!vma)
  		goto out_unlock;
  
  	/* check that there's at least one vma in the range */
  	ret = -EINVAL;
  	if (vma->vm_start >= end)
  		goto out_unlock;
  
  	/*
  	 * Search for not compatible vmas.
  	 *
  	 * FIXME: this shall be relaxed later so that it doesn't fail
  	 * on tmpfs backed vmas (in addition to the current allowance
  	 * on anonymous vmas).
  	 */
  	found = false;
  	ret = -EINVAL;
  	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
  		cond_resched();
  
  		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
  		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
  
  		/*
  		 * Check not compatible vmas, not strictly required
  		 * here as not compatible vmas cannot have an
  		 * userfaultfd_ctx registered on them, but this
  		 * provides for more strict behavior to notice
  		 * unregistration errors.
  		 */
  		if (cur->vm_ops)
  			goto out_unlock;
  
  		found = true;
  	}
  	BUG_ON(!found);
  
  	if (vma->vm_start < start)
  		prev = vma;
  
  	ret = 0;
  	do {
  		cond_resched();
  
  		BUG_ON(vma->vm_ops);
  
  		/*
		 * Nothing to do: this vma is not registered into any
		 * userfaultfd, so there is nothing to unregister here.
  		 */
  		if (!vma->vm_userfaultfd_ctx.ctx)
  			goto skip;
  
  		if (vma->vm_start > start)
  			start = vma->vm_start;
  		vma_end = min(end, vma->vm_end);
  
  		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
  		prev = vma_merge(mm, prev, start, vma_end, new_flags,
  				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
  				 vma_policy(vma),
  				 NULL_VM_UFFD_CTX);
  		if (prev) {
  			vma = prev;
  			goto next;
  		}
  		if (vma->vm_start < start) {
  			ret = split_vma(mm, vma, start, 1);
  			if (ret)
  				break;
  		}
  		if (vma->vm_end > end) {
  			ret = split_vma(mm, vma, end, 0);
  			if (ret)
  				break;
  		}
  	next:
  		/*
  		 * In the vma_merge() successful mprotect-like case 8:
  		 * the next vma was merged into the current one and
  		 * the current one has not been updated yet.
  		 */
  		vma->vm_flags = new_flags;
  		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
  
  	skip:
  		prev = vma;
  		start = vma->vm_end;
  		vma = vma->vm_next;
  	} while (vma && vma->vm_start < end);
  out_unlock:
  	up_write(&mm->mmap_sem);
  	mmput(mm);
  out:
  	return ret;
  }
  
  /*
   * userfaultfd_wake may be used in combination with the
 * UFFDIO_*_MODE_DONTWAKE to wake up userfaults in batches.
   */
  static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
  			    unsigned long arg)
  {
  	int ret;
  	struct uffdio_range uffdio_wake;
  	struct userfaultfd_wake_range range;
  	const void __user *buf = (void __user *)arg;
  
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
  		goto out;
  
  	ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
  	if (ret)
  		goto out;
  
  	range.start = uffdio_wake.start;
  	range.len = uffdio_wake.len;
  
  	/*
  	 * len == 0 means wake all and we don't want to wake all here,
  	 * so check it again to be sure.
  	 */
  	VM_BUG_ON(!range.len);
  
  	wake_userfault(ctx, &range);
  	ret = 0;
  
  out:
  	return ret;
  }
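
/*
 * UFFDIO_COPY: atomically fill the destination range with a copy of
 * the source range, then (unless UFFDIO_COPY_MODE_DONTWAKE is set)
 * wake the userfaults waiting on it. The number of bytes copied is
 * reported back through the "copy" field.
 */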
  static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
  			    unsigned long arg)
  {
  	__s64 ret;
  	struct uffdio_copy uffdio_copy;
  	struct uffdio_copy __user *user_uffdio_copy;
  	struct userfaultfd_wake_range range;
  
  	user_uffdio_copy = (struct uffdio_copy __user *) arg;
  
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_copy, user_uffdio_copy,
  			   /* don't copy "copy" last field */
  			   sizeof(uffdio_copy)-sizeof(__s64)))
  		goto out;
  
  	ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
  	if (ret)
  		goto out;
  	/*
  	 * double check for wraparound just in case. copy_from_user()
  	 * will later check uffdio_copy.src + uffdio_copy.len to fit
  	 * in the userland range.
  	 */
  	ret = -EINVAL;
  	if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
  		goto out;
  	if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE)
  		goto out;
  	if (mmget_not_zero(ctx->mm)) {
  		ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
  				   uffdio_copy.len);
  		mmput(ctx->mm);
  	}
  	if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
  		return -EFAULT;
  	if (ret < 0)
  		goto out;
  	BUG_ON(!ret);
  	/* len == 0 would wake all */
  	range.len = ret;
  	if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
  		range.start = uffdio_copy.dst;
  		wake_userfault(ctx, &range);
  	}
  	ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
  out:
  	return ret;
  }
  
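/*
 * UFFDIO_ZEROPAGE: like UFFDIO_COPY, but fill the destination range
 * with zero pages instead of copying from a source range.
 */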
  static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
  				unsigned long arg)
  {
  	__s64 ret;
  	struct uffdio_zeropage uffdio_zeropage;
  	struct uffdio_zeropage __user *user_uffdio_zeropage;
  	struct userfaultfd_wake_range range;
  
  	user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;
  
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
  			   /* don't copy "zeropage" last field */
  			   sizeof(uffdio_zeropage)-sizeof(__s64)))
  		goto out;
  
  	ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
  			     uffdio_zeropage.range.len);
  	if (ret)
  		goto out;
  	ret = -EINVAL;
  	if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
  		goto out;
  	if (mmget_not_zero(ctx->mm)) {
  		ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
  				     uffdio_zeropage.range.len);
  		mmput(ctx->mm);
  	}
  	if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
  		return -EFAULT;
  	if (ret < 0)
  		goto out;
  	/* len == 0 would wake all */
  	BUG_ON(!ret);
  	range.len = ret;
  	if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
  		range.start = uffdio_zeropage.range.start;
  		wake_userfault(ctx, &range);
  	}
  	ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
  out:
  	return ret;
  }
  /*
   * userland asks for a certain API version and we return which bits
   * and ioctl commands are implemented in this kernel for such API
   * version or -EINVAL if unknown.
   */
  static int userfaultfd_api(struct userfaultfd_ctx *ctx,
  			   unsigned long arg)
  {
  	struct uffdio_api uffdio_api;
  	void __user *buf = (void __user *)arg;
  	int ret;
  
  	ret = -EINVAL;
  	if (ctx->state != UFFD_STATE_WAIT_API)
  		goto out;
  	ret = -EFAULT;
  	if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
  		goto out;
  	if (uffdio_api.api != UFFD_API || uffdio_api.features) {
  		memset(&uffdio_api, 0, sizeof(uffdio_api));
  		if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
  			goto out;
  		ret = -EINVAL;
  		goto out;
  	}
  	uffdio_api.features = UFFD_API_FEATURES;
  	uffdio_api.ioctls = UFFD_API_IOCTLS;
  	ret = -EFAULT;
  	if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
  		goto out;
  	ctx->state = UFFD_STATE_RUNNING;
  	ret = 0;
  out:
  	return ret;
  }
  
  static long userfaultfd_ioctl(struct file *file, unsigned cmd,
  			      unsigned long arg)
  {
  	int ret = -EINVAL;
  	struct userfaultfd_ctx *ctx = file->private_data;
  	if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
  		return -EINVAL;
  	switch(cmd) {
  	case UFFDIO_API:
  		ret = userfaultfd_api(ctx, arg);
  		break;
  	case UFFDIO_REGISTER:
  		ret = userfaultfd_register(ctx, arg);
  		break;
  	case UFFDIO_UNREGISTER:
  		ret = userfaultfd_unregister(ctx, arg);
  		break;
  	case UFFDIO_WAKE:
  		ret = userfaultfd_wake(ctx, arg);
  		break;
  	case UFFDIO_COPY:
  		ret = userfaultfd_copy(ctx, arg);
  		break;
  	case UFFDIO_ZEROPAGE:
  		ret = userfaultfd_zeropage(ctx, arg);
  		break;
  	}
  	return ret;
  }
  
  #ifdef CONFIG_PROC_FS
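/*
 * Backs /proc/<pid>/fdinfo/<fd>: both waitqueues are walked under
 * fault_pending_wqh.lock so the pending/total counts are a consistent
 * snapshot.
 */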
  static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
  {
  	struct userfaultfd_ctx *ctx = f->private_data;
  	wait_queue_t *wq;
  	struct userfaultfd_wait_queue *uwq;
  	unsigned long pending = 0, total = 0;
  	spin_lock(&ctx->fault_pending_wqh.lock);
  	list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) {
  		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
  		pending++;
  		total++;
  	}
  	list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) {
  		uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
  		total++;
  	}
  	spin_unlock(&ctx->fault_pending_wqh.lock);
  
  	/*
  	 * If more protocols will be added, there will be all shown
  	 * separated by a space. Like this:
  	 *	protocols: aa:... bb:...
  	 */
  	seq_printf(m, "pending:\t%lu
  total:\t%lu
  API:\t%Lx:%x:%Lx
  ",
  		   pending, total, UFFD_API, UFFD_API_FEATURES,
  		   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
  }
  #endif
  
  static const struct file_operations userfaultfd_fops = {
  #ifdef CONFIG_PROC_FS
  	.show_fdinfo	= userfaultfd_show_fdinfo,
  #endif
  	.release	= userfaultfd_release,
  	.poll		= userfaultfd_poll,
  	.read		= userfaultfd_read,
  	.unlocked_ioctl = userfaultfd_ioctl,
  	.compat_ioctl	= userfaultfd_ioctl,
  	.llseek		= noop_llseek,
  };
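
/*
 * Slab constructor: runs once when the object is first created, so the
 * waitqueue heads and the refile seqcount keep their state across
 * kmem_cache_free()/kmem_cache_alloc() cycles.
 */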
  static void init_once_userfaultfd_ctx(void *mem)
  {
  	struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;
  
  	init_waitqueue_head(&ctx->fault_pending_wqh);
  	init_waitqueue_head(&ctx->fault_wqh);
  	init_waitqueue_head(&ctx->fd_wqh);
  	seqcount_init(&ctx->refile_seq);
  }
  /**
   * userfaultfd_file_create - Creates an userfaultfd file pointer.
   * @flags: Flags for the userfaultfd file.
   *
   * This function creates an userfaultfd file pointer, w/out installing
   * it into the fd table. This is useful when the userfaultfd file is
   * used during the initialization of data structures that require
   * extra setup after the userfaultfd creation. So the userfaultfd
   * creation is split into the file pointer creation phase, and the
   * file descriptor installation phase.  In this way races with
   * userspace closing the newly installed file descriptor can be
   * avoided.  Returns an userfaultfd file pointer, or a proper error
   * pointer.
   */
  static struct file *userfaultfd_file_create(int flags)
  {
  	struct file *file;
  	struct userfaultfd_ctx *ctx;
  
  	BUG_ON(!current->mm);
  
  	/* Check the UFFD_* constants for consistency.  */
  	BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
  	BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);
  
  	file = ERR_PTR(-EINVAL);
  	if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
  		goto out;
  
  	file = ERR_PTR(-ENOMEM);
  	ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
  	if (!ctx)
  		goto out;
  
  	atomic_set(&ctx->refcount, 1);
  	ctx->flags = flags;
  	ctx->state = UFFD_STATE_WAIT_API;
  	ctx->released = false;
  	ctx->mm = current->mm;
	/* prevent the mm struct from being freed */
  	atomic_inc(&ctx->mm->mm_count);
  
  	file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
  				  O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
  	if (IS_ERR(file)) {
  		mmdrop(ctx->mm);
  		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
  	}
  out:
  	return file;
  }
  
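/*
 * userfaultfd(2): reserve the file descriptor before creating the file
 * so that a failure leaves nothing visible to userland; fd_install()
 * publishes the fd only once everything has succeeded.
 */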
  SYSCALL_DEFINE1(userfaultfd, int, flags)
  {
  	int fd, error;
  	struct file *file;
  
  	error = get_unused_fd_flags(flags & UFFD_SHARED_FCNTL_FLAGS);
  	if (error < 0)
  		return error;
  	fd = error;
  
  	file = userfaultfd_file_create(flags);
  	if (IS_ERR(file)) {
  		error = PTR_ERR(file);
  		goto err_put_unused_fd;
  	}
  	fd_install(fd, file);
  
  	return fd;
  
  err_put_unused_fd:
  	put_unused_fd(fd);
  
  	return error;
  }
  
  static int __init userfaultfd_init(void)
  {
  	userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
  						sizeof(struct userfaultfd_ctx),
  						0,
  						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
  						init_once_userfaultfd_ctx);
  	return 0;
  }
  __initcall(userfaultfd_init);