kernel/futex.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003, 2004 Jamie Lokier
 *
 * Robust futex support started by Ingo Molnar
 * (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 * Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 * PI-futex support started by Ingo Molnar and Thomas Gleixner
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * PRIVATE futexes by Eric Dumazet
 * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
 * Copyright (C) IBM Corporation, 2009
 * Thanks to Thomas Gleixner for conceptual design and careful reviews.
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 */
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/ptrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/freezer.h>
#include <linux/memblock.h>
#include <linux/fault-inject.h>
#include <linux/refcount.h>

#include <asm/futex.h>

#include "locking/rtmutex_common.h"

/*
 * READ this before attempting to hack on futexes!
 *
 * Basic futex operation and ordering guarantees
 * =============================================
 *
 * The waiter reads the futex value in user space and calls
 * futex_wait(). This function computes the hash bucket and acquires
 * the hash bucket lock. After that it reads the futex user space value
 * again and verifies that the data has not changed. If it has not changed
 * it enqueues itself into the hash bucket, releases the hash bucket lock
 * and schedules.
 *
 * The waker side modifies the user space value of the futex and calls
 * futex_wake(). This function computes the hash bucket and acquires the
 * hash bucket lock. Then it looks for waiters on that futex in the hash
 * bucket and wakes them.
 *
 * In futex wake up scenarios where no tasks are blocked on a futex, taking
 * the hb spinlock can be avoided and the syscall can simply return. In order
 * for this optimization to work, ordering guarantees must exist so that the
 * waiter being added to the list is acknowledged when the list is concurrently
 * being checked by the waker, avoiding scenarios like the following:
 *
 * CPU 0                               CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *   uval = *futex;
 *                                     *futex = newval;
 *                                     sys_futex(WAKE, futex);
 *                                       futex_wake(futex);
 *                                       if (queue_empty())
 *                                         return;
 *   if (uval == val)
 *      lock(hash_bucket(futex));
 *      queue();
 *     unlock(hash_bucket(futex));
 *     schedule();
 *
 * This would cause the waiter on CPU 0 to wait forever because it
 * missed the transition of the user space value from val to newval
 * and the waker did not find the waiter in the hash bucket queue.
 *
 * The correct serialization ensures that a waiter either observes
 * the changed user space value before blocking or is woken by a
 * concurrent waker:
 *
 * CPU 0                                 CPU 1
 * val = *futex;
 * sys_futex(WAIT, futex, val);
 *   futex_wait(futex, val);
 *
 *   waiters++; (a)
 *   smp_mb(); (A) <-- paired with -.
 *                                  |
 *   lock(hash_bucket(futex));      |
 *                                  |
 *   uval = *futex;                 |
 *                                  |        *futex = newval;
 *                                  |        sys_futex(WAKE, futex);
 *                                  |          futex_wake(futex);
 *                                  |
 *                                  `--------> smp_mb(); (B)
 *   if (uval == val)
 *     queue();
 *     unlock(hash_bucket(futex));
 *     schedule();                         if (waiters)
 *                                           lock(hash_bucket(futex));
 *   else                                    wake_waiters(futex);
 *     waiters--; (b)                        unlock(hash_bucket(futex));
 *
 * Where (A) orders the waiters increment and the futex value read through
 * atomic operations (see hb_waiters_inc) and where (B) orders the write
 * to futex and the waiters read -- this is done by the barriers for both
 * shared and private futexes in get_futex_key_refs().
 *
 * This yields the following case (where X:=waiters, Y:=futex):
 *
 *	X = Y = 0
 *
 *	w[X]=1		w[Y]=1
 *	MB		MB
 *	r[Y]=y		r[X]=x
 *
 * Which guarantees that x==0 && y==0 is impossible; which translates back into
 * the guarantee that we cannot both miss the futex variable change and the
 * enqueue.
 *
 * Note that a new waiter is accounted for in (a) even when it is possible that
 * the wait call can return error, in which case we backtrack from it in (b).
 * Refer to the comment in queue_lock().
 *
 * Similarly, in order to account for waiters being requeued on another
 * address we always increment the waiters for the destination bucket before
 * acquiring the lock. It then decrements them again after releasing it -
 * the code that actually moves the futex(es) between hash buckets (requeue_futex)
 * will do the additional required waiter count housekeeping. This is done by
 * double_lock_hb() and double_unlock_hb(), respectively.
 */
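/*
 * Userspace counterpart of the protocol above -- a minimal illustrative
 * sketch, not part of this file. "futex_word", "sys_futex" and the 0/1
 * flag protocol are assumptions for the example (see futex(2)):
 *
 *	static int futex_word;
 *
 *	static long sys_futex(int *uaddr, int op, int val)
 *	{
 *		return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
 *	}
 *
 *	void wait_for_flag(void)
 *	{
 *		int val;
 *
 *		while ((val = __atomic_load_n(&futex_word, __ATOMIC_ACQUIRE)) == 0)
 *			sys_futex(&futex_word, FUTEX_WAIT, val);
 *	}
 *
 *	void set_flag_and_wake(void)
 *	{
 *		__atomic_store_n(&futex_word, 1, __ATOMIC_RELEASE);
 *		sys_futex(&futex_word, FUTEX_WAKE, 1);
 *	}
 *
 * FUTEX_WAIT blocks only if futex_word still equals val inside the
 * kernel, which is exactly the re-read described above: a concurrent
 * set_flag_and_wake() either changes the value before that check or
 * wakes the already-queued waiter.
 */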
#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
#define futex_cmpxchg_enabled 1
#else
static int  __read_mostly futex_cmpxchg_enabled;
#endif

/*
 * Futex flags used to encode options to functions and preserve them across
 * restarts.
 */
#ifdef CONFIG_MMU
# define FLAGS_SHARED		0x01
#else
/*
 * NOMMU does not have per process address space. Let the compiler optimize
 * code away.
 */
# define FLAGS_SHARED		0x00
#endif
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	refcount_t refcount;

	union futex_key key;
} __randomize_layout;

/**
 * struct futex_q - The hashed futex queue entry, one per waiting task
 * @list:		priority-sorted list of tasks waiting on this futex
 * @task:		the task waiting on the futex
 * @lock_ptr:		the hash bucket lock
 * @key:		the key the futex is hashed on
 * @pi_state:		optional priority inheritance state
 * @rt_waiter:		rt_waiter storage for use with requeue_pi
 * @requeue_pi_key:	the requeue_pi target futex key
 * @bitset:		bitset for the optional bitmasked wakeup
 *
 * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * the second.
 *
 * PI futexes are typically woken before they are removed from the hash list via
 * the rt_mutex code. See unqueue_me_pi().
 */
struct futex_q {
	struct plist_node list;

	struct task_struct *task;
	spinlock_t *lock_ptr;
	union futex_key key;
	struct futex_pi_state *pi_state;
	struct rt_mutex_waiter *rt_waiter;
	union futex_key *requeue_pi_key;
	u32 bitset;
} __randomize_layout;

static const struct futex_q futex_q_init = {
	/* list gets initialized in queue_me()*/
	.key = FUTEX_KEY_INIT,
	.bitset = FUTEX_BITSET_MATCH_ANY
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location. Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	atomic_t waiters;
	spinlock_t lock;
	struct plist_head chain;
} ____cacheline_aligned_in_smp;

/*
 * The base of the bucket array and its size are always used together
 * (after initialization only in hash_futex()), so ensure that they
 * reside in the same cacheline.
 */
static struct {
	struct futex_hash_bucket *queues;
	unsigned long            hashsize;
} __futex_data __read_mostly __aligned(2*sizeof(long));
#define futex_queues   (__futex_data.queues)
#define futex_hashsize (__futex_data.hashsize)

/*
 * Fault injections for futexes.
 */
#ifdef CONFIG_FAIL_FUTEX

static struct {
	struct fault_attr attr;

	bool ignore_private;
} fail_futex = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_private = false,
};

static int __init setup_fail_futex(char *str)
{
	return setup_fault_attr(&fail_futex.attr, str);
}
__setup("fail_futex=", setup_fail_futex);

static bool should_fail_futex(bool fshared)
{
	if (fail_futex.ignore_private && !fshared)
		return false;

	return should_fail(&fail_futex.attr, 1);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_futex_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_futex", NULL,
					&fail_futex.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	debugfs_create_bool("ignore-private", mode, dir,
			    &fail_futex.ignore_private);
	return 0;
}

late_initcall(fail_futex_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else
static inline bool should_fail_futex(bool fshared)
{
	return false;
}
#endif /* CONFIG_FAIL_FUTEX */

static inline void futex_get_mm(union futex_key *key)
{
	mmgrab(key->private.mm);
	/*
	 * Ensure futex_get_mm() implies a full barrier such that
	 * get_futex_key() implies a full barrier. This is relied upon
	 * as smp_mb(); (B), see the ordering comment above.
	 */
	smp_mb__after_atomic();
}

/*
 * Reflects a new waiter being added to the waitqueue.
 */
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_inc(&hb->waiters);
	/*
	 * Full barrier (A), see the ordering comment above.
	 */
	smp_mb__after_atomic();
#endif
}

/*
 * Reflects a waiter being removed from the waitqueue by wakeup
 * paths.
 */
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	atomic_dec(&hb->waiters);
#endif
}

static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
	return atomic_read(&hb->waiters);
#else
	return 1;
#endif
}

/**
 * hash_futex - Return the hash bucket in the global hash
 * @key:	Pointer to the futex key for which the hash is calculated
 *
 * We hash on the keys returned from get_futex_key (see below) and return the
 * corresponding hash bucket in the global hash.
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & (futex_hashsize - 1)];
}
/**
 * match_futex - Check whether two futex keys are equal
 * @key1:	Pointer to key1
 * @key2:	Pointer to key2
 *
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1 && key2
		&& key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}
/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	/*
	 * On MMU less systems futexes are always "private" as there is no per
	 * process address space. We need the smp wmb nevertheless - yes,
	 * arch/blackfin has MMU less SMP ...
	 */
	if (!IS_ENABLED(CONFIG_MMU)) {
		smp_mb(); /* explicit smp_mb(); (B) */
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		ihold(key->shared.inode); /* implies smp_mb(); (B) */
		break;
	case FUT_OFF_MMSHARED:
		futex_get_mm(key); /* implies smp_mb(); (B) */
		break;
	default:
		/*
		 * Private futexes do not hold reference on an inode or
		 * mm, therefore the only purpose of calling get_futex_key_refs
		 * is because we need the barrier for the lockless waiter check.
		 */
		smp_mb(); /* explicit smp_mb(); (B) */
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held. This is
 * a no-op for private futexes, see comment in the get
 * counterpart.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	if (!IS_ENABLED(CONFIG_MMU))
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}
enum futex_access {
	FUTEX_READ,
	FUTEX_WRITE
};

/**
 * futex_setup_timer - set up the sleeping hrtimer.
 * @time:	ptr to the given timeout value
 * @timeout:	the hrtimer_sleeper structure to be set up
 * @flags:	futex flags
 * @range_ns:	optional range in ns
 *
 * Return: Initialized hrtimer_sleeper structure or NULL if no timeout
 *	   value given
 */
static inline struct hrtimer_sleeper *
futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
		  int flags, u64 range_ns)
{
	if (!time)
		return NULL;

	hrtimer_init_on_stack(&timeout->timer, (flags & FLAGS_CLOCKRT) ?
			      CLOCK_REALTIME : CLOCK_MONOTONIC,
			      HRTIMER_MODE_ABS);
	hrtimer_init_sleeper(timeout, current);

	/*
	 * If range_ns is 0, calling hrtimer_set_expires_range_ns() is
	 * effectively the same as calling hrtimer_set_expires().
	 */
	hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns);

	return timeout;
}
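/*
 * Typical call pattern in the wait paths (sketch; "to", "timeout" and
 * "abs_time" are illustrative local names):
 *
 *	struct hrtimer_sleeper timeout, *to;
 *
 *	to = futex_setup_timer(abs_time, &timeout, flags,
 *			       current->timer_slack_ns);
 *	...
 *	if (to) {
 *		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
 *		...
 *		hrtimer_cancel(&to->timer);
 *		destroy_hrtimer_on_stack(&to->timer);
 *	}
 */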
/**
 * get_futex_key() - Get parameters which are the keys for a futex
 * @uaddr:	virtual address of the futex
 * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key:	address where result is stored.
 * @rw:		mapping needs to be read/write (values: FUTEX_READ,
 *		FUTEX_WRITE)
 *
 * Return: a negative error code or 0
 *
 * The key words are stored in @key on success.
 *
 * For shared mappings, it's (page->index, file_inode(vma->vm_file),
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int
get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, enum futex_access rw)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page, *tail;
	struct address_space *mapping;
	int err, ro = 0;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	if (unlikely(!access_ok(uaddr, sizeof(u32))))
		return -EFAULT;

	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * virtual address, we don't even have to find the underlying vma.
	 * Note: we do have to check that 'uaddr' is a valid user address,
	 * but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);  /* implies smp_mb(); (B) */
		return 0;
	}

again:
	/* Ignore any VERIFY_READ mapping (futex common case) */
	if (unlikely(should_fail_futex(fshared)))
		return -EFAULT;

	err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
	/*
	 * If write access is not required (eg. FUTEX_WAIT), try
	 * and get read-only access.
	 */
	if (err == -EFAULT && rw == FUTEX_READ) {
		err = get_user_pages_fast(address, 1, 0, &page);
		ro = 1;
	}
	if (err < 0)
		return err;
	else
		err = 0;

	/*
	 * The treatment of mapping from this point on is critical. The page
	 * lock protects many things but in this context the page lock
	 * stabilizes mapping, prevents inode freeing in the shared
	 * file-backed region case and guards against movement to swap cache.
	 *
	 * Strictly speaking the page lock is not needed in all cases being
	 * considered here and page lock forces unnecessary serialization.
	 * From this point on, mapping will be re-verified if necessary and
	 * page lock will be acquired only if it is unavoidable.
	 *
	 * Mapping checks require the head page for any compound page so the
	 * head page and mapping is looked up now. For anonymous pages, it
	 * does not matter if the page splits in the future as the key is
	 * based on the address. For filesystem-backed pages, the tail is
	 * required as the index of the page determines the key. For
	 * base pages, there is no tail page and tail == page.
	 */
	tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * If page->mapping is NULL, then it cannot be a PageAnon
	 * page; but it might be the ZERO_PAGE or in the gate area or
	 * in a special mapping (all cases which we are happy to fail);
	 * or it may have been a good file page when get_user_pages_fast
	 * found it, but truncated or holepunched or subjected to
	 * invalidate_complete_page2 before we got the page lock (also
	 * cases which we are happy to fail).  And we hold a reference,
	 * so refcount care in invalidate_complete_page's remove_mapping
	 * prevents drop_caches from setting mapping to NULL beneath us.
	 *
	 * The case we do have to guard against is when memory pressure made
	 * shmem_writepage move it from filecache to swapcache beneath us:
	 * an unlikely race, but we do need to retry for page->mapping.
	 */
	if (unlikely(!mapping)) {
		int shmem_swizzled;

		/*
		 * Page lock is required to identify which special case above
		 * applies. If this is really a shmem page then the page lock
		 * will prevent unexpected transitions.
		 */
		lock_page(page);
		shmem_swizzled = PageSwapCache(page) || page->mapping;
		unlock_page(page);
		put_page(page);

		if (shmem_swizzled)
			goto again;

		return -EFAULT;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * If the futex key is stored on an anonymous page, then the associated
	 * object is the mm which is implicitly pinned by the calling process.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object, not the particular process.
	 */
	if (PageAnon(page)) {
		/*
		 * A RO anonymous page will never change and thus doesn't make
		 * sense for futex operations.
		 */
		if (unlikely(should_fail_futex(fshared)) || ro) {
			err = -EFAULT;
			goto out;
		}

		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;

		get_futex_key_refs(key); /* implies smp_mb(); (B) */

	} else {
		struct inode *inode;

		/*
		 * The associated futex object in this case is the inode and
		 * the page->mapping must be traversed. Ordinarily this should
		 * be stabilised under page lock but it's not strictly
		 * necessary in this case as we just want to pin the inode, not
		 * update the radix tree or anything like that.
		 *
		 * The RCU read lock is taken as the inode is finally freed
		 * under RCU. If the mapping still matches expectations then the
		 * mapping->host can be safely accessed as being a valid inode.
		 */
		rcu_read_lock();

		if (READ_ONCE(page->mapping) != mapping) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		inode = READ_ONCE(mapping->host);
		if (!inode) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/*
		 * Take a reference unless it is about to be freed. Previously
		 * this reference was taken by ihold under the page lock
		 * pinning the inode in place so i_lock was unnecessary. The
		 * only way for this check to fail is if the inode was
		 * truncated in parallel which is almost certainly an
		 * application bug. In such a case, just retry.
		 *
		 * We are not calling into get_futex_key_refs() in file-backed
		 * cases, therefore a successful atomic_inc return below will
		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
		 */
		if (!atomic_inc_not_zero(&inode->i_count)) {
			rcu_read_unlock();
			put_page(page);

			goto again;
		}

		/* Should be impossible but let's be paranoid for now */
		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
			err = -EFAULT;
			rcu_read_unlock();
			iput(inode);

			goto out;
		}

		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = inode;
		key->shared.pgoff = basepage_index(tail);
		rcu_read_unlock();
	}

out:
	put_page(page);
	return err;
}
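/*
 * Resulting key contents (illustrative): a private futex at user address
 * "addr" yields { current->mm, addr & PAGE_MASK, addr % PAGE_SIZE },
 * while a shared file-backed futex yields { inode, page index,
 * offset | FUT_OFF_INODE }, so two processes mapping the same file page
 * compute the same key and therefore land in the same hash bucket.
 */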
static inline void put_futex_key(union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * fault_in_user_writeable() - Fault in user address and verify RW access
 * @uaddr:	pointer to faulting user space address
 *
 * Slow path to fixup the fault we just took in the atomic write
 * access to @uaddr.
 *
 * We have no generic implementation of a non-destructive write to the
 * user address. We know that we faulted in the atomic pagefault
 * disabled section so we can as well avoid the #PF overhead by
 * calling get_user_pages() right away.
 */
static int fault_in_user_writeable(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);
	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
			       FAULT_FLAG_WRITE, NULL);
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;
}
/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:		the hash bucket the futex_q's reside in
 * @key:	the futex key (to distinguish it from other futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}
static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
				      u32 uval, u32 newval)
{
	int ret;

	pagefault_disable();
	ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
	pagefault_enable();

	return ret;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*dest, from);
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
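/*
 * Both helpers above run with pagefaults disabled because their callers
 * hold a hash bucket spinlock, so taking a fault would mean sleeping in
 * atomic context. On a fault they fail with an error instead; the callers
 * then typically drop hb->lock, fault the page in via
 * fault_in_user_writeable() or get_user(), and retry the operation.
 */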
/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	refcount_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void get_pi_state(struct futex_pi_state *pi_state)
{
	WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
}

/*
 * Drops a reference to the pi_state object and frees or caches it
 * when the last reference is gone.
 */
static void put_pi_state(struct futex_pi_state *pi_state)
{
	if (!pi_state)
		return;

	if (!refcount_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		struct task_struct *owner;

		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		owner = pi_state->owner;
		if (owner) {
			raw_spin_lock(&owner->pi_lock);
			list_del_init(&pi_state->list);
			raw_spin_unlock(&owner->pi_lock);
		}
		rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	}

	if (current->pi_state_cache) {
		kfree(pi_state);
	} else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		refcount_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}
#ifdef CONFIG_FUTEX_PI

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	raw_spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {
		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);

		/*
		 * We can race against put_pi_state() removing itself from the
		 * list (a waiter going away). put_pi_state() will first
		 * decrement the reference count and then modify the list, so
		 * it's possible to see the list entry but fail this reference
		 * acquire.
		 *
		 * In that case; drop the locks to let put_pi_state() make
		 * progress and retry the loop.
		 */
		if (!refcount_inc_not_zero(&pi_state->refcount)) {
			raw_spin_unlock_irq(&curr->pi_lock);
			cpu_relax();
			raw_spin_lock_irq(&curr->pi_lock);
			continue;
		}
		raw_spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);
		raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
		raw_spin_lock(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			/* retain curr->pi_lock for the loop invariant */
			raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
			spin_unlock(&hb->lock);
			put_pi_state(pi_state);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;

		raw_spin_unlock(&curr->pi_lock);
		raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
		spin_unlock(&hb->lock);

		rt_mutex_futex_unlock(&pi_state->pi_mutex);
		put_pi_state(pi_state);

		raw_spin_lock_irq(&curr->pi_lock);
	}
	raw_spin_unlock_irq(&curr->pi_lock);
}
#endif
/*
 * We need to check the following states:
 *
 *      Waiter | pi_state | pi->owner | uTID      | uODIED | ?
 *
 * [1]  NULL   | ---      | ---       | 0         | 0/1    | Valid
 * [2]  NULL   | ---      | ---       | >0        | 0/1    | Valid
 *
 * [3]  Found  | NULL     | --        | Any       | 0/1    | Invalid
 *
 * [4]  Found  | Found    | NULL      | 0         | 1      | Valid
 * [5]  Found  | Found    | NULL      | >0        | 1      | Invalid
 *
 * [6]  Found  | Found    | task      | 0         | 1      | Valid
 *
 * [7]  Found  | Found    | NULL      | Any       | 0      | Invalid
 *
 * [8]  Found  | Found    | task      | ==taskTID | 0/1    | Valid
 * [9]  Found  | Found    | task      | 0         | 0      | Invalid
 * [10] Found  | Found    | task      | !=taskTID | 0/1    | Invalid
 *
 * [1]	Indicates that the kernel can acquire the futex atomically. We
 *	came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit.
 *
 * [2]	Valid, if TID does not belong to a kernel thread. If no matching
 *	thread is found then it indicates that the owner TID has died.
 *
 * [3]	Invalid. The waiter is queued on a non PI futex
 *
 * [4]	Valid state after exit_robust_list(), which sets the user space
 *	value to FUTEX_WAITERS | FUTEX_OWNER_DIED.
 *
 * [5]	The user space value got manipulated between exit_robust_list()
 *	and exit_pi_state_list()
 *
 * [6]	Valid state after exit_pi_state_list() which sets the new owner in
 *	the pi_state but cannot access the user space value.
 *
 * [7]	pi_state->owner can only be NULL when the OWNER_DIED bit is set.
 *
 * [8]	Owner and user space value match
 *
 * [9]	There is no transient state which sets the user space TID to 0
 *	except exit_robust_list(), but this is indicated by the
 *	FUTEX_OWNER_DIED bit. See [4]
 *
 * [10] There is no transient state which leaves owner and user space
 *	TID out of sync.
 *
 *
 * Serialization and lifetime rules:
 *
 * hb->lock:
 *
 *	hb -> futex_q, relation
 *	futex_q -> pi_state, relation
 *
 *	(cannot be raw because hb can contain arbitrary amount
 *	 of futex_q's)
 *
 * pi_mutex->wait_lock:
 *
 *	{uval, pi_state}
 *
 *	(and pi_mutex 'obviously')
 *
 * p->pi_lock:
 *
 *	p->pi_state_list -> pi_state->list, relation
 *
 * pi_state->refcount:
 *
 *	pi_state lifetime
 *
 *
 * Lock order:
 *
 *   hb->lock
 *     pi_mutex->wait_lock
 *       p->pi_lock
 *
 */
/*
 * Validate that the existing waiter has a pi_state and sanity check
 * the pi_state against the user space value. If correct, attach to
 * it.
 */
static int attach_to_pi_state(u32 __user *uaddr, u32 uval,
			      struct futex_pi_state *pi_state,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	u32 uval2;
	int ret;

	/*
	 * Userspace might have messed up non-PI and PI futexes [3]
	 */
	if (unlikely(!pi_state))
		return -EINVAL;

	/*
	 * We get here with hb->lock held, and having found a
	 * futex_top_waiter(). This means that futex_lock_pi() of said futex_q
	 * has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
	 * which in turn means that futex_lock_pi() still has a reference on
	 * our pi_state.
	 *
	 * The waiter holding a reference on @pi_state also protects against
	 * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi()
	 * and futex_wait_requeue_pi() as it cannot go to 0 and consequently
	 * free pi_state before we can take a reference ourselves.
	 */
	WARN_ON(!refcount_read(&pi_state->refcount));

	/*
	 * Now that we have a pi_state, we can acquire wait_lock
	 * and do the state validation.
	 */
	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	/*
	 * Since {uval, pi_state} is serialized by wait_lock, and our current
	 * uval was read without holding it, it can have changed. Verify it
	 * still is what we expect it to be, otherwise retry the entire
	 * operation.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		goto out_efault;

	if (uval != uval2)
		goto out_eagain;

	/*
	 * Handle the owner died case:
	 */
	if (uval & FUTEX_OWNER_DIED) {
		/*
		 * exit_pi_state_list sets owner to NULL and wakes the
		 * topmost waiter. The task which acquires the
		 * pi_state->rt_mutex will fixup owner.
		 */
		if (!pi_state->owner) {
			/*
			 * No pi state owner, but the user space TID
			 * is not 0. Inconsistent state. [5]
			 */
			if (pid)
				goto out_einval;
			/*
			 * Take a ref on the state and return success. [4]
			 */
			goto out_attach;
		}

		/*
		 * If TID is 0, then either the dying owner has not
		 * yet executed exit_pi_state_list() or some waiter
		 * acquired the rtmutex in the pi state, but did not
		 * yet fixup the TID in user space.
		 *
		 * Take a ref on the state and return success. [6]
		 */
		if (!pid)
			goto out_attach;
	} else {
		/*
		 * If the owner died bit is not set, then the pi_state
		 * must have an owner. [7]
		 */
		if (!pi_state->owner)
			goto out_einval;
	}

	/*
	 * Bail out if user space manipulated the futex value. If pi
	 * state exists then the owner TID must be the same as the
	 * user space TID. [9/10]
	 */
	if (pid != task_pid_vnr(pi_state->owner))
		goto out_einval;

out_attach:
	get_pi_state(pi_state);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	*ps = pi_state;
	return 0;

out_einval:
	ret = -EINVAL;
	goto out_error;

out_eagain:
	ret = -EAGAIN;
	goto out_error;

out_efault:
	ret = -EFAULT;
	goto out_error;

out_error:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
	return ret;
}
static int handle_exit_race(u32 __user *uaddr, u32 uval,
			    struct task_struct *tsk)
{
	u32 uval2;

	/*
	 * If PF_EXITPIDONE is not yet set, then try again.
	 */
	if (tsk && !(tsk->flags & PF_EXITPIDONE))
		return -EAGAIN;

	/*
	 * Reread the user space value to handle the following situation:
	 *
	 * CPU0				CPU1
	 *
	 * sys_exit()			sys_futex()
	 *  do_exit()			 futex_lock_pi()
	 *                                futex_lock_pi_atomic()
	 *   exit_signals(tsk)		    No waiters:
	 *    tsk->flags |= PF_EXITING;	    *uaddr == 0x00000PID
	 *  mm_release(tsk)		    Set waiter bit
	 *   exit_robust_list(tsk) {	    *uaddr = 0x80000PID;
	 *      Set owner died		    attach_to_pi_owner() {
	 *    *uaddr = 0xC0000000;	     tsk = get_task(PID);
	 *   }				     if (!tsk->flags & PF_EXITING) {
	 *  ...				       attach();
	 *  tsk->flags |= PF_EXITPIDONE;     } else {
	 *				       if (!(tsk->flags & PF_EXITPIDONE))
	 *				         return -EAGAIN;
	 *				       return -ESRCH; <--- FAIL
	 *				     }
	 *
	 * Returning ESRCH unconditionally is wrong here because the
	 * user space value has been changed by the exiting task.
	 *
	 * The same logic applies to the case where the exiting task is
	 * already gone.
	 */
	if (get_futex_value_locked(&uval2, uaddr))
		return -EFAULT;

	/* If the user space value has changed, try again. */
	if (uval2 != uval)
		return -EAGAIN;

	/*
	 * The exiting task did not have a robust list, the robust list was
	 * corrupted or the user space value in *uaddr is simply bogus.
	 * Give up and tell user space.
	 */
	return -ESRCH;
}
/*
 * Lookup the task for the TID provided from user space and attach to
 * it after doing proper sanity checks.
 */
static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
			      struct futex_pi_state **ps)
{
	pid_t pid = uval & FUTEX_TID_MASK;
	struct futex_pi_state *pi_state;
	struct task_struct *p;

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0 [1]
	 *
	 * The !pid check is paranoid. None of the call sites should end up
	 * with pid == 0, but better safe than sorry. Let the caller retry.
	 */
	if (!pid)
		return -EAGAIN;
	p = find_get_task_by_vpid(pid);
	if (!p)
		return handle_exit_race(uaddr, uval, NULL);

	if (unlikely(p->flags & PF_KTHREAD)) {
		put_task_struct(p);
		return -EPERM;
	}

	/*
	 * We need to look at the task state flags to figure out whether
	 * the task is exiting. To protect against the do_exit
	 * change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	raw_spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = handle_exit_race(uaddr, uval, p);

		raw_spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	/*
	 * No existing pi state. First waiter. [2]
	 *
	 * This creates pi_state, we have hb->lock held, this means nothing can
	 * observe this state, wait_lock is irrelevant.
	 */
	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make @p
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	/*
	 * Assignment without holding pi_state->pi_mutex.wait_lock is safe
	 * because there is no concurrency as the object is not published yet.
	 */
	pi_state->owner = p;
	raw_spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}
static int lookup_pi_state(u32 __user *uaddr, u32 uval,
			   struct futex_hash_bucket *hb,
			   union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_q *top_waiter = futex_top_waiter(hb, key);

	/*
	 * If there is a waiter on that futex, validate it and
	 * attach to the pi_state when the validation succeeds.
	 */
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * We are the first waiter - try to look up the owner based on
	 * @uval and attach to it.
	 */
	return attach_to_pi_owner(uaddr, uval, key, ps);
}
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
{
	int err;
	u32 uninitialized_var(curval);

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (unlikely(err))
		return err;

	/* If user space value changed, let the caller retry */
	return curval != uval ? -EAGAIN : 0;
}
/**
 * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
 * @uaddr:		the pi futex user address
 * @hb:			the pi futex hash bucket
 * @key:		the futex key associated with uaddr and hb
 * @ps:			the pi_state pointer where we store the result of the
 *			lookup
 * @task:		the task to perform the atomic lock work for.  This will
 *			be "current" except in the case of requeue pi.
 * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
 *
 * Return:
 *  -  0 - ready to wait;
 *  -  1 - acquired the lock;
 *  - <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task, int set_waiters)
{
	u32 uval, newval, vpid = task_pid_vnr(task);
	struct futex_q *top_waiter;
	int ret;

	/*
	 * Read the user space value first so we can validate a few
	 * things before proceeding further.
	 */
	if (get_futex_value_locked(&uval, uaddr))
		return -EFAULT;

	if (unlikely(should_fail_futex(true)))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
		return -EDEADLK;

	if ((unlikely(should_fail_futex(true))))
		return -EDEADLK;

	/*
	 * Lookup existing state first. If it exists, try to attach to
	 * its pi_state.
	 */
	top_waiter = futex_top_waiter(hb, key);
	if (top_waiter)
		return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps);

	/*
	 * No waiter and user TID is 0. We are here because the
	 * waiters or the owner died bit is set or called from
	 * requeue_cmp_pi or for whatever reason something took the
	 * syscall.
	 */
	if (!(uval & FUTEX_TID_MASK)) {
		/*
		 * We take over the futex. No other waiters and the user space
		 * TID is 0. We preserve the owner died bit.
		 */
		newval = uval & FUTEX_OWNER_DIED;
		newval |= vpid;

		/* The futex requeue_pi code can enforce the waiters bit */
		if (set_waiters)
			newval |= FUTEX_WAITERS;

		ret = lock_pi_update_atomic(uaddr, uval, newval);
		/* If the take over worked, return 1 */
		return ret < 0 ? ret : 1;
	}

	/*
	 * First waiter. Set the waiters bit before attaching ourselves to
	 * the owner. If owner tries to unlock, it will be forced into
	 * the kernel and blocked on hb->lock.
	 */
	newval = uval | FUTEX_WAITERS;
	ret = lock_pi_update_atomic(uaddr, uval, newval);
	if (ret)
		return ret;
	/*
	 * If the update of the user space value succeeded, we try to
	 * attach to the owner. If that fails, no harm done, we only
	 * set the FUTEX_WAITERS bit in the user space variable.
	 */
	return attach_to_pi_owner(uaddr, newval, key, ps);
}
/**
 * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
 * @q:	The futex_q to unqueue
 *
 * The q->lock_ptr must not be NULL and must be held by the caller.
 */
static void __unqueue_futex(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
		return;
	lockdep_assert_held(q->lock_ptr);

	hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
	plist_del(&q->list, &hb->chain);
	hb_waiters_dec(hb);
}
/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed. Callers
 * must ensure to later call wake_up_q() for the actual
 * wakeups to occur.
 */
static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
	struct task_struct *p = q->task;

	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
		return;

	get_task_struct(p);
	__unqueue_futex(q);
	/*
	 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL
	 * is written, without taking any locks. This is possible in the event
	 * of a spurious wakeup, for example. A memory barrier is required here
	 * to prevent the following store to lock_ptr from getting ahead of the
	 * plist_del in __unqueue_futex().
	 */
	smp_store_release(&q->lock_ptr, NULL);

	/*
	 * Queue the task for later wakeup for after we've released
	 * the hb->lock. wake_q_add_safe() consumes the reference taken
	 * by get_task_struct() above.
	 */
	wake_q_add_safe(wake_q, p);
}
/*
 * Caller must hold a reference on @pi_state.
 */
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
{
	u32 uninitialized_var(curval), newval;
	struct task_struct *new_owner;
	bool postunlock = false;
	DEFINE_WAKE_Q(wake_q);
	int ret = 0;

	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
	if (WARN_ON_ONCE(!new_owner)) {
		/*
		 * As per the comment in futex_unlock_pi() this should not happen.
		 *
		 * When this happens, give up our locks and try again, giving
		 * the futex_lock_pi() instance time to complete, either by
		 * waiting on the rtmutex or removing itself from the futex
		 * queue.
		 */
		ret = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * We pass it to the next owner. The WAITERS bit is always kept
	 * enabled while there is PI state around. We cleanup the owner
	 * died bit, because we are the owner.
	 */
	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

	if (unlikely(should_fail_futex(true)))
		ret = -EFAULT;

	ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
	if (!ret && (curval != uval)) {
		/*
		 * If an unconditional UNLOCK_PI operation (user space did not
		 * try the TID->0 transition) raced with a waiter setting the
		 * FUTEX_WAITERS flag between get_user() and locking the hash
		 * bucket lock, retry the operation.
		 */
		if ((FUTEX_TID_MASK & curval) == uval)
			ret = -EAGAIN;
		else
			ret = -EINVAL;
	}

	if (ret)
		goto out_unlock;

	/*
	 * This is a point of no return; once we modify the uval there is no
	 * going back and subsequent operations must not fail.
	 */

	raw_spin_lock(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	raw_spin_unlock(&pi_state->owner->pi_lock);

	raw_spin_lock(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	raw_spin_unlock(&new_owner->pi_lock);

	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

out_unlock:
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);

	return ret;
}
/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}
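/*
 * Example (illustrative): a requeue from a futex in bucket B7 to one in
 * bucket B3 and the reverse requeue both take B3's lock first, because
 * double_lock_hb() orders the two locks by ascending bucket address;
 * that rules out an ABBA deadlock between concurrent requeue operations.
 */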
8b8f319fc
|
1525 |
/* |
b2d0994b1
|
1526 |
* Wake up waiters matching bitset queued on this futex (uaddr). |
1da177e4c
|
1527 |
*/ |
b41277dc7
|
1528 1529 |
static int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset) |
1da177e4c
|
1530 |
{ |
e2970f2fb
|
1531 |
struct futex_hash_bucket *hb; |
1da177e4c
|
1532 |
struct futex_q *this, *next; |
38d47c1b7
|
1533 |
union futex_key key = FUTEX_KEY_INIT; |
1da177e4c
|
1534 |
int ret; |
194a6b5b9
|
1535 |
DEFINE_WAKE_Q(wake_q); |
1da177e4c
|
1536 |
|
cd689985c
|
1537 1538 |
if (!bitset) return -EINVAL; |
96d4f267e
|
1539 |
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ); |
1da177e4c
|
1540 1541 |
if (unlikely(ret != 0)) goto out; |
e2970f2fb
|
1542 |
hb = hash_futex(&key); |
b0c29f79e
|
1543 1544 1545 1546 |
/* Make sure we really have tasks to wakeup */ if (!hb_waiters_pending(hb)) goto out_put_key; |
e2970f2fb
|
1547 |
spin_lock(&hb->lock); |
1da177e4c
|
1548 |
|
0d00c7b20
|
1549 |
plist_for_each_entry_safe(this, next, &hb->chain, list) { |
1da177e4c
|
1550 |
if (match_futex (&this->key, &key)) { |
52400ba94
|
1551 |
if (this->pi_state || this->rt_waiter) { |
ed6f7b10e
|
1552 1553 1554 |
ret = -EINVAL; break; } |
cd689985c
|
1555 1556 1557 1558 |
/* Check if one of the bits is set in both bitsets */ if (!(this->bitset & bitset)) continue; |
1d0dcb3ad
|
1559 |
mark_wake_futex(&wake_q, this); |
1da177e4c
|
1560 1561 1562 1563 |
if (++ret >= nr_wake) break; } } |
e2970f2fb
|
1564 |
spin_unlock(&hb->lock); |
1d0dcb3ad
|
1565 |
wake_up_q(&wake_q); |
b0c29f79e
|
1566 |
out_put_key: |
ae791a2d2
|
1567 |
put_futex_key(&key); |
42d35d48c
|
1568 |
out: |
1da177e4c
|
1569 1570 |
return ret; } |
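User space reaches this path through the futex(2) syscall. A minimal sketch, assuming Linux and syscall(2); futex_wake_n is an illustrative wrapper, not a real API:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

/*
 * Wake up to @nr tasks blocked in FUTEX_WAIT on @uaddr. Plain
 * FUTEX_WAKE enters futex_wake() with bitset = FUTEX_BITSET_MATCH_ANY,
 * so every queued waiter matches. Returns the number woken.
 */
static long futex_wake_n(uint32_t *uaddr, int nr)
{
        return syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, nr,
                       NULL, NULL, 0);
}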
30d6e0a41
|
1571 1572 1573 1574 |
static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr) { unsigned int op = (encoded_op & 0x70000000) >> 28; unsigned int cmp = (encoded_op & 0x0f000000) >> 24; |
d70ef2289
|
1575 1576 |
int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11); int cmparg = sign_extend32(encoded_op & 0x00000fff, 11); |
30d6e0a41
|
1577 1578 1579 |
int oldval, ret; if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) { |
e78c38f6b
|
1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 |
if (oparg < 0 || oparg > 31) { char comm[sizeof(current->comm)]; /* * kill this print and return -EINVAL when userspace * is sane again */ pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n", get_task_comm(comm, current), oparg); oparg &= 31; }
30d6e0a41
|
1591 1592 |
oparg = 1 << oparg; } |
96d4f267e
|
1593 |
if (!access_ok(uaddr, sizeof(u32))) |
30d6e0a41
|
1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 |
return -EFAULT; ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr); if (ret) return ret; switch (cmp) { case FUTEX_OP_CMP_EQ: return oldval == cmparg; case FUTEX_OP_CMP_NE: return oldval != cmparg; case FUTEX_OP_CMP_LT: return oldval < cmparg; case FUTEX_OP_CMP_GE: return oldval >= cmparg; case FUTEX_OP_CMP_LE: return oldval <= cmparg; case FUTEX_OP_CMP_GT: return oldval > cmparg; default: return -ENOSYS; } } |
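The bit layout decoded above is the mirror image of the uapi FUTEX_OP() packing macro from <linux/futex.h>. A small sketch that builds an encoded_op and unpacks it with the same shifts; the cast-and-shift expressions stand in for sign_extend32():

#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* "*uaddr2 += 1; report whether the old value was > 0" */
        uint32_t encoded = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);

        /* The same unpacking as the kernel code above. */
        printf("op=%u cmp=%u oparg=%d cmparg=%d\n",
               (encoded & 0x70000000) >> 28,
               (encoded & 0x0f000000) >> 24,
               (int32_t)(encoded << 8) >> 20,   /* sign_extend32(x, 11) */
               (int32_t)(encoded << 20) >> 20);
        return 0;
}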
1da177e4c
|
1617 |
/* |
4732efbeb
|
1618 1619 1620 |
* Wake up all waiters hashed on the physical page that is mapped * to this virtual address: */ |
e2970f2fb
|
1621 |
static int |
b41277dc7
|
1622 |
futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, |
e2970f2fb
|
1623 |
int nr_wake, int nr_wake2, int op) |
4732efbeb
|
1624 |
{ |
38d47c1b7
|
1625 |
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; |
e2970f2fb
|
1626 |
struct futex_hash_bucket *hb1, *hb2; |
4732efbeb
|
1627 |
struct futex_q *this, *next; |
e4dc5b7a3
|
1628 |
int ret, op_ret; |
194a6b5b9
|
1629 |
DEFINE_WAKE_Q(wake_q); |
4732efbeb
|
1630 |
|
e4dc5b7a3
|
1631 |
retry: |
96d4f267e
|
1632 |
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ); |
4732efbeb
|
1633 1634 |
if (unlikely(ret != 0)) goto out; |
96d4f267e
|
1635 |
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); |
4732efbeb
|
1636 |
if (unlikely(ret != 0)) |
42d35d48c
|
1637 |
goto out_put_key1; |
4732efbeb
|
1638 |
|
e2970f2fb
|
1639 1640 |
hb1 = hash_futex(&key1); hb2 = hash_futex(&key2); |
4732efbeb
|
1641 |
|
e4dc5b7a3
|
1642 |
retry_private: |
eaaea8036
|
1643 |
double_lock_hb(hb1, hb2); |
e2970f2fb
|
1644 |
op_ret = futex_atomic_op_inuser(op, uaddr2); |
4732efbeb
|
1645 |
if (unlikely(op_ret < 0)) { |
5eb3dc62f
|
1646 |
double_unlock_hb(hb1, hb2); |
4732efbeb
|
1647 |
|
6b4f4bc9c
|
1648 1649 1650 1651 1652 1653 |
if (!IS_ENABLED(CONFIG_MMU) || unlikely(op_ret != -EFAULT && op_ret != -EAGAIN)) { /* * we don't get EFAULT from MMU faults if we don't have * an MMU, but we might get them from range checking */ |
796f8d9b9
|
1654 |
ret = op_ret; |
42d35d48c
|
1655 |
goto out_put_keys; |
796f8d9b9
|
1656 |
} |
6b4f4bc9c
|
1657 1658 1659 1660 1661 |
if (op_ret == -EFAULT) { ret = fault_in_user_writeable(uaddr2); if (ret) goto out_put_keys; } |
4732efbeb
|
1662 |
|
6b4f4bc9c
|
1663 1664 |
if (!(flags & FLAGS_SHARED)) { cond_resched(); |
e4dc5b7a3
|
1665 |
goto retry_private; |
6b4f4bc9c
|
1666 |
} |
e4dc5b7a3
|
1667 |
|
ae791a2d2
|
1668 1669 |
put_futex_key(&key2); put_futex_key(&key1); |
6b4f4bc9c
|
1670 |
cond_resched(); |
e4dc5b7a3
|
1671 |
goto retry; |
4732efbeb
|
1672 |
} |
0d00c7b20
|
1673 |
plist_for_each_entry_safe(this, next, &hb1->chain, list) { |
4732efbeb
|
1674 |
if (match_futex (&this->key, &key1)) { |
aa10990e0
|
1675 1676 1677 1678 |
if (this->pi_state || this->rt_waiter) { ret = -EINVAL; goto out_unlock; } |
1d0dcb3ad
|
1679 |
mark_wake_futex(&wake_q, this); |
4732efbeb
|
1680 1681 1682 1683 1684 1685 |
if (++ret >= nr_wake) break; } } if (op_ret > 0) { |
4732efbeb
|
1686 |
op_ret = 0; |
0d00c7b20
|
1687 |
plist_for_each_entry_safe(this, next, &hb2->chain, list) { |
4732efbeb
|
1688 |
if (match_futex (&this->key, &key2)) { |
aa10990e0
|
1689 1690 1691 1692 |
if (this->pi_state || this->rt_waiter) { ret = -EINVAL; goto out_unlock; } |
1d0dcb3ad
|
1693 |
mark_wake_futex(&wake_q, this); |
4732efbeb
|
1694 1695 1696 1697 1698 1699 |
if (++op_ret >= nr_wake2) break; } } ret += op_ret; } |
aa10990e0
|
1700 |
out_unlock: |
5eb3dc62f
|
1701 |
double_unlock_hb(hb1, hb2); |
1d0dcb3ad
|
1702 |
wake_up_q(&wake_q); |
42d35d48c
|
1703 |
out_put_keys: |
ae791a2d2
|
1704 |
put_futex_key(&key2); |
42d35d48c
|
1705 |
out_put_key1: |
ae791a2d2
|
1706 |
put_futex_key(&key1); |
42d35d48c
|
1707 |
out: |
4732efbeb
|
1708 1709 |
return ret; } |
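Historically the main user of this operation was glibc's pthread_cond_signal(), which wants to update one word and wake on two futexes in a single syscall. A hedged sketch of such a call; wake_two and its wake policy are illustrative, not glibc's actual code:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

/*
 * In one syscall: set *uaddr2 = 0, wake one waiter on uaddr1, and, if
 * the old value of *uaddr2 was nonzero (FUTEX_OP_CMP_NE 0), wake one
 * waiter on uaddr2 as well. nr_wake2 travels in the timeout slot.
 */
static long wake_two(uint32_t *uaddr1, uint32_t *uaddr2)
{
        return syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP_PRIVATE, 1,
                       1UL /* nr_wake2 */, uaddr2,
                       FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_NE, 0));
}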
9121e4783
|
1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 |
/** * requeue_futex() - Requeue a futex_q from one hb to another * @q: the futex_q to requeue * @hb1: the source hash_bucket * @hb2: the target hash_bucket * @key2: the new key for the requeued futex_q */ static inline void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key2) { /* * If key1 and key2 hash to the same bucket, no need to * requeue. */ if (likely(&hb1->chain != &hb2->chain)) { plist_del(&q->list, &hb1->chain); |
11d4616bd
|
1728 |
hb_waiters_dec(hb1); |
11d4616bd
|
1729 |
hb_waiters_inc(hb2); |
fe1bce9e2
|
1730 |
plist_add(&q->list, &hb2->chain); |
9121e4783
|
1731 |
q->lock_ptr = &hb2->lock; |
9121e4783
|
1732 1733 1734 1735 |
} get_futex_key_refs(key2); q->key = *key2; } |
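User space drives requeueing with FUTEX_CMP_REQUEUE, classically to broadcast a condition variable: wake one waiter and shift the rest onto the mutex word so they do not stampede. A sketch under those assumptions; cond_broadcast is an illustrative name:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <limits.h>

/*
 * Wake one waiter on @cond and requeue up to INT_MAX more onto
 * @mutex_word. @expected guards against a racing signaller: if
 * *cond != expected the kernel returns EAGAIN (see futex_requeue()'s
 * cmpval check) and the caller must re-read and retry.
 */
static long cond_broadcast(uint32_t *cond, uint32_t *mutex_word,
                           uint32_t expected)
{
        return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PRIVATE,
                       1, (unsigned long)INT_MAX, mutex_word, expected);
}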
52400ba94
|
1736 1737 |
/** * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue |
d96ee56ce
|
1738 1739 1740 |
* @q: the futex_q * @key: the key of the requeue target futex * @hb: the hash_bucket of the requeue target futex |
52400ba94
|
1741 1742 1743 1744 1745 |
* * During futex_requeue, with requeue_pi=1, it is possible to acquire the * target futex if it is uncontended or via a lock steal. Set the futex_q key * to the requeue target futex so the waiter can detect the wakeup on the right * futex, but remove it from the hb and NULL the rt_waiter so it can detect |
beda2c7ea
|
1746 1747 1748 |
* atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock * to protect access to the pi_state to fixup the owner later. Must be called * with both q->lock_ptr and hb->lock held. |
52400ba94
|
1749 1750 |
*/ static inline |
beda2c7ea
|
1751 1752 |
void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, struct futex_hash_bucket *hb) |
52400ba94
|
1753 |
{ |
52400ba94
|
1754 1755 |
get_futex_key_refs(key); q->key = *key; |
2e12978a9
|
1756 |
__unqueue_futex(q); |
52400ba94
|
1757 1758 1759 |
WARN_ON(!q->rt_waiter); q->rt_waiter = NULL; |
beda2c7ea
|
1760 |
q->lock_ptr = &hb->lock; |
beda2c7ea
|
1761 |
|
f1a11e057
|
1762 |
wake_up_state(q->task, TASK_NORMAL); |
52400ba94
|
1763 1764 1765 1766 |
} /** * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter |
bab5bc9e8
|
1767 1768 1769 1770 1771 1772 1773 |
* @pifutex: the user address of the to futex * @hb1: the from futex hash bucket, must be locked by the caller * @hb2: the to futex hash bucket, must be locked by the caller * @key1: the from futex key * @key2: the to futex key * @ps: address to store the pi_state pointer * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) |
52400ba94
|
1774 1775 |
* * Try and get the lock on behalf of the top waiter if we can do it atomically. |
bab5bc9e8
|
1776 1777 1778 |
* Wake the top waiter if we succeed. If the caller specified set_waiters, * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. * hb1 and hb2 must be held by the caller. |
52400ba94
|
1779 |
* |
6c23cbbd5
|
1780 |
* Return: |
7b4ff1adb
|
1781 1782 1783 |
* - 0 - failed to acquire the lock atomically; * - >0 - acquired the lock, return value is vpid of the top_waiter * - <0 - error |
52400ba94
|
1784 1785 1786 1787 1788 |
*/ static int futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2, union futex_key *key1, union futex_key *key2, |
bab5bc9e8
|
1789 |
struct futex_pi_state **ps, int set_waiters) |
52400ba94
|
1790 |
{ |
bab5bc9e8
|
1791 |
struct futex_q *top_waiter = NULL; |
52400ba94
|
1792 |
u32 curval; |
866293ee5
|
1793 |
int ret, vpid; |
52400ba94
|
1794 1795 1796 |
if (get_futex_value_locked(&curval, pifutex)) return -EFAULT; |
ab51fbab3
|
1797 1798 |
if (unlikely(should_fail_futex(true))) return -EFAULT; |
bab5bc9e8
|
1799 1800 1801 1802 1803 1804 1805 1806 |
/* * Find the top_waiter and determine if there are additional waiters. * If the caller intends to requeue more than 1 waiter to pifutex, * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now, * as we have means to handle the possible fault. If not, don't set * the bit unnecessarily as it will force the subsequent unlock to enter * the kernel. */
52400ba94
|
1807 1808 1809 1810 1811 |
top_waiter = futex_top_waiter(hb1, key1); /* There are no waiters, nothing for us to do. */ if (!top_waiter) return 0; |
84bc4af59
|
1812 1813 1814 |
/* Ensure we requeue to the expected futex. */ if (!match_futex(top_waiter->requeue_pi_key, key2)) return -EINVAL; |
52400ba94
|
1815 |
/* |
bab5bc9e8
|
1816 1817 1818 |
* Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in * the contended case or if set_waiters is 1. The pi_state is returned * in ps in contended cases. |
52400ba94
|
1819 |
*/ |
866293ee5
|
1820 |
vpid = task_pid_vnr(top_waiter->task); |
bab5bc9e8
|
1821 1822 |
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task, set_waiters); |
866293ee5
|
1823 |
if (ret == 1) { |
beda2c7ea
|
1824 |
requeue_pi_wake_futex(top_waiter, key2, hb2); |
866293ee5
|
1825 1826 |
return vpid; } |
52400ba94
|
1827 1828 1829 1830 1831 |
return ret; } /** * futex_requeue() - Requeue waiters from uaddr1 to uaddr2 |
fb62db2ba
|
1832 |
* @uaddr1: source futex user address |
b41277dc7
|
1833 |
* @flags: futex flags (FLAGS_SHARED, etc.) |
fb62db2ba
|
1834 1835 1836 1837 1838 |
* @uaddr2: target futex user address * @nr_wake: number of waiters to wake (must be 1 for requeue_pi) * @nr_requeue: number of waiters to requeue (0-INT_MAX) * @cmpval: @uaddr1 expected value (or %NULL) * @requeue_pi: if we are attempting to requeue from a non-pi futex to a |
b41277dc7
|
1839 |
* pi futex (pi to pi requeue is not supported) |
52400ba94
|
1840 1841 1842 1843 |
* * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire * uaddr2 atomically on behalf of the top waiter. * |
6c23cbbd5
|
1844 |
* Return: |
7b4ff1adb
|
1845 1846 |
* - >=0 - on success, the number of tasks requeued or woken; * - <0 - on error |
1da177e4c
|
1847 |
*/ |
b41277dc7
|
1848 1849 1850 |
static int futex_requeue(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2, int nr_wake, int nr_requeue, u32 *cmpval, int requeue_pi) |
1da177e4c
|
1851 |
{ |
38d47c1b7
|
1852 |
union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; |
52400ba94
|
1853 1854 |
int drop_count = 0, task_count = 0, ret; struct futex_pi_state *pi_state = NULL; |
e2970f2fb
|
1855 |
struct futex_hash_bucket *hb1, *hb2; |
1da177e4c
|
1856 |
struct futex_q *this, *next; |
194a6b5b9
|
1857 |
DEFINE_WAKE_Q(wake_q); |
52400ba94
|
1858 |
|
fbe0e839d
|
1859 1860 |
if (nr_wake < 0 || nr_requeue < 0) return -EINVAL; |
bc2eecd7e
|
1861 1862 1863 1864 1865 1866 1867 1868 |
/* * When PI is not supported: return -ENOSYS if requeue_pi is true; * consequently the compiler knows requeue_pi is always false past * this point, which optimizes away all the conditional code * further down. */ if (!IS_ENABLED(CONFIG_FUTEX_PI) && requeue_pi) return -ENOSYS;
52400ba94
|
1869 1870 |
if (requeue_pi) { /* |
e9c243a5a
|
1871 1872 1873 1874 1875 1876 1877 |
* Requeue PI only works on two distinct uaddrs. This * check is only valid for private futexes. See below. */ if (uaddr1 == uaddr2) return -EINVAL; /* |
52400ba94
|
1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 |
* requeue_pi requires a pi_state, try to allocate it now * without any locks in case it fails. */ if (refill_pi_state_cache()) return -ENOMEM; /* * requeue_pi must wake as many tasks as it can, up to nr_wake * + nr_requeue, since it acquires the rt_mutex prior to * returning to userspace, so as to not leave the rt_mutex with * waiters and no owner. However, second and third wake-ups * cannot be predicted as they involve race conditions with the * first wake and a fault while looking up the pi_state. Both * pthread_cond_signal() and pthread_cond_broadcast() should * use nr_wake=1. */ if (nr_wake != 1) return -EINVAL; } |
1da177e4c
|
1896 |
|
42d35d48c
|
1897 |
retry: |
96d4f267e
|
1898 |
ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ); |
1da177e4c
|
1899 1900 |
if (unlikely(ret != 0)) goto out; |
9ea71503a
|
1901 |
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, |
96d4f267e
|
1902 |
requeue_pi ? FUTEX_WRITE : FUTEX_READ); |
1da177e4c
|
1903 |
if (unlikely(ret != 0)) |
42d35d48c
|
1904 |
goto out_put_key1; |
1da177e4c
|
1905 |
|
e9c243a5a
|
1906 1907 1908 1909 1910 1911 1912 1913 |
/* * The check above which compares uaddrs is not sufficient for * shared futexes. We need to compare the keys: */ if (requeue_pi && match_futex(&key1, &key2)) { ret = -EINVAL; goto out_put_keys; } |
e2970f2fb
|
1914 1915 |
hb1 = hash_futex(&key1); hb2 = hash_futex(&key2); |
1da177e4c
|
1916 |
|
e4dc5b7a3
|
1917 |
retry_private: |
69cd9eba3
|
1918 |
hb_waiters_inc(hb2); |
8b8f319fc
|
1919 |
double_lock_hb(hb1, hb2); |
1da177e4c
|
1920 |
|
e2970f2fb
|
1921 1922 |
if (likely(cmpval != NULL)) { u32 curval; |
1da177e4c
|
1923 |
|
e2970f2fb
|
1924 |
ret = get_futex_value_locked(&curval, uaddr1); |
1da177e4c
|
1925 1926 |
if (unlikely(ret)) { |
5eb3dc62f
|
1927 |
double_unlock_hb(hb1, hb2); |
69cd9eba3
|
1928 |
hb_waiters_dec(hb2); |
1da177e4c
|
1929 |
|
e2970f2fb
|
1930 |
ret = get_user(curval, uaddr1); |
e4dc5b7a3
|
1931 1932 |
if (ret) goto out_put_keys; |
1da177e4c
|
1933 |
|
b41277dc7
|
1934 |
if (!(flags & FLAGS_SHARED)) |
e4dc5b7a3
|
1935 |
goto retry_private; |
1da177e4c
|
1936 |
|
ae791a2d2
|
1937 1938 |
put_futex_key(&key2); put_futex_key(&key1); |
e4dc5b7a3
|
1939 |
goto retry; |
1da177e4c
|
1940 |
} |
e2970f2fb
|
1941 |
if (curval != *cmpval) { |
1da177e4c
|
1942 1943 1944 1945 |
ret = -EAGAIN; goto out_unlock; } } |
52400ba94
|
1946 |
if (requeue_pi && (task_count - nr_wake < nr_requeue)) { |
bab5bc9e8
|
1947 1948 1949 1950 1951 1952 |
/* * Attempt to acquire uaddr2 and wake the top waiter. If we * intend to requeue waiters, force setting the FUTEX_WAITERS * bit. We force this here where we are able to easily handle * faults rather than in the requeue loop below. */
52400ba94
|
1953 |
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1, |
bab5bc9e8
|
1954 |
&key2, &pi_state, nr_requeue); |
52400ba94
|
1955 1956 1957 1958 1959 |
/* * At this point the top_waiter has either taken uaddr2 or is * waiting on it. If the former, then the pi_state will not * exist yet, look it up one more time to ensure we have a |
866293ee5
|
1960 1961 |
* reference to it. If the lock was taken, ret contains the * vpid of the top waiter task. |
ecb38b78f
|
1962 1963 |
* If the lock was not taken, we have pi_state and an initial * refcount on it. In case of an error we have nothing. |
52400ba94
|
1964 |
*/ |
866293ee5
|
1965 |
if (ret > 0) { |
52400ba94
|
1966 |
WARN_ON(pi_state); |
89061d3d5
|
1967 |
drop_count++; |
52400ba94
|
1968 |
task_count++; |
866293ee5
|
1969 |
/* |
ecb38b78f
|
1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 |
* If we acquired the lock, then the user space value * of uaddr2 should be vpid. It cannot be changed by * the top waiter as it is blocked on hb2 lock if it * tries to do so. If something fiddled with it behind * our back the pi state lookup might unearth it. So * we use the known value rather than rereading it and * handing potential crap to lookup_pi_state. * * If that call succeeds then we have pi_state and an * initial refcount on it.
866293ee5
|
1980 |
*/ |
734009e96
|
1981 |
ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state); |
52400ba94
|
1982 1983 1984 1985 |
} switch (ret) { case 0: |
ecb38b78f
|
1986 |
/* We hold a reference on the pi state. */ |
52400ba94
|
1987 |
break; |
4959f2de1
|
1988 1989 |
/* If the above failed, then pi_state is NULL */ |
52400ba94
|
1990 1991 |
case -EFAULT: double_unlock_hb(hb1, hb2); |
69cd9eba3
|
1992 |
hb_waiters_dec(hb2); |
ae791a2d2
|
1993 1994 |
put_futex_key(&key2); put_futex_key(&key1); |
d0725992c
|
1995 |
ret = fault_in_user_writeable(uaddr2); |
52400ba94
|
1996 1997 1998 1999 |
if (!ret) goto retry; goto out; case -EAGAIN: |
af54d6a1c
|
2000 2001 2002 2003 2004 2005 |
/* * Two reasons for this: * - Owner is exiting and we just wait for the * exit to complete. * - The user space value changed. */ |
52400ba94
|
2006 |
double_unlock_hb(hb1, hb2); |
69cd9eba3
|
2007 |
hb_waiters_dec(hb2); |
ae791a2d2
|
2008 2009 |
put_futex_key(&key2); put_futex_key(&key1); |
52400ba94
|
2010 2011 2012 2013 2014 2015 |
cond_resched(); goto retry; default: goto out_unlock; } } |
0d00c7b20
|
2016 |
plist_for_each_entry_safe(this, next, &hb1->chain, list) { |
52400ba94
|
2017 2018 2019 2020 |
if (task_count - nr_wake >= nr_requeue) break; if (!match_futex(&this->key, &key1)) |
1da177e4c
|
2021 |
continue; |
52400ba94
|
2022 |
|
392741e0a
|
2023 2024 2025 |
/* * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always * be paired with each other and no other futex ops.
aa10990e0
|
2026 2027 2028 |
* * We should never be requeueing a futex_q with a pi_state, * which is awaiting a futex_unlock_pi(). |
392741e0a
|
2029 2030 |
*/ if ((requeue_pi && !this->rt_waiter) || |
aa10990e0
|
2031 2032 |
(!requeue_pi && this->rt_waiter) || this->pi_state) { |
392741e0a
|
2033 2034 2035 |
ret = -EINVAL; break; } |
52400ba94
|
2036 2037 2038 2039 2040 2041 2042 |
/* * Wake nr_wake waiters. For requeue_pi, if we acquired the * lock, we already woke the top_waiter. If not, it will be * woken by futex_unlock_pi(). */ if (++task_count <= nr_wake && !requeue_pi) { |
1d0dcb3ad
|
2043 |
mark_wake_futex(&wake_q, this); |
52400ba94
|
2044 2045 |
continue; } |
1da177e4c
|
2046 |
|
84bc4af59
|
2047 2048 2049 2050 2051 |
/* Ensure we requeue to the expected futex for requeue_pi. */ if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) { ret = -EINVAL; break; } |
52400ba94
|
2052 2053 2054 2055 2056 |
/* * Requeue nr_requeue waiters and possibly one more in the case * of requeue_pi if we couldn't acquire the lock atomically. */ if (requeue_pi) { |
ecb38b78f
|
2057 2058 2059 2060 2061 |
/* * Prepare the waiter to take the rt_mutex. Take a * refcount on the pi_state and store the pointer in * the futex_q object of the waiter. */ |
bf92cf3a5
|
2062 |
get_pi_state(pi_state); |
52400ba94
|
2063 2064 2065 |
this->pi_state = pi_state; ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, this->rt_waiter, |
c051b21f7
|
2066 |
this->task); |
52400ba94
|
2067 |
if (ret == 1) { |
ecb38b78f
|
2068 2069 2070 2071 2072 2073 2074 2075 |
/* * We got the lock. We do neither drop the * refcount on pi_state nor clear * this->pi_state because the waiter needs the * pi_state for cleaning up the user space * value. It will drop the refcount after * doing so. */ |
beda2c7ea
|
2076 |
requeue_pi_wake_futex(this, &key2, hb2); |
89061d3d5
|
2077 |
drop_count++; |
52400ba94
|
2078 2079 |
continue; } else if (ret) { |
ecb38b78f
|
2080 2081 2082 2083 2084 2085 2086 2087 |
/* * rt_mutex_start_proxy_lock() detected a * potential deadlock when we tried to queue * that waiter. Drop the pi_state reference * which we took above and remove the pointer * to the state from the waiter's futex_q * object. */
52400ba94
|
2088 |
this->pi_state = NULL; |
29e9ee5d4
|
2089 |
put_pi_state(pi_state); |
885c2cb77
|
2090 2091 2092 2093 2094 |
/* * We stop queueing more waiters and let user * space deal with the mess. */ break; |
52400ba94
|
2095 |
} |
1da177e4c
|
2096 |
} |
52400ba94
|
2097 2098 |
requeue_futex(this, hb1, hb2, &key2); drop_count++; |
1da177e4c
|
2099 |
} |
ecb38b78f
|
2100 2101 2102 2103 2104 |
/* * We took an extra initial reference to the pi_state either * in futex_proxy_trylock_atomic() or in lookup_pi_state(). We * need to drop it here again. */ |
29e9ee5d4
|
2105 |
put_pi_state(pi_state); |
885c2cb77
|
2106 2107 |
out_unlock: |
5eb3dc62f
|
2108 |
double_unlock_hb(hb1, hb2); |
1d0dcb3ad
|
2109 |
wake_up_q(&wake_q); |
69cd9eba3
|
2110 |
hb_waiters_dec(hb2); |
1da177e4c
|
2111 |
|
cd84a42f3
|
2112 2113 2114 2115 2116 2117 |
/* * drop_futex_key_refs() must be called outside the spinlocks. During * the requeue we moved futex_q's from the hash bucket at key1 to the * one at key2 and updated their key pointer. We no longer need to * hold the references to key1. */ |
1da177e4c
|
2118 |
while (--drop_count >= 0) |
9adef58b1
|
2119 |
drop_futex_key_refs(&key1); |
1da177e4c
|
2120 |
|
42d35d48c
|
2121 |
out_put_keys: |
ae791a2d2
|
2122 |
put_futex_key(&key2); |
42d35d48c
|
2123 |
out_put_key1: |
ae791a2d2
|
2124 |
put_futex_key(&key1); |
42d35d48c
|
2125 |
out: |
52400ba94
|
2126 |
return ret ? ret : task_count; |
1da177e4c
|
2127 2128 2129 |
} /* The key must be already stored in q->key. */ |
82af7aca5
|
2130 |
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) |
15e408cd6
|
2131 |
__acquires(&hb->lock) |
1da177e4c
|
2132 |
{ |
e2970f2fb
|
2133 |
struct futex_hash_bucket *hb; |
1da177e4c
|
2134 |
|
e2970f2fb
|
2135 |
hb = hash_futex(&q->key); |
11d4616bd
|
2136 2137 2138 2139 2140 2141 2142 2143 2144 |
/* * Increment the counter before taking the lock so that * a potential waker won't miss an about-to-sleep task that is * waiting for the spinlock. This is safe as all queue_lock() * users end up calling queue_me(). Similarly, for housekeeping, * decrement the counter at queue_unlock() when some error has * occurred and we don't end up adding the task to the list. */
6f568ebe2
|
2145 |
hb_waiters_inc(hb); /* implies smp_mb(); (A) */ |
11d4616bd
|
2146 |
|
e2970f2fb
|
2147 |
q->lock_ptr = &hb->lock; |
1da177e4c
|
2148 |
|
6f568ebe2
|
2149 |
spin_lock(&hb->lock); |
e2970f2fb
|
2150 |
return hb; |
1da177e4c
|
2151 |
} |
d40d65c8d
|
2152 |
static inline void |
0d00c7b20
|
2153 |
queue_unlock(struct futex_hash_bucket *hb) |
15e408cd6
|
2154 |
__releases(&hb->lock) |
d40d65c8d
|
2155 2156 |
{ spin_unlock(&hb->lock); |
11d4616bd
|
2157 |
hb_waiters_dec(hb); |
d40d65c8d
|
2158 |
} |
cfafcd117
|
2159 |
static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) |
1da177e4c
|
2160 |
{ |
ec92d0829
|
2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 |
int prio; /* * The priority used to register this element is * - either the real thread-priority for the real-time threads * (i.e. threads with a priority lower than MAX_RT_PRIO) * - or MAX_RT_PRIO for non-RT threads. * Thus, all RT-threads are woken first in priority order, and * the others are woken last, in FIFO order. */ prio = min(current->normal_prio, MAX_RT_PRIO); plist_node_init(&q->list, prio); |
ec92d0829
|
2174 |
plist_add(&q->list, &hb->chain); |
c87e2837b
|
2175 |
q->task = current; |
cfafcd117
|
2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 |
} /** * queue_me() - Enqueue the futex_q on the futex_hash_bucket * @q: The futex_q to enqueue * @hb: The destination hash bucket * * The hb->lock must be held by the caller, and is released here. A call to * queue_me() is typically paired with exactly one call to unqueue_me(). The * exceptions involve the PI related operations, which may use unqueue_me_pi() * or nothing if the unqueue is done as part of the wake process and the unqueue * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for * an example). */ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) __releases(&hb->lock) { __queue_me(q, hb);
e2970f2fb
|
2194 |
spin_unlock(&hb->lock); |
1da177e4c
|
2195 |
} |
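The plist behaviour __queue_me() relies on, in miniature: waiters stay sorted by priority, and a new waiter goes behind every existing waiter of equal priority, which is what makes non-RT wakeups FIFO. A toy singly-linked sketch, not the kernel's plist implementation:

struct waiter {
        int prio;               /* lower value = higher priority */
        struct waiter *next;
};

static void enqueue_waiter(struct waiter **head, struct waiter *w)
{
        struct waiter **p = head;

        /* Skip past all waiters of higher or equal priority: FIFO
         * within a priority level, strict ordering across levels. */
        while (*p && (*p)->prio <= w->prio)
                p = &(*p)->next;
        w->next = *p;
        *p = w;
}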
d40d65c8d
|
2196 2197 2198 2199 2200 2201 2202 |
/** * unqueue_me() - Remove the futex_q from its futex_hash_bucket * @q: The futex_q to unqueue * * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must * be paired with exactly one earlier call to queue_me(). * |
6c23cbbd5
|
2203 |
* Return: |
7b4ff1adb
|
2204 2205 |
* - 1 - if the futex_q was still queued (and we unqueued it); * - 0 - if the futex_q was already removed by the waking thread
1da177e4c
|
2206 |
*/ |
1da177e4c
|
2207 2208 |
static int unqueue_me(struct futex_q *q) { |
1da177e4c
|
2209 |
spinlock_t *lock_ptr; |
e2970f2fb
|
2210 |
int ret = 0; |
1da177e4c
|
2211 2212 |
/* In the common case we don't take the spinlock, which is nice. */ |
42d35d48c
|
2213 |
retry: |
29b75eb2d
|
2214 2215 2216 2217 2218 2219 |
/* * q->lock_ptr can change between this read and the following spin_lock. * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and * optimizing lock_ptr out of the logic below. */ lock_ptr = READ_ONCE(q->lock_ptr); |
c80544dc0
|
2220 |
if (lock_ptr != NULL) { |
1da177e4c
|
2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 |
spin_lock(lock_ptr); /* * q->lock_ptr can change between reading it and * spin_lock(), causing us to take the wrong lock. This * corrects the race condition. * * Reasoning goes like this: if we have the wrong lock, * q->lock_ptr must have changed (maybe several times) * between reading it and the spin_lock(). It can * change again after the spin_lock() but only if it was * already changed before the spin_lock(). It cannot, * however, change back to the original value. Therefore * we can detect whether we acquired the correct lock. */ if (unlikely(lock_ptr != q->lock_ptr)) { spin_unlock(lock_ptr); goto retry; } |
2e12978a9
|
2239 |
__unqueue_futex(q); |
c87e2837b
|
2240 2241 |
BUG_ON(q->pi_state); |
1da177e4c
|
2242 2243 2244 |
spin_unlock(lock_ptr); ret = 1; } |
9adef58b1
|
2245 |
drop_futex_key_refs(&q->key); |
1da177e4c
|
2246 2247 |
return ret; } |
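The lock_ptr dance above is a reusable pattern: snapshot a lock pointer, take that lock, then re-check the pointer before trusting it. A user-space sketch with C11 atomics and pthreads, where the atomic load plays the role of READ_ONCE():

#include <pthread.h>
#include <stdatomic.h>

struct qnode {
        _Atomic(pthread_mutex_t *) lock_ptr;    /* NULL once dequeued */
};

static pthread_mutex_t *lock_qnode(struct qnode *n)
{
        for (;;) {
                pthread_mutex_t *lock = atomic_load(&n->lock_ptr);

                if (!lock)
                        return NULL;            /* already dequeued */
                pthread_mutex_lock(lock);
                if (lock == atomic_load(&n->lock_ptr))
                        return lock;            /* correct lock held */
                pthread_mutex_unlock(lock);     /* it moved; retry */
        }
}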
c87e2837b
|
2248 2249 |
/* * PI futexes cannot be requeued and must remove themselves from the
d0aa7a70b
|
2250 2251 |
* hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry * and dropped here. |
c87e2837b
|
2252 |
*/ |
d0aa7a70b
|
2253 |
static void unqueue_me_pi(struct futex_q *q) |
15e408cd6
|
2254 |
__releases(q->lock_ptr) |
c87e2837b
|
2255 |
{ |
2e12978a9
|
2256 |
__unqueue_futex(q); |
c87e2837b
|
2257 2258 |
BUG_ON(!q->pi_state); |
29e9ee5d4
|
2259 |
put_pi_state(q->pi_state); |
c87e2837b
|
2260 |
q->pi_state = NULL; |
d0aa7a70b
|
2261 |
spin_unlock(q->lock_ptr); |
c87e2837b
|
2262 |
} |
778e9a9c3
|
2263 |
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
c1e2f0eaf
|
2264 |
struct task_struct *argowner) |
d0aa7a70b
|
2265 |
{ |
d0aa7a70b
|
2266 |
struct futex_pi_state *pi_state = q->pi_state; |
7cfdaf38d
|
2267 |
u32 uval, uninitialized_var(curval), newval; |
c1e2f0eaf
|
2268 2269 |
struct task_struct *oldowner, *newowner; u32 newtid; |
6b4f4bc9c
|
2270 |
int ret, err = 0; |
d0aa7a70b
|
2271 |
|
c1e2f0eaf
|
2272 |
lockdep_assert_held(q->lock_ptr); |
734009e96
|
2273 2274 2275 |
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); oldowner = pi_state->owner; |
1b7558e45
|
2276 2277 |
/* |
c1e2f0eaf
|
2278 |
* We are here because either: |
16ffa12d7
|
2279 |
* |
c1e2f0eaf
|
2280 2281 2282 2283 2284 2285 2286 2287 2288 |
* - we stole the lock and pi_state->owner needs updating to reflect * that (@argowner == current), * * or: * * - someone stole our lock and we need to fix things to point to the * new owner (@argowner == NULL). * * Either way, we have to replace the TID in the user space variable. |
8161239a8
|
2289 |
* This must be atomic as we have to preserve the owner died bit here. |
1b7558e45
|
2290 |
* |
b2d0994b1
|
2291 2292 2293 |
* Note: We write the user space value _before_ changing the pi_state * because we can fault here. Imagine swapped out pages or a fork * that marked all the anonymous memory read-only for COW.
1b7558e45
|
2294 |
* |
734009e96
|
2295 2296 2297 2298 |
* Modifying pi_state _before_ the user space value would leave the * pi_state in an inconsistent state when we fault here, because we * need to drop the locks to handle the fault. This might be observed * in the PID check in lookup_pi_state. |
1b7558e45
|
2299 2300 |
*/ retry: |
c1e2f0eaf
|
2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 |
if (!argowner) { if (oldowner != current) { /* * We raced against a concurrent self; things are * already fixed up. Nothing to do. */ ret = 0; goto out_unlock; } if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) { /* We got the lock after all, nothing to fix. */ ret = 0; goto out_unlock; } /* * Since we just failed the trylock, there must be an owner. */ newowner = rt_mutex_owner(&pi_state->pi_mutex); BUG_ON(!newowner); } else { WARN_ON_ONCE(argowner != current); if (oldowner == current) { /* * We raced against a concurrent self; things are * already fixed up. Nothing to do. */ ret = 0; goto out_unlock; } newowner = argowner; } newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
a97cb0e7b
|
2336 2337 2338 |
/* Owner died? */ if (!pi_state->owner) newtid |= FUTEX_OWNER_DIED; |
c1e2f0eaf
|
2339 |
|
6b4f4bc9c
|
2340 2341 2342 |
err = get_futex_value_locked(&uval, uaddr); if (err) goto handle_err; |
1b7558e45
|
2343 |
|
16ffa12d7
|
2344 |
for (;;) { |
1b7558e45
|
2345 |
newval = (uval & FUTEX_OWNER_DIED) | newtid; |
6b4f4bc9c
|
2346 2347 2348 |
err = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval); if (err) goto handle_err; |
1b7558e45
|
2349 2350 2351 2352 2353 2354 2355 2356 2357 |
if (curval == uval) break; uval = curval; } /* * We fixed up user space. Now we need to fix the pi_state * itself. */ |
d0aa7a70b
|
2358 |
if (pi_state->owner != NULL) { |
734009e96
|
2359 |
raw_spin_lock(&pi_state->owner->pi_lock); |
d0aa7a70b
|
2360 2361 |
WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); |
734009e96
|
2362 |
raw_spin_unlock(&pi_state->owner->pi_lock); |
1b7558e45
|
2363 |
} |
d0aa7a70b
|
2364 |
|
cdf71a10c
|
2365 |
pi_state->owner = newowner; |
d0aa7a70b
|
2366 |
|
734009e96
|
2367 |
raw_spin_lock(&newowner->pi_lock); |
d0aa7a70b
|
2368 |
WARN_ON(!list_empty(&pi_state->list)); |
cdf71a10c
|
2369 |
list_add(&pi_state->list, &newowner->pi_state_list); |
734009e96
|
2370 2371 |
raw_spin_unlock(&newowner->pi_lock); raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
1b7558e45
|
2372 |
return 0; |
d0aa7a70b
|
2373 |
|
d0aa7a70b
|
2374 |
/* |
6b4f4bc9c
|
2375 2376 2377 2378 2379 2380 2381 |
* In order to reschedule or handle a page fault, we need to drop the * locks here. In the case of a fault, this gives the other task * (either the highest priority waiter itself or the task which stole * the rtmutex) the chance to try the fixup of the pi_state. So once we * are back from handling the fault we need to check the pi_state after * reacquiring the locks and before trying to do another fixup. When * the fixup has been done already we simply return. |
734009e96
|
2382 2383 2384 2385 |
* * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely * drop hb->lock since the caller owns the hb -> futex_q relation. * Dropping the pi_mutex->wait_lock requires the state to be revalidated.
d0aa7a70b
|
2386 |
*/ |
6b4f4bc9c
|
2387 |
handle_err: |
734009e96
|
2388 |
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); |
1b7558e45
|
2389 |
spin_unlock(q->lock_ptr); |
778e9a9c3
|
2390 |
|
6b4f4bc9c
|
2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 |
switch (err) { case -EFAULT: ret = fault_in_user_writeable(uaddr); break; case -EAGAIN: cond_resched(); ret = 0; break; default: WARN_ON_ONCE(1); ret = err; break; } |
778e9a9c3
|
2406 |
|
1b7558e45
|
2407 |
spin_lock(q->lock_ptr); |
734009e96
|
2408 |
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
778e9a9c3
|
2409 |
|
1b7558e45
|
2410 2411 2412 |
/* * Check if someone else fixed it for us: */ |
734009e96
|
2413 2414 2415 2416 |
if (pi_state->owner != oldowner) { ret = 0; goto out_unlock; } |
1b7558e45
|
2417 2418 |
if (ret) |
734009e96
|
2419 |
goto out_unlock; |
1b7558e45
|
2420 2421 |
goto retry; |
734009e96
|
2422 2423 2424 2425 |
out_unlock: raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); return ret; |
d0aa7a70b
|
2426 |
} |
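The retry loop at the heart of this function is the standard compare-exchange pattern for replacing one field of a word while carrying a flag bit across. A user-space sketch in C11 atomics; OWNER_DIED is a stand-in for FUTEX_OWNER_DIED:

#include <stdatomic.h>
#include <stdint.h>

#define OWNER_DIED      0x40000000u     /* stand-in for FUTEX_OWNER_DIED */

static void set_owner_tid(_Atomic uint32_t *futex_word, uint32_t newtid)
{
        uint32_t uval = atomic_load(futex_word);
        uint32_t newval;

        /* On failure, compare_exchange reloads uval from the word,
         * just as the loop above refreshes uval from curval. */
        do {
                newval = (uval & OWNER_DIED) | newtid;
        } while (!atomic_compare_exchange_weak(futex_word, &uval, newval));
}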
72c1bbf30
|
2427 |
static long futex_wait_restart(struct restart_block *restart); |
36cf3b5c3
|
2428 |
|
ca5f9524d
|
2429 |
/** |
dd9739980
|
2430 2431 |
* fixup_owner() - Post lock pi_state and corner case management * @uaddr: user address of the futex |
dd9739980
|
2432 2433 2434 2435 2436 2437 2438 |
* @q: futex_q (contains pi_state and access to the rt_mutex) * @locked: if the attempt to take the rt_mutex succeeded (1) or not (0) * * After attempting to lock an rt_mutex, this function is called to cleanup * the pi_state owner as well as handle race conditions that may allow us to * acquire the lock. Must be called with the hb lock held. * |
6c23cbbd5
|
2439 |
* Return: |
7b4ff1adb
|
2440 2441 2442 |
* - 1 - success, lock taken; * - 0 - success, lock not taken; * - <0 - on error (-EFAULT) |
dd9739980
|
2443 |
*/ |
ae791a2d2
|
2444 |
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) |
dd9739980
|
2445 |
{ |
dd9739980
|
2446 2447 2448 2449 2450 2451 |
int ret = 0; if (locked) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case: |
16ffa12d7
|
2452 |
* |
c1e2f0eaf
|
2453 2454 2455 |
* Speculative pi_state->owner read (we don't hold wait_lock); * since we own the lock pi_state->owner == current is the * stable state, anything else needs more attention. |
dd9739980
|
2456 2457 |
*/ if (q->pi_state->owner != current) |
ae791a2d2
|
2458 |
ret = fixup_pi_state_owner(uaddr, q, current); |
dd9739980
|
2459 2460 2461 2462 |
goto out; } /* |
c1e2f0eaf
|
2463 2464 2465 2466 2467 2468 2469 2470 2471 2472 2473 2474 2475 |
* If we didn't get the lock, check if anybody stole it from us. In * that case, we need to fix up the uval to point to them instead of * us, otherwise bad things happen. [10] * * Another speculative read; pi_state->owner == current is unstable * but needs our attention. */ if (q->pi_state->owner == current) { ret = fixup_pi_state_owner(uaddr, q, NULL); goto out; } /*
dd9739980
|
2476 |
* Paranoia check. If we did not take the lock, then we should not be |
8161239a8
|
2477 |
* the owner of the rt_mutex. |
dd9739980
|
2478 |
*/ |
73d786bd0
|
2479 |
if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) { |
dd9739980
|
2480 2481 2482 2483 2484 |
printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " "pi-state %p ", ret, q->pi_state->pi_mutex.owner, q->pi_state->owner); |
73d786bd0
|
2485 |
} |
dd9739980
|
2486 2487 2488 2489 2490 2491 |
out: return ret ? ret : locked; } /** |
ca5f9524d
|
2492 2493 2494 2495 |
* futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal * @hb: the futex hash bucket, must be locked by the caller * @q: the futex_q to queue up on * @timeout: the prepared hrtimer_sleeper, or null for no timeout |
ca5f9524d
|
2496 2497 |
*/ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, |
f1a11e057
|
2498 |
struct hrtimer_sleeper *timeout) |
ca5f9524d
|
2499 |
{ |
9beba3c54
|
2500 2501 |
/* * The task state is guaranteed to be set before another task can |
b92b8b35a
|
2502 |
* wake it. set_current_state() is implemented using smp_store_mb() and |
9beba3c54
|
2503 2504 2505 |
* queue_me() calls spin_unlock() upon completion, both serializing * access to the hash list and forcing another memory barrier. */ |
f1a11e057
|
2506 |
set_current_state(TASK_INTERRUPTIBLE); |
0729e1961
|
2507 |
queue_me(q, hb); |
ca5f9524d
|
2508 2509 |
/* Arm the timer */ |
2e4b0d3fe
|
2510 |
if (timeout) |
ca5f9524d
|
2511 |
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); |
ca5f9524d
|
2512 2513 |
/* |
0729e1961
|
2514 2515 |
* If we have been removed from the hash list, then another task * has tried to wake us, and we can skip the call to schedule(). |
ca5f9524d
|
2516 2517 2518 2519 2520 2521 2522 2523 |
*/ if (likely(!plist_node_empty(&q->list))) { /* * If the timer has already expired, current will already be * flagged for rescheduling. Only call schedule if there * is no timeout, or if it has yet to expire. */ if (!timeout || timeout->task) |
88c8004fd
|
2524 |
freezable_schedule(); |
ca5f9524d
|
2525 2526 2527 |
} __set_current_state(TASK_RUNNING); } |
f801073f8
|
2528 2529 2530 2531 |
/** * futex_wait_setup() - Prepare to wait on a futex * @uaddr: the futex userspace address * @val: the expected value |
b41277dc7
|
2532 |
* @flags: futex flags (FLAGS_SHARED, etc.) |
f801073f8
|
2533 2534 2535 2536 2537 2538 2539 2540 |
* @q: the associated futex_q * @hb: storage for hash_bucket pointer to be returned to caller * * Setup the futex_q and locate the hash_bucket. Get the futex value and * compare it with the expected value. Handle atomic faults internally. * Return with the hb lock held and a q.key reference on success, and unlocked * with no q.key reference on failure. * |
6c23cbbd5
|
2541 |
* Return: |
7b4ff1adb
|
2542 2543 |
* - 0 - uaddr contains val and hb has been locked; * - <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
f801073f8
|
2544 |
*/ |
b41277dc7
|
2545 |
static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, |
f801073f8
|
2546 |
struct futex_q *q, struct futex_hash_bucket **hb) |
1da177e4c
|
2547 |
{ |
e2970f2fb
|
2548 2549 |
u32 uval; int ret; |
1da177e4c
|
2550 |
|
1da177e4c
|
2551 |
/* |
b2d0994b1
|
2552 |
* Access the page AFTER the hash-bucket is locked. |
1da177e4c
|
2553 2554 2555 2556 2557 2558 2559 |
* Order is important: * * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); * Userspace waker: if (cond(var)) { var = new; futex_wake(&var); } * * The basic logical guarantee of a futex is that it blocks ONLY * if cond(var) is known to be true at the time of blocking, for |
8fe8f545c
|
2560 2561 |
* any cond. If we locked the hash-bucket after testing *uaddr, that * would open a race condition where we could block indefinitely with |
1da177e4c
|
2562 2563 |
* cond(var) false, which would violate the guarantee. * |
8fe8f545c
|
2564 2565 2566 2567 |
* On the other hand, we insert q and release the hash-bucket only * after testing *uaddr. This guarantees that futex_wait() will NOT * absorb a wakeup if *uaddr does not match the desired values * while the syscall executes. |
1da177e4c
|
2568 |
*/ |
f801073f8
|
2569 |
retry: |
96d4f267e
|
2570 |
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ); |
f801073f8
|
2571 |
if (unlikely(ret != 0)) |
a5a2a0c7f
|
2572 |
return ret; |
f801073f8
|
2573 2574 2575 |
retry_private: *hb = queue_lock(q); |
e2970f2fb
|
2576 |
ret = get_futex_value_locked(&uval, uaddr); |
1da177e4c
|
2577 |
|
f801073f8
|
2578 |
if (ret) { |
0d00c7b20
|
2579 |
queue_unlock(*hb); |
1da177e4c
|
2580 |
|
e2970f2fb
|
2581 |
ret = get_user(uval, uaddr); |
e4dc5b7a3
|
2582 |
if (ret) |
f801073f8
|
2583 |
goto out; |
1da177e4c
|
2584 |
|
b41277dc7
|
2585 |
if (!(flags & FLAGS_SHARED)) |
e4dc5b7a3
|
2586 |
goto retry_private; |
ae791a2d2
|
2587 |
put_futex_key(&q->key); |
e4dc5b7a3
|
2588 |
goto retry; |
1da177e4c
|
2589 |
} |
ca5f9524d
|
2590 |
|
f801073f8
|
2591 |
if (uval != val) { |
0d00c7b20
|
2592 |
queue_unlock(*hb); |
f801073f8
|
2593 |
ret = -EWOULDBLOCK; |
2fff78c78
|
2594 |
} |
1da177e4c
|
2595 |
|
f801073f8
|
2596 2597 |
out: if (ret) |
ae791a2d2
|
2598 |
put_futex_key(&q->key); |
f801073f8
|
2599 2600 |
return ret; } |
b41277dc7
|
2601 2602 |
static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time, u32 bitset) |
f801073f8
|
2603 |
{ |
5ca584d93
|
2604 |
struct hrtimer_sleeper timeout, *to; |
f801073f8
|
2605 2606 |
struct restart_block *restart; struct futex_hash_bucket *hb; |
5bdb05f91
|
2607 |
struct futex_q q = futex_q_init; |
f801073f8
|
2608 2609 2610 2611 |
int ret; if (!bitset) return -EINVAL; |
f801073f8
|
2612 |
q.bitset = bitset; |
5ca584d93
|
2613 2614 |
to = futex_setup_timer(abs_time, &timeout, flags, current->timer_slack_ns); |
d58e6576b
|
2615 |
retry: |
7ada876a8
|
2616 2617 2618 2619 |
/* * Prepare to wait on uaddr. On success, holds hb lock and increments * q.key refs. */ |
b41277dc7
|
2620 |
ret = futex_wait_setup(uaddr, val, flags, &q, &hb); |
f801073f8
|
2621 2622 |
if (ret) goto out; |
ca5f9524d
|
2623 |
/* queue_me and wait for wakeup, timeout, or a signal. */ |
f1a11e057
|
2624 |
futex_wait_queue_me(hb, &q, to); |
1da177e4c
|
2625 2626 |
/* If we were woken (and unqueued), we succeeded, whatever. */ |
2fff78c78
|
2627 |
ret = 0; |
7ada876a8
|
2628 |
/* unqueue_me() drops q.key ref */ |
1da177e4c
|
2629 |
if (!unqueue_me(&q)) |
7ada876a8
|
2630 |
goto out; |
2fff78c78
|
2631 |
ret = -ETIMEDOUT; |
ca5f9524d
|
2632 |
if (to && !to->task) |
7ada876a8
|
2633 |
goto out; |
72c1bbf30
|
2634 |
|
e2970f2fb
|
2635 |
/* |
d58e6576b
|
2636 2637 |
* We expect signal_pending(current), but we might be the * victim of a spurious wakeup as well. |
e2970f2fb
|
2638 |
*/ |
7ada876a8
|
2639 |
if (!signal_pending(current)) |
d58e6576b
|
2640 |
goto retry; |
d58e6576b
|
2641 |
|
2fff78c78
|
2642 |
ret = -ERESTARTSYS; |
c19384b5b
|
2643 |
if (!abs_time) |
7ada876a8
|
2644 |
goto out; |
1da177e4c
|
2645 |
|
f56141e3e
|
2646 |
restart = &current->restart_block;
2fff78c78
|
2647 |
restart->fn = futex_wait_restart; |
a3c74c525
|
2648 |
restart->futex.uaddr = uaddr; |
2fff78c78
|
2649 |
restart->futex.val = val; |
2456e8553
|
2650 |
restart->futex.time = *abs_time; |
2fff78c78
|
2651 |
restart->futex.bitset = bitset; |
0cd9c6494
|
2652 |
restart->futex.flags = flags | FLAGS_HAS_TIMEOUT; |
42d35d48c
|
2653 |
|
2fff78c78
|
2654 |
ret = -ERESTART_RESTARTBLOCK; |
42d35d48c
|
2655 |
out: |
ca5f9524d
|
2656 2657 2658 2659 |
if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } |
c87e2837b
|
2660 2661 |
return ret; } |
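From user space, futex_wait()'s outcomes surface as errno values. A hedged sketch of a bounded wait; note plain FUTEX_WAIT takes a relative timeout, and futex_wait_timed is an illustrative wrapper:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <time.h>
#include <errno.h>

/*
 * Returns 0 on wakeup, -ETIMEDOUT when the timer fired, -EINTR on a
 * signal (the kernel's -ERESTARTSYS / restart block machinery is
 * invisible here), and -EAGAIN when *uaddr no longer held @val.
 */
static int futex_wait_timed(uint32_t *uaddr, uint32_t val, long ms)
{
        struct timespec ts = {
                .tv_sec  = ms / 1000,
                .tv_nsec = (ms % 1000) * 1000000L,
        };

        if (syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, val,
                    &ts, NULL, 0) == 0)
                return 0;
        return -errno;
}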
72c1bbf30
|
2662 2663 2664 |
static long futex_wait_restart(struct restart_block *restart) { |
a3c74c525
|
2665 |
u32 __user *uaddr = restart->futex.uaddr; |
a72188d8a
|
2666 |
ktime_t t, *tp = NULL; |
72c1bbf30
|
2667 |
|
a72188d8a
|
2668 |
if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { |
2456e8553
|
2669 |
t = restart->futex.time; |
a72188d8a
|
2670 2671 |
tp = &t; } |
72c1bbf30
|
2672 |
restart->fn = do_no_restart_syscall; |
b41277dc7
|
2673 2674 2675 |
return (long)futex_wait(uaddr, restart->futex.flags, restart->futex.val, tp, restart->futex.bitset); |
72c1bbf30
|
2676 |
} |
c87e2837b
|
2677 2678 2679 |
/* * Userspace tried a 0 -> TID atomic transition of the futex value * and failed. The kernel side here does the whole locking operation: |
767f509ca
|
2680 2681 2682 2683 2684 |
* if there are waiters then it will block as a consequence of relying * on rt-mutexes, it does PI, etc. (Due to races the kernel might see * a 0 value of the futex too.) * * Also serves as the futex trylock_pi() operation, with the * corresponding semantics.
c87e2837b
|
2685 |
*/ |
996636dda
|
2686 |
static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, |
b41277dc7
|
2687 |
ktime_t *time, int trylock) |
c87e2837b
|
2688 |
{ |
5ca584d93
|
2689 |
struct hrtimer_sleeper timeout, *to; |
16ffa12d7
|
2690 |
struct futex_pi_state *pi_state = NULL; |
cfafcd117
|
2691 |
struct rt_mutex_waiter rt_waiter; |
c87e2837b
|
2692 |
struct futex_hash_bucket *hb; |
5bdb05f91
|
2693 |
struct futex_q q = futex_q_init; |
dd9739980
|
2694 |
int res, ret; |
c87e2837b
|
2695 |
|
bc2eecd7e
|
2696 2697 |
if (!IS_ENABLED(CONFIG_FUTEX_PI)) return -ENOSYS; |
c87e2837b
|
2698 2699 |
if (refill_pi_state_cache()) return -ENOMEM; |
5ca584d93
|
2700 |
to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0); |
c5780e976
|
2701 |
|
42d35d48c
|
2702 |
retry: |
96d4f267e
|
2703 |
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE); |
c87e2837b
|
2704 |
if (unlikely(ret != 0)) |
42d35d48c
|
2705 |
goto out; |
c87e2837b
|
2706 |
|
e4dc5b7a3
|
2707 |
retry_private: |
82af7aca5
|
2708 |
hb = queue_lock(&q); |
c87e2837b
|
2709 |
|
bab5bc9e8
|
2710 |
ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0); |
c87e2837b
|
2711 |
if (unlikely(ret)) { |
767f509ca
|
2712 2713 2714 2715 |
/* * Atomic work succeeded and we got the lock, * or failed. Either way, we do _not_ block. */ |
778e9a9c3
|
2716 |
switch (ret) { |
1a52084d0
|
2717 2718 2719 2720 2721 2722 |
case 1: /* We got the lock. */ ret = 0; goto out_unlock_put_key; case -EFAULT: goto uaddr_faulted; |
778e9a9c3
|
2723 2724 |
case -EAGAIN: /* |
af54d6a1c
|
2725 2726 2727 2728 |
* Two reasons for this: * - Task is exiting and we just wait for the * exit to complete. * - The user space value changed. |
778e9a9c3
|
2729 |
*/ |
0d00c7b20
|
2730 |
queue_unlock(hb); |
ae791a2d2
|
2731 |
put_futex_key(&q.key); |
778e9a9c3
|
2732 2733 |
cond_resched(); goto retry; |
778e9a9c3
|
2734 |
default: |
42d35d48c
|
2735 |
goto out_unlock_put_key; |
c87e2837b
|
2736 |
} |
c87e2837b
|
2737 |
} |
cfafcd117
|
2738 |
WARN_ON(!q.pi_state); |
c87e2837b
|
2739 2740 2741 |
/* * Only actually queue now that the atomic ops are done: */ |
cfafcd117
|
2742 |
__queue_me(&q, hb); |
c87e2837b
|
2743 |
|
cfafcd117
|
2744 |
if (trylock) { |
5293c2efd
|
2745 |
ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); |
c87e2837b
|
2746 2747 |
/* Fixup the trylock return value: */ ret = ret ? 0 : -EWOULDBLOCK; |
cfafcd117
|
2748 |
goto no_block; |
c87e2837b
|
2749 |
} |
56222b212
|
2750 |
rt_mutex_init_waiter(&rt_waiter); |
cfafcd117
|
2751 |
/* |
56222b212
|
2752 2753 2754 2755 2756 2757 2758 |
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not * hold it while doing rt_mutex_start_proxy(), because then it will * include hb->lock in the blocking chain, even though we'll not in * fact hold it while blocking. This will lead it to report -EDEADLK * and BUG when futex_unlock_pi() interleaves with this. * * Therefore acquire wait_lock while holding hb->lock, but drop the
1a1fb985f
|
2759 2760 2761 2762 |
* latter before calling __rt_mutex_start_proxy_lock(). This * interleaves with futex_unlock_pi() -- which does a similar lock * handoff -- such that the latter can observe the futex_q::pi_state * before __rt_mutex_start_proxy_lock() is done. |
cfafcd117
|
2763 |
*/ |
56222b212
|
2764 2765 |
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); spin_unlock(q.lock_ptr); |
1a1fb985f
|
2766 2767 2768 2769 2770 |
/* * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter * such that futex_unlock_pi() is guaranteed to observe the waiter when * it sees the futex_q::pi_state. */ |
56222b212
|
2771 2772 |
ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); |
cfafcd117
|
2773 2774 2775 |
if (ret) { if (ret == 1) ret = 0; |
1a1fb985f
|
2776 |
goto cleanup; |
cfafcd117
|
2777 |
} |
cfafcd117
|
2778 2779 2780 2781 |
if (unlikely(to)) hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); |
1a1fb985f
|
2782 |
cleanup: |
a99e4e413
|
2783 |
spin_lock(q.lock_ptr); |
dd9739980
|
2784 |
/* |
1a1fb985f
|
2785 |
* If we failed to acquire the lock (deadlock/signal/timeout), we must |
cfafcd117
|
2786 |
* first acquire the hb->lock before removing the lock from the |
1a1fb985f
|
2787 2788 |
* rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait * lists consistent. |
56222b212
|
2789 2790 2791 |
* * In particular; it is important that futex_unlock_pi() can not * observe this inconsistency. |
cfafcd117
|
2792 2793 2794 2795 2796 2797 |
*/ if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter)) ret = 0; no_block: /* |
dd9739980
|
2798 2799 2800 |
* Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ |
ae791a2d2
|
2801 |
res = fixup_owner(uaddr, &q, !ret); |
dd9739980
|
2802 2803 2804 2805 2806 2807 |
/* * If fixup_owner() returned an error, propagate that. If it acquired * the lock, clear our -ETIMEDOUT or -EINTR. */ if (res) ret = (res < 0) ? res : 0;
c87e2837b
|
2808 |
|
e8f6386c0
|
2809 |
/* |
dd9739980
|
2810 2811 |
* If fixup_owner() faulted and was unable to handle the fault, unlock * it and return the fault to userspace. |
e8f6386c0
|
2812 |
*/ |
16ffa12d7
|
2813 2814 2815 2816 |
if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) { pi_state = q.pi_state; get_pi_state(pi_state); } |
e8f6386c0
|
2817 |
|
778e9a9c3
|
2818 2819 |
/* Unqueue and drop the lock */ unqueue_me_pi(&q); |
c87e2837b
|
2820 |
|
16ffa12d7
|
2821 2822 2823 2824 |
if (pi_state) { rt_mutex_futex_unlock(&pi_state->pi_mutex); put_pi_state(pi_state); } |
5ecb01cfd
|
2825 |
goto out_put_key; |
c87e2837b
|
2826 |
|
42d35d48c
|
2827 |
out_unlock_put_key: |
0d00c7b20
|
2828 |
queue_unlock(hb); |
c87e2837b
|
2829 |
|
42d35d48c
|
2830 |
out_put_key: |
ae791a2d2
|
2831 |
put_futex_key(&q.key); |
42d35d48c
|
2832 |
out: |
97181f9bd
|
2833 2834 |
if (to) { hrtimer_cancel(&to->timer); |
237fc6e7a
|
2835 |
destroy_hrtimer_on_stack(&to->timer); |
97181f9bd
|
2836 |
} |
dd9739980
|
2837 |
return ret != -EINTR ? ret : -ERESTARTNOINTR; |
c87e2837b
|
2838 |
|
42d35d48c
|
2839 |
uaddr_faulted: |
0d00c7b20
|
2840 |
queue_unlock(hb); |
778e9a9c3
|
2841 |
|
d0725992c
|
2842 |
ret = fault_in_user_writeable(uaddr); |
e4dc5b7a3
|
2843 2844 |
if (ret) goto out_put_key; |
c87e2837b
|
2845 |
|
b41277dc7
|
2846 |
if (!(flags & FLAGS_SHARED)) |
e4dc5b7a3
|
2847 |
goto retry_private; |
ae791a2d2
|
2848 |
put_futex_key(&q.key); |
e4dc5b7a3
|
2849 |
goto retry; |
c87e2837b
|
2850 2851 2852 |
} /* |
c87e2837b
|
2853 2854 2855 2856 |
* Userspace attempted a TID -> 0 atomic transition, and failed. * This is the in-kernel slowpath: we look up the PI state (if any), * and do the rt-mutex unlock. */ |
b41277dc7
|
2857 |
static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) |
c87e2837b
|
2858 |
{ |
ccf9e6a80
|
2859 |
u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current); |
38d47c1b7
|
2860 |
union futex_key key = FUTEX_KEY_INIT; |
ccf9e6a80
|
2861 |
struct futex_hash_bucket *hb; |
499f5aca2
|
2862 |
struct futex_q *top_waiter; |
e4dc5b7a3
|
2863 |
int ret; |
c87e2837b
|
2864 |
|
bc2eecd7e
|
2865 2866 |
if (!IS_ENABLED(CONFIG_FUTEX_PI)) return -ENOSYS; |
c87e2837b
|
2867 2868 2869 2870 2871 2872 |
retry: if (get_user(uval, uaddr)) return -EFAULT; /* * We release only a lock we actually own: */ |
c0c9ed150
|
2873 |
if ((uval & FUTEX_TID_MASK) != vpid) |
c87e2837b
|
2874 |
return -EPERM; |
c87e2837b
|
2875 |
|
96d4f267e
|
2876 |
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE); |
ccf9e6a80
|
2877 2878 |
if (ret) return ret; |
c87e2837b
|
2879 2880 2881 |
hb = hash_futex(&key); spin_lock(&hb->lock); |
c87e2837b
|
2882 |
/* |
ccf9e6a80
|
2883 2884 2885 |
* Check waiters first. We do not trust user space values at * all and we at least want to know if user space fiddled * with the futex value instead of blindly unlocking. |
c87e2837b
|
2886 |
*/ |
499f5aca2
|
2887 2888 |
top_waiter = futex_top_waiter(hb, &key); if (top_waiter) { |
16ffa12d7
|
2889 2890 2891 2892 2893 2894 2895 2896 2897 2898 2899 2900 |
struct futex_pi_state *pi_state = top_waiter->pi_state; ret = -EINVAL; if (!pi_state) goto out_unlock; /* * If current does not own the pi_state then the futex is * inconsistent and user space fiddled with the futex value. */ if (pi_state->owner != current) goto out_unlock; |
bebe5b514
|
2901 |
get_pi_state(pi_state); |
802ab58da
|
2902 |
/* |
bebe5b514
|
2903 2904 2905 2906 |
* By taking wait_lock while still holding hb->lock, we ensure * there is no point where we hold neither; and therefore * wake_futex_pi() must observe a state consistent with what we * observed. |
1a1fb985f
|
2907 2908 2909 2910 |
* * In particular; this forces __rt_mutex_start_proxy() to * complete such that we're guaranteed to observe the * rt_waiter. Also see the WARN in wake_futex_pi(). |
16ffa12d7
|
2911 |
*/ |
bebe5b514
|
2912 |
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
16ffa12d7
|
2913 |
spin_unlock(&hb->lock); |
c74aef2d0
|
2914 |
/* drops pi_state->pi_mutex.wait_lock */ |
16ffa12d7
|
2915 2916 2917 2918 2919 2920 |
ret = wake_futex_pi(uaddr, uval, pi_state); put_pi_state(pi_state); /* * Success, we're done! No tricky corner cases. |
802ab58da
|
2921 2922 2923 |
*/ if (!ret) goto out_putkey; |
c87e2837b
|
2924 |
/* |
ccf9e6a80
|
2925 2926 |
* The atomic access to the futex value generated a * pagefault, so retry the user-access and the wakeup: |
c87e2837b
|
2927 2928 2929 |
*/ if (ret == -EFAULT) goto pi_faulted; |
802ab58da
|
2930 |
/* |
89e9e66ba
|
2931 2932 2933 |
* An unconditional UNLOCK_PI op raced against a waiter * setting the FUTEX_WAITERS bit. Try again. */
6b4f4bc9c
|
2934 2935 |
if (ret == -EAGAIN) goto pi_retry; |
89e9e66ba
|
2936 |
/* |
802ab58da
|
2937 2938 2939 |
* wake_futex_pi has detected invalid state. Tell user * space. */ |
16ffa12d7
|
2940 |
goto out_putkey; |
c87e2837b
|
2941 |
} |
ccf9e6a80
|
2942 |
|
c87e2837b
|
2943 |
/* |
ccf9e6a80
|
2944 2945 2946 2947 2948 |
* We have no kernel internal state, i.e. no waiters in the * kernel. Waiters which are about to queue themselves are stuck * on hb->lock. So we can safely ignore them. We preserve * neither the WAITERS bit nor the OWNER_DIED one. We are the * owner.
c87e2837b
|
2949 |
*/ |
6b4f4bc9c
|
2950 |
if ((ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))) { |
16ffa12d7
|
2951 |
spin_unlock(&hb->lock); |
6b4f4bc9c
|
2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 |
switch (ret) { case -EFAULT: goto pi_faulted; case -EAGAIN: goto pi_retry; default: WARN_ON_ONCE(1); goto out_putkey; } |
16ffa12d7
|
2963 |
} |
c87e2837b
|
2964 |
|
ccf9e6a80
|
2965 2966 2967 2968 |
/* * If uval has changed, let user space handle it. */ ret = (curval == uval) ? 0 : -EAGAIN; |
c87e2837b
|
2969 2970 |
out_unlock: spin_unlock(&hb->lock); |
802ab58da
|
2971 |
out_putkey: |
ae791a2d2
|
2972 |
put_futex_key(&key); |
c87e2837b
|
2973 |
return ret; |
6b4f4bc9c
|
2974 2975 2976 2977 |
pi_retry: put_futex_key(&key); cond_resched(); goto retry; |
c87e2837b
|
2978 |
pi_faulted: |
ae791a2d2
|
2979 |
put_futex_key(&key); |
c87e2837b
|
2980 |
|
d0725992c
|
2981 |
ret = fault_in_user_writeable(uaddr); |
b56863630
|
2982 |
if (!ret) |
c87e2837b
|
2983 |
goto retry; |
1da177e4c
|
2984 2985 |
return ret; } |
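The TID transitions this slowpath backs up happen in user space. A minimal sketch of the PI protocol as documented in futex(2); pi_lock/pi_unlock are illustrative and omit the FUTEX_OWNER_DIED and error handling that real code needs:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>
#include <stdint.h>

/* Uncontended: a single cmpxchg 0 -> TID takes the lock. On failure
 * the kernel resolves contention, boosting the owner as needed. */
static void pi_lock(_Atomic uint32_t *futex_word, uint32_t tid)
{
        uint32_t zero = 0;

        if (!atomic_compare_exchange_strong(futex_word, &zero, tid))
                syscall(SYS_futex, futex_word, FUTEX_LOCK_PI, 0,
                        NULL, NULL, 0);
}

/* Try the TID -> 0 transition first; that is precisely what the
 * "unconditional UNLOCK_PI" comments above warn against skipping. */
static void pi_unlock(_Atomic uint32_t *futex_word, uint32_t tid)
{
        uint32_t expected = tid;

        if (!atomic_compare_exchange_strong(futex_word, &expected, 0))
                syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0,
                        NULL, NULL, 0);
}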
52400ba94
|
2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 |
/** * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex * @hb: the hash_bucket futex_q was original enqueued on * @q: the futex_q woken while waiting to be requeued * @key2: the futex_key of the requeue target futex * @timeout: the timeout associated with the wait (NULL if none) * * Detect if the task was woken on the initial futex as opposed to the requeue * target futex. If so, determine if it was a timeout or a signal that caused * the wakeup and return the appropriate error code to the caller. Must be * called with the hb lock held. * |
6c23cbbd5
|
2998 |
* Return: |
7b4ff1adb
|
2999 3000 |
* - 0 = no early wakeup detected; * - <0 = -ETIMEDOUT or -ERESTARTNOINTR |
52400ba94
|
3001 3002 3003 3004 3005 3006 3007 3008 3009 3010 3011 3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 |
*/ static inline int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, struct futex_q *q, union futex_key *key2, struct hrtimer_sleeper *timeout) { int ret = 0; /* * With the hb lock held, we avoid races while we process the wakeup. * We only need to hold hb (and not hb2) to ensure atomicity as the * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb. * It can't be requeued from uaddr2 to something else since we don't * support a PI aware source futex for requeue. */ if (!match_futex(&q->key, key2)) { WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr)); /* * We were woken prior to requeue by a timeout or a signal. * Unqueue the futex_q and determine which it was. */ |
2e12978a9
|
3022 |
plist_del(&q->list, &hb->chain); |
11d4616bd
|
3023 |
hb_waiters_dec(hb); |
52400ba94
|
3024 |
|
d58e6576b
|
3025 |
/* Handle spurious wakeups gracefully */ |
11df6dddc
|
3026 |
ret = -EWOULDBLOCK; |
52400ba94
|
3027 3028 |
if (timeout && !timeout->task) ret = -ETIMEDOUT; |
d58e6576b
|
3029 |
else if (signal_pending(current)) |
1c840c149
|
3030 |
ret = -ERESTARTNOINTR; |
52400ba94
|
3031 3032 3033 3034 3035 3036 |
} return ret; } /** * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2 |
56ec1607b
|
3037 |
* @uaddr: the futex we initially wait on (non-pi) |
b41277dc7
|
3038 |
* @flags: futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be |
ab51fbab3
|
3039 |
* the same type for both futexes (no requeueing from private to shared, etc.). |
52400ba94
|
3040 3041 |
* @val: the expected value of uaddr * @abs_time: absolute timeout |
56ec1607b
|
3042 |
* @bitset: 32 bit wakeup bitset set by userspace, defaults to all |
52400ba94
|
3043 3044 3045 |
* @uaddr2: the pi futex we will take prior to returning to user-space * * The caller will wait on uaddr and will be requeued by futex_requeue() to |
6f7b0a2a5
|
3046 3047 3048 3049 3050 |
* uaddr2, which must be PI aware and distinct from uaddr. A normal wakeup will wake * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to * userspace. This ensures the rt_mutex maintains an owner when it has waiters; * without one, the pi logic would not know which task to boost/deboost * should the need arise. |
52400ba94
|
3051 3052 |
* * We call schedule() in futex_wait_queue_me() when we enqueue and return there |
6c23cbbd5
|
3053 |
* via the following: |
52400ba94
|
3054 |
* 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() |
cc6db4e60
|
3055 3056 3057 |
* 2) wakeup on uaddr2 after a requeue * 3) signal * 4) timeout |
52400ba94
|
3058 |
* |
cc6db4e60
|
3059 |
* If 3, clean up and return -ERESTARTNOINTR. |
52400ba94
|
3060 3061 3062 3063 3064 3065 3066 |
* * If 2, we may then block on trying to take the rt_mutex and return via: * 5) successful lock * 6) signal * 7) timeout * 8) other lock acquisition failure * |
cc6db4e60
|
3067 |
* If 6, return -EWOULDBLOCK (restarting the syscall would do the same). |
52400ba94
|
3068 3069 3070 |
* * If 4 or 7, we clean up and return -ETIMEDOUT. * |
6c23cbbd5
|
3071 |
* Return: |
7b4ff1adb
|
3072 3073 |
* - 0 - On success; * - <0 - On error |
52400ba94
|
3074 |
*/ |
b41277dc7
|
3075 |
static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
52400ba94
|
3076 |
u32 val, ktime_t *abs_time, u32 bitset, |
b41277dc7
|
3077 |
u32 __user *uaddr2) |
52400ba94
|
3078 |
{ |
5ca584d93
|
3079 |
struct hrtimer_sleeper timeout, *to; |
16ffa12d7
|
3080 |
struct futex_pi_state *pi_state = NULL; |
52400ba94
|
3081 |
struct rt_mutex_waiter rt_waiter; |
52400ba94
|
3082 |
struct futex_hash_bucket *hb; |
5bdb05f91
|
3083 3084 |
union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; |
52400ba94
|
3085 |
int res, ret; |
52400ba94
|
3086 |
|
bc2eecd7e
|
3087 3088 |
if (!IS_ENABLED(CONFIG_FUTEX_PI)) return -ENOSYS; |
6f7b0a2a5
|
3089 3090 |
if (uaddr == uaddr2) return -EINVAL; |
52400ba94
|
3091 3092 |
if (!bitset) return -EINVAL; |
5ca584d93
|
3093 3094 |
to = futex_setup_timer(abs_time, &timeout, flags, current->timer_slack_ns); |
52400ba94
|
3095 3096 3097 3098 3099 |
/* * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ |
50809358d
|
3100 |
rt_mutex_init_waiter(&rt_waiter); |
52400ba94
|
3101 |
|
96d4f267e
|
3102 |
ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); |
52400ba94
|
3103 3104 |
if (unlikely(ret != 0)) goto out; |
84bc4af59
|
3105 3106 3107 |
q.bitset = bitset; q.rt_waiter = &rt_waiter; q.requeue_pi_key = &key2; |
7ada876a8
|
3108 3109 3110 3111 |
/* * Prepare to wait on uaddr. On success, increments q.key (key1) ref * count. */ |
b41277dc7
|
3112 |
ret = futex_wait_setup(uaddr, val, flags, &q, &hb); |
c8b15a706
|
3113 3114 |
if (ret) goto out_key2; |
52400ba94
|
3115 |
|
e9c243a5a
|
3116 3117 3118 3119 3120 |
/* * The check above which compares uaddrs is not sufficient for * shared futexes. We need to compare the keys: */ if (match_futex(&q.key, &key2)) { |
13c42c2f4
|
3121 |
queue_unlock(hb); |
e9c243a5a
|
3122 3123 3124 |
ret = -EINVAL; goto out_put_keys; } |
52400ba94
|
3125 |
/* Queue the futex_q, drop the hb lock, wait for wakeup. */ |
f1a11e057
|
3126 |
futex_wait_queue_me(hb, &q, to); |
52400ba94
|
3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 |
spin_lock(&hb->lock); ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); spin_unlock(&hb->lock); if (ret) goto out_put_keys; /* * In order for us to be here, we know our q.key == key2, and since * we took the hb->lock above, we also know that futex_requeue() has * completed and we no longer have to concern ourselves with a wakeup |
7ada876a8
|
3138 3139 3140 |
* race with the atomic proxy lock acquisition by the requeue code. The * futex_requeue dropped our key1 reference and incremented our key2 * reference count. |
52400ba94
|
3141 3142 3143 3144 3145 3146 3147 3148 3149 3150 |
*/ /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case. */ if (q.pi_state && (q.pi_state->owner != current)) { spin_lock(q.lock_ptr); |
ae791a2d2
|
3151 |
ret = fixup_pi_state_owner(uaddr2, &q, current); |
16ffa12d7
|
3152 3153 3154 3155 |
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { pi_state = q.pi_state; get_pi_state(pi_state); } |
fb75a4282
|
3156 3157 3158 3159 |
/* * Drop the reference to the pi state which * the requeue_pi() code acquired for us. */ |
29e9ee5d4
|
3160 |
put_pi_state(q.pi_state); |
52400ba94
|
3161 3162 3163 |
spin_unlock(q.lock_ptr); } } else { |
c236c8e95
|
3164 |
struct rt_mutex *pi_mutex; |
52400ba94
|
3165 3166 3167 3168 3169 |
/* * We have been woken up by futex_unlock_pi(), a timeout, or a * signal. futex_unlock_pi() destroys neither the lock_ptr nor * the pi_state. */ |
f27071cb7
|
3170 |
WARN_ON(!q.pi_state); |
52400ba94
|
3171 |
pi_mutex = &q.pi_state->pi_mutex; |
38d589f2f
|
3172 |
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); |
52400ba94
|
3173 3174 |
spin_lock(q.lock_ptr); |
38d589f2f
|
3175 3176 3177 3178 |
if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) ret = 0; debug_rt_mutex_free_waiter(&rt_waiter); |
52400ba94
|
3179 3180 3181 3182 |
/* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ |
ae791a2d2
|
3183 |
res = fixup_owner(uaddr2, &q, !ret); |
52400ba94
|
3184 3185 |
/* * If fixup_owner() returned an error, propagate that. If it |
56ec1607b
|
3186 |
* acquired the lock, clear -ETIMEDOUT or -EINTR. |
52400ba94
|
3187 3188 3189 |
*/ if (res) ret = (res < 0) ? res : 0; |
c236c8e95
|
3190 3191 3192 3193 3194 |
/* * If fixup_pi_state_owner() faulted and was unable to handle * the fault, unlock the rt_mutex and return the fault to * userspace. */ |
16ffa12d7
|
3195 3196 3197 3198 |
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { pi_state = q.pi_state; get_pi_state(pi_state); } |
c236c8e95
|
3199 |
|
52400ba94
|
3200 3201 3202 |
/* Unqueue and drop the lock. */ unqueue_me_pi(&q); } |
16ffa12d7
|
3203 3204 3205 3206 |
if (pi_state) { rt_mutex_futex_unlock(&pi_state->pi_mutex); put_pi_state(pi_state); } |
c236c8e95
|
3207 |
if (ret == -EINTR) { |
52400ba94
|
3208 |
/* |
cc6db4e60
|
3209 3210 3211 3212 3213 |
* We've already been requeued, but cannot restart by calling * futex_lock_pi() directly. We could restart this syscall, but * it would detect that the user space "val" changed and return * -EWOULDBLOCK. Save the overhead of the restart and return * -EWOULDBLOCK directly. |
52400ba94
|
3214 |
*/ |
2070887fd
|
3215 |
ret = -EWOULDBLOCK; |
52400ba94
|
3216 3217 3218 |
} out_put_keys: |
ae791a2d2
|
3219 |
put_futex_key(&q.key); |
c8b15a706
|
3220 |
out_key2: |
ae791a2d2
|
3221 |
put_futex_key(&key2); |
52400ba94
|
3222 3223 3224 3225 3226 3227 3228 3229 |
out: if (to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } return ret; } |
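/*
 * A sketch of how the two requeue-PI halves pair up from user space, in
 * the style of a condition variable over a PI mutex; cond_wait(),
 * cond_broadcast(), the struct names and pi_unlock() are illustrative,
 * not a real API, and error handling is omitted. The waiter sleeps on
 * the non-PI word cv->seq and is requeued by the kernel onto the PI
 * futex m->lock -- the uaddr -> uaddr2 flow implemented above by
 * futex_wait_requeue_pi() and by futex_requeue() with requeue_pi=1.
 */
#include <linux/futex.h>
#include <limits.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

struct pi_mutex { _Atomic uint32_t lock; };	/* FUTEX_LOCK_PI word */
struct cond { uint32_t seq; };			/* non-PI wait word */

extern void pi_unlock(_Atomic uint32_t *lock);	/* hypothetical; see the
						 * unlock sketch above */

static void cond_wait(struct cond *cv, struct pi_mutex *m)
{
	uint32_t seq = __atomic_load_n(&cv->seq, __ATOMIC_SEQ_CST);

	pi_unlock(&m->lock);
	/* Sleep on cv->seq; a requeue moves us onto &m->lock. */
	syscall(SYS_futex, &cv->seq, FUTEX_WAIT_REQUEUE_PI, seq,
		NULL, &m->lock, 0);
	/* On success we return owning m->lock, fixed up by the kernel. */
}

static void cond_broadcast(struct cond *cv, struct pi_mutex *m)
{
	uint32_t val = __atomic_add_fetch(&cv->seq, 1, __ATOMIC_SEQ_CST);

	/*
	 * Wake one waiter (nr_wake must be 1 for requeue-PI) and requeue
	 * the rest onto the PI futex; val3 re-checks cv->seq under the
	 * hash bucket locks and fails with -EAGAIN if it changed.
	 */
	syscall(SYS_futex, &cv->seq, FUTEX_CMP_REQUEUE_PI, 1,
		(unsigned long)INT_MAX, &m->lock, val);
}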
0771dfefc
|
3230 3231 3232 3233 3234 3235 3236 |
/* * Support for robust futexes: the kernel cleans up held futexes at * thread exit time. * * Implementation: user-space maintains a per-thread list of locks it * is holding. Upon do_exit(), the kernel carefully walks this list, * and marks all locks that are owned by this thread with the |
c87e2837b
|
3237 |
* FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is |
0771dfefc
|
3238 3239 3240 3241 3242 3243 3244 3245 |
* always manipulated with the lock held, so the list is private and * per-thread. Userspace also maintains a per-thread 'list_op_pending' * field, to allow the kernel to clean up if the thread dies after * acquiring the lock, but just before it could have added itself to * the list. There can only be one such pending lock. */ /** |
d96ee56ce
|
3246 3247 3248 |
* sys_set_robust_list() - Set the robust-futex list head of a task * @head: pointer to the list-head * @len: length of the list-head, as userspace expects |
0771dfefc
|
3249 |
*/ |
836f92adf
|
3250 3251 |
SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, size_t, len) |
0771dfefc
|
3252 |
{ |
a0c1e9073
|
3253 3254 |
if (!futex_cmpxchg_enabled) return -ENOSYS; |
0771dfefc
|
3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 |
/* * The kernel knows only one size for now: */ if (unlikely(len != sizeof(*head))) return -EINVAL; current->robust_list = head; return 0; } /** |
d96ee56ce
|
3267 3268 3269 3270 |
* sys_get_robust_list() - Get the robust-futex list head of a task * @pid: pid of the process [zero for current task] * @head_ptr: pointer to a list-head pointer, the kernel fills it in * @len_ptr: pointer to a length field, the kernel fills in the header size |
0771dfefc
|
3271 |
*/ |
836f92adf
|
3272 3273 3274 |
SYSCALL_DEFINE3(get_robust_list, int, pid, struct robust_list_head __user * __user *, head_ptr, size_t __user *, len_ptr) |
0771dfefc
|
3275 |
{ |
ba46df984
|
3276 |
struct robust_list_head __user *head; |
0771dfefc
|
3277 |
unsigned long ret; |
bdbb776f8
|
3278 |
struct task_struct *p; |
0771dfefc
|
3279 |
|
a0c1e9073
|
3280 3281 |
if (!futex_cmpxchg_enabled) return -ENOSYS; |
bdbb776f8
|
3282 3283 3284 |
rcu_read_lock(); ret = -ESRCH; |
0771dfefc
|
3285 |
if (!pid) |
bdbb776f8
|
3286 |
p = current; |
0771dfefc
|
3287 |
else { |
228ebcbe6
|
3288 |
p = find_task_by_vpid(pid); |
0771dfefc
|
3289 3290 |
if (!p) goto err_unlock; |
0771dfefc
|
3291 |
} |
bdbb776f8
|
3292 |
ret = -EPERM; |
caaee6234
|
3293 |
if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) |
bdbb776f8
|
3294 3295 3296 3297 |
goto err_unlock; head = p->robust_list; rcu_read_unlock(); |
0771dfefc
|
3298 3299 3300 3301 3302 |
if (put_user(sizeof(*head), len_ptr)) return -EFAULT; return put_user(head, head_ptr); err_unlock: |
aaa2a97eb
|
3303 |
rcu_read_unlock(); |
0771dfefc
|
3304 3305 3306 3307 3308 3309 3310 3311 |
return ret; } /* * Process a futex-list entry, check whether it's owned by the * dying task, and do notification if so: */ |
04e7712f4
|
3312 |
static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) |
0771dfefc
|
3313 |
{ |
7cfdaf38d
|
3314 |
u32 uval, uninitialized_var(nval), mval; |
6b4f4bc9c
|
3315 |
int err; |
0771dfefc
|
3316 |
|
5a07168d8
|
3317 3318 3319 |
/* Futex address must be 32-bit aligned */ if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0) return -1; |
8f17d3a50
|
3320 3321 |
retry: if (get_user(uval, uaddr)) |
0771dfefc
|
3322 |
return -1; |
6b4f4bc9c
|
3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3349 |
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr)) return 0; /* * Ok, this dying thread is truly holding a futex * of interest. Set the OWNER_DIED bit atomically * via cmpxchg, and if the value had FUTEX_WAITERS * set, wake up a waiter (if any). (We have to do a * futex_wake() even if OWNER_DIED is already set - * to handle the rare but possible case of recursive * thread-death.) The rest of the cleanup is done in * userspace. */ mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; /* * We are not holding a lock here, but we want to have * the pagefault_disable/enable() protection because * we want to handle the fault gracefully. If the * access fails we try to fault in the futex with R/W * verification via get_user_pages. get_user() above * does not guarantee R/W access. If that fails we * give up and leave the futex locked. */ if ((err = cmpxchg_futex_value_locked(&nval, uaddr, uval, mval))) { switch (err) { case -EFAULT: |
6e0aa9f8a
|
3350 3351 3352 |
if (fault_in_user_writeable(uaddr)) return -1; goto retry; |
6b4f4bc9c
|
3353 3354 3355 |
case -EAGAIN: cond_resched(); |
8f17d3a50
|
3356 |
goto retry; |
0771dfefc
|
3357 |
|
6b4f4bc9c
|
3358 3359 3360 3361 |
default: WARN_ON_ONCE(1); return err; } |
0771dfefc
|
3362 |
} |
6b4f4bc9c
|
3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 |
if (nval != uval) goto retry; /* * Wake robust non-PI futexes here. The wakeup of * PI futexes happens in exit_pi_state_list(): */ if (!pi && (uval & FUTEX_WAITERS)) futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY); |
0771dfefc
|
3373 3374 3375 3376 |
return 0; } /* |
e3f2ddeac
|
3377 3378 3379 |
* Fetch a robust-list pointer. Bit 0 signals PI futexes: */ static inline int fetch_robust_entry(struct robust_list __user **entry, |
ba46df984
|
3380 |
struct robust_list __user * __user *head, |
1dcc41bb0
|
3381 |
unsigned int *pi) |
e3f2ddeac
|
3382 3383 |
{ unsigned long uentry; |
ba46df984
|
3384 |
if (get_user(uentry, (unsigned long __user *)head)) |
e3f2ddeac
|
3385 |
return -EFAULT; |
ba46df984
|
3386 |
*entry = (void __user *)(uentry & ~1UL); |
e3f2ddeac
|
3387 3388 3389 3390 3391 3392 |
*pi = uentry & 1; return 0; } /* |
0771dfefc
|
3393 3394 3395 3396 3397 3398 3399 3400 |
* Walk curr->robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. * * We silently return on any sign of list-walking problem. */ void exit_robust_list(struct task_struct *curr) { struct robust_list_head __user *head = curr->robust_list; |
9f96cb1e8
|
3401 |
struct robust_list __user *entry, *next_entry, *pending; |
4c115e951
|
3402 3403 |
unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; unsigned int uninitialized_var(next_pi); |
0771dfefc
|
3404 |
unsigned long futex_offset; |
9f96cb1e8
|
3405 |
int rc; |
0771dfefc
|
3406 |
|
a0c1e9073
|
3407 3408 |
if (!futex_cmpxchg_enabled) return; |
0771dfefc
|
3409 3410 3411 3412 |
/* * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ |
e3f2ddeac
|
3413 |
if (fetch_robust_entry(&entry, &head->list.next, &pi)) |
0771dfefc
|
3414 3415 3416 3417 3418 3419 3420 3421 3422 3423 |
return; /* * Fetch the relative futex offset: */ if (get_user(futex_offset, &head->futex_offset)) return; /* * Fetch any possibly pending lock-add first, and handle it * if it exists: */ |
e3f2ddeac
|
3424 |
if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) |
0771dfefc
|
3425 |
return; |
e3f2ddeac
|
3426 |
|
9f96cb1e8
|
3427 |
next_entry = NULL; /* avoid warning with gcc */ |
0771dfefc
|
3428 3429 |
while (entry != &head->list) { /* |
9f96cb1e8
|
3430 3431 3432 3433 3434 |
* Fetch the next entry in the list before calling * handle_futex_death: */ rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); /* |
0771dfefc
|
3435 |
* A pending lock might already be on the list, so |
c87e2837b
|
3436 |
* don't process it twice: |
0771dfefc
|
3437 3438 |
*/ if (entry != pending) |
ba46df984
|
3439 |
if (handle_futex_death((void __user *)entry + futex_offset, |
e3f2ddeac
|
3440 |
curr, pi)) |
0771dfefc
|
3441 |
return; |
9f96cb1e8
|
3442 |
if (rc) |
0771dfefc
|
3443 |
return; |
9f96cb1e8
|
3444 3445 |
entry = next_entry; pi = next_pi; |
0771dfefc
|
3446 3447 3448 3449 3450 3451 3452 3453 |
/* * Avoid excessively long or circular lists: */ if (!--limit) break; cond_resched(); } |
9f96cb1e8
|
3454 3455 3456 3457 |
if (pending) handle_futex_death((void __user *)pending + futex_offset, curr, pip); |
0771dfefc
|
3458 |
} |
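/*
 * A sketch of the user-space side of the contract walked above; none of
 * this is part of futex.c, and register_robust_list()/struct robust_lock
 * are illustrative names. Each held lock embeds a list node, and
 * futex_offset tells the kernel where the 32-bit futex word sits
 * relative to that node -- which is how handle_futex_death() derives
 * uaddr from each entry during the walk.
 */
#include <linux/futex.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

struct robust_lock {
	struct robust_list list;	/* linked into the per-thread list */
	uint32_t futex;			/* holds the owner TID while locked */
};

static struct robust_list_head head = {
	.list		 = { .next = &head.list },	/* empty, circular */
	.futex_offset	 = offsetof(struct robust_lock, futex) -
			   offsetof(struct robust_lock, list),
	.list_op_pending = NULL,
};

static void register_robust_list(void)
{
	/* Per-thread registration; the kernel walks it in do_exit(). */
	syscall(SYS_set_robust_list, &head, sizeof(head));
}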
c19384b5b
|
3459 |
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
e2970f2fb
|
3460 |
u32 __user *uaddr2, u32 val2, u32 val3) |
1da177e4c
|
3461 |
{ |
81b40539e
|
3462 |
int cmd = op & FUTEX_CMD_MASK; |
b41277dc7
|
3463 |
unsigned int flags = 0; |
34f01cc1f
|
3464 3465 |
if (!(op & FUTEX_PRIVATE_FLAG)) |
b41277dc7
|
3466 |
flags |= FLAGS_SHARED; |
1da177e4c
|
3467 |
|
b41277dc7
|
3468 3469 |
if (op & FUTEX_CLOCK_REALTIME) { flags |= FLAGS_CLOCKRT; |
337f13046
|
3470 3471 |
if (cmd != FUTEX_WAIT && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI) |
b41277dc7
|
3472 3473 |
return -ENOSYS; } |
1da177e4c
|
3474 |
|
34f01cc1f
|
3475 |
switch (cmd) { |
59263b513
|
3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 |
case FUTEX_LOCK_PI: case FUTEX_UNLOCK_PI: case FUTEX_TRYLOCK_PI: case FUTEX_WAIT_REQUEUE_PI: case FUTEX_CMP_REQUEUE_PI: if (!futex_cmpxchg_enabled) return -ENOSYS; } switch (cmd) { |
1da177e4c
|
3486 |
case FUTEX_WAIT: |
cd689985c
|
3487 |
val3 = FUTEX_BITSET_MATCH_ANY; |
b639186ff
|
3488 |
/* fall through */ |
cd689985c
|
3489 |
case FUTEX_WAIT_BITSET: |
81b40539e
|
3490 |
return futex_wait(uaddr, flags, val, timeout, val3); |
1da177e4c
|
3491 |
case FUTEX_WAKE: |
cd689985c
|
3492 |
val3 = FUTEX_BITSET_MATCH_ANY; |
b639186ff
|
3493 |
/* fall through */ |
cd689985c
|
3494 |
case FUTEX_WAKE_BITSET: |
81b40539e
|
3495 |
return futex_wake(uaddr, flags, val, val3); |
1da177e4c
|
3496 |
case FUTEX_REQUEUE: |
81b40539e
|
3497 |
return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0); |
1da177e4c
|
3498 |
case FUTEX_CMP_REQUEUE: |
81b40539e
|
3499 |
return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0); |
4732efbeb
|
3500 |
case FUTEX_WAKE_OP: |
81b40539e
|
3501 |
return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); |
c87e2837b
|
3502 |
case FUTEX_LOCK_PI: |
996636dda
|
3503 |
return futex_lock_pi(uaddr, flags, timeout, 0); |
c87e2837b
|
3504 |
case FUTEX_UNLOCK_PI: |
81b40539e
|
3505 |
return futex_unlock_pi(uaddr, flags); |
c87e2837b
|
3506 |
case FUTEX_TRYLOCK_PI: |
996636dda
|
3507 |
return futex_lock_pi(uaddr, flags, NULL, 1); |
52400ba94
|
3508 3509 |
case FUTEX_WAIT_REQUEUE_PI: val3 = FUTEX_BITSET_MATCH_ANY; |
81b40539e
|
3510 3511 |
return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, uaddr2); |
52400ba94
|
3512 |
case FUTEX_CMP_REQUEUE_PI: |
81b40539e
|
3513 |
return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1); |
1da177e4c
|
3514 |
} |
81b40539e
|
3515 |
return -ENOSYS; |
1da177e4c
|
3516 |
} |
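/*
 * The multiplexing above, seen from the caller's side; wait_on() and
 * wake_all() are local illustrative wrappers, not kernel functions.
 * As the switch shows, plain FUTEX_WAIT is just FUTEX_WAIT_BITSET with
 * val3 forced to FUTEX_BITSET_MATCH_ANY, and likewise for FUTEX_WAKE.
 */
#include <linux/futex.h>
#include <limits.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static long wait_on(uint32_t *uaddr, uint32_t expected)
{
	/* Sleeps only while *uaddr == expected; fails with EAGAIN else. */
	return syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, expected,
		       NULL, NULL, 0);
}

static long wake_all(uint32_t *uaddr)
{
	/* Returns the number of waiters woken (INT_MAX = wake them all). */
	return syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, INT_MAX,
		       NULL, NULL, 0);
}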
17da2bd90
|
3517 |
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, |
bec2f7cbb
|
3518 |
struct __kernel_timespec __user *, utime, u32 __user *, uaddr2, |
17da2bd90
|
3519 |
u32, val3) |
1da177e4c
|
3520 |
{ |
bec2f7cbb
|
3521 |
struct timespec64 ts; |
c19384b5b
|
3522 |
ktime_t t, *tp = NULL; |
e2970f2fb
|
3523 |
u32 val2 = 0; |
34f01cc1f
|
3524 |
int cmd = op & FUTEX_CMD_MASK; |
1da177e4c
|
3525 |
|
cd689985c
|
3526 |
if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || |
52400ba94
|
3527 3528 |
cmd == FUTEX_WAIT_BITSET || cmd == FUTEX_WAIT_REQUEUE_PI)) { |
ab51fbab3
|
3529 3530 |
if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG)))) return -EFAULT; |
bec2f7cbb
|
3531 |
if (get_timespec64(&ts, utime)) |
1da177e4c
|
3532 |
return -EFAULT; |
bec2f7cbb
|
3533 |
if (!timespec64_valid(&ts)) |
9741ef964
|
3534 |
return -EINVAL; |
c19384b5b
|
3535 |
|
bec2f7cbb
|
3536 |
t = timespec64_to_ktime(ts); |
34f01cc1f
|
3537 |
if (cmd == FUTEX_WAIT) |
5a7780e72
|
3538 |
t = ktime_add_safe(ktime_get(), t); |
c19384b5b
|
3539 |
tp = &t; |
1da177e4c
|
3540 3541 |
} /* |
52400ba94
|
3542 |
* requeue parameter in 'utime' if cmd == FUTEX_*_REQUEUE_*. |
f54f09861
|
3543 |
* number of waiters to wake in 'utime' if cmd == FUTEX_WAKE_OP. |
1da177e4c
|
3544 |
*/ |
f54f09861
|
3545 |
if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || |
ba9c22f2c
|
3546 |
cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) |
e2970f2fb
|
3547 |
val2 = (u32) (unsigned long) utime; |
1da177e4c
|
3548 |
|
c19384b5b
|
3549 |
return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); |
1da177e4c
|
3550 |
} |
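/*
 * Two caller-side consequences of the marshalling above, as a sketch
 * (the wrappers are illustrative names). First, for the requeue and
 * wake-op commands the fourth (timespec) argument slot carries a bare
 * integer, val2. Second, FUTEX_WAIT takes a *relative* timeout, which
 * the syscall converts to absolute via ktime_add_safe(ktime_get(), t);
 * FUTEX_WAIT_BITSET takes an absolute one.
 */
#include <linux/futex.h>
#include <limits.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static long requeue_all(uint32_t *from, uint32_t *to, uint32_t expected)
{
	/* Wake 0, requeue up to INT_MAX; val2 rides in the utime slot. */
	return syscall(SYS_futex, from, FUTEX_CMP_REQUEUE_PRIVATE, 0,
		       (unsigned long)INT_MAX, to, expected);
}

static long wait_two_seconds(uint32_t *uaddr, uint32_t expected)
{
	struct timespec rel = { .tv_sec = 2, .tv_nsec = 0 };

	/* Relative timeout: fails with ETIMEDOUT after ~2s of waiting. */
	return syscall(SYS_futex, uaddr, FUTEX_WAIT_PRIVATE, expected,
		       &rel, NULL, 0);
}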
04e7712f4
|
3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568 3569 3570 3571 3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588 3589 3590 3591 3592 3593 3594 3595 3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616 3617 3618 3619 3620 3621 3622 3623 3624 3625 3626 3627 3628 3629 3630 3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 |
#ifdef CONFIG_COMPAT /* * Fetch a robust-list pointer. Bit 0 signals PI futexes: */ static inline int compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, compat_uptr_t __user *head, unsigned int *pi) { if (get_user(*uentry, head)) return -EFAULT; *entry = compat_ptr((*uentry) & ~1); *pi = (unsigned int)(*uentry) & 1; return 0; } static void __user *futex_uaddr(struct robust_list __user *entry, compat_long_t futex_offset) { compat_uptr_t base = ptr_to_compat(entry); void __user *uaddr = compat_ptr(base + futex_offset); return uaddr; } /* * Walk curr->robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. * * We silently return on any sign of list-walking problem. */ void compat_exit_robust_list(struct task_struct *curr) { struct compat_robust_list_head __user *head = curr->compat_robust_list; struct robust_list __user *entry, *next_entry, *pending; unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; unsigned int uninitialized_var(next_pi); compat_uptr_t uentry, next_uentry, upending; compat_long_t futex_offset; int rc; if (!futex_cmpxchg_enabled) return; /* * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) return; /* * Fetch the relative futex offset: */ if (get_user(futex_offset, &head->futex_offset)) return; /* * Fetch any possibly pending lock-add first, and handle it * if it exists: */ if (compat_fetch_robust_entry(&upending, &pending, &head->list_op_pending, &pip)) return; next_entry = NULL; /* avoid warning with gcc */ while (entry != (struct robust_list __user *) &head->list) { /* * Fetch the next entry in the list before calling * handle_futex_death: */ rc = compat_fetch_robust_entry(&next_uentry, &next_entry, (compat_uptr_t __user *)&entry->next, &next_pi); /* * A pending lock might already be on the list, so * don't process it twice: */ if (entry != pending) { void __user *uaddr = futex_uaddr(entry, futex_offset); if (handle_futex_death(uaddr, curr, pi)) return; } if (rc) return; uentry = next_uentry; entry = next_entry; pi = next_pi; /* * Avoid excessively long or circular lists: */ if (!--limit) break; cond_resched(); } if (pending) { void __user *uaddr = futex_uaddr(pending, futex_offset); handle_futex_death(uaddr, curr, pip); } } COMPAT_SYSCALL_DEFINE2(set_robust_list, struct compat_robust_list_head __user *, head, compat_size_t, len) { if (!futex_cmpxchg_enabled) return -ENOSYS; if (unlikely(len != sizeof(*head))) return -EINVAL; current->compat_robust_list = head; return 0; } COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, compat_uptr_t __user *, head_ptr, compat_size_t __user *, len_ptr) { struct compat_robust_list_head __user *head; unsigned long ret; struct task_struct *p; if (!futex_cmpxchg_enabled) return -ENOSYS; rcu_read_lock(); ret = -ESRCH; if (!pid) p = current; else { p = find_task_by_vpid(pid); if (!p) goto err_unlock; } ret = -EPERM; if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) goto err_unlock; head = p->compat_robust_list; rcu_read_unlock(); if (put_user(sizeof(*head), len_ptr)) return -EFAULT; return put_user(ptr_to_compat(head), head_ptr); err_unlock: rcu_read_unlock(); return ret; } |
bec2f7cbb
|
3706 |
#endif /* CONFIG_COMPAT */ |
04e7712f4
|
3707 |
|
bec2f7cbb
|
3708 |
#ifdef CONFIG_COMPAT_32BIT_TIME |
8dabe7245
|
3709 |
SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, |
04e7712f4
|
3710 3711 3712 |
struct old_timespec32 __user *, utime, u32 __user *, uaddr2, u32, val3) { |
bec2f7cbb
|
3713 |
struct timespec64 ts; |
04e7712f4
|
3714 3715 3716 3717 3718 3719 3720 |
ktime_t t, *tp = NULL; int val2 = 0; int cmd = op & FUTEX_CMD_MASK; if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || cmd == FUTEX_WAIT_BITSET || cmd == FUTEX_WAIT_REQUEUE_PI)) { |
bec2f7cbb
|
3721 |
if (get_old_timespec32(&ts, utime)) |
04e7712f4
|
3722 |
return -EFAULT; |
bec2f7cbb
|
3723 |
if (!timespec64_valid(&ts)) |
04e7712f4
|
3724 |
return -EINVAL; |
bec2f7cbb
|
3725 |
t = timespec64_to_ktime(ts); |
04e7712f4
|
3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 |
if (cmd == FUTEX_WAIT) t = ktime_add_safe(ktime_get(), t); tp = &t; } if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP) val2 = (int) (unsigned long) utime; return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); } |
bec2f7cbb
|
3736 |
#endif /* CONFIG_COMPAT_32BIT_TIME */ |
04e7712f4
|
3737 |
|
03b8c7b62
|
3738 |
static void __init futex_detect_cmpxchg(void) |
1da177e4c
|
3739 |
{ |
03b8c7b62
|
3740 |
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG |
a0c1e9073
|
3741 |
u32 curval; |
03b8c7b62
|
3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 |
/* * This will fail and we want it. Some arch implementations do * runtime detection of the futex_atomic_cmpxchg_inatomic() * functionality. We want to know that before we call in any * of the complex code paths. Also we want to prevent * registration of robust lists in that case. NULL is * guaranteed to fault and we get -EFAULT on a functional * implementation; the non-functional ones will return * -ENOSYS. */ if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) futex_cmpxchg_enabled = 1; #endif } static int __init futex_init(void) { |
63b1a8169
|
3760 |
unsigned int futex_shift; |
a52b89ebb
|
3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 |
unsigned long i; #if CONFIG_BASE_SMALL futex_hashsize = 16; #else futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus()); #endif futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues), futex_hashsize, 0, futex_hashsize < 256 ? HASH_SMALL : 0, |
63b1a8169
|
3772 3773 3774 |
&futex_shift, NULL, futex_hashsize, futex_hashsize); futex_hashsize = 1UL << futex_shift; |
03b8c7b62
|
3775 3776 |
futex_detect_cmpxchg(); |
a0c1e9073
|
3777 |
|
a52b89ebb
|
3778 |
for (i = 0; i < futex_hashsize; i++) { |
11d4616bd
|
3779 |
atomic_set(&futex_queues[i].waiters, 0); |
732375c6a
|
3780 |
plist_head_init(&futex_queues[i].chain); |
3e4ab747e
|
3781 3782 |
spin_lock_init(&futex_queues[i].lock); } |
1da177e4c
|
3783 3784 |
return 0; } |
25f71d1c3
|
3785 |
core_initcall(futex_init); |
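/*
 * A worked example of the sizing above (illustrative numbers, not from
 * the source): with CONFIG_BASE_SMALL unset on a machine with 16
 * possible CPUs, futex_hashsize starts as
 * roundup_pow_of_two(256 * 16) = 4096. alloc_large_system_hash() may
 * round again and reports the shift it actually used, so the final
 * table is 1UL << futex_shift buckets, each with its own spinlock,
 * waiter count and plist.
 */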