kernel/futex.c

  /*
   *  Fast Userspace Mutexes (which I call "Futexes!").
   *  (C) Rusty Russell, IBM 2002
   *
   *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
   *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
   *
   *  Removed page pinning, fix privately mapped COW pages and other cleanups
   *  (C) Copyright 2003, 2004 Jamie Lokier
   *
   *  Robust futex support started by Ingo Molnar
   *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
   *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
   *
   *  PI-futex support started by Ingo Molnar and Thomas Gleixner
   *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
   *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
   *
   *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
   *  enough at me, Linus for the original (flawed) idea, Matthew
   *  Kirkwood for proof-of-concept implementation.
   *
   *  "The futexes are also cursed."
   *  "But they come in a choice of three flavours!"
   *
   *  This program is free software; you can redistribute it and/or modify
   *  it under the terms of the GNU General Public License as published by
   *  the Free Software Foundation; either version 2 of the License, or
   *  (at your option) any later version.
   *
   *  This program is distributed in the hope that it will be useful,
   *  but WITHOUT ANY WARRANTY; without even the implied warranty of
   *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   *  GNU General Public License for more details.
   *
   *  You should have received a copy of the GNU General Public License
   *  along with this program; if not, write to the Free Software
   *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
   */
  #include <linux/slab.h>
  #include <linux/poll.h>
  #include <linux/fs.h>
  #include <linux/file.h>
  #include <linux/jhash.h>
  #include <linux/init.h>
  #include <linux/futex.h>
  #include <linux/mount.h>
  #include <linux/pagemap.h>
  #include <linux/syscalls.h>
  #include <linux/signal.h>
  #include <asm/futex.h>

  #include "rtmutex_common.h"
  #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
  
  /*
   * Futexes are matched on equal values of this key.
   * The key type depends on whether it's a shared or private mapping.
   * Don't rearrange members without looking at hash_futex().
   *
   * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
   * We set bit 0 to indicate if it's an inode-based key.
   */
  union futex_key {
  	struct {
  		unsigned long pgoff;
  		struct inode *inode;
  		int offset;
  	} shared;
  	struct {
  		unsigned long address;
  		struct mm_struct *mm;
  		int offset;
  	} private;
  	struct {
  		unsigned long word;
  		void *ptr;
  		int offset;
  	} both;
  };
  
  /*
   * Priority Inheritance state:
   */
  struct futex_pi_state {
  	/*
  	 * list of 'owned' pi_state instances - these have to be
  	 * cleaned up in do_exit() if the task exits prematurely:
  	 */
  	struct list_head list;
  
  	/*
  	 * The PI object:
  	 */
  	struct rt_mutex pi_mutex;
  
  	struct task_struct *owner;
  	atomic_t refcount;
  
  	union futex_key key;
  };
  
  /*
   * We use this hashed waitqueue instead of a normal wait_queue_t, so
   * we can wake only the relevant ones (hashed queues may be shared).
   *
   * A futex_q has a woken state, just like tasks have TASK_RUNNING.
   * It is considered woken when list_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
   * wake up q->waiters, then make the second condition true.
   */
  struct futex_q {
  	struct list_head list;
  	wait_queue_head_t waiters;
  	/* Which hash list lock to use: */
  	spinlock_t *lock_ptr;
  	/* Key which the futex is hashed on: */
  	union futex_key key;
  	/* For fd, sigio sent using these: */
  	int fd;
  	struct file *filp;
  
  	/* Optional priority inheritance state: */
  	struct futex_pi_state *pi_state;
  	struct task_struct *task;
  };
  
  /*
   * Split the global futex_lock into every hash list lock.
   */
  struct futex_hash_bucket {
	spinlock_t		lock;
	struct list_head	chain;
  };
  
  static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
  
  /* Futex-fs vfsmount entry: */
  static struct vfsmount *futex_mnt;
  
  /*
   * We hash on the keys returned from get_futex_key (see below).
   */
  static struct futex_hash_bucket *hash_futex(union futex_key *key)
  {
  	u32 hash = jhash2((u32*)&key->both.word,
  			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
  			  key->both.offset);
  	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
  }
  
  /*
   * Return 1 if two futex_keys are equal, 0 otherwise.
   */
  static inline int match_futex(union futex_key *key1, union futex_key *key2)
  {
  	return (key1->both.word == key2->both.word
  		&& key1->both.ptr == key2->both.ptr
  		&& key1->both.offset == key2->both.offset);
  }
  
  /*
   * Get parameters which are the keys for a futex.
   *
   * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
   * offset_within_page).  For private mappings, it's (uaddr, current->mm).
   * We can usually work out the index without swapping in the page.
   *
   * Returns: 0, or negative error code.
   * The key words are stored in *key on success.
   *
   * Should be called with &current->mm->mmap_sem but NOT any spinlocks.
   */
  static int get_futex_key(u32 __user *uaddr, union futex_key *key)
  {
  	unsigned long address = (unsigned long)uaddr;
  	struct mm_struct *mm = current->mm;
  	struct vm_area_struct *vma;
  	struct page *page;
  	int err;
  
  	/*
  	 * The futex address must be "naturally" aligned.
  	 */
  	key->both.offset = address % PAGE_SIZE;
  	if (unlikely((key->both.offset % sizeof(u32)) != 0))
  		return -EINVAL;
  	address -= key->both.offset;
  
  	/*
  	 * The futex is hashed differently depending on whether
  	 * it's in a shared or private mapping.  So check vma first.
  	 */
  	vma = find_extend_vma(mm, address);
  	if (unlikely(!vma))
  		return -EFAULT;
  
  	/*
  	 * Permissions.
  	 */
  	if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
  		return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;
  
  	/*
  	 * Private mappings are handled in a simple way.
  	 *
  	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
  	 * it's a read-only handle, it's expected that futexes attach to
  	 * the object not the particular process.  Therefore we use
  	 * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
  	 * mappings of _writable_ handles.
  	 */
  	if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
  		key->private.mm = mm;
  		key->private.address = address;
  		return 0;
  	}
  
  	/*
  	 * Linear file mappings are also simple.
  	 */
  	key->shared.inode = vma->vm_file->f_path.dentry->d_inode;
  	key->both.offset++; /* Bit 0 of offset indicates inode-based key. */
  	if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
  		key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
  				     + vma->vm_pgoff);
  		return 0;
  	}
  
  	/*
  	 * We could walk the page table to read the non-linear
  	 * pte, and get the page index without fetching the page
  	 * from swap.  But that's a lot of code to duplicate here
  	 * for a rare case, so we simply fetch the page.
  	 */
  	err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
  	if (err >= 0) {
  		key->shared.pgoff =
  			page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
  		put_page(page);
  		return 0;
  	}
  	return err;
  }
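
/*
 * Illustrative sketch (not part of the kernel): the userspace setup that
 * the key derivation above supports.  Two processes that map the same
 * file MAP_SHARED - possibly at different virtual addresses - obtain
 * identical (inode, pgoff, offset) keys, so they wait and wake on the
 * same hash chain.  The file name and variable names are hypothetical:
 *
 *	int fd = open("/dev/shm/futex-demo", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);
 *	u32 *f = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	process A:  syscall(__NR_futex, f, FUTEX_WAIT, *f, NULL, NULL, 0);
 *	process B:  syscall(__NR_futex, f, FUTEX_WAKE, 1, NULL, NULL, 0);
 *
 * For a private (anonymous or MAP_PRIVATE) mapping the key is
 * (current->mm, address, offset) instead, so only threads sharing the
 * mm can match it.
 */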
  
  /*
   * Take a reference to the resource addressed by a key.
   * Can be called while holding spinlocks.
   *
   * NOTE: mmap_sem MUST be held between get_futex_key() and calling this
   * function, if it is called at all.  mmap_sem keeps key->shared.inode valid.
   */
  static inline void get_key_refs(union futex_key *key)
  {
  	if (key->both.ptr != 0) {
  		if (key->both.offset & 1)
  			atomic_inc(&key->shared.inode->i_count);
  		else
  			atomic_inc(&key->private.mm->mm_count);
  	}
  }
  
  /*
   * Drop a reference to the resource addressed by a key.
   * The hash bucket spinlock must not be held.
   */
  static void drop_key_refs(union futex_key *key)
  {
  	if (key->both.ptr != 0) {
  		if (key->both.offset & 1)
  			iput(key->shared.inode);
  		else
  			mmdrop(key->private.mm);
  	}
  }
  static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
  {
  	int ret;
  	pagefault_disable();
  	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
  	pagefault_enable();
  
  	return ret ? -EFAULT : 0;
  }
  
  /*
   * Fault handling. Called with current->mm->mmap_sem held.
   */
  static int futex_handle_fault(unsigned long address, int attempt)
  {
  	struct vm_area_struct * vma;
  	struct mm_struct *mm = current->mm;
  	if (attempt > 2 || !(vma = find_vma(mm, address)) ||
  	    vma->vm_start > address || !(vma->vm_flags & VM_WRITE))
  		return -EFAULT;
  
  	switch (handle_mm_fault(mm, vma, address, 1)) {
  	case VM_FAULT_MINOR:
  		current->min_flt++;
  		break;
  	case VM_FAULT_MAJOR:
  		current->maj_flt++;
  		break;
  	default:
  		return -EFAULT;
  	}
  	return 0;
  }
  
  /*
   * PI code:
   */
  static int refill_pi_state_cache(void)
  {
  	struct futex_pi_state *pi_state;
  
  	if (likely(current->pi_state_cache))
  		return 0;
  	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
  
  	if (!pi_state)
  		return -ENOMEM;
  	INIT_LIST_HEAD(&pi_state->list);
  	/* pi_mutex gets initialized later */
  	pi_state->owner = NULL;
  	atomic_set(&pi_state->refcount, 1);
  
  	current->pi_state_cache = pi_state;
  
  	return 0;
  }
  
  static struct futex_pi_state * alloc_pi_state(void)
  {
  	struct futex_pi_state *pi_state = current->pi_state_cache;
  
  	WARN_ON(!pi_state);
  	current->pi_state_cache = NULL;
  
  	return pi_state;
  }
  
  static void free_pi_state(struct futex_pi_state *pi_state)
  {
  	if (!atomic_dec_and_test(&pi_state->refcount))
  		return;
  
  	/*
  	 * If pi_state->owner is NULL, the owner is most probably dying
  	 * and has cleaned up the pi_state already
  	 */
  	if (pi_state->owner) {
  		spin_lock_irq(&pi_state->owner->pi_lock);
  		list_del_init(&pi_state->list);
  		spin_unlock_irq(&pi_state->owner->pi_lock);
  
  		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
  	}
  
  	if (current->pi_state_cache)
  		kfree(pi_state);
  	else {
  		/*
  		 * pi_state->list is already empty.
  		 * clear pi_state->owner.
  		 * refcount is at 0 - put it back to 1.
  		 */
  		pi_state->owner = NULL;
  		atomic_set(&pi_state->refcount, 1);
  		current->pi_state_cache = pi_state;
  	}
  }
  
  /*
   * Look up the task based on what TID userspace gave us.
 * We don't trust it.
   */
  static struct task_struct * futex_find_get_task(pid_t pid)
  {
  	struct task_struct *p;
  	rcu_read_lock();
  	p = find_task_by_pid(pid);
  	if (!p)
  		goto out_unlock;
  	if ((current->euid != p->euid) && (current->euid != p->uid)) {
  		p = NULL;
  		goto out_unlock;
  	}
  	if (p->exit_state != 0) {
  		p = NULL;
  		goto out_unlock;
  	}
  	get_task_struct(p);
  out_unlock:
  	rcu_read_unlock();
  
  	return p;
  }
  
  /*
   * This task is holding PI mutexes at exit time => bad.
   * Kernel cleans up PI-state, but userspace is likely hosed.
   * (Robust-futex cleanup is separate and might save the day for userspace.)
   */
  void exit_pi_state_list(struct task_struct *curr)
  {
  	struct list_head *next, *head = &curr->pi_state_list;
  	struct futex_pi_state *pi_state;
  	struct futex_hash_bucket *hb;
  	union futex_key key;
  
  	/*
  	 * We are a ZOMBIE and nobody can enqueue itself on
  	 * pi_state_list anymore, but we have to be careful
  	 * versus waiters unqueueing themselves:
  	 */
  	spin_lock_irq(&curr->pi_lock);
  	while (!list_empty(head)) {
  
  		next = head->next;
  		pi_state = list_entry(next, struct futex_pi_state, list);
  		key = pi_state->key;
  		hb = hash_futex(&key);
  		spin_unlock_irq(&curr->pi_lock);
  		spin_lock(&hb->lock);
  
  		spin_lock_irq(&curr->pi_lock);
  		/*
  		 * We dropped the pi-lock, so re-check whether this
  		 * task still owns the PI-state:
  		 */
  		if (head->next != next) {
  			spin_unlock(&hb->lock);
  			continue;
  		}
  		WARN_ON(pi_state->owner != curr);
  		WARN_ON(list_empty(&pi_state->list));
  		list_del_init(&pi_state->list);
  		pi_state->owner = NULL;
  		spin_unlock_irq(&curr->pi_lock);
  
  		rt_mutex_unlock(&pi_state->pi_mutex);
  
  		spin_unlock(&hb->lock);
  
  		spin_lock_irq(&curr->pi_lock);
  	}
  	spin_unlock_irq(&curr->pi_lock);
  }
  
  static int
  lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
  {
  	struct futex_pi_state *pi_state = NULL;
  	struct futex_q *this, *next;
  	struct list_head *head;
  	struct task_struct *p;
  	pid_t pid;
  
  	head = &hb->chain;
  
  	list_for_each_entry_safe(this, next, head, list) {
  		if (match_futex(&this->key, &me->key)) {
  			/*
  			 * Another waiter already exists - bump up
  			 * the refcount and return its pi_state:
  			 */
  			pi_state = this->pi_state;
  			/*
			 * Userspace might have messed up non-PI and PI futexes
  			 */
  			if (unlikely(!pi_state))
  				return -EINVAL;
  			WARN_ON(!atomic_read(&pi_state->refcount));
  			atomic_inc(&pi_state->refcount);
  			me->pi_state = pi_state;
  
  			return 0;
  		}
  	}
  
  	/*
  	 * We are the first waiter - try to look up the real owner and attach
  	 * the new pi_state to it, but bail out when the owner died bit is set
  	 * and TID = 0:
  	 */
  	pid = uval & FUTEX_TID_MASK;
  	if (!pid && (uval & FUTEX_OWNER_DIED))
  		return -ESRCH;
  	p = futex_find_get_task(pid);
  	if (!p)
  		return -ESRCH;
  
  	pi_state = alloc_pi_state();
  
  	/*
  	 * Initialize the pi_mutex in locked state and make 'p'
  	 * the owner of it:
  	 */
  	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
  
  	/* Store the key for possible exit cleanups: */
  	pi_state->key = me->key;
  
  	spin_lock_irq(&p->pi_lock);
  	WARN_ON(!list_empty(&pi_state->list));
  	list_add(&pi_state->list, &p->pi_state_list);
  	pi_state->owner = p;
  	spin_unlock_irq(&p->pi_lock);
  
  	put_task_struct(p);
  
  	me->pi_state = pi_state;
  
  	return 0;
  }
  
  /*
   * The hash bucket lock must be held when this is called.
   * Afterwards, the futex_q must not be accessed.
   */
  static void wake_futex(struct futex_q *q)
  {
  	list_del_init(&q->list);
  	if (q->filp)
  		send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
  	/*
  	 * The lock in wake_up_all() is a crucial memory barrier after the
  	 * list_del_init() and also before assigning to q->lock_ptr.
  	 */
  	wake_up_all(&q->waiters);
  	/*
  	 * The waiting task can free the futex_q as soon as this is written,
  	 * without taking any locks.  This must come last.
  	 *
  	 * A memory barrier is required here to prevent the following store
  	 * to lock_ptr from getting ahead of the wakeup. Clearing the lock
  	 * at the end of wake_up_all() does not prevent this store from
  	 * moving.
  	 */
  	smp_wmb();
  	q->lock_ptr = NULL;
  }
  static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
  {
  	struct task_struct *new_owner;
  	struct futex_pi_state *pi_state = this->pi_state;
  	u32 curval, newval;
  
  	if (!pi_state)
  		return -EINVAL;
  
  	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
  
  	/*
  	 * This happens when we have stolen the lock and the original
  	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy: this way we know that a lock waiter
	 * is on its way. We make the futex_q waiter the pending owner.
  	 */
  	if (!new_owner)
  		new_owner = this->task;
  
  	/*
  	 * We pass it to the next owner. (The WAITERS bit is always
  	 * kept enabled while there is PI state around. We must also
  	 * preserve the owner died bit.)
  	 */
  	if (!(uval & FUTEX_OWNER_DIED)) {
  		newval = FUTEX_WAITERS | new_owner->pid;
  		pagefault_disable();
  		curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
  		pagefault_enable();
  		if (curval == -EFAULT)
  			return -EFAULT;
  		if (curval != uval)
  			return -EINVAL;
  	}

  	spin_lock_irq(&pi_state->owner->pi_lock);
  	WARN_ON(list_empty(&pi_state->list));
  	list_del_init(&pi_state->list);
  	spin_unlock_irq(&pi_state->owner->pi_lock);
  
  	spin_lock_irq(&new_owner->pi_lock);
  	WARN_ON(!list_empty(&pi_state->list));
  	list_add(&pi_state->list, &new_owner->pi_state_list);
  	pi_state->owner = new_owner;
  	spin_unlock_irq(&new_owner->pi_lock);
  	rt_mutex_unlock(&pi_state->pi_mutex);
  
  	return 0;
  }
  
  static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
  {
  	u32 oldval;
  
  	/*
  	 * There is no waiter, so we unlock the futex. The owner died
	 * bit does not need to be preserved here. We are the owner:
  	 */
  	pagefault_disable();
  	oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
  	pagefault_enable();
  
  	if (oldval == -EFAULT)
  		return oldval;
  	if (oldval != uval)
  		return -EAGAIN;
  
  	return 0;
  }
  /*
   * Express the locking dependencies for lockdep:
   */
  static inline void
  double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
  {
  	if (hb1 <= hb2) {
  		spin_lock(&hb1->lock);
  		if (hb1 < hb2)
  			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
  	} else { /* hb1 > hb2 */
  		spin_lock(&hb2->lock);
  		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
  	}
  }
  
  /*
   * Wake up all waiters hashed on the physical page that is mapped
   * to this virtual address:
   */
  static int futex_wake(u32 __user *uaddr, int nr_wake)
  {
  	struct futex_hash_bucket *hb;
  	struct futex_q *this, *next;
  	struct list_head *head;
  	union futex_key key;
  	int ret;
  
  	down_read(&current->mm->mmap_sem);
  
  	ret = get_futex_key(uaddr, &key);
  	if (unlikely(ret != 0))
  		goto out;
  	hb = hash_futex(&key);
  	spin_lock(&hb->lock);
  	head = &hb->chain;
  
  	list_for_each_entry_safe(this, next, head, list) {
  		if (match_futex (&this->key, &key)) {
  			if (this->pi_state) {
  				ret = -EINVAL;
  				break;
  			}
  			wake_futex(this);
  			if (++ret >= nr_wake)
  				break;
  		}
  	}
  	spin_unlock(&hb->lock);
  out:
  	up_read(&current->mm->mmap_sem);
  	return ret;
  }
  
  /*
 * Perform an atomic op on the futex word at uaddr2, wake up to nr_wake
 * waiters on uaddr1 and, if the old value at uaddr2 satisfies the
 * encoded comparison, wake up to nr_wake2 waiters on uaddr2 as well:
   */
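/*
 * A hedged sketch (not kernel code) of how userspace encodes the 'op'
 * argument that futex_atomic_op_inuser() consumes below.  FUTEX_OP()
 * packs an arithmetic op, its operand, a comparison and a comparison
 * operand into one word; cond_word and lock_word are hypothetical:
 *
 *	int op  = FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1);
 *	int ret = syscall(__NR_futex, &cond_word, FUTEX_WAKE_OP, 1,
 *			  (void *)1, &lock_word, op);
 *
 * The kernel stores 0 into lock_word, wakes up to one waiter on
 * cond_word and, because FUTEX_OP_CMP_GT was chosen, also wakes up to
 * one waiter on lock_word if lock_word's old value was greater than 1.
 */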
  static int
  futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2,
  	      int nr_wake, int nr_wake2, int op)
  {
  	union futex_key key1, key2;
  	struct futex_hash_bucket *hb1, *hb2;
  	struct list_head *head;
  	struct futex_q *this, *next;
  	int ret, op_ret, attempt = 0;
  
  retryfull:
  	down_read(&current->mm->mmap_sem);
  
  	ret = get_futex_key(uaddr1, &key1);
  	if (unlikely(ret != 0))
  		goto out;
  	ret = get_futex_key(uaddr2, &key2);
  	if (unlikely(ret != 0))
  		goto out;
  	hb1 = hash_futex(&key1);
  	hb2 = hash_futex(&key2);
  
  retry:
  	double_lock_hb(hb1, hb2);

  	op_ret = futex_atomic_op_inuser(op, uaddr2);
  	if (unlikely(op_ret < 0)) {
  		u32 dummy;

  		spin_unlock(&hb1->lock);
  		if (hb1 != hb2)
  			spin_unlock(&hb2->lock);

  #ifndef CONFIG_MMU
  		/*
  		 * we don't get EFAULT from MMU faults if we don't have an MMU,
  		 * but we might get them from range checking
  		 */
  		ret = op_ret;
  		goto out;
  #endif
  		if (unlikely(op_ret != -EFAULT)) {
  			ret = op_ret;
  			goto out;
  		}
  		/*
  		 * futex_atomic_op_inuser needs to both read and write
  		 * *(int __user *)uaddr2, but we can't modify it
  		 * non-atomically.  Therefore, if get_user below is not
  		 * enough, we need to handle the fault ourselves, while
  		 * still holding the mmap_sem.
  		 */
  		if (attempt++) {
  			if (futex_handle_fault((unsigned long)uaddr2,
  						attempt)) {
  				ret = -EFAULT;
  				goto out;
  			}
  			goto retry;
  		}
  		/*
  		 * If we would have faulted, release mmap_sem,
  		 * fault it in and start all over again.
  		 */
  		up_read(&current->mm->mmap_sem);
  		ret = get_user(dummy, uaddr2);
  		if (ret)
  			return ret;
  
  		goto retryfull;
  	}
  	head = &hb1->chain;
  
  	list_for_each_entry_safe(this, next, head, list) {
  		if (match_futex (&this->key, &key1)) {
  			wake_futex(this);
  			if (++ret >= nr_wake)
  				break;
  		}
  	}
  
  	if (op_ret > 0) {
  		head = &hb2->chain;
  
  		op_ret = 0;
  		list_for_each_entry_safe(this, next, head, list) {
  			if (match_futex (&this->key, &key2)) {
  				wake_futex(this);
  				if (++op_ret >= nr_wake2)
  					break;
  			}
  		}
  		ret += op_ret;
  	}
  	spin_unlock(&hb1->lock);
  	if (hb1 != hb2)
  		spin_unlock(&hb2->lock);
  out:
  	up_read(&current->mm->mmap_sem);
  	return ret;
  }
  
  /*
 * Wake up to nr_wake waiters hashed on one physical page, and requeue
 * up to nr_requeue of the remaining waiters onto another physical page.
   */
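/*
 * A hedged sketch of the classic caller of this path: a condition
 * variable broadcast that wakes one waiter and requeues the rest onto
 * the mutex word so they do not all stampede for the mutex at once.
 * cond_word and mutex_word are hypothetical userspace variables:
 *
 *	u32 expected = cond_word;
 *	syscall(__NR_futex, &cond_word, FUTEX_CMP_REQUEUE, 1,
 *		(void *)INT_MAX, &mutex_word, expected);
 *
 * This wakes at most one waiter on cond_word and requeues up to INT_MAX
 * of the remaining waiters onto mutex_word.  If cond_word no longer
 * equals 'expected', the call fails with -EAGAIN and userspace retries,
 * which is exactly the cmpval check performed below.
 */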
  static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
  			 int nr_wake, int nr_requeue, u32 *cmpval)
  {
  	union futex_key key1, key2;
  	struct futex_hash_bucket *hb1, *hb2;
  	struct list_head *head1;
  	struct futex_q *this, *next;
  	int ret, drop_count = 0;
  
   retry:
  	down_read(&current->mm->mmap_sem);
  
  	ret = get_futex_key(uaddr1, &key1);
  	if (unlikely(ret != 0))
  		goto out;
  	ret = get_futex_key(uaddr2, &key2);
  	if (unlikely(ret != 0))
  		goto out;
  	hb1 = hash_futex(&key1);
  	hb2 = hash_futex(&key2);

  	double_lock_hb(hb1, hb2);

  	if (likely(cmpval != NULL)) {
  		u32 curval;

  		ret = get_futex_value_locked(&curval, uaddr1);
  
  		if (unlikely(ret)) {
  			spin_unlock(&hb1->lock);
  			if (hb1 != hb2)
  				spin_unlock(&hb2->lock);

  			/*
  			 * If we would have faulted, release mmap_sem, fault
  			 * it in and start all over again.
  			 */
  			up_read(&current->mm->mmap_sem);
  			ret = get_user(curval, uaddr1);
  
  			if (!ret)
  				goto retry;
  
  			return ret;
  		}
  		if (curval != *cmpval) {
  			ret = -EAGAIN;
  			goto out_unlock;
  		}
  	}
  	head1 = &hb1->chain;
  	list_for_each_entry_safe(this, next, head1, list) {
  		if (!match_futex (&this->key, &key1))
  			continue;
  		if (++ret <= nr_wake) {
  			wake_futex(this);
  		} else {
  			/*
  			 * If key1 and key2 hash to the same bucket, no need to
  			 * requeue.
  			 */
  			if (likely(head1 != &hb2->chain)) {
  				list_move_tail(&this->list, &hb2->chain);
  				this->lock_ptr = &hb2->lock;
  			}
  			this->key = key2;
  			get_key_refs(&key2);
  			drop_count++;
  
  			if (ret - nr_wake >= nr_requeue)
  				break;
  		}
  	}
  
  out_unlock:
  	spin_unlock(&hb1->lock);
  	if (hb1 != hb2)
  		spin_unlock(&hb2->lock);
  
  	/* drop_key_refs() must be called outside the spinlocks. */
  	while (--drop_count >= 0)
  		drop_key_refs(&key1);
  
  out:
  	up_read(&current->mm->mmap_sem);
  	return ret;
  }
  
  /* The key must be already stored in q->key. */
  static inline struct futex_hash_bucket *
  queue_lock(struct futex_q *q, int fd, struct file *filp)
  {
  	struct futex_hash_bucket *hb;
  
  	q->fd = fd;
  	q->filp = filp;
  
  	init_waitqueue_head(&q->waiters);
  
  	get_key_refs(&q->key);
  	hb = hash_futex(&q->key);
  	q->lock_ptr = &hb->lock;

  	spin_lock(&hb->lock);
  	return hb;
  }
  static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
  {
  	list_add_tail(&q->list, &hb->chain);
  	q->task = current;
  	spin_unlock(&hb->lock);
  }
  
  static inline void
  queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
  {
  	spin_unlock(&hb->lock);
  	drop_key_refs(&q->key);
  }
  
  /*
   * queue_me and unqueue_me must be called as a pair, each
   * exactly once.  They are called with the hashed spinlock held.
   */
  
  /* The key must be already stored in q->key. */
  static void queue_me(struct futex_q *q, int fd, struct file *filp)
  {
  	struct futex_hash_bucket *hb;
  
  	hb = queue_lock(q, fd, filp);
  	__queue_me(q, hb);
  }
  
  /* Return 1 if we were still queued (ie. 0 means we were woken) */
  static int unqueue_me(struct futex_q *q)
  {
  	spinlock_t *lock_ptr;
  	int ret = 0;
  
  	/* In the common case we don't take the spinlock, which is nice. */
   retry:
  	lock_ptr = q->lock_ptr;
  	barrier();
  	if (lock_ptr != 0) {
  		spin_lock(lock_ptr);
  		/*
  		 * q->lock_ptr can change between reading it and
  		 * spin_lock(), causing us to take the wrong lock.  This
  		 * corrects the race condition.
  		 *
  		 * Reasoning goes like this: if we have the wrong lock,
  		 * q->lock_ptr must have changed (maybe several times)
  		 * between reading it and the spin_lock().  It can
  		 * change again after the spin_lock() but only if it was
  		 * already changed before the spin_lock().  It cannot,
  		 * however, change back to the original value.  Therefore
  		 * we can detect whether we acquired the correct lock.
  		 */
  		if (unlikely(lock_ptr != q->lock_ptr)) {
  			spin_unlock(lock_ptr);
  			goto retry;
  		}
  		WARN_ON(list_empty(&q->list));
  		list_del(&q->list);
  
  		BUG_ON(q->pi_state);
  		spin_unlock(lock_ptr);
  		ret = 1;
  	}
  
  	drop_key_refs(&q->key);
  	return ret;
  }
  /*
 * PI futexes cannot be requeued and must remove themselves from the
   * hash bucket. The hash bucket lock is held on entry and dropped here.
   */
  static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb)
  {
  	WARN_ON(list_empty(&q->list));
  	list_del(&q->list);
  
  	BUG_ON(!q->pi_state);
  	free_pi_state(q->pi_state);
  	q->pi_state = NULL;
  
  	spin_unlock(&hb->lock);
  
  	drop_key_refs(&q->key);
  }
  static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time)
  {
  	struct task_struct *curr = current;
  	DECLARE_WAITQUEUE(wait, curr);
  	struct futex_hash_bucket *hb;
  	struct futex_q q;
  	u32 uval;
  	int ret;

  	q.pi_state = NULL;
   retry:
  	down_read(&curr->mm->mmap_sem);
  
  	ret = get_futex_key(uaddr, &q.key);
  	if (unlikely(ret != 0))
  		goto out_release_sem;
  	hb = queue_lock(&q, -1, NULL);
  
  	/*
  	 * Access the page AFTER the futex is queued.
  	 * Order is important:
  	 *
  	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
  	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
  	 *
  	 * The basic logical guarantee of a futex is that it blocks ONLY
  	 * if cond(var) is known to be true at the time of blocking, for
  	 * any cond.  If we queued after testing *uaddr, that would open
  	 * a race condition where we could block indefinitely with
  	 * cond(var) false, which would violate the guarantee.
  	 *
  	 * A consequence is that futex_wait() can return zero and absorb
  	 * a wakeup when *uaddr != val on entry to the syscall.  This is
  	 * rare, but normal.
  	 *
  	 * We hold the mmap semaphore, so the mapping cannot have changed
  	 * since we looked it up in get_futex_key.
  	 */
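	/*
	 * A hedged, concrete form of the waiter/waker sketch above, using
	 * the raw syscall ('flag' is a hypothetical userspace variable):
	 *
	 *	waiter:	while (flag == 0)
	 *			syscall(__NR_futex, &flag, FUTEX_WAIT, 0,
	 *				NULL, NULL, 0);
	 *
	 *	waker:	flag = 1;
	 *		syscall(__NR_futex, &flag, FUTEX_WAKE, 1, NULL, NULL, 0);
	 *
	 * FUTEX_WAIT sleeps only if the word still contains the expected
	 * value (here 0) when it is re-read under the hash bucket lock,
	 * which is what the comparison of *uaddr against 'val' below does.
	 */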
  	ret = get_futex_value_locked(&uval, uaddr);
  
  	if (unlikely(ret)) {
  		queue_unlock(&q, hb);

  		/*
  		 * If we would have faulted, release mmap_sem, fault it in and
  		 * start all over again.
  		 */
  		up_read(&curr->mm->mmap_sem);

  		ret = get_user(uval, uaddr);
  
  		if (!ret)
  			goto retry;
  		return ret;
  	}
  	ret = -EWOULDBLOCK;
  	if (uval != val)
  		goto out_unlock_release_sem;
  
  	/* Only actually queue if *uaddr contained val.  */
  	__queue_me(&q, hb);
  
  	/*
	 * Now that the futex is queued and we have checked the data, we
  	 * don't want to hold mmap_sem while we sleep.
  	 */
  	up_read(&curr->mm->mmap_sem);
  
  	/*
  	 * There might have been scheduling since the queue_me(), as we
  	 * cannot hold a spinlock across the get_user() in case it
  	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
  	 * queueing ourselves into the futex hash.  This code thus has to
  	 * rely on the futex_wake() code removing us from hash when it
  	 * wakes us up.
  	 */
  
  	/* add_wait_queue is the barrier after __set_current_state. */
  	__set_current_state(TASK_INTERRUPTIBLE);
  	add_wait_queue(&q.waiters, &wait);
  	/*
  	 * !list_empty() is safe here without any lock.
  	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
  	 */
  	if (likely(!list_empty(&q.list)))
  		time = schedule_timeout(time);
  	__set_current_state(TASK_RUNNING);
  
  	/*
  	 * NOTE: we don't remove ourselves from the waitqueue because
  	 * we are the only user of it.
  	 */
  
  	/* If we were woken (and unqueued), we succeeded, whatever. */
  	if (!unqueue_me(&q))
  		return 0;
  	if (time == 0)
  		return -ETIMEDOUT;
  	/*
  	 * We expect signal_pending(current), but another thread may
  	 * have handled it for us already.
  	 */
  	return -EINTR;
   out_unlock_release_sem:
  	queue_unlock(&q, hb);
   out_release_sem:
  	up_read(&curr->mm->mmap_sem);
  	return ret;
  }
  
  /*
   * Userspace tried a 0 -> TID atomic transition of the futex value
   * and failed. The kernel side here does the whole locking operation:
   * if there are waiters then it will block, it does PI, etc. (Due to
   * races the kernel might see a 0 value of the futex too.)
   */
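/*
 * A hedged sketch of the userspace fast path that leads here ('lock' is
 * a hypothetical futex word, 'tid' the caller's thread id, cmpxchg any
 * userspace atomic compare-and-swap):
 *
 *	lock:	if (cmpxchg(&lock, 0, tid) != 0)
 *			syscall(__NR_futex, &lock, FUTEX_LOCK_PI, 0,
 *				NULL, NULL, 0);
 *
 *	unlock:	if (cmpxchg(&lock, tid, 0) != tid)
 *			syscall(__NR_futex, &lock, FUTEX_UNLOCK_PI, 0,
 *				NULL, NULL, 0);
 *
 * The uncontended case never enters the kernel.  Once a waiter exists,
 * the kernel sets FUTEX_WAITERS in the futex word, so the owner's unlock
 * cmpxchg fails and it ends up in futex_unlock_pi() further down.
 */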
  static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
  			 long nsec, int trylock)
  {
  	struct hrtimer_sleeper timeout, *to = NULL;
  	struct task_struct *curr = current;
  	struct futex_hash_bucket *hb;
  	u32 uval, newval, curval;
  	struct futex_q q;
  	int ret, attempt = 0;
  
  	if (refill_pi_state_cache())
  		return -ENOMEM;
  	if (sec != MAX_SCHEDULE_TIMEOUT) {
  		to = &timeout;
  		hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
  		hrtimer_init_sleeper(to, current);
  		to->timer.expires = ktime_set(sec, nsec);
  	}
  	q.pi_state = NULL;
   retry:
  	down_read(&curr->mm->mmap_sem);
  
  	ret = get_futex_key(uaddr, &q.key);
  	if (unlikely(ret != 0))
  		goto out_release_sem;
  
  	hb = queue_lock(&q, -1, NULL);
  
   retry_locked:
  	/*
  	 * To avoid races, we attempt to take the lock here again
  	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
  	 * the locks. It will most likely not succeed.
  	 */
  	newval = current->pid;
  	pagefault_disable();
  	curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
  	pagefault_enable();
  
  	if (unlikely(curval == -EFAULT))
  		goto uaddr_faulted;
  
  	/* We own the lock already */
  	if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
  		if (!detect && 0)
  			force_sig(SIGKILL, current);
  		ret = -EDEADLK;
  		goto out_unlock_release_sem;
  	}
  
  	/*
  	 * Surprise - we got the lock. Just return
  	 * to userspace:
  	 */
  	if (unlikely(!curval))
  		goto out_unlock_release_sem;
  
  	uval = curval;
  	newval = uval | FUTEX_WAITERS;
  	pagefault_disable();
  	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
  	pagefault_enable();
  
  	if (unlikely(curval == -EFAULT))
  		goto uaddr_faulted;
  	if (unlikely(curval != uval))
  		goto retry_locked;
  
  	/*
	 * We don't have the lock. Look up the PI state (or create it if
  	 * we are the first waiter):
  	 */
  	ret = lookup_pi_state(uval, hb, &q);
  
  	if (unlikely(ret)) {
  		/*
  		 * There were no waiters and the owner task lookup
  		 * failed. When the OWNER_DIED bit is set, then we
  		 * know that this is a robust futex and we actually
  		 * take the lock. This is safe as we are protected by
  		 * the hash bucket lock. We also set the waiters bit
  		 * unconditionally here, to simplify glibc handling of
  		 * multiple tasks racing to acquire the lock and
		 * clean up the problems which were left by the dead
  		 * owner.
  		 */
  		if (curval & FUTEX_OWNER_DIED) {
  			uval = newval;
  			newval = current->pid |
  				FUTEX_OWNER_DIED | FUTEX_WAITERS;
  			pagefault_disable();
  			curval = futex_atomic_cmpxchg_inatomic(uaddr,
  							       uval, newval);
  			pagefault_enable();
  
  			if (unlikely(curval == -EFAULT))
  				goto uaddr_faulted;
  			if (unlikely(curval != uval))
  				goto retry_locked;
  			ret = 0;
  		}
  		goto out_unlock_release_sem;
  	}
  
  	/*
  	 * Only actually queue now that the atomic ops are done:
  	 */
  	__queue_me(&q, hb);
  
  	/*
	 * Now that the futex is queued and we have checked the data, we
  	 * don't want to hold mmap_sem while we sleep.
  	 */
  	up_read(&curr->mm->mmap_sem);
  
  	WARN_ON(!q.pi_state);
  	/*
  	 * Block on the PI mutex:
  	 */
  	if (!trylock)
  		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
  	else {
  		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
  		/* Fixup the trylock return value: */
  		ret = ret ? 0 : -EWOULDBLOCK;
  	}
  
  	down_read(&curr->mm->mmap_sem);
  	spin_lock(q.lock_ptr);
  
  	/*
  	 * Got the lock. We might not be the anticipated owner if we
  	 * did a lock-steal - fix up the PI-state in that case.
  	 */
  	if (!ret && q.pi_state->owner != curr) {
  		u32 newtid = current->pid | FUTEX_WAITERS;
  
  		/* Owner died? */
  		if (q.pi_state->owner != NULL) {
  			spin_lock_irq(&q.pi_state->owner->pi_lock);
  			WARN_ON(list_empty(&q.pi_state->list));
  			list_del_init(&q.pi_state->list);
  			spin_unlock_irq(&q.pi_state->owner->pi_lock);
  		} else
  			newtid |= FUTEX_OWNER_DIED;
  
  		q.pi_state->owner = current;
  
  		spin_lock_irq(&current->pi_lock);
  		WARN_ON(!list_empty(&q.pi_state->list));
  		list_add(&q.pi_state->list, &current->pi_state_list);
  		spin_unlock_irq(&current->pi_lock);
  
  		/* Unqueue and drop the lock */
  		unqueue_me_pi(&q, hb);
  		up_read(&curr->mm->mmap_sem);
  		/*
  		 * We own it, so we have to replace the pending owner
		 * TID. This must be atomic as we have to preserve the
  		 * owner died bit here.
  		 */
  		ret = get_user(uval, uaddr);
  		while (!ret) {
  			newval = (uval & FUTEX_OWNER_DIED) | newtid;
  			curval = futex_atomic_cmpxchg_inatomic(uaddr,
  							       uval, newval);
  			if (curval == -EFAULT)
  				ret = -EFAULT;
  			if (curval == uval)
  				break;
  			uval = curval;
  		}
  	} else {
  		/*
  		 * Catch the rare case, where the lock was released
  		 * when we were on the way back before we locked
  		 * the hash bucket.
  		 */
  		if (ret && q.pi_state->owner == curr) {
  			if (rt_mutex_trylock(&q.pi_state->pi_mutex))
  				ret = 0;
  		}
  		/* Unqueue and drop the lock */
  		unqueue_me_pi(&q, hb);
  		up_read(&curr->mm->mmap_sem);
  	}
  
  	if (!detect && ret == -EDEADLK && 0)
  		force_sig(SIGKILL, current);
  	return ret != -EINTR ? ret : -ERESTARTNOINTR;
  
   out_unlock_release_sem:
  	queue_unlock(&q, hb);
  
   out_release_sem:
  	up_read(&curr->mm->mmap_sem);
  	return ret;
  
   uaddr_faulted:
  	/*
  	 * We have to r/w  *(int __user *)uaddr, but we can't modify it
  	 * non-atomically.  Therefore, if get_user below is not
  	 * enough, we need to handle the fault ourselves, while
  	 * still holding the mmap_sem.
  	 */
  	if (attempt++) {
  		if (futex_handle_fault((unsigned long)uaddr, attempt)) {
  			ret = -EFAULT;
  			goto out_unlock_release_sem;
  		}
  		goto retry_locked;
  	}
  
  	queue_unlock(&q, hb);
  	up_read(&curr->mm->mmap_sem);
  
  	ret = get_user(uval, uaddr);
  	if (!ret && (uval != -EFAULT))
  		goto retry;
  
  	return ret;
  }
  
  /*
   * Userspace attempted a TID -> 0 atomic transition, and failed.
   * This is the in-kernel slowpath: we look up the PI state (if any),
   * and do the rt-mutex unlock.
   */
  static int futex_unlock_pi(u32 __user *uaddr)
  {
  	struct futex_hash_bucket *hb;
  	struct futex_q *this, *next;
  	u32 uval;
  	struct list_head *head;
  	union futex_key key;
  	int ret, attempt = 0;
  
  retry:
  	if (get_user(uval, uaddr))
  		return -EFAULT;
  	/*
  	 * We release only a lock we actually own:
  	 */
  	if ((uval & FUTEX_TID_MASK) != current->pid)
  		return -EPERM;
  	/*
  	 * First take all the futex related locks:
  	 */
  	down_read(&current->mm->mmap_sem);
  
  	ret = get_futex_key(uaddr, &key);
  	if (unlikely(ret != 0))
  		goto out;
  
  	hb = hash_futex(&key);
  	spin_lock(&hb->lock);
  
  retry_locked:
  	/*
  	 * To avoid races, try to do the TID -> 0 atomic transition
  	 * again. If it succeeds then we can return without waking
  	 * anyone else up:
  	 */
  	if (!(uval & FUTEX_OWNER_DIED)) {
  		pagefault_disable();
  		uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
  		pagefault_enable();
  	}
  
  	if (unlikely(uval == -EFAULT))
  		goto pi_faulted;
  	/*
  	 * Rare case: we managed to release the lock atomically,
  	 * no need to wake anyone else up:
  	 */
  	if (unlikely(uval == current->pid))
  		goto out_unlock;
  
  	/*
  	 * Ok, other tasks may need to be woken up - check waiters
  	 * and do the wakeup if necessary:
  	 */
  	head = &hb->chain;
  
  	list_for_each_entry_safe(this, next, head, list) {
  		if (!match_futex (&this->key, &key))
  			continue;
  		ret = wake_futex_pi(uaddr, uval, this);
  		/*
  		 * The atomic access to the futex value
  		 * generated a pagefault, so retry the
  		 * user-access and the wakeup:
  		 */
  		if (ret == -EFAULT)
  			goto pi_faulted;
  		goto out_unlock;
  	}
  	/*
  	 * No waiters - kernel unlocks the futex:
  	 */
  	if (!(uval & FUTEX_OWNER_DIED)) {
  		ret = unlock_futex_pi(uaddr, uval);
  		if (ret == -EFAULT)
  			goto pi_faulted;
  	}
  
  out_unlock:
  	spin_unlock(&hb->lock);
  out:
  	up_read(&current->mm->mmap_sem);
  
  	return ret;
  
  pi_faulted:
  	/*
  	 * We have to r/w  *(int __user *)uaddr, but we can't modify it
  	 * non-atomically.  Therefore, if get_user below is not
  	 * enough, we need to handle the fault ourselves, while
  	 * still holding the mmap_sem.
  	 */
  	if (attempt++) {
  		if (futex_handle_fault((unsigned long)uaddr, attempt)) {
  			ret = -EFAULT;
  			goto out_unlock;
  		}
  		goto retry_locked;
  	}
  
  	spin_unlock(&hb->lock);
  	up_read(&current->mm->mmap_sem);
  
  	ret = get_user(uval, uaddr);
  	if (!ret && (uval != -EFAULT))
  		goto retry;
  	return ret;
  }
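
  /*
   * For reference, a minimal userspace sketch of the protocol this slowpath
   * assumes (not taken from any particular libc; pi_unlock() and futex_word
   * are illustrative names, and glibc's syscall() plus the FUTEX_* constants
   * from <linux/futex.h> are assumed to be available):
   *
   *	#include <linux/futex.h>
   *	#include <sys/syscall.h>
   *	#include <unistd.h>
   *
   *	static void pi_unlock(unsigned int *futex_word)
   *	{
   *		unsigned int tid = syscall(SYS_gettid);
   *
   *		// Fast path: uncontended, so TID -> 0 succeeds in userspace
   *		// and the kernel is never entered.
   *		if (__sync_bool_compare_and_swap(futex_word, tid, 0))
   *			return;
   *
   *		// Slow path: FUTEX_WAITERS (or FUTEX_OWNER_DIED) is set,
   *		// so ask the kernel to do the rt-mutex unlock above.
   *		syscall(SYS_futex, futex_word, FUTEX_UNLOCK_PI, 0,
   *			NULL, NULL, 0);
   *	}
   */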
  
  static int futex_close(struct inode *inode, struct file *filp)
  {
  	struct futex_q *q = filp->private_data;
  
  	unqueue_me(q);
  	kfree(q);

  	return 0;
  }
  
  /* This is one-shot: once it's gone off you need a new fd */
  static unsigned int futex_poll(struct file *filp,
  			       struct poll_table_struct *wait)
  {
  	struct futex_q *q = filp->private_data;
  	int ret = 0;
  
  	poll_wait(filp, &q->waiters, wait);
  
  	/*
  	 * list_empty() is safe here without any lock.
  	 * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
  	 */
  	if (list_empty(&q->list))
  		ret = POLLIN | POLLRDNORM;
  
  	return ret;
  }
  static const struct file_operations futex_fops = {
  	.release	= futex_close,
  	.poll		= futex_poll,
  };
  
  /*
   * Signal allows caller to avoid the race which would occur if they
   * set the sigio stuff up afterwards.
   */
  static int futex_fd(u32 __user *uaddr, int signal)
  {
  	struct futex_q *q;
  	struct file *filp;
  	int ret, err;
  	static unsigned long printk_interval;
  
  	if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
  		printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
  		    	"will be removed from the kernel in June 2007
  ",
  			current->comm);
  	}
  
  	ret = -EINVAL;
  	if (!valid_signal(signal))
  		goto out;
  
  	ret = get_unused_fd();
  	if (ret < 0)
  		goto out;
  	filp = get_empty_filp();
  	if (!filp) {
  		put_unused_fd(ret);
  		ret = -ENFILE;
  		goto out;
  	}
  	filp->f_op = &futex_fops;
  	filp->f_path.mnt = mntget(futex_mnt);
  	filp->f_path.dentry = dget(futex_mnt->mnt_root);
  	filp->f_mapping = filp->f_path.dentry->d_inode->i_mapping;
  
  	if (signal) {
  		err = __f_setown(filp, task_pid(current), PIDTYPE_PID, 1);
  		if (err < 0) {
  			goto error;
  		}
  		filp->f_owner.signum = signal;
  	}
  
  	q = kmalloc(sizeof(*q), GFP_KERNEL);
  	if (!q) {
  		err = -ENOMEM;
  		goto error;
  	}
  	q->pi_state = NULL;
  
  	down_read(&current->mm->mmap_sem);
  	err = get_futex_key(uaddr, &q->key);
  
  	if (unlikely(err != 0)) {
  		up_read(&current->mm->mmap_sem);
  		kfree(q);
  		goto error;
  	}
  
  	/*
  	 * queue_me() must be called before releasing mmap_sem, because
  	 * key->shared.inode needs to be referenced while holding it.
  	 */
  	filp->private_data = q;
  
  	queue_me(q, ret, filp);
  	up_read(&current->mm->mmap_sem);
  
  	/* Now we map fd to filp, so userspace can access it */
  	fd_install(ret, filp);
  out:
  	return ret;
  error:
  	put_unused_fd(ret);
  	put_filp(filp);
  	ret = err;
  	goto out;
  }
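
  /*
   * A sketch of how FUTEX_FD is used from userspace (illustrative only;
   * wait_pollable() is a made-up name, and as the warning above says the
   * interface is scheduled for removal): obtain a one-shot pollable fd for
   * a futex, poll() it, and throw it away once it has fired.
   *
   *	#include <linux/futex.h>
   *	#include <poll.h>
   *	#include <sys/syscall.h>
   *	#include <unistd.h>
   *
   *	int wait_pollable(unsigned int *futex_word)
   *	{
   *		struct pollfd pfd;
   *
   *		// val == 0: no signal requested, we only want the fd.
   *		pfd.fd = syscall(SYS_futex, futex_word, FUTEX_FD, 0,
   *				 NULL, NULL, 0);
   *		if (pfd.fd < 0)
   *			return -1;
   *		pfd.events = POLLIN;
   *
   *		poll(&pfd, 1, -1);	// returns once the futex is woken
   *		close(pfd.fd);		// one-shot: the fd is now useless
   *		return 0;
   *	}
   */
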
  /*
   * Support for robust futexes: the kernel cleans up held futexes at
   * thread exit time.
   *
   * Implementation: user-space maintains a per-thread list of locks it
   * is holding. Upon do_exit(), the kernel carefully walks this list,
   * and marks all locks that are owned by this thread with the
   * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
   * always manipulated with the lock held, so the list is private and
   * per-thread. Userspace also maintains a per-thread 'list_op_pending'
   * field, to allow the kernel to clean up if the thread dies after
   * acquiring the lock, but just before it could have added itself to
   * the list. There can only be one such pending lock.
   */
  
  /**
   * sys_set_robust_list - set the robust-futex list head of a task
   * @head: pointer to the list-head
   * @len: length of the list-head, as userspace expects
   */
  asmlinkage long
  sys_set_robust_list(struct robust_list_head __user *head,
  		    size_t len)
  {
  	/*
  	 * The kernel knows only one size for now:
  	 */
  	if (unlikely(len != sizeof(*head)))
  		return -EINVAL;
  
  	current->robust_list = head;
  
  	return 0;
  }
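
  /*
   * A minimal userspace sketch of the registration described above, assuming
   * the robust_list_head/robust_list layout exported through <linux/futex.h>;
   * struct robust_lock and register_robust_list() are illustrative names:
   *
   *	#include <linux/futex.h>
   *	#include <stddef.h>
   *	#include <sys/syscall.h>
   *	#include <unistd.h>
   *
   *	// One record per robust lock, embedding the futex word itself.
   *	struct robust_lock {
   *		struct robust_list list;	// links held locks together
   *		unsigned int futex_word;	// owner TID, or 0 when free
   *	};
   *
   *	static __thread struct robust_list_head robust_head;
   *
   *	static void register_robust_list(void)
   *	{
   *		// Empty circular list, as the walker in exit_robust_list()
   *		// below expects.
   *		robust_head.list.next = &robust_head.list;
   *		// Offset from a list entry to its futex word.
   *		robust_head.futex_offset =
   *			offsetof(struct robust_lock, futex_word) -
   *			offsetof(struct robust_lock, list);
   *		robust_head.list_op_pending = NULL;
   *		syscall(SYS_set_robust_list, &robust_head,
   *			sizeof(robust_head));
   *	}
   */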
  
  /**
   * sys_get_robust_list - get the robust-futex list head of a task
   * @pid: pid of the process [zero for current task]
   * @head_ptr: pointer to a list-head pointer, the kernel fills it in
   * @len_ptr: pointer to a length field, the kernel fills in the header size
   */
  asmlinkage long
  sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
  		    size_t __user *len_ptr)
  {
  	struct robust_list_head __user *head;
  	unsigned long ret;
  
  	if (!pid)
  		head = current->robust_list;
  	else {
  		struct task_struct *p;
  
  		ret = -ESRCH;
  		rcu_read_lock();
  		p = find_task_by_pid(pid);
  		if (!p)
  			goto err_unlock;
  		ret = -EPERM;
  		if ((current->euid != p->euid) && (current->euid != p->uid) &&
  				!capable(CAP_SYS_PTRACE))
  			goto err_unlock;
  		head = p->robust_list;
  		rcu_read_unlock();
  	}
  
  	if (put_user(sizeof(*head), len_ptr))
  		return -EFAULT;
  	return put_user(head, head_ptr);
  
  err_unlock:
  	rcu_read_unlock();
  
  	return ret;
  }
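
  /*
   * Userspace side, sketched (pid 0 means the calling task; assumes the
   * includes from the sketch above plus <stdio.h>):
   *
   *	struct robust_list_head *head;
   *	size_t len;
   *
   *	if (syscall(SYS_get_robust_list, 0, &head, &len) == 0)
   *		printf("robust list head %p, header size %zu\n", head, len);
   */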
  
  /*
   * Process a futex-list entry, check whether it's owned by the
   * dying task, and do notification if so:
   */
  int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
  {
  	u32 uval, nval, mval;

  retry:
  	if (get_user(uval, uaddr))
  		return -1;
  	if ((uval & FUTEX_TID_MASK) == curr->pid) {
  		/*
  		 * Ok, this dying thread is truly holding a futex
  		 * of interest. Set the OWNER_DIED bit atomically
  		 * via cmpxchg, and if the value had FUTEX_WAITERS
  		 * set, wake up a waiter (if any). (We have to do a
  		 * futex_wake() even if OWNER_DIED is already set -
  		 * to handle the rare but possible case of recursive
  		 * thread-death.) The rest of the cleanup is done in
  		 * userspace.
  		 */
  		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
  		nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
  		if (nval == -EFAULT)
  			return -1;
  
  		if (nval != uval)
  			goto retry;

  		/*
  		 * Wake robust non-PI futexes here. The wakeup of
  		 * PI futexes happens in exit_pi_state():
  		 */
  		if (!pi) {
  			if (uval & FUTEX_WAITERS)
  				futex_wake(uaddr, 1);
  		}
  	}
  	return 0;
  }
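
  /*
   * The "rest of the cleanup" mentioned above happens in userspace: the next
   * task that takes the lock sees FUTEX_OWNER_DIED and must treat the
   * protected data as possibly inconsistent (compare pthread's EOWNERDEAD).
   * A heavily simplified, illustrative trylock - real implementations also
   * maintain list_op_pending and the robust list itself:
   *
   *	// Returns 1 if the lock was inherited from a dead owner, 0 on a
   *	// normal acquisition, -1 if it is held by a live owner.
   *	static int robust_trylock(unsigned int *futex_word, unsigned int tid)
   *	{
   *		unsigned int old = *futex_word;
   *
   *		if ((old & FUTEX_TID_MASK) && !(old & FUTEX_OWNER_DIED))
   *			return -1;
   *
   *		// Claim ownership, keep the waiter bit, drop OWNER_DIED
   *		// from the word; the caller remembers the recovery state.
   *		if (!__sync_bool_compare_and_swap(futex_word, old,
   *					tid | (old & FUTEX_WAITERS)))
   *			return -1;
   *
   *		return (old & FUTEX_OWNER_DIED) ? 1 : 0;
   *	}
   */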
  
  /*
   * Fetch a robust-list pointer. Bit 0 signals PI futexes:
   */
  static inline int fetch_robust_entry(struct robust_list __user **entry,
  				     struct robust_list __user * __user *head,
  				     int *pi)
  {
  	unsigned long uentry;
  	if (get_user(uentry, (unsigned long __user *)head))
  		return -EFAULT;
  	*entry = (void __user *)(uentry & ~1UL);
  	*pi = uentry & 1;
  
  	return 0;
  }
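
  /*
   * The bit-0 tag decoded here is set by userspace when it links a lock into
   * its robust list. An illustrative enqueue for the struct robust_lock from
   * the sketch further up (singly linked, list_op_pending and memory
   * barriers omitted for brevity):
   *
   *	static void robust_enqueue(struct robust_list_head *head,
   *				   struct robust_lock *lock, int is_pi)
   *	{
   *		lock->list.next = head->list.next;
   *		head->list.next = (struct robust_list *)
   *			((unsigned long)&lock->list | (is_pi ? 1UL : 0UL));
   *	}
   */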
  
  /*
   * Walk curr->robust_list (very carefully, it's a userspace list!)
   * and mark any locks found there dead, and notify any waiters.
   *
   * We silently return on any sign of list-walking problem.
   */
  void exit_robust_list(struct task_struct *curr)
  {
  	struct robust_list_head __user *head = curr->robust_list;
  	struct robust_list __user *entry, *pending;
  	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
  	unsigned long futex_offset;
  
  	/*
  	 * Fetch the list head (which was registered earlier, via
  	 * sys_set_robust_list()):
  	 */
  	if (fetch_robust_entry(&entry, &head->list.next, &pi))
  		return;
  	/*
  	 * Fetch the relative futex offset:
  	 */
  	if (get_user(futex_offset, &head->futex_offset))
  		return;
  	/*
  	 * Fetch any possibly pending lock-add first, and handle it
  	 * if it exists:
  	 */
  	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
  		return;

  	if (pending)
  		handle_futex_death((void __user *)pending + futex_offset, curr, pip);
  
  	while (entry != &head->list) {
  		/*
  		 * A pending lock might already be on the list, so
  		 * don't process it twice:
  		 */
  		if (entry != pending)
  			if (handle_futex_death((void __user *)entry + futex_offset,
  						curr, pi))
  				return;
  		/*
  		 * Fetch the next entry in the list:
  		 */
  		if (fetch_robust_entry(&entry, &entry->next, &pi))
  			return;
  		/*
  		 * Avoid excessively long or circular lists:
  		 */
  		if (!--limit)
  			break;
  
  		cond_resched();
  	}
  }
  long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
  		u32 __user *uaddr2, u32 val2, u32 val3)
  {
  	int ret;
  
  	switch (op) {
  	case FUTEX_WAIT:
  		ret = futex_wait(uaddr, val, timeout);
  		break;
  	case FUTEX_WAKE:
  		ret = futex_wake(uaddr, val);
  		break;
  	case FUTEX_FD:
  		/* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
  		ret = futex_fd(uaddr, val);
  		break;
  	case FUTEX_REQUEUE:
  		ret = futex_requeue(uaddr, uaddr2, val, val2, NULL);
  		break;
  	case FUTEX_CMP_REQUEUE:
  		ret = futex_requeue(uaddr, uaddr2, val, val2, &val3);
  		break;
  	case FUTEX_WAKE_OP:
  		ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
  		break;
  	case FUTEX_LOCK_PI:
  		ret = futex_lock_pi(uaddr, val, timeout, val2, 0);
  		break;
  	case FUTEX_UNLOCK_PI:
  		ret = futex_unlock_pi(uaddr);
  		break;
  	case FUTEX_TRYLOCK_PI:
  		ret = futex_lock_pi(uaddr, 0, timeout, val2, 1);
  		break;
  	default:
  		ret = -ENOSYS;
  	}
  	return ret;
  }
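
  /*
   * The two workhorse operations dispatched above, as seen from userspace:
   * FUTEX_WAIT blocks only while the word still holds the expected value
   * (re-checked by the kernel before sleeping), FUTEX_WAKE wakes up to 'val'
   * waiters. A minimal sketch; futex(), wait_for_flag() and
   * set_flag_and_wake() are illustrative names:
   *
   *	#include <linux/futex.h>
   *	#include <sys/syscall.h>
   *	#include <time.h>
   *	#include <unistd.h>
   *
   *	static long futex(unsigned int *uaddr, int op, unsigned int val,
   *			  const struct timespec *timeout,
   *			  unsigned int *uaddr2, unsigned int val3)
   *	{
   *		return syscall(SYS_futex, uaddr, op, val, timeout,
   *			       uaddr2, val3);
   *	}
   *
   *	// Returns once *flag became non-zero; EWOULDBLOCK from FUTEX_WAIT
   *	// just means the value changed before we slept.
   *	static void wait_for_flag(unsigned int *flag)
   *	{
   *		while (*flag == 0)
   *			futex(flag, FUTEX_WAIT, 0, NULL, NULL, 0);
   *	}
   *
   *	static void set_flag_and_wake(unsigned int *flag)
   *	{
   *		__sync_lock_test_and_set(flag, 1);
   *		futex(flag, FUTEX_WAKE, 1, NULL, NULL, 0);
   *	}
   */
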
  asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
  			  struct timespec __user *utime, u32 __user *uaddr2,
  			  u32 val3)
  {
  	struct timespec t;
  	unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
  	u32 val2 = 0;

  	if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
  		if (copy_from_user(&t, utime, sizeof(t)) != 0)
  			return -EFAULT;
  		if (!timespec_valid(&t))
  			return -EINVAL;
  		if (op == FUTEX_WAIT)
  			timeout = timespec_to_jiffies(&t) + 1;
  		else {
  			timeout = t.tv_sec;
  			val2 = t.tv_nsec;
  		}
  	}
  	/*
  	 * requeue parameter in 'utime' if op == FUTEX_REQUEUE.
  	 */
  	if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
  		val2 = (u32) (unsigned long) utime;

  	return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
  }
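
  /*
   * Note how 'utime' is multiplexed above: for FUTEX_WAIT and FUTEX_LOCK_PI
   * it is a real timespec, while for the requeue operations it carries the
   * "number to requeue" and is never dereferenced, so userspace passes the
   * count cast to a pointer. Sketch (cond_word, mutex_word and expected are
   * illustrative; INT_MAX from <limits.h>):
   *
   *	// Wake one waiter on cond_word and requeue the rest onto mutex_word,
   *	// provided cond_word still equals expected.
   *	syscall(SYS_futex, cond_word, FUTEX_CMP_REQUEUE, 1,
   *		(void *)(unsigned long)INT_MAX,		// val2: nr_requeue
   *		mutex_word, expected);			// val3
   */
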
  static int futexfs_get_sb(struct file_system_type *fs_type,
  			  int flags, const char *dev_name, void *data,
  			  struct vfsmount *mnt)
  {
  	return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
  }
  
  static struct file_system_type futex_fs_type = {
  	.name		= "futexfs",
  	.get_sb		= futexfs_get_sb,
  	.kill_sb	= kill_anon_super,
  };
  
  static int __init init(void)
  {
  	int i = register_filesystem(&futex_fs_type);
  
  	if (i)
  		return i;

  	futex_mnt = kern_mount(&futex_fs_type);
  	if (IS_ERR(futex_mnt)) {
  		unregister_filesystem(&futex_fs_type);
  		return PTR_ERR(futex_mnt);
  	}
  
  	for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
  		INIT_LIST_HEAD(&futex_queues[i].chain);
  		spin_lock_init(&futex_queues[i].lock);
  	}
  	return 0;
  }
  __initcall(init);