Commit 345af7bf3304410634c21ada4664fda83d4d9a16

Authored by Michel Lespinasse
Committed by Linus Torvalds
1 parent 7721fea3d0

rwsem: fully separate code paths to wake writers vs readers

This is in preparation for later changes in the series.

In __rwsem_do_wake(), the first queued waiter is now examined up front to
determine whether it's a writer or a reader, and the code paths diverge
from that point on.  The code that checks and increments the rwsem active
count is deliberately duplicated on both sides: the point is that later
changes in the series will be able to modify each side independently (a
compilable sketch of the resulting control flow follows the sign-offs
below).

Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: David Howells <dhowells@redhat.com>
Cc: Mike Waychison <mikew@google.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Ying Han <yinghan@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
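
To make the new shape of the function easier to follow, here is a minimal,
self-contained userspace sketch of the control flow __rwsem_do_wake() has
after this patch. It is an illustration rather than the kernel code:
rwsem_atomic_update() is modeled by a C11 atomic helper that returns the
new count, the wait list is reduced to a hypothetical
first_waiter_is_writer flag, and the wakeups themselves are stubbed out
with puts().

#include <stdatomic.h>
#include <stdio.h>

#define RWSEM_ACTIVE_BIAS 0x00000001L /* mirrors the kernel's 32-bit constant */
#define RWSEM_ACTIVE_MASK 0x0000ffffL /* the 'active part' of the count */

struct sem_model {
        atomic_long count;
        int first_waiter_is_writer;   /* hypothetical stand-in for the wait list */
};

/* Modeled on rwsem_atomic_update(): add delta, return the new count. */
static long update(long delta, struct sem_model *sem)
{
        return atomic_fetch_add(&sem->count, delta) + delta;
}

static void do_wake(struct sem_model *sem, int downgrading)
{
        long oldcount;

        /* The first queued waiter is examined up front; the paths diverge here. */
        if (!sem->first_waiter_is_writer)
                goto readers_only;

        if (downgrading)
                goto out;               /* writers are not woken on downgrade */
try_again_write:
        oldcount = update(RWSEM_ACTIVE_BIAS, sem) - RWSEM_ACTIVE_BIAS;
        if (oldcount & RWSEM_ACTIVE_MASK)
                goto undo_write;        /* someone grabbed the sem already */
        puts("wake the single writer at the front of the queue");
        return;

readers_only:
        if (downgrading)
                goto wake_readers;      /* the downgrader already holds the count */
try_again_read:
        oldcount = update(RWSEM_ACTIVE_BIAS, sem) - RWSEM_ACTIVE_BIAS;
        if (oldcount & RWSEM_ACTIVE_MASK)
                goto undo_read;         /* someone grabbed the sem already */
wake_readers:
        puts("wake all readers at the front of the queue");
        return;

undo_write:
        if (update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
                goto out;
        goto try_again_write;
undo_read:
        if (update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
                goto out;
        goto try_again_read;
out:
        return;
}

int main(void)
{
        struct sem_model sem = { 0, 1 }; /* a writer queued, nobody active */
        do_wake(&sem, 0);
        return 0;
}

Note that try_again_write/undo_write and try_again_read/undo_read no
longer share a single line of code; that textual independence is what lets
the later patches in this series tune the writer and reader grant policies
separately.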

Showing 1 changed file with 34 additions and 27 deletions

lib/rwsem.c

... ... @@ -41,7 +41,7 @@
41 41 * - if we come here from up_xxxx(), then:
42 42 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
43 43 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
44   - *   - there must be someone on the queue
  44 + * - there must be someone on the queue
45 45 * - the spinlock must be held by the caller
46 46 * - woken process blocks are discarded from the list after having task zeroed
47 47 * - writers are only woken if downgrading is false
... ... @@ -54,27 +54,24 @@
54 54 struct list_head *next;
55 55 signed long oldcount, woken, loop;
56 56  
  57 + waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
  58 + if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
  59 + goto readers_only;
  60 +
57 61 if (downgrading)
58   - goto dont_wake_writers;
  62 + goto out;
59 63  
60   - /* if we came through an up_xxxx() call, we only wake someone up
61   - * if we can transition the active part of the count from 0 -> 1
  64 + /* There's a writer at the front of the queue - try to grant it the
  65 + * write lock. However, we only wake this writer if we can transition
  66 + * the active part of the count from 0 -> 1
62 67 */
63   - try_again:
  68 + try_again_write:
64 69 oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
65 70 - RWSEM_ACTIVE_BIAS;
66 71 if (oldcount & RWSEM_ACTIVE_MASK)
67   - goto undo;
  72 + /* Someone grabbed the sem already */
  73 + goto undo_write;
68 74  
69   - waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
70   -
71   - /* try to grant a single write lock if there's a writer at the front
72   - * of the queue - note we leave the 'active part' of the count
73   - * incremented by 1 and the waiting part incremented by 0x00010000
74   - */
75   - if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
76   - goto readers_only;
77   -
78 75 /* We must be careful not to touch 'waiter' after we set ->task = NULL.
79 76 * It is allocated on the waiter's stack and may become invalid at
80 77 * any time after that point (due to a wakeup from another source).
... ... @@ -87,18 +84,24 @@
87 84 put_task_struct(tsk);
88 85 goto out;
89 86  
90   - /* don't want to wake any writers */
91   - dont_wake_writers:
92   - waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
93   - if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
94   - goto out;
  87 + readers_only:
  88 + if (downgrading)
  89 + goto wake_readers;
95 90  
96   - /* grant an infinite number of read locks to the readers at the front
97   - * of the queue
98   - * - note we increment the 'active part' of the count by the number of
99   - * readers before waking any processes up
  91 + /* if we came through an up_xxxx() call, we only wake someone up
  92 + * if we can transition the active part of the count from 0 -> 1 */
  93 + try_again_read:
  94 + oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
  95 + - RWSEM_ACTIVE_BIAS;
  96 + if (oldcount & RWSEM_ACTIVE_MASK)
  97 + /* Someone grabbed the sem already */
  98 + goto undo_read;
  99 +
  100 + wake_readers:
  101 + /* Grant an infinite number of read locks to the readers at the front
  102 + * of the queue. Note we increment the 'active part' of the count by
  103 + * the number of readers before waking any processes up.
100 104 */
101   - readers_only:
102 105 woken = 0;
103 106 do {
104 107 woken++;
... ... @@ -138,10 +141,14 @@
138 141  
139 142 /* undo the change to the active count, but check for a transition
140 143 * 1->0 */
141   - undo:
  144 + undo_write:
142 145 if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
143 146 goto out;
144   - goto try_again;
  147 + goto try_again_write;
  148 + undo_read:
  149 + if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) & RWSEM_ACTIVE_MASK)
  150 + goto out;
  151 + goto try_again_read;
145 152 }
146 153  
147 154 /*
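
The grant check that both paths now duplicate is easier to see with
concrete numbers. The worked example below assumes the 32-bit count layout
described in the first hunk's comment ('active part' in 0x0000ffff,
'waiting part' in 0xffff0000); RWSEM_WAITING_BIAS mirrors the kernel's
32-bit definition, and the starting state is an assumed scenario, not
anything taken from this commit.

#include <stdio.h>

#define RWSEM_ACTIVE_MASK  0x0000ffffL    /* 'active part' of the count */
#define RWSEM_ACTIVE_BIAS  0x00000001L    /* one active reader or writer */
#define RWSEM_WAITING_BIAS (-0x00010000L) /* one queued waiter, 32-bit layout */

int main(void)
{
        /* Assumed scenario: one waiter queued, nobody active -- the state
         * __rwsem_do_wake() sees after an up_xxxx() call. */
        long count = RWSEM_WAITING_BIAS;

        /* Speculatively claim the active part, then recover the pre-update
         * value, mirroring rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
         * - RWSEM_ACTIVE_BIAS in the patch. */
        long oldcount = (count += RWSEM_ACTIVE_BIAS) - RWSEM_ACTIVE_BIAS;

        if (oldcount & RWSEM_ACTIVE_MASK)
                puts("raced: someone was already active; take the undo path");
        else
                puts("granted: active part went 0 -> 1; wake the waiter");
        return 0;
}

In the raced case, undo_write/undo_read subtract RWSEM_ACTIVE_BIAS back
out and retry only if the active part has meanwhile dropped back to 0;
otherwise the still-active holder will perform the wakeup itself when it
releases the sem.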