Commit 8cf5322ce69afea1fab6a6270db24d057d664798

Authored by Michel Lespinasse
Committed by Linus Torvalds
1 parent 9b0fc9c09f

rwsem: simplify __rwsem_do_wake

This is mostly for cleanup value:

- We don't need several gotos to handle the case where the first
  waiter is a writer. Two simple tests will do (and generate very
  similar code).

- In the remainder of the function, we know the first waiter is a reader,
  so we don't have to double check that. We can use do..while loops
  to iterate over the readers to wake (generates slightly better code).

Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 2 changed files with 19 additions and 30 deletions (side-by-side diff)

lib/rwsem-spinlock.c
... ... @@ -70,26 +70,17 @@
70 70  
71 71 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
72 72  
73   - if (!wakewrite) {
74   - if (waiter->type == RWSEM_WAITING_FOR_WRITE)
75   - goto out;
76   - goto dont_wake_writers;
77   - }
78   -
79   - /*
80   - * as we support write lock stealing, we can't set sem->activity
81   - * to -1 here to indicate we get the lock. Instead, we wake it up
82   - * to let it go get it again.
83   - */
84 73 if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
85   - wake_up_process(waiter->task);
  74 + if (wakewrite)
  75 + /* Wake up a writer. Note that we do not grant it the
  76 + * lock - it will have to acquire it when it runs. */
  77 + wake_up_process(waiter->task);
86 78 goto out;
87 79 }
88 80  
89 81 /* grant an infinite number of read locks to the front of the queue */
90   - dont_wake_writers:
91 82 woken = 0;
92   - while (waiter->type == RWSEM_WAITING_FOR_READ) {
  83 + do {
93 84 struct list_head *next = waiter->list.next;
94 85  
95 86 list_del(&waiter->list);
96 87  
... ... @@ -99,10 +90,10 @@
99 90 wake_up_process(tsk);
100 91 put_task_struct(tsk);
101 92 woken++;
102   - if (list_empty(&sem->wait_list))
  93 + if (next == &sem->wait_list)
103 94 break;
104 95 waiter = list_entry(next, struct rwsem_waiter, list);
105   - }
  96 + } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
106 97  
107 98 sem->activity += woken;
108 99  
lib/rwsem.c
... ... @@ -68,20 +68,17 @@
68 68 signed long woken, loop, adjustment;
69 69  
70 70 waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
71   - if (waiter->type != RWSEM_WAITING_FOR_WRITE)
72   - goto readers_only;
73   -
74   - if (wake_type == RWSEM_WAKE_READ_OWNED)
75   - /* Another active reader was observed, so wakeup is not
76   - * likely to succeed. Save the atomic op.
77   - */
  71 + if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
  72 + if (wake_type != RWSEM_WAKE_READ_OWNED)
  73 + /* Wake writer at the front of the queue, but do not
  74 + * grant it the lock yet as we want other writers
  75 + * to be able to steal it. Readers, on the other hand,
  76 + * will block as they will notice the queued writer.
  77 + */
  78 + wake_up_process(waiter->task);
78 79 goto out;
  80 + }
79 81  
80   - /* Wake up the writing waiter and let the task grab the sem: */
81   - wake_up_process(waiter->task);
82   - goto out;
83   -
84   - readers_only:
85 82 /* If we come here from up_xxxx(), another thread might have reached
86 83 * rwsem_down_failed_common() before we acquired the spinlock and
87 84 * woken up a waiter, making it now active. We prefer to check for
... ... @@ -125,7 +122,8 @@
125 122 rwsem_atomic_add(adjustment, sem);
126 123  
127 124 next = sem->wait_list.next;
128   - for (loop = woken; loop > 0; loop--) {
  125 + loop = woken;
  126 + do {
129 127 waiter = list_entry(next, struct rwsem_waiter, list);
130 128 next = waiter->list.next;
131 129 tsk = waiter->task;
... ... @@ -133,7 +131,7 @@
133 131 waiter->task = NULL;
134 132 wake_up_process(tsk);
135 133 put_task_struct(tsk);
136   - }
  134 + } while (--loop);
137 135  
138 136 sem->wait_list.next = next;
139 137 next->prev = &sem->wait_list;