Commit ed428bfc3caaa4b1e6cd15ea12c90c30291903f0

Authored by Peter Zijlstra
Committed by Ingo Molnar
1 parent 1696a8bee3

locking: Move the rwsem code to kernel/locking/

Notably: this changes the lib/rwsem* targets from lib- to obj-; I have
no idea about the ramifications of that.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-g0kynfh5feriwc6p3h6kpbw6@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Showing 9 changed files with 750 additions and 750 deletions
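
A note on the lib- to obj- change called out in the message: in kbuild,
obj-y objects are always linked into the kernel image, while lib-y
objects are archived into lib.a and linked only when some built-in code
references a symbol in them. A minimal illustration of the two forms
(hypothetical fragment, not taken from this commit):

    # lib-y: archived into lib.a; linked only if referenced
    lib-$(CONFIG_FOO) += foo.o

    # obj-y: linked into the image unconditionally
    obj-$(CONFIG_FOO) += foo.o

Since the rwsem slow-path functions are referenced from the rwsem fast
paths whenever rwsems are used at all, the switch should not change the
final image, which is presumably why it is flagged rather than reworked.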

kernel/Makefile
... ... @@ -8,7 +8,7 @@
8 8 signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
9 9 extable.o params.o posix-timers.o \
10 10 kthread.o sys_ni.o posix-cpu-timers.o \
11   - hrtimer.o rwsem.o nsproxy.o \
  11 + hrtimer.o nsproxy.o \
12 12 notifier.o ksysfs.o cred.o reboot.o \
13 13 async.o range.o groups.o lglock.o smpboot.o
14 14  
kernel/locking/Makefile
1 1  
2   -obj-y += mutex.o semaphore.o
  2 +obj-y += mutex.o semaphore.o rwsem.o
3 3  
4 4 ifdef CONFIG_FUNCTION_TRACER
5 5 CFLAGS_REMOVE_lockdep.o = -pg
... ... @@ -20,4 +20,6 @@
20 20 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
21 21 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
22 22 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
  23 +obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
  24 +obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
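
Which of the two objects gets built is decided per architecture. For
reference, an arch Kconfig selects between the two symbols roughly as
follows (an illustrative sketch; the symbol names are the real ones used
in the Makefile above, but the exact wiring varies by architecture):

    config RWSEM_GENERIC_SPINLOCK
            def_bool !RWSEM_XCHGADD_ALGORITHM

    config RWSEM_XCHGADD_ALGORITHM
            def_bool y
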
kernel/locking/rwsem-spinlock.c
  1 +/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
  2 + * generic spinlock implementation
  3 + *
  4 + * Copyright (c) 2001 David Howells (dhowells@redhat.com).
  5 + * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
  6 + * - Derived also from comments by Linus
  7 + */
  8 +#include <linux/rwsem.h>
  9 +#include <linux/sched.h>
  10 +#include <linux/export.h>
  11 +
  12 +enum rwsem_waiter_type {
  13 + RWSEM_WAITING_FOR_WRITE,
  14 + RWSEM_WAITING_FOR_READ
  15 +};
  16 +
  17 +struct rwsem_waiter {
  18 + struct list_head list;
  19 + struct task_struct *task;
  20 + enum rwsem_waiter_type type;
  21 +};
  22 +
  23 +int rwsem_is_locked(struct rw_semaphore *sem)
  24 +{
  25 + int ret = 1;
  26 + unsigned long flags;
  27 +
  28 + if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
  29 + ret = (sem->activity != 0);
  30 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  31 + }
  32 + return ret;
  33 +}
  34 +EXPORT_SYMBOL(rwsem_is_locked);
  35 +
  36 +/*
  37 + * initialise the semaphore
  38 + */
  39 +void __init_rwsem(struct rw_semaphore *sem, const char *name,
  40 + struct lock_class_key *key)
  41 +{
  42 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  43 + /*
  44 + * Make sure we are not reinitializing a held semaphore:
  45 + */
  46 + debug_check_no_locks_freed((void *)sem, sizeof(*sem));
  47 + lockdep_init_map(&sem->dep_map, name, key, 0);
  48 +#endif
  49 + sem->activity = 0;
  50 + raw_spin_lock_init(&sem->wait_lock);
  51 + INIT_LIST_HEAD(&sem->wait_list);
  52 +}
  53 +EXPORT_SYMBOL(__init_rwsem);
  54 +
  55 +/*
   56 + * handle the lock release when there are processes blocked on it that can now run
  57 + * - if we come here, then:
  58 + * - the 'active count' _reached_ zero
  59 + * - the 'waiting count' is non-zero
  60 + * - the spinlock must be held by the caller
  61 + * - woken process blocks are discarded from the list after having task zeroed
  62 + * - writers are only woken if wakewrite is non-zero
  63 + */
  64 +static inline struct rw_semaphore *
  65 +__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
  66 +{
  67 + struct rwsem_waiter *waiter;
  68 + struct task_struct *tsk;
  69 + int woken;
  70 +
  71 + waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
  72 +
  73 + if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
  74 + if (wakewrite)
  75 + /* Wake up a writer. Note that we do not grant it the
  76 + * lock - it will have to acquire it when it runs. */
  77 + wake_up_process(waiter->task);
  78 + goto out;
  79 + }
  80 +
  81 + /* grant an infinite number of read locks to the front of the queue */
  82 + woken = 0;
  83 + do {
  84 + struct list_head *next = waiter->list.next;
  85 +
  86 + list_del(&waiter->list);
  87 + tsk = waiter->task;
  88 + smp_mb();
  89 + waiter->task = NULL;
  90 + wake_up_process(tsk);
  91 + put_task_struct(tsk);
  92 + woken++;
  93 + if (next == &sem->wait_list)
  94 + break;
  95 + waiter = list_entry(next, struct rwsem_waiter, list);
  96 + } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
  97 +
  98 + sem->activity += woken;
  99 +
  100 + out:
  101 + return sem;
  102 +}
  103 +
  104 +/*
  105 + * wake a single writer
  106 + */
  107 +static inline struct rw_semaphore *
  108 +__rwsem_wake_one_writer(struct rw_semaphore *sem)
  109 +{
  110 + struct rwsem_waiter *waiter;
  111 +
  112 + waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
  113 + wake_up_process(waiter->task);
  114 +
  115 + return sem;
  116 +}
  117 +
  118 +/*
  119 + * get a read lock on the semaphore
  120 + */
  121 +void __sched __down_read(struct rw_semaphore *sem)
  122 +{
  123 + struct rwsem_waiter waiter;
  124 + struct task_struct *tsk;
  125 + unsigned long flags;
  126 +
  127 + raw_spin_lock_irqsave(&sem->wait_lock, flags);
  128 +
  129 + if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
  130 + /* granted */
  131 + sem->activity++;
  132 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  133 + goto out;
  134 + }
  135 +
  136 + tsk = current;
  137 + set_task_state(tsk, TASK_UNINTERRUPTIBLE);
  138 +
  139 + /* set up my own style of waitqueue */
  140 + waiter.task = tsk;
  141 + waiter.type = RWSEM_WAITING_FOR_READ;
  142 + get_task_struct(tsk);
  143 +
  144 + list_add_tail(&waiter.list, &sem->wait_list);
  145 +
  146 + /* we don't need to touch the semaphore struct anymore */
  147 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  148 +
  149 + /* wait to be given the lock */
  150 + for (;;) {
  151 + if (!waiter.task)
  152 + break;
  153 + schedule();
  154 + set_task_state(tsk, TASK_UNINTERRUPTIBLE);
  155 + }
  156 +
  157 + tsk->state = TASK_RUNNING;
  158 + out:
  159 + ;
  160 +}
  161 +
  162 +/*
  163 + * trylock for reading -- returns 1 if successful, 0 if contention
  164 + */
  165 +int __down_read_trylock(struct rw_semaphore *sem)
  166 +{
  167 + unsigned long flags;
  168 + int ret = 0;
  169 +
  170 +
  171 + raw_spin_lock_irqsave(&sem->wait_lock, flags);
  172 +
  173 + if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
  174 + /* granted */
  175 + sem->activity++;
  176 + ret = 1;
  177 + }
  178 +
  179 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  180 +
  181 + return ret;
  182 +}
  183 +
  184 +/*
  185 + * get a write lock on the semaphore
  186 + */
  187 +void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
  188 +{
  189 + struct rwsem_waiter waiter;
  190 + struct task_struct *tsk;
  191 + unsigned long flags;
  192 +
  193 + raw_spin_lock_irqsave(&sem->wait_lock, flags);
  194 +
  195 + /* set up my own style of waitqueue */
  196 + tsk = current;
  197 + waiter.task = tsk;
  198 + waiter.type = RWSEM_WAITING_FOR_WRITE;
  199 + list_add_tail(&waiter.list, &sem->wait_list);
  200 +
  201 + /* wait for someone to release the lock */
  202 + for (;;) {
  203 + /*
  204 + * This is the key to supporting write lock stealing: it lets the
  205 + * task already on a CPU take the lock promptly, rather than going
  206 + * to sleep and waiting for the system to wake it (or someone else
  207 + * at the head of the wait list).
  208 + */
  209 + if (sem->activity == 0)
  210 + break;
  211 + set_task_state(tsk, TASK_UNINTERRUPTIBLE);
  212 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  213 + schedule();
  214 + raw_spin_lock_irqsave(&sem->wait_lock, flags);
  215 + }
  216 + /* got the lock */
  217 + sem->activity = -1;
  218 + list_del(&waiter.list);
  219 +
  220 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  221 +}
  222 +
  223 +void __sched __down_write(struct rw_semaphore *sem)
  224 +{
  225 + __down_write_nested(sem, 0);
  226 +}
  227 +
  228 +/*
  229 + * trylock for writing -- returns 1 if successful, 0 if contention
  230 + */
  231 +int __down_write_trylock(struct rw_semaphore *sem)
  232 +{
  233 + unsigned long flags;
  234 + int ret = 0;
  235 +
  236 + raw_spin_lock_irqsave(&sem->wait_lock, flags);
  237 +
  238 + if (sem->activity == 0) {
  239 + /* got the lock */
  240 + sem->activity = -1;
  241 + ret = 1;
  242 + }
  243 +
  244 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  245 +
  246 + return ret;
  247 +}
  248 +
  249 +/*
  250 + * release a read lock on the semaphore
  251 + */
  252 +void __up_read(struct rw_semaphore *sem)
  253 +{
  254 + unsigned long flags;
  255 +
  256 + raw_spin_lock_irqsave(&sem->wait_lock, flags);
  257 +
  258 + if (--sem->activity == 0 && !list_empty(&sem->wait_list))
  259 + sem = __rwsem_wake_one_writer(sem);
  260 +
  261 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  262 +}
  263 +
  264 +/*
  265 + * release a write lock on the semaphore
  266 + */
  267 +void __up_write(struct rw_semaphore *sem)
  268 +{
  269 + unsigned long flags;
  270 +
  271 + raw_spin_lock_irqsave(&sem->wait_lock, flags);
  272 +
  273 + sem->activity = 0;
  274 + if (!list_empty(&sem->wait_list))
  275 + sem = __rwsem_do_wake(sem, 1);
  276 +
  277 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  278 +}
  279 +
  280 +/*
  281 + * downgrade a write lock into a read lock
  282 + * - just wake up any readers at the front of the queue
  283 + */
  284 +void __downgrade_write(struct rw_semaphore *sem)
  285 +{
  286 + unsigned long flags;
  287 +
  288 + raw_spin_lock_irqsave(&sem->wait_lock, flags);
  289 +
  290 + sem->activity = 1;
  291 + if (!list_empty(&sem->wait_list))
  292 + sem = __rwsem_do_wake(sem, 0);
  293 +
  294 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  295 +}
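
For orientation, the generic-spinlock variant above encodes the whole
lock state in the signed sem->activity counter, mutated only under
sem->wait_lock: 0 means unlocked, N > 0 means N active readers, and -1
means a single active writer. A minimal sketch of the reader-entry test
used by __down_read()/__down_read_trylock() (the helper name is
hypothetical):

    /*
     * sem->activity encoding in the generic-spinlock rwsem:
     *   0   -> unlocked
     *   N>0 -> N readers hold the lock
     *   -1  -> one writer holds the lock
     * All updates happen under sem->wait_lock, so plain arithmetic
     * on the counter is enough.
     */
    static bool reader_may_enter(const struct rw_semaphore *sem)
    {
            /* no writer active and nobody queued ahead of us */
            return sem->activity >= 0 && list_empty(&sem->wait_list);
    }
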
kernel/locking/rwsem-xadd.c
  1 +/* rwsem-xadd.c: R/W semaphores: contention handling functions
  2 + *
  3 + * Written by David Howells (dhowells@redhat.com).
  4 + * Derived from arch/i386/kernel/semaphore.c
  5 + *
  6 + * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
  7 + * and Michel Lespinasse <walken@google.com>
  8 + */
  9 +#include <linux/rwsem.h>
  10 +#include <linux/sched.h>
  11 +#include <linux/init.h>
  12 +#include <linux/export.h>
  13 +
  14 +/*
  15 + * Initialize an rwsem:
  16 + */
  17 +void __init_rwsem(struct rw_semaphore *sem, const char *name,
  18 + struct lock_class_key *key)
  19 +{
  20 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  21 + /*
  22 + * Make sure we are not reinitializing a held semaphore:
  23 + */
  24 + debug_check_no_locks_freed((void *)sem, sizeof(*sem));
  25 + lockdep_init_map(&sem->dep_map, name, key, 0);
  26 +#endif
  27 + sem->count = RWSEM_UNLOCKED_VALUE;
  28 + raw_spin_lock_init(&sem->wait_lock);
  29 + INIT_LIST_HEAD(&sem->wait_list);
  30 +}
  31 +
  32 +EXPORT_SYMBOL(__init_rwsem);
  33 +
  34 +enum rwsem_waiter_type {
  35 + RWSEM_WAITING_FOR_WRITE,
  36 + RWSEM_WAITING_FOR_READ
  37 +};
  38 +
  39 +struct rwsem_waiter {
  40 + struct list_head list;
  41 + struct task_struct *task;
  42 + enum rwsem_waiter_type type;
  43 +};
  44 +
  45 +enum rwsem_wake_type {
  46 + RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */
  47 + RWSEM_WAKE_READERS, /* Wake readers only */
  48 + RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
  49 +};
  50 +
  51 +/*
   52 + * handle the lock release when there are processes blocked on it that can now run
  53 + * - if we come here from up_xxxx(), then:
  54 + * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
  55 + * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
  56 + * - there must be someone on the queue
  57 + * - the spinlock must be held by the caller
  58 + * - woken process blocks are discarded from the list after having task zeroed
   59 + * - writers are only woken if wake_type is RWSEM_WAKE_ANY
  60 + */
  61 +static struct rw_semaphore *
  62 +__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
  63 +{
  64 + struct rwsem_waiter *waiter;
  65 + struct task_struct *tsk;
  66 + struct list_head *next;
  67 + long oldcount, woken, loop, adjustment;
  68 +
  69 + waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
  70 + if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
  71 + if (wake_type == RWSEM_WAKE_ANY)
  72 + /* Wake writer at the front of the queue, but do not
  73 + * grant it the lock yet as we want other writers
  74 + * to be able to steal it. Readers, on the other hand,
  75 + * will block as they will notice the queued writer.
  76 + */
  77 + wake_up_process(waiter->task);
  78 + goto out;
  79 + }
  80 +
  81 + /* Writers might steal the lock before we grant it to the next reader.
  82 + * We prefer to do the first reader grant before counting readers
  83 + * so we can bail out early if a writer stole the lock.
  84 + */
  85 + adjustment = 0;
  86 + if (wake_type != RWSEM_WAKE_READ_OWNED) {
  87 + adjustment = RWSEM_ACTIVE_READ_BIAS;
  88 + try_reader_grant:
  89 + oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
  90 + if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
  91 + /* A writer stole the lock. Undo our reader grant. */
  92 + if (rwsem_atomic_update(-adjustment, sem) &
  93 + RWSEM_ACTIVE_MASK)
  94 + goto out;
  95 + /* Last active locker left. Retry waking readers. */
  96 + goto try_reader_grant;
  97 + }
  98 + }
  99 +
  100 + /* Grant an infinite number of read locks to the readers at the front
  101 + * of the queue. Note we increment the 'active part' of the count by
  102 + * the number of readers before waking any processes up.
  103 + */
  104 + woken = 0;
  105 + do {
  106 + woken++;
  107 +
  108 + if (waiter->list.next == &sem->wait_list)
  109 + break;
  110 +
  111 + waiter = list_entry(waiter->list.next,
  112 + struct rwsem_waiter, list);
  113 +
  114 + } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
  115 +
  116 + adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
  117 + if (waiter->type != RWSEM_WAITING_FOR_WRITE)
  118 + /* hit end of list above */
  119 + adjustment -= RWSEM_WAITING_BIAS;
  120 +
  121 + if (adjustment)
  122 + rwsem_atomic_add(adjustment, sem);
  123 +
  124 + next = sem->wait_list.next;
  125 + loop = woken;
  126 + do {
  127 + waiter = list_entry(next, struct rwsem_waiter, list);
  128 + next = waiter->list.next;
  129 + tsk = waiter->task;
  130 + smp_mb();
  131 + waiter->task = NULL;
  132 + wake_up_process(tsk);
  133 + put_task_struct(tsk);
  134 + } while (--loop);
  135 +
  136 + sem->wait_list.next = next;
  137 + next->prev = &sem->wait_list;
  138 +
  139 + out:
  140 + return sem;
  141 +}
  142 +
  143 +/*
  144 + * wait for the read lock to be granted
  145 + */
  146 +struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
  147 +{
  148 + long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
  149 + struct rwsem_waiter waiter;
  150 + struct task_struct *tsk = current;
  151 +
  152 + /* set up my own style of waitqueue */
  153 + waiter.task = tsk;
  154 + waiter.type = RWSEM_WAITING_FOR_READ;
  155 + get_task_struct(tsk);
  156 +
  157 + raw_spin_lock_irq(&sem->wait_lock);
  158 + if (list_empty(&sem->wait_list))
  159 + adjustment += RWSEM_WAITING_BIAS;
  160 + list_add_tail(&waiter.list, &sem->wait_list);
  161 +
  162 + /* we're now waiting on the lock, but no longer actively locking */
  163 + count = rwsem_atomic_update(adjustment, sem);
  164 +
  165 + /* If there are no active locks, wake the front queued process(es).
  166 + *
  167 + * If there are no writers and we are first in the queue,
   168 + * wake our own waiter to join the existing active readers!
  169 + */
  170 + if (count == RWSEM_WAITING_BIAS ||
  171 + (count > RWSEM_WAITING_BIAS &&
  172 + adjustment != -RWSEM_ACTIVE_READ_BIAS))
  173 + sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
  174 +
  175 + raw_spin_unlock_irq(&sem->wait_lock);
  176 +
  177 + /* wait to be given the lock */
  178 + while (true) {
  179 + set_task_state(tsk, TASK_UNINTERRUPTIBLE);
  180 + if (!waiter.task)
  181 + break;
  182 + schedule();
  183 + }
  184 +
  185 + tsk->state = TASK_RUNNING;
  186 +
  187 + return sem;
  188 +}
  189 +
  190 +/*
  191 + * wait until we successfully acquire the write lock
  192 + */
  193 +struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
  194 +{
  195 + long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
  196 + struct rwsem_waiter waiter;
  197 + struct task_struct *tsk = current;
  198 +
  199 + /* set up my own style of waitqueue */
  200 + waiter.task = tsk;
  201 + waiter.type = RWSEM_WAITING_FOR_WRITE;
  202 +
  203 + raw_spin_lock_irq(&sem->wait_lock);
  204 + if (list_empty(&sem->wait_list))
  205 + adjustment += RWSEM_WAITING_BIAS;
  206 + list_add_tail(&waiter.list, &sem->wait_list);
  207 +
  208 + /* we're now waiting on the lock, but no longer actively locking */
  209 + count = rwsem_atomic_update(adjustment, sem);
  210 +
  211 + /* If there were already threads queued before us and there are no
  212 + * active writers, the lock must be read owned; so we try to wake
  213 + * any read locks that were queued ahead of us. */
  214 + if (count > RWSEM_WAITING_BIAS &&
  215 + adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
  216 + sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);
  217 +
  218 + /* wait until we successfully acquire the lock */
  219 + set_task_state(tsk, TASK_UNINTERRUPTIBLE);
  220 + while (true) {
  221 + if (!(count & RWSEM_ACTIVE_MASK)) {
  222 + /* Try acquiring the write lock. */
  223 + count = RWSEM_ACTIVE_WRITE_BIAS;
  224 + if (!list_is_singular(&sem->wait_list))
  225 + count += RWSEM_WAITING_BIAS;
  226 +
  227 + if (sem->count == RWSEM_WAITING_BIAS &&
  228 + cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
  229 + RWSEM_WAITING_BIAS)
  230 + break;
  231 + }
  232 +
  233 + raw_spin_unlock_irq(&sem->wait_lock);
  234 +
  235 + /* Block until there are no active lockers. */
  236 + do {
  237 + schedule();
  238 + set_task_state(tsk, TASK_UNINTERRUPTIBLE);
  239 + } while ((count = sem->count) & RWSEM_ACTIVE_MASK);
  240 +
  241 + raw_spin_lock_irq(&sem->wait_lock);
  242 + }
  243 +
  244 + list_del(&waiter.list);
  245 + raw_spin_unlock_irq(&sem->wait_lock);
  246 + tsk->state = TASK_RUNNING;
  247 +
  248 + return sem;
  249 +}
  250 +
  251 +/*
  252 + * handle waking up a waiter on the semaphore
  253 + * - up_read/up_write has decremented the active part of count if we come here
  254 + */
  255 +struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
  256 +{
  257 + unsigned long flags;
  258 +
  259 + raw_spin_lock_irqsave(&sem->wait_lock, flags);
  260 +
  261 + /* do nothing if list empty */
  262 + if (!list_empty(&sem->wait_list))
  263 + sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
  264 +
  265 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  266 +
  267 + return sem;
  268 +}
  269 +
  270 +/*
  271 + * downgrade a write lock into a read lock
  272 + * - caller incremented waiting part of count and discovered it still negative
  273 + * - just wake up any readers at the front of the queue
  274 + */
  275 +struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
  276 +{
  277 + unsigned long flags;
  278 +
  279 + raw_spin_lock_irqsave(&sem->wait_lock, flags);
  280 +
  281 + /* do nothing if list empty */
  282 + if (!list_empty(&sem->wait_list))
  283 + sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
  284 +
  285 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  286 +
  287 + return sem;
  288 +}
  289 +
  290 +EXPORT_SYMBOL(rwsem_down_read_failed);
  291 +EXPORT_SYMBOL(rwsem_down_write_failed);
  292 +EXPORT_SYMBOL(rwsem_wake);
  293 +EXPORT_SYMBOL(rwsem_downgrade_wake);
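
The xadd variant instead packs the state into sem->count, as the
__rwsem_do_wake() comment above notes: the low 16 bits are the active
count, the high bits carry the waiting bias. Assuming the common 32-bit
constants (RWSEM_ACTIVE_BIAS = 0x00000001, RWSEM_WAITING_BIAS =
-0x00010000, RWSEM_ACTIVE_WRITE_BIAS = RWSEM_WAITING_BIAS +
RWSEM_ACTIVE_BIAS; the exact values live in the per-arch rwsem.h), the
interesting states look like this:

    /*
     * Illustrative sem->count values (32-bit constants assumed):
     *
     *   0x00000000  unlocked, no waiters
     *   0x0000000N  N readers hold the lock, no waiters
     *   0xffff0000  no active lockers, wait list non-empty
     *               (== RWSEM_WAITING_BIAS)
     *   0xffff0001  one writer holds the lock, no waiters
     *               (== RWSEM_ACTIVE_WRITE_BIAS)
     *
     * Note that one reader plus a non-empty wait list also yields
     * 0xffff0001, which is why the slow paths above consult the wait
     * list and cmpxchg() against RWSEM_WAITING_BIAS rather than
     * trusting the count alone.
     */

A negative result from the fast-path atomic is what diverts a reader
into rwsem_down_read_failed(); the writer fast path likewise falls back
to rwsem_down_write_failed() when the xadd shows the lock was not free.
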
kernel/locking/rwsem.c
  1 +/* kernel/locking/rwsem.c: R/W semaphores, public implementation
  2 + *
  3 + * Written by David Howells (dhowells@redhat.com).
  4 + * Derived from asm-i386/semaphore.h
  5 + */
  6 +
  7 +#include <linux/types.h>
  8 +#include <linux/kernel.h>
  9 +#include <linux/sched.h>
  10 +#include <linux/export.h>
  11 +#include <linux/rwsem.h>
  12 +
  13 +#include <linux/atomic.h>
  14 +
  15 +/*
  16 + * lock for reading
  17 + */
  18 +void __sched down_read(struct rw_semaphore *sem)
  19 +{
  20 + might_sleep();
  21 + rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
  22 +
  23 + LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
  24 +}
  25 +
  26 +EXPORT_SYMBOL(down_read);
  27 +
  28 +/*
  29 + * trylock for reading -- returns 1 if successful, 0 if contention
  30 + */
  31 +int down_read_trylock(struct rw_semaphore *sem)
  32 +{
  33 + int ret = __down_read_trylock(sem);
  34 +
  35 + if (ret == 1)
  36 + rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
  37 + return ret;
  38 +}
  39 +
  40 +EXPORT_SYMBOL(down_read_trylock);
  41 +
  42 +/*
  43 + * lock for writing
  44 + */
  45 +void __sched down_write(struct rw_semaphore *sem)
  46 +{
  47 + might_sleep();
  48 + rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
  49 +
  50 + LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
  51 +}
  52 +
  53 +EXPORT_SYMBOL(down_write);
  54 +
  55 +/*
  56 + * trylock for writing -- returns 1 if successful, 0 if contention
  57 + */
  58 +int down_write_trylock(struct rw_semaphore *sem)
  59 +{
  60 + int ret = __down_write_trylock(sem);
  61 +
  62 + if (ret == 1)
  63 + rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
  64 + return ret;
  65 +}
  66 +
  67 +EXPORT_SYMBOL(down_write_trylock);
  68 +
  69 +/*
  70 + * release a read lock
  71 + */
  72 +void up_read(struct rw_semaphore *sem)
  73 +{
  74 + rwsem_release(&sem->dep_map, 1, _RET_IP_);
  75 +
  76 + __up_read(sem);
  77 +}
  78 +
  79 +EXPORT_SYMBOL(up_read);
  80 +
  81 +/*
  82 + * release a write lock
  83 + */
  84 +void up_write(struct rw_semaphore *sem)
  85 +{
  86 + rwsem_release(&sem->dep_map, 1, _RET_IP_);
  87 +
  88 + __up_write(sem);
  89 +}
  90 +
  91 +EXPORT_SYMBOL(up_write);
  92 +
  93 +/*
  94 + * downgrade write lock to read lock
  95 + */
  96 +void downgrade_write(struct rw_semaphore *sem)
  97 +{
  98 + /*
  99 + * lockdep: a downgraded write will live on as a write
  100 + * dependency.
  101 + */
  102 + __downgrade_write(sem);
  103 +}
  104 +
  105 +EXPORT_SYMBOL(downgrade_write);
  106 +
  107 +#ifdef CONFIG_DEBUG_LOCK_ALLOC
  108 +
  109 +void down_read_nested(struct rw_semaphore *sem, int subclass)
  110 +{
  111 + might_sleep();
  112 + rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
  113 +
  114 + LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
  115 +}
  116 +
  117 +EXPORT_SYMBOL(down_read_nested);
  118 +
  119 +void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
  120 +{
  121 + might_sleep();
  122 + rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
  123 +
  124 + LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
  125 +}
  126 +
  127 +EXPORT_SYMBOL(_down_write_nest_lock);
  128 +
  129 +void down_read_non_owner(struct rw_semaphore *sem)
  130 +{
  131 + might_sleep();
  132 +
  133 + __down_read(sem);
  134 +}
  135 +
  136 +EXPORT_SYMBOL(down_read_non_owner);
  137 +
  138 +void down_write_nested(struct rw_semaphore *sem, int subclass)
  139 +{
  140 + might_sleep();
  141 + rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
  142 +
  143 + LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
  144 +}
  145 +
  146 +EXPORT_SYMBOL(down_write_nested);
  147 +
  148 +void up_read_non_owner(struct rw_semaphore *sem)
  149 +{
  150 + __up_read(sem);
  151 +}
  152 +
  153 +EXPORT_SYMBOL(up_read_non_owner);
  154 +
  155 +#endif
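
The file above is only the lockdep-annotated public wrapper around the
__down_*/__up_* primitives from one of the two implementation files.
Typical use of the API looks like this (a minimal, self-contained
sketch; my_sem and my_data are placeholders):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(my_sem);   /* statically initialized rwsem */
    static int my_data;

    static int read_data(void)
    {
            int v;

            down_read(&my_sem);     /* shared: many readers at once */
            v = my_data;
            up_read(&my_sem);
            return v;
    }

    static void write_data(int v)
    {
            down_write(&my_sem);    /* exclusive */
            my_data = v;
            downgrade_write(&my_sem);
            /* still held for read; readers may now run concurrently */
            up_read(&my_sem);
    }
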
kernel/rwsem.c
1   -/* kernel/rwsem.c: R/W semaphores, public implementation
2   - *
3   - * Written by David Howells (dhowells@redhat.com).
4   - * Derived from asm-i386/semaphore.h
5   - */
6   -
7   -#include <linux/types.h>
8   -#include <linux/kernel.h>
9   -#include <linux/sched.h>
10   -#include <linux/export.h>
11   -#include <linux/rwsem.h>
12   -
13   -#include <linux/atomic.h>
14   -
15   -/*
16   - * lock for reading
17   - */
18   -void __sched down_read(struct rw_semaphore *sem)
19   -{
20   - might_sleep();
21   - rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
22   -
23   - LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
24   -}
25   -
26   -EXPORT_SYMBOL(down_read);
27   -
28   -/*
29   - * trylock for reading -- returns 1 if successful, 0 if contention
30   - */
31   -int down_read_trylock(struct rw_semaphore *sem)
32   -{
33   - int ret = __down_read_trylock(sem);
34   -
35   - if (ret == 1)
36   - rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
37   - return ret;
38   -}
39   -
40   -EXPORT_SYMBOL(down_read_trylock);
41   -
42   -/*
43   - * lock for writing
44   - */
45   -void __sched down_write(struct rw_semaphore *sem)
46   -{
47   - might_sleep();
48   - rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
49   -
50   - LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
51   -}
52   -
53   -EXPORT_SYMBOL(down_write);
54   -
55   -/*
56   - * trylock for writing -- returns 1 if successful, 0 if contention
57   - */
58   -int down_write_trylock(struct rw_semaphore *sem)
59   -{
60   - int ret = __down_write_trylock(sem);
61   -
62   - if (ret == 1)
63   - rwsem_acquire(&sem->dep_map, 0, 1, _RET_IP_);
64   - return ret;
65   -}
66   -
67   -EXPORT_SYMBOL(down_write_trylock);
68   -
69   -/*
70   - * release a read lock
71   - */
72   -void up_read(struct rw_semaphore *sem)
73   -{
74   - rwsem_release(&sem->dep_map, 1, _RET_IP_);
75   -
76   - __up_read(sem);
77   -}
78   -
79   -EXPORT_SYMBOL(up_read);
80   -
81   -/*
82   - * release a write lock
83   - */
84   -void up_write(struct rw_semaphore *sem)
85   -{
86   - rwsem_release(&sem->dep_map, 1, _RET_IP_);
87   -
88   - __up_write(sem);
89   -}
90   -
91   -EXPORT_SYMBOL(up_write);
92   -
93   -/*
94   - * downgrade write lock to read lock
95   - */
96   -void downgrade_write(struct rw_semaphore *sem)
97   -{
98   - /*
99   - * lockdep: a downgraded write will live on as a write
100   - * dependency.
101   - */
102   - __downgrade_write(sem);
103   -}
104   -
105   -EXPORT_SYMBOL(downgrade_write);
106   -
107   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
108   -
109   -void down_read_nested(struct rw_semaphore *sem, int subclass)
110   -{
111   - might_sleep();
112   - rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
113   -
114   - LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
115   -}
116   -
117   -EXPORT_SYMBOL(down_read_nested);
118   -
119   -void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest)
120   -{
121   - might_sleep();
122   - rwsem_acquire_nest(&sem->dep_map, 0, 0, nest, _RET_IP_);
123   -
124   - LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
125   -}
126   -
127   -EXPORT_SYMBOL(_down_write_nest_lock);
128   -
129   -void down_read_non_owner(struct rw_semaphore *sem)
130   -{
131   - might_sleep();
132   -
133   - __down_read(sem);
134   -}
135   -
136   -EXPORT_SYMBOL(down_read_non_owner);
137   -
138   -void down_write_nested(struct rw_semaphore *sem, int subclass)
139   -{
140   - might_sleep();
141   - rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
142   -
143   - LOCK_CONTENDED(sem, __down_write_trylock, __down_write);
144   -}
145   -
146   -EXPORT_SYMBOL(down_write_nested);
147   -
148   -void up_read_non_owner(struct rw_semaphore *sem)
149   -{
150   - __up_read(sem);
151   -}
152   -
153   -EXPORT_SYMBOL(up_read_non_owner);
154   -
155   -#endif
lib/Makefile
... ... @@ -42,8 +42,6 @@
42 42 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
43 43 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
44 44 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
45   -lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
46   -lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
47 45 lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
48 46  
49 47 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
lib/rwsem-spinlock.c
1   -/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
2   - * generic spinlock implementation
3   - *
4   - * Copyright (c) 2001 David Howells (dhowells@redhat.com).
5   - * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
6   - * - Derived also from comments by Linus
7   - */
8   -#include <linux/rwsem.h>
9   -#include <linux/sched.h>
10   -#include <linux/export.h>
11   -
12   -enum rwsem_waiter_type {
13   - RWSEM_WAITING_FOR_WRITE,
14   - RWSEM_WAITING_FOR_READ
15   -};
16   -
17   -struct rwsem_waiter {
18   - struct list_head list;
19   - struct task_struct *task;
20   - enum rwsem_waiter_type type;
21   -};
22   -
23   -int rwsem_is_locked(struct rw_semaphore *sem)
24   -{
25   - int ret = 1;
26   - unsigned long flags;
27   -
28   - if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
29   - ret = (sem->activity != 0);
30   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
31   - }
32   - return ret;
33   -}
34   -EXPORT_SYMBOL(rwsem_is_locked);
35   -
36   -/*
37   - * initialise the semaphore
38   - */
39   -void __init_rwsem(struct rw_semaphore *sem, const char *name,
40   - struct lock_class_key *key)
41   -{
42   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
43   - /*
44   - * Make sure we are not reinitializing a held semaphore:
45   - */
46   - debug_check_no_locks_freed((void *)sem, sizeof(*sem));
47   - lockdep_init_map(&sem->dep_map, name, key, 0);
48   -#endif
49   - sem->activity = 0;
50   - raw_spin_lock_init(&sem->wait_lock);
51   - INIT_LIST_HEAD(&sem->wait_list);
52   -}
53   -EXPORT_SYMBOL(__init_rwsem);
54   -
55   -/*
56   - * handle the lock release when there are processes blocked on it that can now run
57   - * - if we come here, then:
58   - * - the 'active count' _reached_ zero
59   - * - the 'waiting count' is non-zero
60   - * - the spinlock must be held by the caller
61   - * - woken process blocks are discarded from the list after having task zeroed
62   - * - writers are only woken if wakewrite is non-zero
63   - */
64   -static inline struct rw_semaphore *
65   -__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
66   -{
67   - struct rwsem_waiter *waiter;
68   - struct task_struct *tsk;
69   - int woken;
70   -
71   - waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
72   -
73   - if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
74   - if (wakewrite)
75   - /* Wake up a writer. Note that we do not grant it the
76   - * lock - it will have to acquire it when it runs. */
77   - wake_up_process(waiter->task);
78   - goto out;
79   - }
80   -
81   - /* grant an infinite number of read locks to the front of the queue */
82   - woken = 0;
83   - do {
84   - struct list_head *next = waiter->list.next;
85   -
86   - list_del(&waiter->list);
87   - tsk = waiter->task;
88   - smp_mb();
89   - waiter->task = NULL;
90   - wake_up_process(tsk);
91   - put_task_struct(tsk);
92   - woken++;
93   - if (next == &sem->wait_list)
94   - break;
95   - waiter = list_entry(next, struct rwsem_waiter, list);
96   - } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
97   -
98   - sem->activity += woken;
99   -
100   - out:
101   - return sem;
102   -}
103   -
104   -/*
105   - * wake a single writer
106   - */
107   -static inline struct rw_semaphore *
108   -__rwsem_wake_one_writer(struct rw_semaphore *sem)
109   -{
110   - struct rwsem_waiter *waiter;
111   -
112   - waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
113   - wake_up_process(waiter->task);
114   -
115   - return sem;
116   -}
117   -
118   -/*
119   - * get a read lock on the semaphore
120   - */
121   -void __sched __down_read(struct rw_semaphore *sem)
122   -{
123   - struct rwsem_waiter waiter;
124   - struct task_struct *tsk;
125   - unsigned long flags;
126   -
127   - raw_spin_lock_irqsave(&sem->wait_lock, flags);
128   -
129   - if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
130   - /* granted */
131   - sem->activity++;
132   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
133   - goto out;
134   - }
135   -
136   - tsk = current;
137   - set_task_state(tsk, TASK_UNINTERRUPTIBLE);
138   -
139   - /* set up my own style of waitqueue */
140   - waiter.task = tsk;
141   - waiter.type = RWSEM_WAITING_FOR_READ;
142   - get_task_struct(tsk);
143   -
144   - list_add_tail(&waiter.list, &sem->wait_list);
145   -
146   - /* we don't need to touch the semaphore struct anymore */
147   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
148   -
149   - /* wait to be given the lock */
150   - for (;;) {
151   - if (!waiter.task)
152   - break;
153   - schedule();
154   - set_task_state(tsk, TASK_UNINTERRUPTIBLE);
155   - }
156   -
157   - tsk->state = TASK_RUNNING;
158   - out:
159   - ;
160   -}
161   -
162   -/*
163   - * trylock for reading -- returns 1 if successful, 0 if contention
164   - */
165   -int __down_read_trylock(struct rw_semaphore *sem)
166   -{
167   - unsigned long flags;
168   - int ret = 0;
169   -
170   -
171   - raw_spin_lock_irqsave(&sem->wait_lock, flags);
172   -
173   - if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
174   - /* granted */
175   - sem->activity++;
176   - ret = 1;
177   - }
178   -
179   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
180   -
181   - return ret;
182   -}
183   -
184   -/*
185   - * get a write lock on the semaphore
186   - */
187   -void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
188   -{
189   - struct rwsem_waiter waiter;
190   - struct task_struct *tsk;
191   - unsigned long flags;
192   -
193   - raw_spin_lock_irqsave(&sem->wait_lock, flags);
194   -
195   - /* set up my own style of waitqueue */
196   - tsk = current;
197   - waiter.task = tsk;
198   - waiter.type = RWSEM_WAITING_FOR_WRITE;
199   - list_add_tail(&waiter.list, &sem->wait_list);
200   -
201   - /* wait for someone to release the lock */
202   - for (;;) {
203   - /*
204   - * This is the key to supporting write lock stealing: it lets the
205   - * task already on a CPU take the lock promptly, rather than going
206   - * to sleep and waiting for the system to wake it (or someone else
207   - * at the head of the wait list).
208   - */
209   - if (sem->activity == 0)
210   - break;
211   - set_task_state(tsk, TASK_UNINTERRUPTIBLE);
212   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
213   - schedule();
214   - raw_spin_lock_irqsave(&sem->wait_lock, flags);
215   - }
216   - /* got the lock */
217   - sem->activity = -1;
218   - list_del(&waiter.list);
219   -
220   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
221   -}
222   -
223   -void __sched __down_write(struct rw_semaphore *sem)
224   -{
225   - __down_write_nested(sem, 0);
226   -}
227   -
228   -/*
229   - * trylock for writing -- returns 1 if successful, 0 if contention
230   - */
231   -int __down_write_trylock(struct rw_semaphore *sem)
232   -{
233   - unsigned long flags;
234   - int ret = 0;
235   -
236   - raw_spin_lock_irqsave(&sem->wait_lock, flags);
237   -
238   - if (sem->activity == 0) {
239   - /* got the lock */
240   - sem->activity = -1;
241   - ret = 1;
242   - }
243   -
244   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
245   -
246   - return ret;
247   -}
248   -
249   -/*
250   - * release a read lock on the semaphore
251   - */
252   -void __up_read(struct rw_semaphore *sem)
253   -{
254   - unsigned long flags;
255   -
256   - raw_spin_lock_irqsave(&sem->wait_lock, flags);
257   -
258   - if (--sem->activity == 0 && !list_empty(&sem->wait_list))
259   - sem = __rwsem_wake_one_writer(sem);
260   -
261   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
262   -}
263   -
264   -/*
265   - * release a write lock on the semaphore
266   - */
267   -void __up_write(struct rw_semaphore *sem)
268   -{
269   - unsigned long flags;
270   -
271   - raw_spin_lock_irqsave(&sem->wait_lock, flags);
272   -
273   - sem->activity = 0;
274   - if (!list_empty(&sem->wait_list))
275   - sem = __rwsem_do_wake(sem, 1);
276   -
277   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
278   -}
279   -
280   -/*
281   - * downgrade a write lock into a read lock
282   - * - just wake up any readers at the front of the queue
283   - */
284   -void __downgrade_write(struct rw_semaphore *sem)
285   -{
286   - unsigned long flags;
287   -
288   - raw_spin_lock_irqsave(&sem->wait_lock, flags);
289   -
290   - sem->activity = 1;
291   - if (!list_empty(&sem->wait_list))
292   - sem = __rwsem_do_wake(sem, 0);
293   -
294   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
295   -}
lib/rwsem.c
1   -/* rwsem.c: R/W semaphores: contention handling functions
2   - *
3   - * Written by David Howells (dhowells@redhat.com).
4   - * Derived from arch/i386/kernel/semaphore.c
5   - *
6   - * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
7   - * and Michel Lespinasse <walken@google.com>
8   - */
9   -#include <linux/rwsem.h>
10   -#include <linux/sched.h>
11   -#include <linux/init.h>
12   -#include <linux/export.h>
13   -
14   -/*
15   - * Initialize an rwsem:
16   - */
17   -void __init_rwsem(struct rw_semaphore *sem, const char *name,
18   - struct lock_class_key *key)
19   -{
20   -#ifdef CONFIG_DEBUG_LOCK_ALLOC
21   - /*
22   - * Make sure we are not reinitializing a held semaphore:
23   - */
24   - debug_check_no_locks_freed((void *)sem, sizeof(*sem));
25   - lockdep_init_map(&sem->dep_map, name, key, 0);
26   -#endif
27   - sem->count = RWSEM_UNLOCKED_VALUE;
28   - raw_spin_lock_init(&sem->wait_lock);
29   - INIT_LIST_HEAD(&sem->wait_list);
30   -}
31   -
32   -EXPORT_SYMBOL(__init_rwsem);
33   -
34   -enum rwsem_waiter_type {
35   - RWSEM_WAITING_FOR_WRITE,
36   - RWSEM_WAITING_FOR_READ
37   -};
38   -
39   -struct rwsem_waiter {
40   - struct list_head list;
41   - struct task_struct *task;
42   - enum rwsem_waiter_type type;
43   -};
44   -
45   -enum rwsem_wake_type {
46   - RWSEM_WAKE_ANY, /* Wake whatever's at head of wait list */
47   - RWSEM_WAKE_READERS, /* Wake readers only */
48   - RWSEM_WAKE_READ_OWNED /* Waker thread holds the read lock */
49   -};
50   -
51   -/*
52   - * handle the lock release when there are processes blocked on it that can now run
53   - * - if we come here from up_xxxx(), then:
54   - * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
55   - * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
56   - * - there must be someone on the queue
57   - * - the spinlock must be held by the caller
58   - * - woken process blocks are discarded from the list after having task zeroed
59   - * - writers are only woken if wake_type is RWSEM_WAKE_ANY
60   - */
61   -static struct rw_semaphore *
62   -__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
63   -{
64   - struct rwsem_waiter *waiter;
65   - struct task_struct *tsk;
66   - struct list_head *next;
67   - long oldcount, woken, loop, adjustment;
68   -
69   - waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
70   - if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
71   - if (wake_type == RWSEM_WAKE_ANY)
72   - /* Wake writer at the front of the queue, but do not
73   - * grant it the lock yet as we want other writers
74   - * to be able to steal it. Readers, on the other hand,
75   - * will block as they will notice the queued writer.
76   - */
77   - wake_up_process(waiter->task);
78   - goto out;
79   - }
80   -
81   - /* Writers might steal the lock before we grant it to the next reader.
82   - * We prefer to do the first reader grant before counting readers
83   - * so we can bail out early if a writer stole the lock.
84   - */
85   - adjustment = 0;
86   - if (wake_type != RWSEM_WAKE_READ_OWNED) {
87   - adjustment = RWSEM_ACTIVE_READ_BIAS;
88   - try_reader_grant:
89   - oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
90   - if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
91   - /* A writer stole the lock. Undo our reader grant. */
92   - if (rwsem_atomic_update(-adjustment, sem) &
93   - RWSEM_ACTIVE_MASK)
94   - goto out;
95   - /* Last active locker left. Retry waking readers. */
96   - goto try_reader_grant;
97   - }
98   - }
99   -
100   - /* Grant an infinite number of read locks to the readers at the front
101   - * of the queue. Note we increment the 'active part' of the count by
102   - * the number of readers before waking any processes up.
103   - */
104   - woken = 0;
105   - do {
106   - woken++;
107   -
108   - if (waiter->list.next == &sem->wait_list)
109   - break;
110   -
111   - waiter = list_entry(waiter->list.next,
112   - struct rwsem_waiter, list);
113   -
114   - } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
115   -
116   - adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
117   - if (waiter->type != RWSEM_WAITING_FOR_WRITE)
118   - /* hit end of list above */
119   - adjustment -= RWSEM_WAITING_BIAS;
120   -
121   - if (adjustment)
122   - rwsem_atomic_add(adjustment, sem);
123   -
124   - next = sem->wait_list.next;
125   - loop = woken;
126   - do {
127   - waiter = list_entry(next, struct rwsem_waiter, list);
128   - next = waiter->list.next;
129   - tsk = waiter->task;
130   - smp_mb();
131   - waiter->task = NULL;
132   - wake_up_process(tsk);
133   - put_task_struct(tsk);
134   - } while (--loop);
135   -
136   - sem->wait_list.next = next;
137   - next->prev = &sem->wait_list;
138   -
139   - out:
140   - return sem;
141   -}
142   -
143   -/*
144   - * wait for the read lock to be granted
145   - */
146   -struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
147   -{
148   - long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
149   - struct rwsem_waiter waiter;
150   - struct task_struct *tsk = current;
151   -
152   - /* set up my own style of waitqueue */
153   - waiter.task = tsk;
154   - waiter.type = RWSEM_WAITING_FOR_READ;
155   - get_task_struct(tsk);
156   -
157   - raw_spin_lock_irq(&sem->wait_lock);
158   - if (list_empty(&sem->wait_list))
159   - adjustment += RWSEM_WAITING_BIAS;
160   - list_add_tail(&waiter.list, &sem->wait_list);
161   -
162   - /* we're now waiting on the lock, but no longer actively locking */
163   - count = rwsem_atomic_update(adjustment, sem);
164   -
165   - /* If there are no active locks, wake the front queued process(es).
166   - *
167   - * If there are no writers and we are first in the queue,
168   - * wake our own waiter to join the existing active readers!
169   - */
170   - if (count == RWSEM_WAITING_BIAS ||
171   - (count > RWSEM_WAITING_BIAS &&
172   - adjustment != -RWSEM_ACTIVE_READ_BIAS))
173   - sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
174   -
175   - raw_spin_unlock_irq(&sem->wait_lock);
176   -
177   - /* wait to be given the lock */
178   - while (true) {
179   - set_task_state(tsk, TASK_UNINTERRUPTIBLE);
180   - if (!waiter.task)
181   - break;
182   - schedule();
183   - }
184   -
185   - tsk->state = TASK_RUNNING;
186   -
187   - return sem;
188   -}
189   -
190   -/*
191   - * wait until we successfully acquire the write lock
192   - */
193   -struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
194   -{
195   - long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
196   - struct rwsem_waiter waiter;
197   - struct task_struct *tsk = current;
198   -
199   - /* set up my own style of waitqueue */
200   - waiter.task = tsk;
201   - waiter.type = RWSEM_WAITING_FOR_WRITE;
202   -
203   - raw_spin_lock_irq(&sem->wait_lock);
204   - if (list_empty(&sem->wait_list))
205   - adjustment += RWSEM_WAITING_BIAS;
206   - list_add_tail(&waiter.list, &sem->wait_list);
207   -
208   - /* we're now waiting on the lock, but no longer actively locking */
209   - count = rwsem_atomic_update(adjustment, sem);
210   -
211   - /* If there were already threads queued before us and there are no
212   - * active writers, the lock must be read owned; so we try to wake
213   - * any read locks that were queued ahead of us. */
214   - if (count > RWSEM_WAITING_BIAS &&
215   - adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
216   - sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);
217   -
218   - /* wait until we successfully acquire the lock */
219   - set_task_state(tsk, TASK_UNINTERRUPTIBLE);
220   - while (true) {
221   - if (!(count & RWSEM_ACTIVE_MASK)) {
222   - /* Try acquiring the write lock. */
223   - count = RWSEM_ACTIVE_WRITE_BIAS;
224   - if (!list_is_singular(&sem->wait_list))
225   - count += RWSEM_WAITING_BIAS;
226   -
227   - if (sem->count == RWSEM_WAITING_BIAS &&
228   - cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
229   - RWSEM_WAITING_BIAS)
230   - break;
231   - }
232   -
233   - raw_spin_unlock_irq(&sem->wait_lock);
234   -
235   - /* Block until there are no active lockers. */
236   - do {
237   - schedule();
238   - set_task_state(tsk, TASK_UNINTERRUPTIBLE);
239   - } while ((count = sem->count) & RWSEM_ACTIVE_MASK);
240   -
241   - raw_spin_lock_irq(&sem->wait_lock);
242   - }
243   -
244   - list_del(&waiter.list);
245   - raw_spin_unlock_irq(&sem->wait_lock);
246   - tsk->state = TASK_RUNNING;
247   -
248   - return sem;
249   -}
250   -
251   -/*
252   - * handle waking up a waiter on the semaphore
253   - * - up_read/up_write has decremented the active part of count if we come here
254   - */
255   -struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
256   -{
257   - unsigned long flags;
258   -
259   - raw_spin_lock_irqsave(&sem->wait_lock, flags);
260   -
261   - /* do nothing if list empty */
262   - if (!list_empty(&sem->wait_list))
263   - sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
264   -
265   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
266   -
267   - return sem;
268   -}
269   -
270   -/*
271   - * downgrade a write lock into a read lock
272   - * - caller incremented waiting part of count and discovered it still negative
273   - * - just wake up any readers at the front of the queue
274   - */
275   -struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
276   -{
277   - unsigned long flags;
278   -
279   - raw_spin_lock_irqsave(&sem->wait_lock, flags);
280   -
281   - /* do nothing if list empty */
282   - if (!list_empty(&sem->wait_list))
283   - sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
284   -
285   - raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
286   -
287   - return sem;
288   -}
289   -
290   -EXPORT_SYMBOL(rwsem_down_read_failed);
291   -EXPORT_SYMBOL(rwsem_down_write_failed);
292   -EXPORT_SYMBOL(rwsem_wake);
293   -EXPORT_SYMBOL(rwsem_downgrade_wake);