Blame view
lib/rwsem-spinlock.c
6.66 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 |
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for * generic spinlock implementation * * Copyright (c) 2001 David Howells (dhowells@redhat.com). * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de> * - Derived also from comments by Linus */ #include <linux/rwsem.h> #include <linux/sched.h> |
8bc3bcc93
|
10 |
#include <linux/export.h> |
/*
 * One entry in a semaphore's wait queue.  Readers and writers queue the
 * same structure (allocated on the sleeper's stack) on sem->wait_list
 * and distinguish themselves via ->flags.
 */
struct rwsem_waiter {
	struct list_head list;		/* entry in sem->wait_list */
	struct task_struct *task;	/* sleeping task; the waker clears this
					 * for readers to signal "lock granted" */
	unsigned int flags;		/* one of the RWSEM_WAITING_* values */
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
29671f22a
|
19 20 21 22 |
int rwsem_is_locked(struct rw_semaphore *sem) { int ret = 1; unsigned long flags; |
ddb6c9b58
|
23 |
if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { |
29671f22a
|
24 |
ret = (sem->activity != 0); |
ddb6c9b58
|
25 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
29671f22a
|
26 27 28 29 |
} return ret; } EXPORT_SYMBOL(rwsem_is_locked); |
1da177e4c
|
30 31 32 |
/* * initialise the semaphore */ |
4ea2176df
|
33 34 |
void __init_rwsem(struct rw_semaphore *sem, const char *name, struct lock_class_key *key) |
1da177e4c
|
35 |
{ |
4ea2176df
|
36 37 38 39 40 |
#ifdef CONFIG_DEBUG_LOCK_ALLOC /* * Make sure we are not reinitializing a held semaphore: */ debug_check_no_locks_freed((void *)sem, sizeof(*sem)); |
4dfbb9d8c
|
41 |
lockdep_init_map(&sem->dep_map, name, key, 0); |
4ea2176df
|
42 |
#endif |
1da177e4c
|
43 |
sem->activity = 0; |
ddb6c9b58
|
44 |
raw_spin_lock_init(&sem->wait_lock); |
1da177e4c
|
45 |
INIT_LIST_HEAD(&sem->wait_list); |
1da177e4c
|
46 |
} |
118d52da1
|
47 |
EXPORT_SYMBOL(__init_rwsem); |
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 *   - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		/* a waiting writer blocks everyone behind it; with writers
		 * excluded from wakeup there is nothing to do */
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/*
	 * as we support write lock stealing, we can't set sem->activity
	 * to -1 here to indicate we get the lock. Instead, we wake it up
	 * to let it go get it again.
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		wake_up_process(waiter->task);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		/*
		 * Order the list removal before clearing ->task: the woken
		 * reader in __down_read() watches waiter.task, and once it
		 * sees NULL it may return and release its on-stack waiter.
		 */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);	/* drop the ref __down_read() took */
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	/* all the woken readers now hold the lock together */
	sem->activity += woken;

 out:
	return sem;
}

/*
 * wake a single writer
 * - caller holds sem->wait_lock; the head waiter is known to be a writer
 * - the writer is not granted the lock here: it wakes and re-competes for
 *   it itself (write lock stealing)
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	wake_up_process(waiter->task);

	return sem;
}

/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;	/* on-stack wait record */
	struct task_struct *tsk;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted: no writer holds it and nobody is queued ahead */
		sem->activity++;
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);	/* matched by put_task_struct() in __rwsem_do_wake() */

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock: the waker clears waiter.task (after an
	 * smp_mb()) to signal that the read lock has been granted */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;	/* a label must precede a statement */
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
9f741cb8f
|
166 |
int __down_read_trylock(struct rw_semaphore *sem) |
1da177e4c
|
167 168 169 |
{ unsigned long flags; int ret = 0; |
1da177e4c
|
170 |
|
ddb6c9b58
|
171 |
raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4c
|
172 173 174 175 176 177 |
if (sem->activity >= 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity++; ret = 1; } |
ddb6c9b58
|
178 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
179 |
|
1da177e4c
|
180 181 182 183 184 |
return ret; } /* * get a write lock on the semaphore |
1da177e4c
|
185 |
*/ |
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	struct rwsem_waiter waiter;	/* on-stack wait record */
	struct task_struct *tsk;
	unsigned long flags;

	/* NOTE(review): subclass (lockdep nesting level) is not referenced in
	 * this body — presumably consumed by a caller/wrapper; confirm */
	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* set up my own style of waitqueue */
	tsk = current;
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* wait for someone to release the lock */
	for (;;) {
		/*
		 * That is the key to support write lock stealing: allows the
		 * task already on CPU to get the lock soon rather than put
		 * itself into sleep and waiting for system woke it or someone
		 * else in the head of the wait list up.
		 */
		if (sem->activity == 0)
			break;
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		/* drop the spinlock while asleep; re-check activity with it
		 * held again after waking */
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		schedule();
		raw_spin_lock_irqsave(&sem->wait_lock, flags);
	}
	/* got the lock: -1 marks the semaphore write-held */
	sem->activity = -1;
	list_del(&waiter.list);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
/* get a write lock on the semaphore: delegate with subclass 0 (subclass is
 * unused by __down_write_nested() in this implementation) */
void __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}
1da177e4c
|
224 225 226 |
/* * trylock for writing -- returns 1 if successful, 0 if contention */ |
9f741cb8f
|
227 |
int __down_write_trylock(struct rw_semaphore *sem) |
1da177e4c
|
228 229 230 |
{ unsigned long flags; int ret = 0; |
ddb6c9b58
|
231 |
raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4c
|
232 |
|
41ef8f826
|
233 234 |
if (sem->activity == 0) { /* got the lock */ |
1da177e4c
|
235 236 237 |
sem->activity = -1; ret = 1; } |
ddb6c9b58
|
238 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
239 |
|
1da177e4c
|
240 241 242 243 244 245 |
return ret; } /* * release a read lock on the semaphore */ |
void __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* if this was the last active reader and a waiter is queued (the head
	 * must be a writer, or it would already hold a read lock), wake it */
	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* drop the write lock (-1 -> 0), then wake queued tasks; wakewrite=1
	 * allows a head writer to be woken as well as readers */
	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* the caller's write lock becomes a single read lock (activity 1);
	 * wakewrite=0 so only readers at the head of the queue are woken */
	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}