Blame view
lib/rwsem-spinlock.c
6.5 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 |
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for * generic spinlock implementation * * Copyright (c) 2001 David Howells (dhowells@redhat.com). * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de> * - Derived also from comments by Linus */ #include <linux/rwsem.h> #include <linux/sched.h> |
8bc3bcc93
|
10 |
#include <linux/export.h> |
1da177e4c
|
11 |
|
e2d57f782
|
12 13 14 15 |
/* what a queued task is waiting to acquire; checked in __rwsem_do_wake */
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};
1da177e4c
|
16 17 18 |
/*
 * One entry on sem->wait_list.  It is allocated on the sleeping task's
 * kernel stack (see the local `waiter` in __down_read/__down_write_nested),
 * so it is only valid while that task is still blocked.
 */
struct rwsem_waiter {
	struct list_head list;		/* link in sem->wait_list */
	struct task_struct *task;	/* task to wake; cleared by the waker for readers */
	enum rwsem_waiter_type type;	/* read or write waiter */
};
29671f22a
|
21 22 23 24 |
int rwsem_is_locked(struct rw_semaphore *sem) { int ret = 1; unsigned long flags; |
ddb6c9b58
|
25 |
if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { |
29671f22a
|
26 |
ret = (sem->activity != 0); |
ddb6c9b58
|
27 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
29671f22a
|
28 29 30 31 |
} return ret; } EXPORT_SYMBOL(rwsem_is_locked); |
1da177e4c
|
32 33 34 |
/*
 * initialise the semaphore
 * - sem->activity encodes the lock state: 0 = unlocked, N > 0 = N readers
 *   hold it, -1 = one writer holds it (see the lock/unlock paths below)
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
1da177e4c
|
50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 |
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 *   - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wakewrite)
			/* Wake up a writer. Note that we do not grant it the
			 * lock - it will have to acquire it when it runs. */
			wake_up_process(waiter->task);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
	woken = 0;
	do {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		/*
		 * Make sure the list removal is visible before ->task is
		 * cleared: the sleeping reader spins on waiter.task (see
		 * __down_read) and may return -- freeing its on-stack
		 * waiter -- as soon as it observes NULL.
		 */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		/* drop the reference taken by get_task_struct() in __down_read */
		put_task_struct(tsk);
		woken++;
		if (next == &sem->wait_list)
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	/* each woken reader counts as one active reader */
	sem->activity += woken;

 out:
	return sem;
}

/*
 * wake a single writer
 * - wakes the waiter at the head of the queue; it must acquire the lock
 *   itself when it runs (the caller holds sem->wait_lock)
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	wake_up_process(waiter->task);

	return sem;
}
9f741cb8f
|
115 |
/*
 * get a read lock on the semaphore
 * - grants immediately if no writer holds the lock and nobody is queued;
 *   otherwise queues an on-stack waiter and sleeps uninterruptibly until
 *   a releaser clears waiter.task (see __rwsem_do_wake)
 */
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	/* pin the task so the waker's wake_up_process/put_task_struct is safe
	 * even if we are otherwise exiting */
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock: the waker signals the grant by
	 * NULLing waiter.task */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}
9f741cb8f
|
159 |
int __down_read_trylock(struct rw_semaphore *sem) |
1da177e4c
|
160 161 162 |
{ unsigned long flags; int ret = 0; |
1da177e4c
|
163 |
|
ddb6c9b58
|
164 |
raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4c
|
165 166 167 168 169 170 |
if (sem->activity >= 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity++; ret = 1; } |
ddb6c9b58
|
171 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
172 |
|
1da177e4c
|
173 174 175 176 177 |
return ret; } /* * get a write lock on the semaphore |
1da177e4c
|
178 |
/*
 * get a write lock on the semaphore
 * - queues an on-stack waiter, then polls for sem->activity == 0 under
 *   wait_lock, sleeping between attempts ("write lock stealing": any
 *   writer that reaches the check first may take the lock, not just the
 *   head of the queue)
 * NOTE(review): `subclass` is not used in this body -- presumably consumed
 * by lockdep annotations at the call site; confirm before relying on it.
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* set up my own style of waitqueue */
	tsk = current;
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* wait for someone to release the lock */
	for (;;) {
		/*
		 * That is the key to support write lock stealing: allows the
		 * task already on CPU to get the lock soon rather than put
		 * itself into sleep and waiting for system woke it or someone
		 * else in the head of the wait list up.
		 */
		if (sem->activity == 0)
			break;
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		/* drop the lock while sleeping so releasers can proceed */
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		schedule();
		raw_spin_lock_irqsave(&sem->wait_lock, flags);
	}
	/* got the lock: mark writer-held and dequeue ourselves */
	sem->activity = -1;
	list_del(&waiter.list);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
9f741cb8f
|
213 |
/*
 * get a write lock on the semaphore
 * - plain variant: delegates to __down_write_nested() with subclass 0
 */
void __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}
1da177e4c
|
217 218 219 |
/* * trylock for writing -- returns 1 if successful, 0 if contention */ |
9f741cb8f
|
220 |
int __down_write_trylock(struct rw_semaphore *sem) |
1da177e4c
|
221 222 223 |
{ unsigned long flags; int ret = 0; |
ddb6c9b58
|
224 |
raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4c
|
225 |
|
41ef8f826
|
226 227 |
if (sem->activity == 0) { /* got the lock */ |
1da177e4c
|
228 229 230 |
sem->activity = -1; ret = 1; } |
ddb6c9b58
|
231 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
232 |
|
1da177e4c
|
233 234 235 236 237 238 |
return ret; } /* * release a read lock on the semaphore */ |
9f741cb8f
|
239 |
/*
 * release a read lock on the semaphore
 * - if we were the last active reader and tasks are queued, wake the
 *   waiter at the head of the queue (a writer -- readers at the head
 *   would have been granted the lock together with us)
 */
void __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
9f741cb8f
|
252 |
/*
 * release a write lock on the semaphore
 * - mark the lock free, then hand it on: wake all readers at the front
 *   of the queue, or a single writer (wakewrite=1 permits waking writers)
 */
void __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
9f741cb8f
|
267 |
/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 * - activity becomes 1: we keep the lock as the (so far only) reader;
 *   wakewrite=0 so a writer at the head of the queue is left sleeping
 */
void __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}