Blame view
lib/rwsem-spinlock.c
7.01 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 |
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * One queued waiter on sem->wait_list.  The structure lives on the
 * waiting task's stack; the waker unlinks it and NULLs ->task to signal
 * that the lock has been granted (see the wake/down functions below).
 */
struct rwsem_waiter {
	struct list_head list;		/* link in sem->wait_list (FIFO) */
	struct task_struct *task;	/* sleeper; NULLed by waker on grant */
	unsigned int flags;		/* one of the WAITING_FOR_* bits */
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
29671f22a
|
19 20 21 22 |
int rwsem_is_locked(struct rw_semaphore *sem) { int ret = 1; unsigned long flags; |
ddb6c9b58
|
23 |
if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { |
29671f22a
|
24 |
ret = (sem->activity != 0); |
ddb6c9b58
|
25 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
29671f22a
|
26 27 28 29 |
} return ret; } EXPORT_SYMBOL(rwsem_is_locked); |
1da177e4c
|
30 31 32 |
/* * initialise the semaphore */ |
4ea2176df
|
33 34 |
void __init_rwsem(struct rw_semaphore *sem, const char *name, struct lock_class_key *key) |
1da177e4c
|
35 |
{ |
4ea2176df
|
36 37 38 39 40 |
#ifdef CONFIG_DEBUG_LOCK_ALLOC /* * Make sure we are not reinitializing a held semaphore: */ debug_check_no_locks_freed((void *)sem, sizeof(*sem)); |
4dfbb9d8c
|
41 |
lockdep_init_map(&sem->dep_map, name, key, 0); |
4ea2176df
|
42 |
#endif |
1da177e4c
|
43 |
sem->activity = 0; |
ddb6c9b58
|
44 |
raw_spin_lock_init(&sem->wait_lock); |
1da177e4c
|
45 |
INIT_LIST_HEAD(&sem->wait_list); |
1da177e4c
|
46 |
} |
118d52da1
|
47 |
EXPORT_SYMBOL(__init_rwsem); |
1da177e4c
|
48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 |
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 *   - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	/* queue is known non-empty (see preconditions above) */
	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		/* caller forbids waking writers: if one is at the head we
		 * can wake nobody; otherwise fall through to wake readers */
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/* if we are allowed to wake writers try to grant a single write lock
	 * if there's a writer at the front of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;	/* mark write-held before waking */
		list_del(&waiter->list);
		tsk = waiter->task;
		/* Don't touch waiter after ->task has been NULLed: the woken
		 * task spins on waiter.task and may free its stack frame as
		 * soon as it observes NULL (see __down_write_nested) */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		/* drop the ref taken by the sleeper in get_task_struct() */
		put_task_struct(tsk);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		/* remember the successor before the entry is unlinked */
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		/* same ordering rule as above: NULLing ->task releases the
		 * waiter structure to its owner */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	/* account all the read locks just granted in one go */
	sem->activity += woken;

 out:
	return sem;
}

/*
 * wake a single writer
 * - caller holds sem->wait_lock and the queue is non-empty; the head
 *   waiter is granted the write lock unconditionally
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;	/* write-held */

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	/* order list_del/task read before the NULL store that hands the
	 * waiter frame back to the sleeping task */
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	return sem;
}

/*
 * get a read lock on the semaphore
 */
9f741cb8f
|
138 |
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;	/* lives on this stack frame */
	struct task_struct *tsk;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted: no writer holds it and nobody is queued ahead */
		sem->activity++;
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	/* must go to sleep: mark ourselves not-running BEFORE queueing so
	 * a wakeup between unlock and schedule() is not lost */
	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	/* hold a task ref for the waker; dropped in __rwsem_do_wake() /
	 * __rwsem_wake_one_writer() via put_task_struct() */
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock: the waker signals the grant by
	 * NULLing waiter.task (after its smp_mb) */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
9f741cb8f
|
182 |
int __down_read_trylock(struct rw_semaphore *sem) |
1da177e4c
|
183 184 185 |
{ unsigned long flags; int ret = 0; |
1da177e4c
|
186 |
|
ddb6c9b58
|
187 |
raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4c
|
188 189 190 191 192 193 |
if (sem->activity >= 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity++; ret = 1; } |
ddb6c9b58
|
194 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
195 |
|
1da177e4c
|
196 197 198 199 200 201 202 |
return ret; } /* * get a write lock on the semaphore * - we increment the waiting count anyway to indicate an exclusive lock */ |
9f741cb8f
|
203 |
/* NOTE(review): 'subclass' (lockdep nesting level) is not referenced in
 * this body — presumably consumed by lockdep annotations at a wrapper
 * level outside this file; confirm against the callers. */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	struct rwsem_waiter waiter;	/* lives on this stack frame */
	struct task_struct *tsk;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted: completely idle, take it exclusively */
		sem->activity = -1;
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	/* must sleep: mark not-running BEFORE queueing so a wakeup between
	 * unlock and schedule() is not lost */
	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	/* ref for the waker; dropped there via put_task_struct() */
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock: waker NULLs waiter.task on grant */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}
9f741cb8f
|
243 |
void __sched __down_write(struct rw_semaphore *sem)
{
	/* plain write lock == nested variant with lockdep subclass 0 */
	__down_write_nested(sem, 0);
}
1da177e4c
|
247 248 249 |
/* * trylock for writing -- returns 1 if successful, 0 if contention */ |
9f741cb8f
|
250 |
int __down_write_trylock(struct rw_semaphore *sem) |
1da177e4c
|
251 252 253 |
{ unsigned long flags; int ret = 0; |
ddb6c9b58
|
254 |
raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4c
|
255 256 257 258 259 260 |
if (sem->activity == 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity = -1; ret = 1; } |
ddb6c9b58
|
261 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
262 |
|
1da177e4c
|
263 264 265 266 267 268 |
return ret; } /* * release a read lock on the semaphore */ |
9f741cb8f
|
269 |
void __up_read(struct rw_semaphore *sem) |
1da177e4c
|
270 271 |
{ unsigned long flags; |
ddb6c9b58
|
272 |
raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4c
|
273 274 275 |
if (--sem->activity == 0 && !list_empty(&sem->wait_list)) sem = __rwsem_wake_one_writer(sem); |
ddb6c9b58
|
276 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
277 278 279 280 281 |
} /* * release a write lock on the semaphore */ |
9f741cb8f
|
282 |
void __up_write(struct rw_semaphore *sem) |
1da177e4c
|
283 284 |
{ unsigned long flags; |
ddb6c9b58
|
285 |
raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4c
|
286 287 288 289 |
sem->activity = 0; if (!list_empty(&sem->wait_list)) sem = __rwsem_do_wake(sem, 1); |
ddb6c9b58
|
290 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
291 292 293 294 295 296 |
} /* * downgrade a write lock into a read lock * - just wake up any readers at the front of the queue */ |
9f741cb8f
|
297 |
void __downgrade_write(struct rw_semaphore *sem) |
1da177e4c
|
298 299 |
{ unsigned long flags; |
ddb6c9b58
|
300 |
raw_spin_lock_irqsave(&sem->wait_lock, flags); |
1da177e4c
|
301 302 303 304 |
sem->activity = 1; if (!list_empty(&sem->wait_list)) sem = __rwsem_do_wake(sem, 0); |
ddb6c9b58
|
305 |
raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
306 |
} |