Blame view
lib/rwsem-spinlock.c
6.94 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 |
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * One of these is allocated on the waiter's own stack for each task
 * queued on the semaphore (see __down_read/__down_write_nested), and
 * linked into sem->wait_list until the task is granted the lock.
 */
struct rwsem_waiter {
	struct list_head list;		/* entry in sem->wait_list */
	struct task_struct *task;	/* NULLed by the waker once granted */
	unsigned int flags;		/* what the task is waiting for */
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
29671f22a
|
19 20 21 22 23 24 25 26 27 28 29 30 |
/*
 * Report whether the semaphore is currently held.
 *
 * Only a best-effort peek: if the wait_lock itself is contended we
 * cannot safely inspect ->activity, so we conservatively report the
 * semaphore as locked.
 */
int rwsem_is_locked(struct rw_semaphore *sem)
{
	unsigned long flags;
	int locked;

	if (!spin_trylock_irqsave(&sem->wait_lock, flags))
		return 1;	/* contended: assume locked */

	locked = (sem->activity != 0);
	spin_unlock_irqrestore(&sem->wait_lock, flags);
	return locked;
}
EXPORT_SYMBOL(rwsem_is_locked);
1da177e4c
|
31 32 33 |
/*
 * initialise the semaphore
 *
 * name/key are used only by lockdep (CONFIG_DEBUG_LOCK_ALLOC) to
 * identify this lock class; they are ignored otherwise.
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;		/* 0 = free, >0 = readers, -1 = writer */
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
1da177e4c
|
49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 |
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		/* caller still holds a read lock: a queued writer at the
		 * head must keep everyone behind it waiting */
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/* if we are allowed to wake writers try to grant a single write lock
	 * if there's a writer at the front of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		tsk = waiter->task;
		/* Don't touch waiter after ->task has been NULLed: the
		 * waiter spins on ->task and may free its stack frame as
		 * soon as it sees NULL */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		/* remember the next entry before NULLing ->task, after
		 * which 'waiter' must not be touched */
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	sem->activity += woken;

 out:
	return sem;
}

/*
 * wake a single writer
 * - caller holds sem->wait_lock and the queue is known non-empty;
 *   assumes (without checking) that the head waiter is a writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;	/* mark write-held before handing over */

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	/* same publish ordering as __rwsem_do_wake: waiter may vanish
	 * once ->task is NULLed */
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	return sem;
}

/*
 * get a read lock on the semaphore
 */
9f741cb8f
|
139 |
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;	/* lives on our stack while queued */
	struct task_struct *tsk;
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* fast path: no writer active and nobody queued ahead of us */
	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	/* must go non-runnable *before* we can be found by a waker */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);	/* waker's wake_up_process() reference */

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock: the waker NULLs waiter.task after
	 * removing us from the queue (see __rwsem_do_wake) */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
9f741cb8f
|
183 |
int __down_read_trylock(struct rw_semaphore *sem) |
1da177e4c
|
184 185 186 |
{ unsigned long flags; int ret = 0; |
1da177e4c
|
187 188 189 190 191 192 193 194 195 196 |
spin_lock_irqsave(&sem->wait_lock, flags); if (sem->activity >= 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity++; ret = 1; } spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
197 198 199 200 201 202 203 |
return ret; } /* * get a write lock on the semaphore * - we increment the waiting count anyway to indicate an exclusive lock */ |
9f741cb8f
|
204 |
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	struct rwsem_waiter waiter;	/* lives on our stack while queued */
	struct task_struct *tsk;
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* fast path: completely idle and nobody queued */
	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	/* must go non-runnable *before* we can be found by a waker */
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);	/* waker's wake_up_process() reference */

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock: the waker NULLs waiter.task after
	 * removing us from the queue (see __rwsem_do_wake) */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
	;
}
9f741cb8f
|
244 |
/* plain write lock == nested write lock at lockdep subclass 0 */
void __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}
1da177e4c
|
248 249 250 |
/* * trylock for writing -- returns 1 if successful, 0 if contention */ |
9f741cb8f
|
251 |
int __down_write_trylock(struct rw_semaphore *sem) |
1da177e4c
|
252 253 254 |
{ unsigned long flags; int ret = 0; |
1da177e4c
|
255 256 257 258 259 260 261 262 263 |
spin_lock_irqsave(&sem->wait_lock, flags); if (sem->activity == 0 && list_empty(&sem->wait_list)) { /* granted */ sem->activity = -1; ret = 1; } spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
264 265 266 267 268 269 |
return ret; } /* * release a read lock on the semaphore */ |
9f741cb8f
|
270 |
void __up_read(struct rw_semaphore *sem) |
1da177e4c
|
271 272 |
{ unsigned long flags; |
1da177e4c
|
273 274 275 276 277 278 |
spin_lock_irqsave(&sem->wait_lock, flags); if (--sem->activity == 0 && !list_empty(&sem->wait_list)) sem = __rwsem_wake_one_writer(sem); spin_unlock_irqrestore(&sem->wait_lock, flags); |
1da177e4c
|
279 280 281 282 283 |
} /* * release a write lock on the semaphore */ |
9f741cb8f
|
284 |
void __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* drop exclusive ownership, then wake whatever is queued
	 * (wakewrite=1: a writer at the head may be granted the lock) */
	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
9f741cb8f
|
300 |
void __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	/* we become the one active reader; wake further queued readers
	 * but no writers (wakewrite=0) since we still hold it shared */
	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}