Blame view
include/linux/rwsem.h
5.12 KB
1da177e4c
|
1 2 3 4 5 6 7 8 9 10 |
/* rwsem.h: R/W semaphores, public interface * * Written by David Howells (dhowells@redhat.com). * Derived from asm-i386/semaphore.h */ #ifndef _LINUX_RWSEM_H #define _LINUX_RWSEM_H #include <linux/linkage.h> |
1da177e4c
|
11 12 |
#include <linux/types.h> #include <linux/kernel.h> |
c16a87ce0
|
13 14 |
#include <linux/list.h> #include <linux/spinlock.h> |
60063497a
|
15 |
#include <linux/atomic.h> |
5db6c6fef
|
16 |
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER |
90631822c
|
17 |
#include <linux/osq_lock.h> |
5db6c6fef
|
18 |
#endif |
1da177e4c
|
19 20 21 22 23 24 |
struct rw_semaphore;

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
#include <linux/rwsem-spinlock.h> /* use a generic implementation */
#else
/* All arch specific implementations share the same struct */
struct rw_semaphore {
	long count;			/* lock state; non-zero means locked (see rwsem_is_locked) */
	struct list_head wait_list;	/* tasks sleeping while waiting for the lock */
	raw_spinlock_t wait_lock;	/* serializes access to wait_list */
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
	/*
	 * Write owner. Used as a speculative check to see
	 * if the owner is running on the cpu.
	 */
	struct task_struct *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;	/* lockdep tracking of this lock instance */
#endif
};

/* Out-of-line slow paths, entered when the fast path cannot complete */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/* Include the arch specific part */
#include <asm/rwsem.h>

/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return sem->count != 0;
}

#endif
12249b344
|
56 57 58 59 60 61 62 |
/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Note the leading comma: this expands inside __RWSEM_INITIALIZER's
 * brace list, appending the lockdep map member when enabled.
 */
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/* Same leading-comma trick for the optimistic-spinning members */
#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

/* Static initializer for a struct rw_semaphore in the unlocked state */
#define __RWSEM_INITIALIZER(name)				\
	{ .count = RWSEM_UNLOCKED_VALUE,			\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
	  __RWSEM_OPT_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }

/* Define and statically initialize an unlocked rwsem */
#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

/*
 * Runtime initialization. The static __key gives each init_rwsem()
 * call site its own lock class key (one key per macro expansion).
 */
#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
1da177e4c
|
87 |
/*
 * Identical for every rwsem implementation: a heuristic for a task
 * already holding the rwsem to check whether some other task of an
 * incompatible type is queued waiting for the lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return list_empty(&sem->wait_list) ? 0 : 1;
}

/*
 * lock for reading
 */
4ea2176df
|
101 |
/* May sleep; not callable from atomic context */
extern void down_read(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing (exclusive; may sleep)
 */
extern void down_write(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);

/*
 * downgrade write lock to read lock
 */
4ea2176df
|
131 132 133 134 |
/* Atomically convert a held write lock into a read lock */
extern void downgrade_write(struct rw_semaphore *sem);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.txt for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

/*
 * Annotate for lockdep that this write lock nests inside nest_lock.
 *
 * Fix: no semicolon after "while (0)". The do/while(0) wrapper exists
 * so the macro expands to exactly one statement and the CALLER supplies
 * the semicolon; a trailing semicolon here produced an extra empty
 * statement that breaks use in an unbraced if/else, and was inconsistent
 * with init_rwsem() above.
 */
# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)

/*
 * Take/release a lock when not the owner will release it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);

#else
/* Without lockdep, the nesting annotations reduce to the plain ops */
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_write_nest_lock(sem, nest_lock)	down_write(sem)
# define down_write_nested(sem, subclass)	down_write(sem)
# define down_read_non_owner(sem)		down_read(sem)
# define up_read_non_owner(sem)			up_read(sem)
#endif
1da177e4c
|
171 |
|
1da177e4c
|
172 |
#endif /* _LINUX_RWSEM_H */ |