#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>

#include <asm/system.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME

/*
 * NOTE(review): the blame-view extraction had eaten the "\n" escapes in
 * these inline-asm string literals; they are restored here so the lock
 * section directives are emitted on separate assembler lines.
 */
#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc fastcall __attribute__((section(".spinlock.text")))

/*
 * Pull the raw_spinlock_t and raw_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);

/*
 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __spin_lock_init(spinlock_t *lock, const char *name,
			       struct lock_class_key *key);
# define spin_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define spin_lock_init(lock)					\
	do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __rwlock_init(rwlock_t *lock, const char *name,
			    struct lock_class_key *key);
# define rwlock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__rwlock_init((lock), #lock, &__key);			\
} while (0)
#else
# define rwlock_init(lock)					\
	do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
#endif

#define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)

/**
 * spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define spin_unlock_wait(lock)	__raw_spin_unlock_wait(&(lock)->raw_lock)

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void _raw_spin_lock(spinlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 extern int _raw_spin_trylock(spinlock_t *lock);
 extern void _raw_spin_unlock(spinlock_t *lock);
 extern void _raw_read_lock(rwlock_t *lock);
 extern int _raw_read_trylock(rwlock_t *lock);
 extern void _raw_read_unlock(rwlock_t *lock);
 extern void _raw_write_lock(rwlock_t *lock);
 extern int _raw_write_trylock(rwlock_t *lock);
 extern void _raw_write_unlock(rwlock_t *lock);
#else
# define _raw_spin_lock(lock)		__raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
		__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock)		__raw_read_lock(&(rwlock)->raw_lock)
# define _raw_read_trylock(rwlock)	__raw_read_trylock(&(rwlock)->raw_lock)
# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
# define _raw_write_trylock(rwlock)	__raw_write_trylock(&(rwlock)->raw_lock)
# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
#endif

#define read_can_lock(rwlock)		__raw_read_can_lock(&(rwlock)->raw_lock)
#define write_can_lock(rwlock)		__raw_write_can_lock(&(rwlock)->raw_lock)

/*
 * Define the various spin_lock and rw_lock methods. Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
 * methods are defined as nops in the case they are not required.
 */
#define spin_trylock(lock)		__cond_lock(lock, _spin_trylock(lock))
#define read_trylock(lock)		__cond_lock(lock, _read_trylock(lock))
#define write_trylock(lock)		__cond_lock(lock, _write_trylock(lock))

#define spin_lock(lock)			_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
#else
# define spin_lock_nested(lock, subclass) _spin_lock(lock)
#endif

#define write_lock(lock)		_write_lock(lock)
#define read_lock(lock)			_read_lock(lock)

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define spin_lock_irqsave(lock, flags)	flags = _spin_lock_irqsave(lock)
#define read_lock_irqsave(lock, flags)	flags = _read_lock_irqsave(lock)
#define write_lock_irqsave(lock, flags)	flags = _write_lock_irqsave(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define spin_lock_irqsave_nested(lock, flags, subclass) \
	flags = _spin_lock_irqsave_nested(lock, subclass)
#else
#define spin_lock_irqsave_nested(lock, flags, subclass) \
	flags = _spin_lock_irqsave(lock)
#endif

#else

#define spin_lock_irqsave(lock, flags)	_spin_lock_irqsave(lock, flags)
#define read_lock_irqsave(lock, flags)	_read_lock_irqsave(lock, flags)
#define write_lock_irqsave(lock, flags)	_write_lock_irqsave(lock, flags)
#define spin_lock_irqsave_nested(lock, flags, subclass)	\
	spin_lock_irqsave(lock, flags)

#endif

#define spin_lock_irq(lock)		_spin_lock_irq(lock)
#define spin_lock_bh(lock)		_spin_lock_bh(lock)

#define read_lock_irq(lock)		_read_lock_irq(lock)
#define read_lock_bh(lock)		_read_lock_bh(lock)

#define write_lock_irq(lock)		_write_lock_irq(lock)
#define write_lock_bh(lock)		_write_lock_bh(lock)

/*
 * We inline the unlock functions in the nondebug case:
 */
#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
	!defined(CONFIG_SMP)
# define spin_unlock(lock)		_spin_unlock(lock)
# define read_unlock(lock)		_read_unlock(lock)
# define write_unlock(lock)		_write_unlock(lock)
# define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
# define read_unlock_irq(lock)		_read_unlock_irq(lock)
# define write_unlock_irq(lock)		_write_unlock_irq(lock)
#else
# define spin_unlock(lock) \
    do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
# define read_unlock(lock) \
    do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
# define write_unlock(lock) \
    do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
# define spin_unlock_irq(lock)			\
do {						\
	__raw_spin_unlock(&(lock)->raw_lock);	\
	__release(lock);			\
	local_irq_enable();			\
} while (0)
# define read_unlock_irq(lock)			\
do {						\
	__raw_read_unlock(&(lock)->raw_lock);	\
	__release(lock);			\
	local_irq_enable();			\
} while (0)
# define write_unlock_irq(lock)			\
do {						\
	__raw_write_unlock(&(lock)->raw_lock);	\
	__release(lock);			\
	local_irq_enable();			\
} while (0)
#endif

#define spin_unlock_irqrestore(lock, flags) \
					_spin_unlock_irqrestore(lock, flags)
#define spin_unlock_bh(lock)		_spin_unlock_bh(lock)

#define read_unlock_irqrestore(lock, flags) \
					_read_unlock_irqrestore(lock, flags)
#define read_unlock_bh(lock)		_read_unlock_bh(lock)

#define write_unlock_irqrestore(lock, flags) \
					_write_unlock_irqrestore(lock, flags)
#define write_unlock_bh(lock)		_write_unlock_bh(lock)

#define spin_trylock_bh(lock)	__cond_lock(lock, _spin_trylock_bh(lock))

#define spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

#define write_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	write_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

/*
 * Locks two spinlocks l1 and l2.
 * l1_first indicates if spinlock l1 should be taken first.
 */
static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2,
				    bool l1_first)
	__acquires(l1)
	__acquires(l2)
{
	if (l1_first) {
		spin_lock(l1);
		spin_lock(l2);
	} else {
		spin_lock(l2);
		spin_lock(l1);
	}
}

/*
 * Unlocks two spinlocks l1 and l2.
 * l1_taken_first indicates if spinlock l1 was taken first and therefore
 * should be released after spinlock l2.
 */
static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2,
				      bool l1_taken_first)
	__releases(l1)
	__releases(l2)
{
	if (l1_taken_first) {
		spin_unlock(l2);
		spin_unlock(l1);
	} else {
		spin_unlock(l1);
		spin_unlock(l2);
	}
}

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <asm/atomic.h>

/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

/**
 * spin_can_lock - would spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define spin_can_lock(lock)	(!spin_is_locked(lock))

#endif /* __LINUX_SPINLOCK_H */