#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)			\
do {							\
	static struct lock_class_key __key;		\
							\
	__raw_spin_lock_init((lock), #lock, &__key);	\
} while (0)

#else
# define raw_spin_lock_init(lock)			\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
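
/*
 * Illustrative sketch (not part of this header; "my_dev" is a
 * hypothetical name, DEFINE_RAW_SPINLOCK comes from spinlock_types.h):
 * a raw_spinlock_t is either defined statically or initialized at
 * runtime before first use; with CONFIG_DEBUG_SPINLOCK the runtime
 * path also registers a lock class key for lockdep:
 *
 *	static DEFINE_RAW_SPINLOCK(static_raw_lock);
 *
 *	struct my_dev {
 *		raw_spinlock_t	lock;
 *	};
 *
 *	raw_spin_lock_init(&dev->lock);
 */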

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
 * Despite its name it doesn't necessarily have to be a full barrier.
 * It should only guarantee that a STORE before the critical section
 * can not be reordered with LOADs and STOREs inside this section.
 * spin_lock() is the one-way barrier: a LOAD inside the section can
 * not escape out of the region. So the default implementation simply
 * ensures that a STORE can not move into the critical section;
 * smp_wmb() should serialize it with another STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()	smp_wmb()
#endif
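
/*
 * Illustrative sketch (hypothetical names): a waker-style sequence in
 * which a STORE issued before taking the lock must not be reordered
 * with LOADs performed inside the critical section:
 *
 *	WRITE_ONCE(p->state, NEW_STATE);
 *	smp_mb__before_spinlock();
 *	raw_spin_lock_irqsave(&p->lock, flags);
 *	... LOADs here can not be reordered before the STORE above ...
 *	raw_spin_unlock_irqrestore(&p->lock, flags);
 */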

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set.  The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)
# define raw_spin_lock_bh_nested(lock, subclass) \
	_raw_spin_lock_bh_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
# define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif
#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0; }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
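
/*
 * Illustrative sketch (hypothetical names): the trylock variants are
 * typically used to take a second lock opportunistically and back off
 * instead of risking an ABBA deadlock:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&other->lock, flags)) {
 *		move_entry(cur, other);
 *		raw_spin_unlock_irqrestore(&other->lock, flags);
 *	} else {
 *		... back off: drop cur->lock and retake both in order ...
 *	}
 */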

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)
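
/*
 * Example (illustrative; DEFINE_SPINLOCK comes from spinlock_types.h,
 * "my_obj" is a hypothetical name): like the raw variant, a spinlock_t
 * is either defined statically or initialized before first use:
 *
 *	static DEFINE_SPINLOCK(global_lock);
 *
 *	struct my_obj {
 *		spinlock_t	lock;
 *	};
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	spin_lock_init(&obj->lock);
 */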

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_bh_nested(lock, subclass)			\
do {								\
	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)
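
/*
 * Illustrative sketch (SINGLE_DEPTH_NESTING comes from lockdep.h; the
 * src/dst names are hypothetical): when two locks of the same lockdep
 * class must be held at once, annotate the inner acquisition with a
 * subclass so lockdep doesn't flag a false self-deadlock:
 *
 *	spin_lock(&src->lock);
 *	spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&dst->lock);
 *	spin_unlock(&src->lock);
 */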

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
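
/*
 * Example (illustrative; "dev" is hypothetical): data shared with an
 * interrupt handler is accessed with the irqsave variant in contexts
 * where the current IRQ state is unknown; the saved state is restored
 * on unlock:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	dev->pending++;
 *	spin_unlock_irqrestore(&dev->lock, flags);
 *
 * Note that "flags" is passed by name rather than by address; the
 * typecheck() in raw_spin_lock_irqsave() enforces its type.
 */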

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static __always_inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
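
/*
 * Illustrative sketch (hypothetical queue): spin_is_contended() lets a
 * long-running lock holder yield voluntarily once a waiter shows up:
 *
 *	spin_lock(&q->lock);
 *	while (process_one(q)) {
 *		if (spin_is_contended(&q->lock)) {
 *			spin_unlock(&q->lock);
 *			cpu_relax();
 *			spin_lock(&q->lock);
 *		}
 *	}
 *	spin_unlock(&q->lock);
 */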

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
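
/*
 * Example (illustrative; "obj" and "obj_list_lock" are hypothetical):
 * the classic refcounted-object teardown, where the lock protecting a
 * lookup structure is only taken by whoever drops the last reference:
 *
 *	if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */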

#endif /* __LINUX_SPINLOCK_H */