include/linux/lockref.h
#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};

extern void lockref_get(struct lockref *);
extern int lockref_put_return(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);

extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);

/* Must be called under spinlock for reliable results */
static inline int __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}

#endif /* __LINUX_LOCKREF_H */
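The header only declares the API; the return-value conventions described below come from the implementation in lib/lockref.c, not from this file, and the surrounding object (struct my_obj, my_obj_tryget(), my_obj_put()) is hypothetical, loosely modeled on how the dcache embeds a lockref in struct dentry. A minimal usage sketch under those assumptions:

	#include <linux/lockref.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	/* Hypothetical object embedding a lockref; not part of lockref.h. */
	struct my_obj {
		struct lockref ref;
		/* ... payload ... */
	};

	/*
	 * Try to take a reference. Returns non-zero on success, 0 if the
	 * count is already zero or the lockref has been marked dead.
	 */
	static bool my_obj_tryget(struct my_obj *obj)
	{
		return lockref_get_not_zero(&obj->ref);
	}

	/* Drop a reference; only the final put takes the spinlock. */
	static void my_obj_put(struct my_obj *obj)
	{
		if (lockref_put_or_lock(&obj->ref))
			return;	/* count was > 1 and has been decremented */

		/*
		 * We held the last reference: lockref_put_or_lock()
		 * returned 0 with obj->ref.lock held and the count not
		 * decremented. Mark the lockref dead so that concurrent
		 * lockref_get_not_dead() callers fail, then tear down.
		 */
		lockref_mark_dead(&obj->ref);
		spin_unlock(&obj->ref.lock);
		kfree(obj);
	}

The point of the union in struct lockref is that, on USE_CMPXCHG_LOCKREF configurations, the common get/put paths in a sketch like this can complete with a single cmpxchg on the combined lock_count word, falling back to the plain spinlocked slow path only under contention or at the zero/dead boundary.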