/*
 * lib/lockref.c - reference count with embedded spinlock
 *
 * Combines a spinlock and a count in one 64-bit word so that the
 * common get/put operations can be done with a lockless cmpxchg
 * fast path, falling back to the spinlock when contended.
 */
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 *
 * The loop attempts the whole <lock, count> word as one 64-bit
 * cmpxchg, but only while the embedded spinlock is observed to be
 * unlocked; if somebody holds the lock we must not race with them,
 * and the caller falls through to its spinlocked slow path.
 * CODE mutates "new" (and may return/break early based on "old");
 * SUCCESS runs exactly once, after the cmpxchg has committed.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);	/* lock+count must fit in 64 bits */	\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		/* relaxed ordering: refcount ops need no barrier here */	\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		/* lost the race; "old" was reloaded by cmpxchg — retry */	\
		cpu_relax_lowlatency();						\
	}									\
} while (0)

#else

/*
 * No usable 64-bit cmpxchg on this configuration: the fast path is a
 * no-op and every caller unconditionally takes its spinlock slow path.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Lockless fast path: bump count via 64-bit cmpxchg. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: lock is held (or cmpxchg unavailable/kept failing). */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	/*
	 * Fast path: "<= 0" rejects both a zero count and a dead
	 * lockref (lockref_mark_dead() sets count negative).
	 */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	/* Slow path: same test under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	/* Fast path; on zero/dead count, drop to the locked slow path. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;	/* NOTE: returns with the spinlock HELD */
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	/*
	 * cmpxchg-only operation: there is deliberately no spinlock
	 * fallback, so a locked (or dead) lockref yields -1.
	 */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	/*
	 * Fast path: never drop the last reference locklessly — the
	 * caller needs the lock to handle the 1 -> 0 transition.
	 */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;	/* NOTE: returns with the spinlock HELD */
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Caller must hold the spinlock (checked by the assert). A negative
 * count is the "dead" state tested by lockref_get_not_dead() and by
 * the "<= 0" checks in the other helpers; -128 leaves headroom so
 * racing decrements cannot wrap it back to a valid value.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	/*
	 * Unlike lockref_get_not_zero(), a zero count is acceptable
	 * here — only a negative (dead) count is rejected.
	 */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	/* Slow path: same dead-check under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);