// SPDX-License-Identifier: GPL-2.0
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 */
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>
/**
 * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
			return true;

		new = val + i;
		if (new < val)
			new = UINT_MAX;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_add_not_zero_checked);
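
/*
 * Illustrative sketch (hypothetical caller, not part of this file): taking
 * several references in one go, e.g. when attaching an object to @nr
 * trackers at once. 'obj', 'nr' and the 'ref' member are assumed names:
 *
 *	if (!refcount_add_not_zero_checked(nr, &obj->ref))
 *		return -ENOENT;		object was already released
 */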

/**
 * refcount_add_checked - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add_checked(unsigned int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_add_checked);

/**
 * refcount_inc_not_zero_checked - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero_checked(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_inc_not_zero_checked);
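
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * RCU/lockless lookup pattern the comment on top refers to. The dependent
 * load that published 'obj' provides the ordering; inc_not_zero() only has
 * to reject objects whose count already hit 0. 'table', 'key' and the 'ref'
 * member are assumed names:
 *
 *	rcu_read_lock();
 *	obj = rcu_dereference(table[key]);
 *	if (obj && !refcount_inc_not_zero_checked(&obj->ref))
 *		obj = NULL;
 *	rcu_read_unlock();
 */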

/**
 * refcount_inc_checked - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc_checked(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_inc_checked);

/**
 * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	if (!new) {
		smp_acquire__after_ctrl_dep();
		return true;
	}
	return false;
}
EXPORT_SYMBOL(refcount_sub_and_test_checked);

/**
 * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test_checked(refcount_t *r)
{
	return refcount_sub_and_test_checked(1, r);
}
EXPORT_SYMBOL(refcount_dec_and_test_checked);
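
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * usual put() pattern. Release ordering makes all prior stores to the object
 * visible before the 1->0 transition, and the acquire ordering on success
 * orders the kfree() after it. 'struct obj' and the 'ref' member are assumed
 * names:
 *
 *	static void obj_put(struct obj *obj)
 *	{
 *		if (refcount_dec_and_test_checked(&obj->ref))
 *			kfree(obj);
 *	}
 */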

/**
 * refcount_dec_checked - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec_checked(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec_checked);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * instead of a generic cmpxchg, because the latter would allow implementing
 * unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
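
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * try-delete use mentioned above. Deletion only proceeds if we held the
 * last reference, so no new users can appear afterwards. 'obj' and its
 * 'ref'/'node' members are assumed names:
 *
 *	if (refcount_dec_if_one(&obj->ref)) {
 *		list_del(&obj->node);
 *		kfree(obj);
 *	}
 */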

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
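
/*
 * Illustrative sketch (hypothetical caller, not part of this file): dropping
 * the last reference to an object that lives on a mutex-protected list. The
 * mutex is only taken for the final 1->0 transition, so the common path
 * stays lock-free. 'obj' and 'obj_list_lock' are assumed names:
 *
 *	if (refcount_dec_and_mutex_lock(&obj->ref, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */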

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
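
/*
 * Illustrative sketch (hypothetical caller, not part of this file): same
 * pattern as above for a list that is also touched from interrupt context,
 * so the final 1->0 transition must happen with the lock held and interrupts
 * disabled. 'obj' and 'obj_list_lock' are assumed names:
 *
 *	unsigned long flags;
 *
 *	if (refcount_dec_and_lock_irqsave(&obj->ref, &obj_list_lock, &flags)) {
 *		list_del(&obj->node);
 *		spin_unlock_irqrestore(&obj_list_lock, flags);
 *		kfree(obj);
 *	}
 */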