Blame view
lib/refcount.c
4.77 KB
b24413180 License cleanup: ... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
29dee3c03 locking/refcounts... |
2 |
/* |
fb041bb7c locking/refcount:... |
3 |
* Out-of-line refcount functions. |
29dee3c03 locking/refcounts... |
4 |
*/ |
75a040ff1 locking/refcounts... |
5 |
#include <linux/mutex.h> |
29dee3c03 locking/refcounts... |
6 |
#include <linux/refcount.h> |
75a040ff1 locking/refcounts... |
7 |
#include <linux/spinlock.h> |
29dee3c03 locking/refcounts... |
8 |
#include <linux/bug.h> |
1eb085d94 locking/refcount:... |
9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 |
#define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ". ") void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t) { refcount_set(r, REFCOUNT_SATURATED); switch (t) { case REFCOUNT_ADD_NOT_ZERO_OVF: REFCOUNT_WARN("saturated; leaking memory"); break; case REFCOUNT_ADD_OVF: REFCOUNT_WARN("saturated; leaking memory"); break; case REFCOUNT_ADD_UAF: REFCOUNT_WARN("addition on 0; use-after-free"); break; case REFCOUNT_SUB_UAF: REFCOUNT_WARN("underflow; use-after-free"); break; case REFCOUNT_DEC_LEAK: REFCOUNT_WARN("decrement hit 0; leaking memory"); break; default: REFCOUNT_WARN("unknown saturation event!?"); } } EXPORT_SYMBOL(refcount_warn_saturate); |
bd174169c locking/refcount:... |
/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * and not cmpxchg in generic, because that would allow implementing unsafe
 * operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	/* Succeeds (storing 0) only when the count is exactly 1. */
	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
29dee3c03 locking/refcounts... |
60 |
|
bd174169c locking/refcount:... |
61 62 63 64 |
/** * refcount_dec_not_one - decrement a refcount if it is not 1 * @r: the refcount * |
29dee3c03 locking/refcounts... |
65 66 67 68 |
* No atomic_t counterpart, it decrements unless the value is 1, in which case * it will return false. * * Was often done like: atomic_add_unless(&var, -1, 1) |
bd174169c locking/refcount:... |
69 70 |
* * Return: true if the decrement operation was successful, false otherwise |
29dee3c03 locking/refcounts... |
71 72 73 |
*/ bool refcount_dec_not_one(refcount_t *r) { |
b78c0d471 locking/refcounts... |
74 |
unsigned int new, val = atomic_read(&r->refs); |
29dee3c03 locking/refcounts... |
75 |
|
b78c0d471 locking/refcounts... |
76 |
do { |
23e6b169c locking/refcount:... |
77 |
if (unlikely(val == REFCOUNT_SATURATED)) |
29dee3c03 locking/refcounts... |
78 79 80 81 82 83 84 |
return true; if (val == 1) return false; new = val - 1; if (new > val) { |
9dcfe2c75 locking/refcounts... |
85 86 |
WARN_ONCE(new > val, "refcount_t: underflow; use-after-free. "); |
29dee3c03 locking/refcounts... |
87 88 |
return true; } |
b78c0d471 locking/refcounts... |
89 |
} while (!atomic_try_cmpxchg_release(&r->refs, &val, new)); |
29dee3c03 locking/refcounts... |
90 91 92 |
return true; } |
d557d1b58 refcount: change ... |
93 |
EXPORT_SYMBOL(refcount_dec_not_one); |
29dee3c03 locking/refcounts... |
94 |
|
bd174169c locking/refcount:... |
95 96 97 98 99 100 |
/** * refcount_dec_and_mutex_lock - return holding mutex if able to decrement * refcount to 0 * @r: the refcount * @lock: the mutex to be locked * |
29dee3c03 locking/refcounts... |
101 |
* Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail |
23e6b169c locking/refcount:... |
102 |
* to decrement when saturated at REFCOUNT_SATURATED. |
29dee3c03 locking/refcounts... |
103 104 105 106 |
* * Provides release memory ordering, such that prior loads and stores are done * before, and provides a control dependency such that free() must come after. * See the comment on top. |
bd174169c locking/refcount:... |
107 108 109 |
* * Return: true and hold mutex if able to decrement refcount to 0, false * otherwise |
29dee3c03 locking/refcounts... |
110 111 112 113 114 115 116 117 118 119 120 121 122 123 |
*/ bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) { if (refcount_dec_not_one(r)) return false; mutex_lock(lock); if (!refcount_dec_and_test(r)) { mutex_unlock(lock); return false; } return true; } |
d557d1b58 refcount: change ... |
124 |
EXPORT_SYMBOL(refcount_dec_and_mutex_lock); |
29dee3c03 locking/refcounts... |
125 |
|
bd174169c locking/refcount:... |
126 127 128 129 130 131 |
/** * refcount_dec_and_lock - return holding spinlock if able to decrement * refcount to 0 * @r: the refcount * @lock: the spinlock to be locked * |
29dee3c03 locking/refcounts... |
132 |
* Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to |
23e6b169c locking/refcount:... |
133 |
* decrement when saturated at REFCOUNT_SATURATED. |
29dee3c03 locking/refcounts... |
134 135 136 137 |
* * Provides release memory ordering, such that prior loads and stores are done * before, and provides a control dependency such that free() must come after. * See the comment on top. |
bd174169c locking/refcount:... |
138 139 140 |
* * Return: true and hold spinlock if able to decrement refcount to 0, false * otherwise |
29dee3c03 locking/refcounts... |
141 142 143 144 145 146 147 148 149 150 151 152 153 154 |
*/ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) { if (refcount_dec_not_one(r)) return false; spin_lock(lock); if (!refcount_dec_and_test(r)) { spin_unlock(lock); return false; } return true; } |
d557d1b58 refcount: change ... |
155 |
EXPORT_SYMBOL(refcount_dec_and_lock); |
29dee3c03 locking/refcounts... |
156 |
|
7ea959c45 locking/refcounts... |
/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);