lib/refcount.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Variant of atomic_t specialized for reference counts.
   *
   * The interface matches the atomic_t interface (to aid in porting) but only
   * provides the few functions one should use for reference counting.
   *
   * It differs in that the counter saturates at UINT_MAX and will not move once
   * there. This avoids wrapping the counter and causing 'spurious'
   * use-after-free issues.
   *
   * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
   * and provide only what is strictly required for refcounts.
   *
   * The increments are fully relaxed; these will not provide ordering. The
   * rationale is that whatever is used to obtain the object we're increasing the
   * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire, for RCU/lockless data structures it's the dependent
   * load.
   *
   * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
   * if we did not in fact acquire a reference.
   *
   * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
   * will order us against the subsequent free().
   *
   * The control dependency is against the load of the cmpxchg (ll/sc) that
   * succeeded. This means the stores aren't fully ordered, but this is fine
   * because the 1->0 transition indicates no concurrency.
   *
   * Note that the allocator is responsible for ordering things between free()
   * and alloc().
   *
   */
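
/*
 * Illustrative sketch (not part of this file): the sort of access pattern
 * the relaxed-increment rule above assumes. Whatever publishes the object
 * also orders the increment; all names below are hypothetical.
 *
 *	spin_lock(&obj_lock);			// lock acquire provides ordering
 *	obj = obj_find(key);			// walk a locked structure
 *	if (obj)
 *		refcount_inc(&obj->refs);	// relaxed increment suffices
 *	spin_unlock(&obj_lock);
 */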
  
  #include <linux/refcount.h>
  #include <linux/bug.h>
  #ifdef CONFIG_REFCOUNT_FULL
  /**
   * refcount_add_not_zero - add a value to a refcount unless it is 0
   * @i: the value to add to the refcount
   * @r: the refcount
   *
   * Will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller has guaranteed the
   * object memory to be stable (RCU, etc.). It does provide a control dependency
   * and thereby orders future stores. See the comment on top.
   *
   * Use of this function is not recommended for the normal reference counting
   * use case in which references are taken and released one at a time.  In these
   * cases, refcount_inc(), or one of its variants, should instead be used to
   * increment a reference count.
   *
   * Return: false if the passed refcount is 0, true otherwise
   */
  bool refcount_add_not_zero(unsigned int i, refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		if (!val)
  			return false;
  
  		if (unlikely(val == UINT_MAX))
  			return true;
  
  		new = val + i;
  		if (new < val)
  			new = UINT_MAX;

  	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_add_not_zero);
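
/*
 * Illustrative sketch (not part of this file): acquiring @n references in
 * one go, e.g. when handing an object to @n consumers at once. The names
 * are hypothetical; each consumer later drops its reference individually.
 *
 *	if (!refcount_add_not_zero(n, &obj->refs))
 *		return -ENOENT;		// object already on its way out
 *	hand_out(obj, n);		// each consumer gets one reference
 */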

  /**
   * refcount_add - add a value to a refcount
   * @i: the value to add to the refcount
   * @r: the refcount
   *
   * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller has guaranteed the
   * object memory to be stable (RCU, etc.). It does provide a control dependency
   * and thereby orders future stores. See the comment on top.
   *
   * Use of this function is not recommended for the normal reference counting
   * use case in which references are taken and released one at a time.  In these
   * cases, refcount_inc(), or one of its variants, should instead be used to
   * increment a reference count.
   */
  void refcount_add(unsigned int i, refcount_t *r)
  {
	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
  }
  EXPORT_SYMBOL(refcount_add);

  /**
   * refcount_inc_not_zero - increment a refcount unless it is 0
   * @r: the refcount to increment
   *
   * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller has guaranteed the
   * object memory to be stable (RCU, etc.). It does provide a control dependency
   * and thereby orders future stores. See the comment on top.
   *
   * Return: true if the increment was successful, false otherwise
   */
  bool refcount_inc_not_zero(refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		new = val + 1;
  
  		if (!val)
  			return false;
  
  		if (unlikely(!new))
  			return true;
  	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_inc_not_zero);
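
/*
 * Illustrative sketch (not part of this file): the RCU/lockless lookup
 * this function is designed for. obj_lookup() and the object layout are
 * hypothetical.
 *
 *	rcu_read_lock();
 *	obj = obj_lookup(key);		// dependent load orders our accesses
 *	if (obj && !refcount_inc_not_zero(&obj->refs))
 *		obj = NULL;		// raced with the final put; don't touch it
 *	rcu_read_unlock();
 */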

  /**
   * refcount_inc - increment a refcount
   * @r: the refcount to increment
   *
   * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller already has a
   * reference on the object.
   *
   * Will WARN if the refcount is 0, as this represents a possible use-after-free
   * condition.
   */
  void refcount_inc(refcount_t *r)
  {
	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
  }
  EXPORT_SYMBOL(refcount_inc);
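
/*
 * Illustrative sketch (not part of this file): taking an extra reference
 * while already holding one, e.g. before handing the object to async
 * work. The workqueue names are hypothetical.
 *
 *	refcount_inc(&obj->refs);	// safe: caller's own reference pins obj
 *	queue_work(obj_wq, &obj->work);	// the worker does the matching put
 */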

  /**
   * refcount_sub_and_test - subtract from a refcount and test if it is 0
   * @i: amount to subtract from the refcount
   * @r: the refcount
   *
   * Similar to atomic_dec_and_test(), but it will WARN, return false and
   * ultimately leak on underflow and will fail to decrement when saturated
   * at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides a control dependency such that free() must come after.
   * See the comment on top.
   *
   * Use of this function is not recommended for the normal reference counting
   * use case in which references are taken and released one at a time.  In these
   * cases, refcount_dec(), or one of its variants, should instead be used to
   * decrement a reference count.
   *
   * Return: true if the resulting refcount is 0, false otherwise
   */
  bool refcount_sub_and_test(unsigned int i, refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		if (unlikely(val == UINT_MAX))
  			return false;
  
  		new = val - i;
  		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
  			return false;
  		}
  	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
  
  	return !new;
  }
  EXPORT_SYMBOL(refcount_sub_and_test);
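
/*
 * Illustrative sketch (not part of this file): returning the @n references
 * taken with refcount_add_not_zero() in a single call. Names are
 * hypothetical.
 *
 *	if (refcount_sub_and_test(n, &obj->refs))
 *		kfree(obj);		// ours were the last n references
 */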

  /**
   * refcount_dec_and_test - decrement a refcount and test if it is 0
   * @r: the refcount
   *
   * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
   * decrement when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides a control dependency such that free() must come after.
   * See the comment on top.
   *
   * Return: true if the resulting refcount is 0, false otherwise
   */
  bool refcount_dec_and_test(refcount_t *r)
  {
  	return refcount_sub_and_test(1, r);
  }
  EXPORT_SYMBOL(refcount_dec_and_test);
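
/*
 * Illustrative sketch (not part of this file): the canonical put path.
 * The object type and free routine are hypothetical.
 *
 *	static void obj_put(struct obj *obj)
 *	{
 *		if (refcount_dec_and_test(&obj->refs))
 *			kfree(obj);	// release ordering: prior stores are done
 *	}
 */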

  /**
   * refcount_dec - decrement a refcount
   * @r: the refcount
   *
   * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
   * when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before.
   */
  void refcount_dec(refcount_t *r)
  {
	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
  }
  EXPORT_SYMBOL(refcount_dec);
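
/*
 * Illustrative sketch (not part of this file): dropping a reference that
 * is known not to be the last one, because the caller still holds another.
 *
 *	refcount_inc(&obj->refs);	// temporary reference for this section
 *	do_something(obj);
 *	refcount_dec(&obj->refs);	// caller's original reference pins obj
 */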
  #endif /* CONFIG_REFCOUNT_FULL */

  /**
   * refcount_dec_if_one - decrement a refcount if it is 1
   * @r: the refcount
   *
 * No atomic_t counterpart; it attempts a 1 -> 0 transition and returns the
 * success thereof.
   *
   * Like all decrement operations, it provides release memory order and provides
   * a control dependency.
   *
 * It can be used like a try-delete operator; this explicit case is provided
 * instead of a generic cmpxchg helper, because such a helper would allow
 * implementing unsafe operations.
   *
   * Return: true if the resulting refcount is 0, false otherwise
   */
  bool refcount_dec_if_one(refcount_t *r)
  {
  	int val = 1;
  
  	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
  }
  EXPORT_SYMBOL(refcount_dec_if_one);
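
/*
 * Illustrative sketch (not part of this file): a try-delete that only
 * succeeds when we hold the sole reference. Names are hypothetical; the
 * caller is assumed to hold the lock protecting the lookup structure.
 *
 *	if (refcount_dec_if_one(&obj->refs)) {
 *		list_del(&obj->node);	// no live reference can exist now
 *		kfree(obj);
 *	}
 */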

  /**
   * refcount_dec_not_one - decrement a refcount if it is not 1
   * @r: the refcount
   *
 * No atomic_t counterpart; it decrements unless the value is 1, in which case
 * it will return false.
   *
   * Was often done like: atomic_add_unless(&var, -1, 1)
   *
   * Return: true if the decrement operation was successful, false otherwise
   */
  bool refcount_dec_not_one(refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		if (unlikely(val == UINT_MAX))
  			return true;
  
  		if (val == 1)
  			return false;
  
  		new = val - 1;
  		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
  			return true;
  		}
  	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_dec_not_one);

  /**
   * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
   *                               refcount to 0
   * @r: the refcount
   * @lock: the mutex to be locked
   *
   * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
   * to decrement when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides a control dependency such that free() must come after.
   * See the comment on top.
   *
   * Return: true and hold mutex if able to decrement refcount to 0, false
   *         otherwise
   */
  bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
  {
  	if (refcount_dec_not_one(r))
  		return false;
  
  	mutex_lock(lock);
  	if (!refcount_dec_and_test(r)) {
  		mutex_unlock(lock);
  		return false;
  	}
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
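
/*
 * Illustrative sketch (not part of this file): a final put that must also
 * unlink the object from a mutex-protected list; refcount_dec_not_one()
 * above provides the lock-free fast path. Names are hypothetical.
 *
 *	if (refcount_dec_and_mutex_lock(&obj->refs, &obj_list_mutex)) {
 *		list_del(&obj->node);	// mutex is held here
 *		mutex_unlock(&obj_list_mutex);
 *		kfree(obj);
 *	}
 */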

  /**
   * refcount_dec_and_lock - return holding spinlock if able to decrement
   *                         refcount to 0
   * @r: the refcount
   * @lock: the spinlock to be locked
   *
   * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
   * decrement when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides a control dependency such that free() must come after.
   * See the comment on top.
   *
   * Return: true and hold spinlock if able to decrement refcount to 0, false
   *         otherwise
   */
  bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
  {
  	if (refcount_dec_not_one(r))
  		return false;
  
  	spin_lock(lock);
  	if (!refcount_dec_and_test(r)) {
  		spin_unlock(lock);
  		return false;
  	}
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_dec_and_lock);