lib/refcount.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Variant of atomic_t specialized for reference counts.
   *
   * The interface matches the atomic_t interface (to aid in porting) but only
   * provides the few functions one should use for reference counting.
   *
   * It differs in that the counter saturates at UINT_MAX and will not move once
   * there. This avoids wrapping the counter and causing 'spurious'
   * use-after-free issues.
   *
   * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
   * and provide only what is strictly required for refcounts.
   *
   * The increments are fully relaxed; these will not provide ordering. The
   * rationale is that whatever is used to obtain the object we're increasing the
   * reference count on will provide the ordering. For locked data structures,
   * it's the lock acquire; for RCU/lockless data structures it's the dependent
   * load.
   *
   * Do note that inc_not_zero() provides a control dependency which will order
   * future stores against the inc, this ensures we'll never modify the object
   * if we did not in fact acquire a reference.
   *
   * The decrements will provide release order, such that all the prior loads and
   * stores will be issued before. They also provide a control dependency, which
   * will order us against the subsequent free().
   *
   * The control dependency is against the load of the cmpxchg (ll/sc) that
   * succeeded. This means the stores aren't fully ordered, but this is fine
   * because the 1->0 transition indicates no concurrency.
   *
   * Note that the allocator is responsible for ordering things between free()
   * and alloc().
   *
   * The decrements dec_and_test() and sub_and_test() also provide acquire
   * ordering on success.
   *
   */
  #include <linux/mutex.h>
  #include <linux/refcount.h>
  #include <linux/spinlock.h>
  #include <linux/bug.h>
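
  /*
   * Illustrative usage sketch: a plausible lookup/put pattern. struct foo,
   * its lookup helper and the kfree_rcu() teardown are examples only, and
   * callers would normally use the refcount_inc_not_zero() and
   * refcount_dec_and_test() wrappers rather than the *_checked functions
   * defined below.
   *
   *	struct foo {
   *		refcount_t ref;
   *		struct rcu_head rcu;
   *	};
   *
   *	// lookup side: the RCU read-side section (or the lock protecting
   *	// the lookup structure) provides the ordering; the increment
   *	// itself is relaxed and only succeeds on a live object.
   *	rcu_read_lock();
   *	foo = lookup_foo(key);
   *	if (foo && !refcount_inc_not_zero(&foo->ref))
   *		foo = NULL;
   *	rcu_read_unlock();
   *
   *	// put side: the final decrement provides release (and acquire on
   *	// success) ordering, so all prior accesses happen before the free.
   *	if (foo && refcount_dec_and_test(&foo->ref))
   *		kfree_rcu(foo, rcu);
   */
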
  /**
   * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
   * @i: the value to add to the refcount
   * @r: the refcount
   *
   * Will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller has guaranteed the
   * object memory to be stable (RCU, etc.). It does provide a control dependency
   * and thereby orders future stores. See the comment on top.
   *
   * Use of this function is not recommended for the normal reference counting
   * use case in which references are taken and released one at a time.  In these
   * cases, refcount_inc(), or one of its variants, should instead be used to
   * increment a reference count.
   *
   * Return: false if the passed refcount is 0, true otherwise
   */
  bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		if (!val)
  			return false;
  
  		if (unlikely(val == UINT_MAX))
  			return true;
  
  		new = val + i;
  		if (new < val)
  			new = UINT_MAX;

  	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

  	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_add_not_zero_checked);
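
  /*
   * Illustrative sketch of a case the add() variants are meant for: taking
   * several references in one go, e.g. when a batch of nr consumers is
   * handed the same object at once (obj and nr are hypothetical):
   *
   *	if (!refcount_add_not_zero(nr, &obj->ref))
   *		return -ENOENT;	// object already being torn down
   */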

  /**
   * refcount_add_checked - add a value to a refcount
   * @i: the value to add to the refcount
   * @r: the refcount
   *
   * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller has guaranteed the
   * object memory to be stable (RCU, etc.). It does provide a control dependency
   * and thereby orders future stores. See the comment on top.
   *
   * Use of this function is not recommended for the normal reference counting
   * use case in which references are taken and released one at a time.  In these
   * cases, refcount_inc(), or one of its variants, should instead be used to
   * increment a reference count.
   */
  void refcount_add_checked(unsigned int i, refcount_t *r)
  {
  	WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
  }
  EXPORT_SYMBOL(refcount_add_checked);

  /**
   * refcount_inc_not_zero_checked - increment a refcount unless it is 0
   * @r: the refcount to increment
   *
   * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller has guaranteed the
   * object memory to be stable (RCU, etc.). It does provide a control dependency
   * and thereby orders future stores. See the comment on top.
   *
   * Return: true if the increment was successful, false otherwise
   */
  bool refcount_inc_not_zero_checked(refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		new = val + 1;
  
  		if (!val)
  			return false;
  
  		if (unlikely(!new))
  			return true;
  	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

  	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_inc_not_zero_checked);

  /**
   * refcount_inc_checked - increment a refcount
   * @r: the refcount to increment
   *
   * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller already has a
   * reference on the object.
   *
   * Will WARN if the refcount is 0, as this represents a possible use-after-free
   * condition.
   */
  void refcount_inc_checked(refcount_t *r)
  {
  	WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
  }
  EXPORT_SYMBOL(refcount_inc_checked);

  /**
   * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
   * @i: amount to subtract from the refcount
   * @r: the refcount
   *
   * Similar to atomic_dec_and_test(), but it will WARN, return false and
   * ultimately leak on underflow and will fail to decrement when saturated
   * at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides an acquire ordering on success such that free()
   * must come after.
   *
   * Use of this function is not recommended for the normal reference counting
   * use case in which references are taken and released one at a time.  In these
   * cases, refcount_dec(), or one of its variants, should instead be used to
   * decrement a reference count.
   *
   * Return: true if the resulting refcount is 0, false otherwise
   */
  bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		if (unlikely(val == UINT_MAX))
  			return false;
  
  		new = val - i;
  		if (new > val) {
  			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
  			return false;
  		}
  	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

  	if (!new) {
  		smp_acquire__after_ctrl_dep();
  		return true;
  	}
  	return false;
  }
  EXPORT_SYMBOL(refcount_sub_and_test_checked);
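
  /*
   * Illustrative sketch: sub_and_test() is the counterpart of the batched
   * add() above, dropping all nr references at once and freeing on the
   * final put (obj, nr and free_obj() are hypothetical):
   *
   *	if (refcount_sub_and_test(nr, &obj->ref))
   *		free_obj(obj);
   */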

  /**
   * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
   * @r: the refcount
   *
   * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
   * decrement when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides an acquire ordering on success such that free()
   * must come after.
   *
   * Return: true if the resulting refcount is 0, false otherwise
   */
  bool refcount_dec_and_test_checked(refcount_t *r)
  {
  	return refcount_sub_and_test_checked(1, r);
  }
  EXPORT_SYMBOL(refcount_dec_and_test_checked);

  /**
   * refcount_dec_checked - decrement a refcount
   * @r: the refcount
   *
   * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
   * when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before.
   */
  void refcount_dec_checked(refcount_t *r)
  {
  	WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
  }
  EXPORT_SYMBOL(refcount_dec_checked);

  /**
   * refcount_dec_if_one - decrement a refcount if it is 1
   * @r: the refcount
   *
   * No atomic_t counterpart; it attempts a 1 -> 0 transition and returns the
   * success thereof.
   *
   * Like all decrement operations, it provides release memory order and provides
   * a control dependency.
   *
   * It can be used like a try-delete operator; this explicit case is provided
   * rather than a generic cmpxchg(), because that would allow implementing unsafe
   * operations.
   *
   * Return: true if the resulting refcount is 0, false otherwise
   */
  bool refcount_dec_if_one(refcount_t *r)
  {
  	int val = 1;
  
  	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
  }
  EXPORT_SYMBOL(refcount_dec_if_one);
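
  /*
   * Illustrative try-delete sketch: tear the object down only if we hold
   * the last reference, otherwise leave it alone (obj, the cache and its
   * helpers are hypothetical):
   *
   *	if (refcount_dec_if_one(&obj->ref)) {
   *		remove_from_cache(obj);
   *		free_obj(obj);
   *	}
   */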

  /**
   * refcount_dec_not_one - decrement a refcount if it is not 1
   * @r: the refcount
   *
   * No atomic_t counterpart; it decrements unless the value is 1, in which case
   * it will return false.
   *
   * Was often done like: atomic_add_unless(&var, -1, 1)
   *
   * Return: true if the decrement operation was successful, false otherwise
   */
  bool refcount_dec_not_one(refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		if (unlikely(val == UINT_MAX))
  			return true;
  
  		if (val == 1)
  			return false;
  
  		new = val - 1;
  		if (new > val) {
  			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
  			return true;
  		}
  	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_dec_not_one);

  /**
   * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
   *                               refcount to 0
   * @r: the refcount
   * @lock: the mutex to be locked
   *
   * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
   * to decrement when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides a control dependency such that free() must come after.
   * See the comment on top.
   *
   * Return: true and hold mutex if able to decrement refcount to 0, false
   *         otherwise
   */
  bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
  {
  	if (refcount_dec_not_one(r))
  		return false;
  
  	mutex_lock(lock);
  	if (!refcount_dec_and_test(r)) {
  		mutex_unlock(lock);
  		return false;
  	}
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_dec_and_mutex_lock);

  /**
   * refcount_dec_and_lock - return holding spinlock if able to decrement
   *                         refcount to 0
   * @r: the refcount
   * @lock: the spinlock to be locked
   *
   * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
   * decrement when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides a control dependency such that free() must come after.
   * See the comment on top.
   *
   * Return: true and hold spinlock if able to decrement refcount to 0, false
   *         otherwise
   */
  bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
  {
  	if (refcount_dec_not_one(r))
  		return false;
  
  	spin_lock(lock);
  	if (!refcount_dec_and_test(r)) {
  		spin_unlock(lock);
  		return false;
  	}
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_dec_and_lock);
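
  /*
   * Illustrative sketch of the pattern refcount_dec_and_lock() exists for:
   * drop a reference and, only when it hits zero, unlink the object under
   * the lock protecting the list before freeing it (obj, its list node and
   * table_lock are hypothetical):
   *
   *	if (refcount_dec_and_lock(&obj->ref, &table_lock)) {
   *		list_del(&obj->node);
   *		spin_unlock(&table_lock);
   *		kfree(obj);
   *	}
   */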

  /**
   * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
   *                                 interrupts if able to decrement refcount to 0
   * @r: the refcount
   * @lock: the spinlock to be locked
   * @flags: saved IRQ-flags if the lock is acquired
   *
   * Same as refcount_dec_and_lock() above except that the spinlock is acquired
   * with disabled interrupts.
   *
   * Return: true and hold spinlock if able to decrement refcount to 0, false
   *         otherwise
   */
  bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
  				   unsigned long *flags)
  {
  	if (refcount_dec_not_one(r))
  		return false;
  
  	spin_lock_irqsave(lock, *flags);
  	if (!refcount_dec_and_test(r)) {
  		spin_unlock_irqrestore(lock, *flags);
  		return false;
  	}
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);