lib/refcount.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Variant of atomic_t specialized for reference counts.
   *
   * The interface matches the atomic_t interface (to aid in porting) but only
   * provides the few functions one should use for reference counting.
   *
   * It differs in that the counter saturates at UINT_MAX and will not move once
   * there. This avoids wrapping the counter and causing 'spurious'
   * use-after-free issues.
   *
   * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
   * and provide only what is strictly required for refcounts.
   *
   * The increments are fully relaxed; these will not provide ordering. The
   * rationale is that whatever is used to obtain the object we're increasing the
   * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
   * load.
   *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
   *
 * The decrements will provide release order, such that all the prior loads
 * and stores will be issued before; they also provide a control dependency,
 * which will order us against the subsequent free().
   *
   * The control dependency is against the load of the cmpxchg (ll/sc) that
   * succeeded. This means the stores aren't fully ordered, but this is fine
   * because the 1->0 transition indicates no concurrency.
   *
   * Note that the allocator is responsible for ordering things between free()
   * and alloc().
   *
   */
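
/*
 * Illustrative sketch (not part of this file; 'p', 'foo_lookup()', 'foo_lock'
 * and 'key' are hypothetical): the relaxed increment is safe because the step
 * that produced the pointer already ordered the access.
 *
 *	spin_lock(&foo_lock);			// the lock acquire orders the inc
 *	p = foo_lookup(key);
 *	if (p)
 *		refcount_inc_checked(&p->ref);
 *	spin_unlock(&foo_lock);
 */
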
  #include <linux/mutex.h>
  #include <linux/refcount.h>
  #include <linux/spinlock.h>
  #include <linux/bug.h>
  /**
   * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
   * @i: the value to add to the refcount
   * @r: the refcount
   *
   * Will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller has guaranteed the
   * object memory to be stable (RCU, etc.). It does provide a control dependency
   * and thereby orders future stores. See the comment on top.
   *
   * Use of this function is not recommended for the normal reference counting
   * use case in which references are taken and released one at a time.  In these
   * cases, refcount_inc(), or one of its variants, should instead be used to
   * increment a reference count.
   *
   * Return: false if the passed refcount is 0, true otherwise
   */
  bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		if (!val)
  			return false;
  
  		if (unlikely(val == UINT_MAX))
  			return true;
  
  		new = val + i;
  		if (new < val)
  			new = UINT_MAX;

  	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_add_not_zero_checked);
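
/*
 * Saturation sketch (hypothetical values, for illustration only): when
 * 'val + i' wraps, the 'new < val' test above catches it and pins the
 * counter at UINT_MAX, trading a memory leak for a use-after-free.
 *
 *	refcount_t r = REFCOUNT_INIT(UINT_MAX - 2);
 *
 *	refcount_add_not_zero_checked(5, &r);	// saturates to UINT_MAX, WARNs
 *	refcount_add_not_zero_checked(1, &r);	// no-op: counter stays pinned
 */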

  /**
   * refcount_add_checked - add a value to a refcount
   * @i: the value to add to the refcount
   * @r: the refcount
   *
   * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller has guaranteed the
   * object memory to be stable (RCU, etc.). It does provide a control dependency
   * and thereby orders future stores. See the comment on top.
   *
   * Use of this function is not recommended for the normal reference counting
   * use case in which references are taken and released one at a time.  In these
   * cases, refcount_inc(), or one of its variants, should instead be used to
   * increment a reference count.
   */
  void refcount_add_checked(unsigned int i, refcount_t *r)
  {
	WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
  }
  EXPORT_SYMBOL(refcount_add_checked);

  /**
   * refcount_inc_not_zero_checked - increment a refcount unless it is 0
   * @r: the refcount to increment
   *
   * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller has guaranteed the
   * object memory to be stable (RCU, etc.). It does provide a control dependency
   * and thereby orders future stores. See the comment on top.
   *
   * Return: true if the increment was successful, false otherwise
   */
  bool refcount_inc_not_zero_checked(refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		new = val + 1;
  
  		if (!val)
  			return false;
  
  		if (unlikely(!new))
  			return true;
  	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_inc_not_zero_checked);
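
/*
 * Lookup-and-get sketch under RCU ('obj', 'foo_lookup()' and 'key' are
 * hypothetical): the failure case is why the "not zero" form exists; an
 * object found in a lockless structure may already be on its way to being
 * freed.
 *
 *	rcu_read_lock();
 *	obj = foo_lookup(key);			// dependent load orders reads
 *	if (obj && !refcount_inc_not_zero_checked(&obj->ref))
 *		obj = NULL;			// lost the race; took no reference
 *	rcu_read_unlock();
 */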

  /**
   * refcount_inc_checked - increment a refcount
   * @r: the refcount to increment
   *
   * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
   *
   * Provides no memory ordering, it is assumed the caller already has a
   * reference on the object.
   *
   * Will WARN if the refcount is 0, as this represents a possible use-after-free
   * condition.
   */
  void refcount_inc_checked(refcount_t *r)
  {
	WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
  }
  EXPORT_SYMBOL(refcount_inc_checked);

  /**
   * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
   * @i: amount to subtract from the refcount
   * @r: the refcount
   *
   * Similar to atomic_dec_and_test(), but it will WARN, return false and
   * ultimately leak on underflow and will fail to decrement when saturated
   * at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides a control dependency such that free() must come after.
   * See the comment on top.
   *
   * Use of this function is not recommended for the normal reference counting
   * use case in which references are taken and released one at a time.  In these
   * cases, refcount_dec(), or one of its variants, should instead be used to
   * decrement a reference count.
   *
   * Return: true if the resulting refcount is 0, false otherwise
   */
  bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		if (unlikely(val == UINT_MAX))
  			return false;
  
  		new = val - i;
  		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
  			return false;
  		}
  	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
  
  	return !new;
  }
  EXPORT_SYMBOL(refcount_sub_and_test_checked);

  /**
   * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
   * @r: the refcount
   *
   * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
   * decrement when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides a control dependency such that free() must come after.
   * See the comment on top.
   *
   * Return: true if the resulting refcount is 0, false otherwise
   */
  bool refcount_dec_and_test_checked(refcount_t *r)
  {
  	return refcount_sub_and_test_checked(1, r);
  }
  EXPORT_SYMBOL(refcount_dec_and_test_checked);
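
/*
 * Canonical put-side sketch ('struct foo' and 'foo_put()' are hypothetical):
 * the release ordering plus control dependency described above is what makes
 * freeing the object on the 1->0 transition safe.
 *
 *	static void foo_put(struct foo *p)
 *	{
 *		if (refcount_dec_and_test_checked(&p->ref))
 *			kfree(p);
 *	}
 */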

  /**
   * refcount_dec_checked - decrement a refcount
   * @r: the refcount
   *
   * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
   * when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before.
   */
  void refcount_dec_checked(refcount_t *r)
  {
	WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
  }
  EXPORT_SYMBOL(refcount_dec_checked);
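
/*
 * Usage note (sketch; 'p' and 'do_something()' are hypothetical):
 * refcount_dec_checked() is for callers that know this cannot be the final
 * reference, e.g. because they still hold another one; the final put belongs
 * to a *_dec_and_test variant.
 *
 *	refcount_inc_checked(&p->ref);		// temporary extra reference
 *	do_something(p);
 *	refcount_dec_checked(&p->ref);		// drop it; cannot reach 0 here
 */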

  /**
   * refcount_dec_if_one - decrement a refcount if it is 1
   * @r: the refcount
   *
   * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
   * success thereof.
   *
   * Like all decrement operations, it provides release memory order and provides
   * a control dependency.
   *
 * It can be used like a try-delete operator; this explicit case is provided
 * instead of a generic cmpxchg, because a generic cmpxchg would allow
 * implementing unsafe operations.
   *
   * Return: true if the resulting refcount is 0, false otherwise
   */
  bool refcount_dec_if_one(refcount_t *r)
  {
  	int val = 1;
  
  	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
  }
  EXPORT_SYMBOL(refcount_dec_if_one);
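
/*
 * Try-delete sketch ('obj', 'obj_mutex' and the helpers are hypothetical):
 * tear the object down only if we hold the sole reference, so no concurrent
 * user can be surprised by the deletion.
 *
 *	mutex_lock(&obj_mutex);
 *	if (!refcount_dec_if_one(&obj->ref)) {
 *		mutex_unlock(&obj_mutex);
 *		return -EBUSY;			// other references still exist
 *	}
 *	obj_table_remove(obj);
 *	mutex_unlock(&obj_mutex);
 *	obj_free(obj);
 */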

  /**
   * refcount_dec_not_one - decrement a refcount if it is not 1
   * @r: the refcount
   *
   * No atomic_t counterpart, it decrements unless the value is 1, in which case
   * it will return false.
   *
   * Was often done like: atomic_add_unless(&var, -1, 1)
   *
   * Return: true if the decrement operation was successful, false otherwise
   */
  bool refcount_dec_not_one(refcount_t *r)
  {
  	unsigned int new, val = atomic_read(&r->refs);

  	do {
  		if (unlikely(val == UINT_MAX))
  			return true;
  
  		if (val == 1)
  			return false;
  
  		new = val - 1;
  		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
  			return true;
  		}
  	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_dec_not_one);

  /**
   * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
   *                               refcount to 0
   * @r: the refcount
   * @lock: the mutex to be locked
   *
   * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
   * to decrement when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides a control dependency such that free() must come after.
   * See the comment on top.
   *
   * Return: true and hold mutex if able to decrement refcount to 0, false
   *         otherwise
   */
  bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
  {
  	if (refcount_dec_not_one(r))
  		return false;
  
  	mutex_lock(lock);
  	if (!refcount_dec_and_test(r)) {
  		mutex_unlock(lock);
  		return false;
  	}
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
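
/*
 * Usage sketch ('dev', 'dev->node' and 'dev_list_lock' are hypothetical):
 * the mutex is only taken when this might be the final reference, keeping
 * the common put path lock-free via refcount_dec_not_one() above.
 *
 *	if (refcount_dec_and_mutex_lock(&dev->ref, &dev_list_lock)) {
 *		list_del(&dev->node);
 *		mutex_unlock(&dev_list_lock);
 *		kfree(dev);
 *	}
 */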

  /**
   * refcount_dec_and_lock - return holding spinlock if able to decrement
   *                         refcount to 0
   * @r: the refcount
   * @lock: the spinlock to be locked
   *
   * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
   * decrement when saturated at UINT_MAX.
   *
   * Provides release memory ordering, such that prior loads and stores are done
   * before, and provides a control dependency such that free() must come after.
   * See the comment on top.
   *
   * Return: true and hold spinlock if able to decrement refcount to 0, false
   *         otherwise
   */
  bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
  {
  	if (refcount_dec_not_one(r))
  		return false;
  
  	spin_lock(lock);
  	if (!refcount_dec_and_test(r)) {
  		spin_unlock(lock);
  		return false;
  	}
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_dec_and_lock);

  /**
   * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
   *                                 interrupts if able to decrement refcount to 0
   * @r: the refcount
   * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
   *
 * Same as refcount_dec_and_lock() above, except that the spinlock is acquired
 * with interrupts disabled.
   *
   * Return: true and hold spinlock if able to decrement refcount to 0, false
   *         otherwise
   */
  bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
  				   unsigned long *flags)
  {
  	if (refcount_dec_not_one(r))
  		return false;
  
  	spin_lock_irqsave(lock, *flags);
  	if (!refcount_dec_and_test(r)) {
  		spin_unlock_irqrestore(lock, *flags);
  		return false;
  	}
  
  	return true;
  }
  EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
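
/*
 * Usage sketch (hypothetical 'obj' and 'obj_lock'): same pattern as for
 * refcount_dec_and_lock(), but safe when the lock is also taken from
 * interrupt context.
 *
 *	unsigned long flags;
 *
 *	if (refcount_dec_and_lock_irqsave(&obj->ref, &obj_lock, &flags)) {
 *		hlist_del(&obj->node);
 *		spin_unlock_irqrestore(&obj_lock, flags);
 *		kfree(obj);
 *	}
 */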