include/linux/percpu-refcount.h

  /*
   * Percpu refcounts:
   * (C) 2012 Google, Inc.
   * Author: Kent Overstreet <koverstreet@google.com>
   *
   * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
   * atomic_dec_and_test() - but percpu.
   *
   * There's one important difference between percpu refs and normal atomic_t
   * refcounts; you have to keep track of your initial refcount, and then when you
   * start shutting down you call percpu_ref_kill() _before_ dropping the initial
   * refcount.
   *
   * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
   * than an atomic_t - this is because of the way shutdown works, see
   * percpu_ref_kill()/PERCPU_COUNT_BIAS.
   *
   * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
   * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
   * puts the ref back in single atomic_t mode, collecting the per cpu refs and
   * issuing the appropriate barriers, and then marks the ref as shutting down so
   * that percpu_ref_put() will check for the ref hitting 0.  After it returns,
   * it's safe to drop the initial ref.
   *
   * USAGE:
   *
   * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
   * calls io_destroy() or the process exits.
   *
   * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
   * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
 * the kioctx from the process's list of kioctxs - after that, there can't be
   * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
   * the initial ref with percpu_ref_put().
   *
 * Code that does a two-stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be
 * dropped once - percpu_ref_kill() must be called exactly once, so the
 * caller needs some other mechanism to synchronize teardown around it.
   */
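
/*
 * Illustrative sketch of the lifecycle described above (not from the
 * original header; "struct foo" and foo_release() are hypothetical):
 *
 *	struct foo {
 *		struct percpu_ref	ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);
 *		kfree(foo);
 *	}
 *
 *	Creation - the ref starts at 1, the initial ref:
 *
 *	if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	Each user takes and drops its own reference:
 *
 *	percpu_ref_get(&foo->ref);
 *	percpu_ref_put(&foo->ref);
 *
 *	Shutdown - drops the initial ref; foo_release() runs once the
 *	remaining references are put:
 *
 *	percpu_ref_kill(&foo->ref);
 */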
  
  #ifndef _LINUX_PERCPU_REFCOUNT_H
  #define _LINUX_PERCPU_REFCOUNT_H
  
  #include <linux/atomic.h>
  #include <linux/kernel.h>
  #include <linux/percpu.h>
  #include <linux/rcupdate.h>
  #include <linux/gfp.h>
  
  struct percpu_ref;
  typedef void (percpu_ref_func_t)(struct percpu_ref *);

  /* flags set in the lower bits of percpu_ref->percpu_count_ptr */
  enum {
  	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
  	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
  	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
  
  	__PERCPU_REF_FLAG_BITS	= 2,
};

  /* @flags for percpu_ref_init() */
  enum {
  	/*
  	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
  	 * operation using percpu_ref_switch_to_percpu().  If initialized
  	 * with this flag, the ref will stay in atomic mode until
  	 * percpu_ref_switch_to_percpu() is invoked on it.
  	 */
  	PERCPU_REF_INIT_ATOMIC	= 1 << 0,
  
  	/*
  	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
  	 * percpu_ref_reinit() before used.  Implies INIT_ATOMIC.
  	 */
  	PERCPU_REF_INIT_DEAD	= 1 << 1,
  };
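
/*
 * Sketch (hypothetical "obj" and obj_release(); an assumption, not from
 * the original header): start in atomic mode while setup completes, then
 * switch to percpu operation:
 *
 *	percpu_ref_init(&obj->ref, obj_release, PERCPU_REF_INIT_ATOMIC,
 *			GFP_KERNEL);
 *	... finish setup, publish obj ...
 *	percpu_ref_switch_to_percpu(&obj->ref);
 */
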
  struct percpu_ref {
  	atomic_long_t		count;
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, the ref is in atomic mode and get/put operate on
	 * @count directly.
	 */
  	unsigned long		percpu_count_ptr;
  	percpu_ref_func_t	*release;
  	percpu_ref_func_t	*confirm_switch;
  	bool			force_atomic:1;
  	struct rcu_head		rcu;
};

  int __must_check percpu_ref_init(struct percpu_ref *ref,
  				 percpu_ref_func_t *release, unsigned int flags,
  				 gfp_t gfp);
  void percpu_ref_exit(struct percpu_ref *ref);
  void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
  				 percpu_ref_func_t *confirm_switch);
  void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
  void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
  				 percpu_ref_func_t *confirm_kill);
  void percpu_ref_reinit(struct percpu_ref *ref);
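
/*
 * Sketch of a drain/revive cycle built from the functions above
 * (hypothetical "q"; its release callback is assumed to wake q->drain_wq
 * rather than free anything):
 *
 *	percpu_ref_kill(&q->ref);
 *	wait_event(q->drain_wq, percpu_ref_is_zero(&q->ref));
 *	... all users drained, do the quiesced work ...
 *	percpu_ref_reinit(&q->ref);
 *
 * percpu_ref_reinit() puts the ref back in the state it had right after
 * percpu_ref_init() - live, with the count at 1.
 */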
  
  /**
   * percpu_ref_kill - drop the initial ref
   * @ref: percpu_ref to kill
   *
   * Must be used to drop the initial ref on a percpu refcount; must be called
   * precisely once before shutdown.
   *
 * Puts @ref in non-percpu mode, then does a call_rcu() before gathering up the
   * percpu counters and dropping the initial ref.
   */
  static inline void percpu_ref_kill(struct percpu_ref *ref)
  {
  	return percpu_ref_kill_and_confirm(ref, NULL);
  }
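
/*
 * Sketch: when a caller needs a hard guarantee that no new
 * percpu_ref_tryget_live() can succeed, use percpu_ref_kill_and_confirm()
 * directly ("obj" and obj_confirm_kill() are hypothetical):
 *
 *	static void obj_confirm_kill(struct percpu_ref *ref)
 *	{
 *		... invoked once the switch to atomic mode has completed
 *		    on all CPUs; from this point tryget_live() fails ...
 *	}
 *
 *	percpu_ref_kill_and_confirm(&obj->ref, obj_confirm_kill);
 */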

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer for the caller to test against
 * NULL because doing so would force the compiler to generate two
 * conditional branches, as it can't assume that the fetched percpu
 * pointer is never NULL.
 */
  static inline bool __ref_is_percpu(struct percpu_ref *ref,
  					  unsigned long __percpu **percpu_countp)
  {
  	unsigned long percpu_ptr;
  
  	/*
  	 * The value of @ref->percpu_count_ptr is tested for
  	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
  	 * used as a pointer.  If the compiler generates a separate fetch
  	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
	 * between, contaminating the pointer value, meaning that
  	 * ACCESS_ONCE() is required when fetching it.
  	 *
  	 * Also, we need a data dependency barrier to be paired with
  	 * smp_store_release() in __percpu_ref_switch_to_percpu().
  	 *
  	 * Use lockless deref which contains both.
  	 */
  	percpu_ptr = lockless_dereference(ref->percpu_count_ptr);

  	/*
  	 * Theoretically, the following could test just ATOMIC; however,
  	 * then we'd have to mask off DEAD separately as DEAD may be
  	 * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
  	 * implies ATOMIC anyway.  Test them together.
  	 */
  	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
  		return false;
  	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
  	return true;
  }
  
  /**
   * percpu_ref_get_many - increment a percpu refcount
   * @ref: percpu_ref to get
   * @nr: number of references to get
   *
   * Analogous to atomic_long_add().
   *
   * This function is safe to call as long as @ref is between init and exit.
   */
  static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
  {
  	unsigned long __percpu *percpu_count;

  	rcu_read_lock_sched();

  	if (__ref_is_percpu(ref, &percpu_count))
  		this_cpu_add(*percpu_count, nr);
  	else
  		atomic_long_add(nr, &ref->count);

  	rcu_read_unlock_sched();
  }
  
  /**
   * percpu_ref_get - increment a percpu refcount
   * @ref: percpu_ref to get
   *
 * Analogous to atomic_long_inc().
   *
   * This function is safe to call as long as @ref is between init and exit.
   */
  static inline void percpu_ref_get(struct percpu_ref *ref)
  {
  	percpu_ref_get_many(ref, 1);
  }
  
  /**
   * percpu_ref_tryget - try to increment a percpu refcount
   * @ref: percpu_ref to try-get
   *
 * Increment a percpu refcount unless its count has already reached zero.
   * Returns %true on success; %false on failure.
   *
   * This function is safe to call as long as @ref is between init and exit.
   */
  static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  {
  	unsigned long __percpu *percpu_count;
	bool ret;
  
  	rcu_read_lock_sched();
  	if (__ref_is_percpu(ref, &percpu_count)) {
  		this_cpu_inc(*percpu_count);
  		ret = true;
  	} else {
  		ret = atomic_long_inc_not_zero(&ref->count);
  	}
  
  	rcu_read_unlock_sched();
  
  	return ret;
  }
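
/*
 * Note: unlike percpu_ref_tryget_live() below, this can still succeed
 * after percpu_ref_kill() as long as the count hasn't yet reached zero.
 */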
  
  /**
   * percpu_ref_tryget_live - try to increment a live percpu refcount
   * @ref: percpu_ref to try-get
   *
   * Increment a percpu refcount unless it has already been killed.  Returns
   * %true on success; %false on failure.
   *
   * Completion of percpu_ref_kill() in itself doesn't guarantee that this
   * function will fail.  For such guarantee, percpu_ref_kill_and_confirm()
   * should be used.  After the confirm_kill callback is invoked, it's
   * guaranteed that no new reference will be given out by
   * percpu_ref_tryget_live().
   *
   * This function is safe to call as long as @ref is between init and exit.
   */
  static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
  {
  	unsigned long __percpu *percpu_count;
	bool ret = false;

  	rcu_read_lock_sched();

  	if (__ref_is_percpu(ref, &percpu_count)) {
  		this_cpu_inc(*percpu_count);
  		ret = true;
  	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
  		ret = atomic_long_inc_not_zero(&ref->count);
  	}
  	rcu_read_unlock_sched();
  
  	return ret;
  }
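
/*
 * Typical lookup-side pattern (sketch; lookup_obj() is hypothetical):
 * hand out new references only while the object is live, so that a
 * racing shutdown sees no new users appear:
 *
 *	obj = lookup_obj(key);
 *	if (obj && !percpu_ref_tryget_live(&obj->ref))
 *		obj = NULL;
 *
 * A NULL result is then treated the same as a failed lookup.
 */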
  
  /**
   * percpu_ref_put_many - decrement a percpu refcount
   * @ref: percpu_ref to put
   * @nr: number of references to put
   *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
   *
   * This function is safe to call as long as @ref is between init and exit.
   */
  static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
  {
  	unsigned long __percpu *percpu_count;

  	rcu_read_lock_sched();

  	if (__ref_is_percpu(ref, &percpu_count))
  		this_cpu_sub(*percpu_count, nr);
  	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
  		ref->release(ref);
  	rcu_read_unlock_sched();
}

  /**
   * percpu_ref_put - decrement a percpu refcount
   * @ref: percpu_ref to put
   *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
   *
   * This function is safe to call as long as @ref is between init and exit.
   */
  static inline void percpu_ref_put(struct percpu_ref *ref)
  {
  	percpu_ref_put_many(ref, 1);
  }
  
  /**
   * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
   * @ref: percpu_ref to test
   *
   * Returns %true if @ref is dying or dead.
   *
   * This function is safe to call as long as @ref is between init and exit
   * and the caller is responsible for synchronizing against state changes.
   */
  static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
  {
  	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
  }
  
  /**
   * percpu_ref_is_zero - test whether a percpu refcount reached zero
   * @ref: percpu_ref to test
   *
   * Returns %true if @ref reached zero.
   *
   * This function is safe to call as long as @ref is between init and exit.
   */
  static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
  {
  	unsigned long __percpu *percpu_count;

  	if (__ref_is_percpu(ref, &percpu_count))
  		return false;
  	return !atomic_long_read(&ref->count);
}

  #endif