lib/percpu-refcount.c
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/percpu-refcount.h>

/*
 * A percpu refcount starts out as just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine, because when we go to
 * shut down, the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow.)
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we
 * know the ref can't hit 0 before the user drops the initial ref, so as long
 * as we convert to non-percpu mode before the initial ref is dropped,
 * everything works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
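/*
 * Illustrative sketch (not part of this file): the modular-arithmetic claim
 * above, worked through with hypothetical 4-bit percpu counters.  20 gets on
 * cpu0 leave that counter at 20 % 16 = 4; 18 puts on cpu1 leave that counter
 * at (-18) % 16 = 14.  Each per-cpu value is individually meaningless, but
 * the sum, 4 + 14 = 18 = 2 (mod 16), is exactly the net reference count of
 * 20 - 18 = 2.
 */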

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
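/*
 * Illustrative note (not part of this file): on a 64-bit kernel the bias is
 * 1UL << 63, so the atomic count starts far above zero and cannot be driven
 * to zero by percpu puts that have not been summed in yet; the bias is
 * subtracted back out in a single atomic_long_add() in
 * percpu_ref_switch_to_atomic_rcu().
 */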

static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
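/*
 * Illustrative note (not part of this file): the low bits of
 * ->percpu_count_ptr double as the __PERCPU_REF_ATOMIC and __PERCPU_REF_DEAD
 * flags, which is why percpu_ref_init() allocates the counter with at least
 * 1 << __PERCPU_REF_FLAG_BITS alignment and why the helper above masks
 * __PERCPU_REF_ATOMIC_DEAD off before using the value as a pointer, e.g.:
 *
 *	bool atomic = ref->percpu_count_ptr & __PERCPU_REF_ATOMIC;
 *	bool dead   = ref->percpu_count_ptr & __PERCPU_REF_DEAD;
 */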

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref.  @ref starts out in percpu mode with a refcount of 1
 * unless @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD.
 * These flags change the start state to atomic with the latter setting the
 * initial refcount to 0.  See the definitions of PERCPU_REF_INIT_* flags
 * for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;
	struct percpu_ref_data *data;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	data = kzalloc(sizeof(*ref->data), gfp);
	if (!data) {
		free_percpu((void __percpu *)ref->percpu_count_ptr);
		ref->percpu_count_ptr = 0;
		return -ENOMEM;
	}

	data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
	data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
		data->allow_reinit = true;
	} else {
		start_count += PERCPU_COUNT_BIAS;
	}

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&data->count, start_count);

	data->release = release;
	data->confirm_switch = NULL;
	data->ref = ref;
	ref->data = data;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
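/*
 * Illustrative usage sketch (not part of this file; "struct foo",
 * foo_release() and the call site are hypothetical):
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *		...
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);
 *		kfree(foo);
 *	}
 *
 *	ret = percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
 *
 * On success @foo->ref starts live, in percpu mode, with a refcount of 1;
 * once percpu_ref_kill() has dropped that initial ref, the final
 * percpu_ref_put() invokes foo_release().
 */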

static void __percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		/* non-NULL confirm_switch indicates switching in progress */
		WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref.  The caller is responsible for ensuring that
 * @ref is no longer in active use.  The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	struct percpu_ref_data *data = ref->data;
	unsigned long flags;

	__percpu_ref_exit(ref);

	if (!data)
		return;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
		__PERCPU_REF_FLAG_BITS;
	ref->data = NULL;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;

	data->confirm_switch(ref);
	data->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	if (!data->allow_reinit)
		__percpu_ref_exit(ref);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref_data *data = container_of(rcu,
			struct percpu_ref_data, rcu);
	struct percpu_ref *ref = data->ref;
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	static atomic_t underflows;
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %lu percpu %lu\n",
		 atomic_long_read(&data->count), count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count
	 * to &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts.  But doing it at the
	 * same time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);

	if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
		      "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
		      data->release, atomic_long_read(&data->count)) &&
	    atomic_inc_return(&underflows) < 4) {
		pr_err("%s(): percpu_ref underflow", __func__);
		mem_dump_obj(data);
	}

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		if (confirm_switch)
			confirm_switch(ref);
		return;
	}

	/* switching from percpu to atomic */
	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/*
	 * Non-NULL ->confirm_switch is used to indicate that switching is
	 * in progress.  Use the noop one if unspecified.
	 */
	ref->data->confirm_switch = confirm_switch ?:
		percpu_ref_noop_confirm_switch;

	percpu_ref_get(ref);	/* put after confirmation */
	call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
}

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	if (WARN_ON_ONCE(!ref->data->allow_reinit))
		return;

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);

	/*
	 * Restore per-cpu operation.  smp_store_release() is paired
	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
	 * zeroing is visible to all percpu accesses which can see the
	 * following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

static void __percpu_ref_switch_mode(struct percpu_ref *ref,
				     percpu_ref_func_t *confirm_switch)
{
	struct percpu_ref_data *data = ref->data;

	lockdep_assert_held(&percpu_ref_switch_lock);

	/*
	 * If the previous ATOMIC switching hasn't finished yet, wait for
	 * its completion.  If the caller ensures that ATOMIC switching
	 * isn't in progress, this function can be called from any context.
	 */
	wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
			    percpu_ref_switch_lock);

	if (data->force_atomic || percpu_ref_is_dying(ref))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
	else
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode.  All its percpu counts will
 * be collected to the main atomic counter.  On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked.  This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations.  Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = true;
	__percpu_ref_switch_mode(ref, confirm_switch);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);

/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete.  Caller must ensure that no other thread
 * will switch back to percpu mode.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
	percpu_ref_switch_to_atomic(ref, NULL);
	wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
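/*
 * Illustrative sketch (not part of this file; "obj" is a hypothetical object
 * embedding a percpu_ref): draining the percpu fast path before a
 * reconfiguration step, then restoring it afterwards:
 *
 *	percpu_ref_switch_to_atomic_sync(&obj->ref);
 *	... all gets/puts now hit the atomic counter; reconfigure safely ...
 *	percpu_ref_switch_to_percpu(&obj->ref);
 */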

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode.  This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations.  This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic().  If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	ref->data->force_atomic = false;
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs, at which point all
 * further invocations of percpu_ref_tryget_live() will fail.  See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context,
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * There are no implied RCU grace periods between kill and release.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ONCE(percpu_ref_is_dying(ref),
		  "%s called more than once on %ps!", __func__,
		  ref->data->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_mode(ref, confirm_kill);
	percpu_ref_put(ref);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
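/*
 * Illustrative sketch (not part of this file; "struct foo" and
 * foo_confirm_kill() are hypothetical): since @confirm_kill may not block,
 * a common pattern is to have it complete a completion that the teardown
 * path waits on:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 *
 * After the wait, new percpu_ref_tryget_live() calls fail, though
 * already-held references may still be in flight until release is called.
 */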

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long count, flags;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;

	/* protect us from being destroyed */
	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
	if (ref->data)
		count = atomic_long_read(&ref->data->count);
	else
		count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

	return count == 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_is_zero);
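/*
 * Illustrative sketch (not part of this file; "obj" is hypothetical): a
 * simple polling teardown built on percpu_ref_is_zero(), assuming process
 * context:
 *
 *	percpu_ref_kill(&obj->ref);
 *	while (!percpu_ref_is_zero(&obj->ref))
 *		msleep(10);
 *	percpu_ref_exit(&obj->ref);
 */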

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD.  @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);

/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called.  @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	unsigned long flags;

	spin_lock_irqsave(&percpu_ref_switch_lock, flags);

	WARN_ON_ONCE(!percpu_ref_is_dying(ref));
	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	__percpu_ref_switch_mode(ref, NULL);

	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
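/*
 * Illustrative sketch (not part of this file; "obj" is hypothetical):
 * cycling a ref through kill and reinit, e.g. around a freeze/unfreeze
 * style operation:
 *
 *	percpu_ref_kill(&obj->ref);
 *	... wait until percpu_ref_is_zero(&obj->ref) ...
 *	percpu_ref_reinit(&obj->ref);
 *
 * After percpu_ref_reinit() the ref is live again with a refcount of 1, as
 * if percpu_ref_init() had just finished.
 */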