Commit 4b20db3de8dab005b07c74161cb041db8c5ff3a7
Committed by Dave Airlie
1 parent: d714455619
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
kref: Implement kref_get_unless_zero v3
This function is intended to simplify locking around refcounting for objects that can be looked up from a lookup structure, and which are removed from that lookup structure in the object destructor. Operations on such objects require at least a read lock around lookup + kref_get, and a write lock around kref_put + remove from lookup structure. Furthermore, RCU implementations become extremely tricky. With a lookup followed by a kref_get_unless_zero *with return value check*, locking in the kref_put path can be deferred to the actual removal from the lookup structure, and RCU lookups become trivial.

v2: Formatting fixes.
v3: Invert the return value.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
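To make the described pattern concrete, here is a minimal usage sketch; it is not part of this commit. It assumes a hypothetical object type kept in an idr and looked up under RCU: the names struct foo, foo_idr, foo_lock, foo_lookup, foo_release and foo_put are made up for illustration, while the kref, idr, RCU and kfree_rcu calls are real kernel APIs.

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
        struct kref refcount;
        struct rcu_head rcu;
        int id;
};

static DEFINE_SPINLOCK(foo_lock);       /* protects insert/remove in foo_idr */
static DEFINE_IDR(foo_idr);

/* Lockless lookup: no read lock around lookup + get is needed. */
static struct foo *foo_lookup(int id)
{
        struct foo *f;

        rcu_read_lock();
        f = idr_find(&foo_idr, id);
        /* The object may already be on its way out; only take a reference
         * if the refcount has not reached zero. */
        if (f && !kref_get_unless_zero(&f->refcount))
                f = NULL;
        rcu_read_unlock();
        return f;
}

static void foo_release(struct kref *kref)
{
        struct foo *f = container_of(kref, struct foo, refcount);

        /* Locking is deferred to the actual removal from the lookup
         * structure, exactly as the commit message describes. */
        spin_lock(&foo_lock);
        idr_remove(&foo_idr, f->id);
        spin_unlock(&foo_lock);
        kfree_rcu(f, rcu);
}

static void foo_put(struct foo *f)
{
        kref_put(&f->refcount, foo_release);
}

The key point is that foo_lookup() needs no lock for refcount correctness: if kref_get_unless_zero() fails the object is already being destroyed, and kfree_rcu() keeps its memory valid for the duration of the RCU read-side critical section.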
Showing 1 changed file with 21 additions and 0 deletions
include/linux/kref.h
 /*
  * kref.h - library routines for handling generic reference counted objects
  *
  * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
  * Copyright (C) 2004 IBM Corp.
  *
  * based on kobject.h which was:
  * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
  * Copyright (C) 2002-2003 Open Source Development Labs
  *
  * This file is released under the GPLv2.
  *
  */

 #ifndef _KREF_H_
 #define _KREF_H_

 #include <linux/bug.h>
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>

 struct kref {
         atomic_t refcount;
 };

 /**
  * kref_init - initialize object.
  * @kref: object in question.
  */
 static inline void kref_init(struct kref *kref)
 {
         atomic_set(&kref->refcount, 1);
 }

 /**
  * kref_get - increment refcount for object.
  * @kref: object.
  */
 static inline void kref_get(struct kref *kref)
 {
         WARN_ON(!atomic_read(&kref->refcount));
         atomic_inc(&kref->refcount);
 }

 /**
  * kref_sub - subtract a number of refcounts for object.
  * @kref: object.
  * @count: Number of recounts to subtract.
  * @release: pointer to the function that will clean up the object when the
  *           last reference to the object is released.
  *           This pointer is required, and it is not acceptable to pass kfree
  *           in as this function. If the caller does pass kfree to this
  *           function, you will be publicly mocked mercilessly by the kref
  *           maintainer, and anyone else who happens to notice it. You have
  *           been warned.
  *
  * Subtract @count from the refcount, and if 0, call release().
  * Return 1 if the object was removed, otherwise return 0. Beware, if this
  * function returns 0, you still can not count on the kref from remaining in
  * memory. Only use the return value if you want to see if the kref is now
  * gone, not present.
  */
 static inline int kref_sub(struct kref *kref, unsigned int count,
              void (*release)(struct kref *kref))
 {
         WARN_ON(release == NULL);

         if (atomic_sub_and_test((int) count, &kref->refcount)) {
                 release(kref);
                 return 1;
         }
         return 0;
 }

 /**
  * kref_put - decrement refcount for object.
  * @kref: object.
  * @release: pointer to the function that will clean up the object when the
  *           last reference to the object is released.
  *           This pointer is required, and it is not acceptable to pass kfree
  *           in as this function. If the caller does pass kfree to this
  *           function, you will be publicly mocked mercilessly by the kref
  *           maintainer, and anyone else who happens to notice it. You have
  *           been warned.
  *
  * Decrement the refcount, and if 0, call release().
  * Return 1 if the object was removed, otherwise return 0. Beware, if this
  * function returns 0, you still can not count on the kref from remaining in
  * memory. Only use the return value if you want to see if the kref is now
  * gone, not present.
  */
 static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
 {
         return kref_sub(kref, 1, release);
 }

 static inline int kref_put_mutex(struct kref *kref,
                                  void (*release)(struct kref *kref),
                                  struct mutex *lock)
 {
         WARN_ON(release == NULL);
         if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
                 mutex_lock(lock);
                 if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
                         mutex_unlock(lock);
                         return 0;
                 }
                 release(kref);
                 return 1;
         }
         return 0;
 }
+
+/**
+ * kref_get_unless_zero - Increment refcount for object unless it is zero.
+ * @kref: object.
+ *
+ * Return non-zero if the increment succeeded. Otherwise return 0.
+ *
+ * This function is intended to simplify locking around refcounting for
+ * objects that can be looked up from a lookup structure, and which are
+ * removed from that lookup structure in the object destructor.
+ * Operations on such objects require at least a read lock around
+ * lookup + kref_get, and a write lock around kref_put + remove from lookup
+ * structure. Furthermore, RCU implementations become extremely tricky.
+ * With a lookup followed by a kref_get_unless_zero *with return value check*
+ * locking in the kref_put path can be deferred to the actual removal from
+ * the lookup structure and RCU lookups become trivial.
+ */
+static inline int __must_check kref_get_unless_zero(struct kref *kref)
+{
+        return atomic_add_unless(&kref->refcount, 1, 0);
+}
 #endif /* _KREF_H_ */
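When the lookup structure is protected by a mutex rather than RCU, the pre-existing kref_put_mutex() helper shown above gives the same deferred locking on the put path: the mutex is taken only when the refcount may drop to zero, and the release callback is entered with the mutex held, so it can unlink the object and then drop the mutex itself. Again a minimal sketch with illustrative names (struct bar, bar_list, bar_mutex are assumptions, not existing kernel code):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct bar {
        struct kref refcount;
        struct list_head node;
};

static DEFINE_MUTEX(bar_mutex);         /* protects bar_list membership */
static LIST_HEAD(bar_list);

static void bar_release(struct kref *kref)
{
        struct bar *b = container_of(kref, struct bar, refcount);

        /* kref_put_mutex() calls us with bar_mutex held: unlink, unlock, free. */
        list_del(&b->node);
        mutex_unlock(&bar_mutex);
        kfree(b);
}

static void bar_put(struct bar *b)
{
        /* Takes bar_mutex only when the refcount may reach zero. */
        kref_put_mutex(&b->refcount, bar_release, &bar_mutex);
}

static struct bar *bar_find_first(void)
{
        struct bar *b = NULL;

        /* Holding bar_mutex during the lookup makes a plain kref_get() safe. */
        mutex_lock(&bar_mutex);
        if (!list_empty(&bar_list)) {
                b = list_first_entry(&bar_list, struct bar, node);
                kref_get(&b->refcount);
        }
        mutex_unlock(&bar_mutex);
        return b;
}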