Blame view
include/linux/idr.h
9.6 KB
68cf618c6
|
1 |
/* SPDX-License-Identifier: GPL-2.0-only */ |
1da177e4c
|
2 3 4 5 6 |
/* * include/linux/idr.h * * 2002-10-18 written by Jim Houston jim.houston@ccur.com * Copyright (C) 2002 by Concurrent Computer Corporation |
1da177e4c
|
7 8 9 10 |
* * Small id to pointer translation service avoiding fixed sized * tables. */ |
f668ab1ac
|
11 12 13 |
#ifndef __IDR_H__ #define __IDR_H__ |
0a835c4f0
|
14 15 |
#include <linux/radix-tree.h> #include <linux/gfp.h> |
7ad3d4d85
|
16 |
#include <linux/percpu.h> |
0a835c4f0
|
17 18 19 |
/*
 * struct idr - sparse ID-to-pointer map built on a radix tree.
 */
struct idr {
	struct radix_tree_root	idr_rt;		/* backing radix tree */
	unsigned int		idr_base;	/* first ID to allocate from (see idr_init_base()) */
	unsigned int		idr_next;	/* cursor used by idr_alloc_cyclic() */
};
1da177e4c
|
23 |
|
050a6b47d
|
24 |
/*
 * The IDR API does not expose the tagging functionality of the radix tree
 * to users.  Use tag 0 to track whether a node has free space below it.
 */
#define IDR_FREE	0

/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER	(ROOT_IS_IDR | (__force gfp_t)			\
					(1 << (ROOT_TAG_SHIFT + IDR_FREE)))
1da177e4c
|
33 |
|
f6bb2a2c0
|
34 35 |
/*
 * IDR_INIT_BASE() - Static initialiser for an IDR allocating IDs from @base.
 */
#define IDR_INIT_BASE(name, base) {					\
	.idr_rt = RADIX_TREE_INIT(name, IDR_RT_MARKER),			\
	.idr_base = (base),						\
	.idr_next = 0,							\
}

/**
 * IDR_INIT() - Initialise an IDR.
 * @name: Name of IDR.
 *
 * A freshly-initialised IDR contains no IDs.
 */
#define IDR_INIT(name)	IDR_INIT_BASE(name, 0)

/**
 * DEFINE_IDR() - Define a statically-allocated IDR.
 * @name: Name of IDR.
 *
 * An IDR defined using this macro is ready for use with no additional
 * initialisation required.  It contains no IDs.
 */
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
ac665d942
|
56 57 |
/**
 * idr_get_cursor - Return the current position of the cyclic allocator
 * @idr: idr handle
 *
 * The value returned is the value that will be next returned from
 * idr_alloc_cyclic() if it is free (otherwise the search will start from
 * this position).
 */
static inline unsigned int idr_get_cursor(const struct idr *idr)
{
	/* READ_ONCE pairs with the WRITE_ONCE in idr_set_cursor() */
	return READ_ONCE(idr->idr_next);
}

/**
 * idr_set_cursor - Set the current position of the cyclic allocator
 * @idr: idr handle
 * @val: new position
 *
 * The next call to idr_alloc_cyclic() will return @val if it is free
 * (otherwise the search will start from this position).
 */
static inline void idr_set_cursor(struct idr *idr, unsigned int val)
{
	WRITE_ONCE(idr->idr_next, val);
}

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() is able to be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */
3c60e868c
|
99 100 101 102 103 104 105 106 107 108 |
/* Wrappers taking the xarray lock embedded in the IDR's radix tree root. */
#define idr_lock(idr)		xa_lock(&(idr)->idr_rt)
#define idr_unlock(idr)		xa_unlock(&(idr)->idr_rt)
#define idr_lock_bh(idr)	xa_lock_bh(&(idr)->idr_rt)
#define idr_unlock_bh(idr)	xa_unlock_bh(&(idr)->idr_rt)
#define idr_lock_irq(idr)	xa_lock_irq(&(idr)->idr_rt)
#define idr_unlock_irq(idr)	xa_unlock_irq(&(idr)->idr_rt)
#define idr_lock_irqsave(idr, flags) \
				xa_lock_irqsave(&(idr)->idr_rt, flags)
#define idr_unlock_irqrestore(idr, flags) \
				xa_unlock_irqrestore(&(idr)->idr_rt, flags)
d5c7409f7
|
109 |
void idr_preload(gfp_t gfp_mask);

/* Core IDR operations: allocation, lookup, removal, iteration. */
int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
				unsigned long max, gfp_t);
int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
void *idr_remove(struct idr *, unsigned long id);
void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
		 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
void *idr_get_next_ul(struct idr *, unsigned long *nextid);
void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
6ce711f27
|
123 124 125 126 127 128 129 130 131 |
/**
 * idr_init_base() - Initialise an IDR.
 * @idr: IDR handle.
 * @base: The base value for the IDR.
 *
 * This variation of idr_init() creates an IDR which will allocate IDs
 * starting at %base.
 */
static inline void idr_init_base(struct idr *idr, int base)
{
	INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
	idr->idr_base = base;
	idr->idr_next = 0;
}
6ce711f27
|
137 138 139 140 141 142 143 |
/**
 * idr_init() - Initialise an IDR.
 * @idr: IDR handle.
 *
 * Initialise a dynamically allocated IDR.  To initialise a
 * statically allocated IDR, use DEFINE_IDR().
 */
static inline void idr_init(struct idr *idr)
{
	idr_init_base(idr, 0);
}
ac665d942
|
148 149 150 151 152 153 |
/**
 * idr_is_empty() - Are there any IDs allocated?
 * @idr: IDR handle.
 *
 * Return: %true if no IDs are currently allocated from this IDR (the
 * backing radix tree is empty and its free tag is still set).
 */
static inline bool idr_is_empty(const struct idr *idr)
{
	return radix_tree_empty(&idr->idr_rt) &&
		radix_tree_tagged(&idr->idr_rt, IDR_FREE);
}
f668ab1ac
|
159 |
|
49038ef4f
|
160 |
/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function. See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	local_unlock(&radix_tree_preloads.lock);
}

/**
 * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idr, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; id += 1U)
49038ef4f
|
183 |
|
a55bbd375
|
184 |
/**
 * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
 * @idr: IDR handle.
 * @entry: The type * to use as cursor.
 * @tmp: A temporary placeholder for ID.
 * @id: Entry ID.
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 *
 * @tmp holds the previous ID so the loop terminates if ++@id wraps
 * around to zero (the "tmp <= id" test then fails).
 */
#define idr_for_each_entry_ul(idr, entry, tmp, id)			\
	for (tmp = 0, id = 0;						\
	     tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
	     tmp = id, ++id)
7a4575778
|
199 200 201 202 203 204 |
/**
 * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
 * @idr: IDR handle.
 * @entry: The type * to use as a cursor.
 * @id: Entry ID.
 *
 * Continue to iterate over entries, continuing after the current position.
 */
#define idr_for_each_entry_continue(idr, entry, id)			\
	for ((entry) = idr_get_next((idr), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idr), &(id)))
a55bbd375
|
212 |
|
d39d71496
|
213 214 215 216 217 218 219 220 221 222 223 224 225 |
/** * idr_for_each_entry_continue_ul() - Continue iteration over an IDR's elements of a given type * @idr: IDR handle. * @entry: The type * to use as a cursor. * @tmp: A temporary placeholder for ID. * @id: Entry ID. * * Continue to iterate over entries, continuing after the current position. */ #define idr_for_each_entry_continue_ul(idr, entry, tmp, id) \ for (tmp = id; \ tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \ tmp = id, ++id) |
c8615d371
|
226 |
/*
 * IDA - ID Allocator, use when translation from id to pointer isn't necessary.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long))
#define IDA_BITMAP_BITS 	(IDA_BITMAP_LONGS * sizeof(long) * 8)

/* One chunk's worth of ID bits. */
struct ida_bitmap {
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};

/* An IDA is a thin wrapper around an allocating xarray. */
struct ida {
	struct xarray xa;
};

#define IDA_INIT_FLAGS	(XA_FLAGS_LOCK_IRQ | XA_FLAGS_ALLOC)

/* Static initialiser for an IDA; see also DEFINE_IDA(). */
#define IDA_INIT(name)	{						\
	.xa = XARRAY_INIT(name, IDA_INIT_FLAGS)				\
}
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)
72dba584b
|
245 |
|
5ade60dda
|
246 247 |
/* Core IDA operations: allocation, freeing, teardown. */
int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
void ida_free(struct ida *, unsigned int id);
void ida_destroy(struct ida *ida);
72dba584b
|
249 |
|
5ade60dda
|
250 251 252 253 254 255 256 |
/**
 * ida_alloc() - Allocate an unused ID.
 * @ida: IDA handle.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc(struct ida *ida, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, ~0, gfp);
}
88eca0207
|
266 |
|
5ade60dda
|
267 268 269 270 271 272 273 274 |
/**
 * ida_alloc_min() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and %INT_MAX, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp)
{
	return ida_alloc_range(ida, min, ~0, gfp);
}
9749f30f1
|
284 |
/**
 * ida_alloc_max() - Allocate an unused ID.
 * @ida: IDA handle.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between 0 and @max, inclusive.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp)
{
	return ida_alloc_range(ida, 0, max, gfp);
}
0a835c4f0
|
301 302 |
/**
 * ida_init() - Initialise an IDA.
 * @ida: IDA handle.
 *
 * Initialise a dynamically allocated IDA.  To initialise a
 * statically allocated IDA, use DEFINE_IDA().
 */
static inline void ida_init(struct ida *ida)
{
	xa_init_flags(&ida->xa, IDA_INIT_FLAGS);
}
3264ceec8
|
305 306 307 308 |
/*
 * ida_simple_get() and ida_simple_remove() are deprecated. Use
 * ida_alloc() and ida_free() instead respectively.
 * Note the legacy @end is exclusive, hence the (end) - 1 conversion.
 */
#define ida_simple_get(ida, start, end, gfp)	\
			ida_alloc_range(ida, start, (end) - 1, gfp)
#define ida_simple_remove(ida, id)	ida_free(ida, id)
0a835c4f0
|
312 |
/**
 * ida_is_empty() - Are there any IDs allocated?
 * @ida: IDA handle.
 *
 * Return: %true if no IDs are currently allocated (the backing xarray
 * is empty).
 */
static inline bool ida_is_empty(const struct ida *ida)
{
	return xa_empty(&ida->xa);
}
f668ab1ac
|
316 |
#endif /* __IDR_H__ */ |