lib/genalloc.c
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has
 * to be taken. So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg
 * implementation, the allocator can NOT be used in NMI handlers. So
 * code that uses the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
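/*
 * Typical usage (an illustrative sketch, not part of this file; dev_virt,
 * dev_phys and dev_size stand for a driver-owned special memory region):
 *
 *	struct gen_pool *pool = gen_pool_create(8, -1);
 *	if (pool) {
 *		if (gen_pool_add_virt(pool, dev_virt, dev_phys,
 *				      dev_size, -1) == 0) {
 *			unsigned long vaddr = gen_pool_alloc(pool, 512);
 *			if (vaddr)
 *				gen_pool_free(pool, vaddr, 512);
 *		}
 *		gen_pool_destroy(pool);
 *	}
 *
 * min_alloc_order == 8 makes the pool hand out 256-byte granules;
 * gen_pool_destroy() requires that all allocations have been freed.
 */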
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race to set the same bit, one of them fails and the number of
 * bits still to be set is returned; on success 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race to clear the same bit, one of them fails and the number of
 * bits still to be cleared is returned; on success 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}
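/*
 * Worked example for the helpers above (illustrative, assuming
 * BITS_PER_LONG == 32): bitmap_set_ll(map, 30, 4) first applies
 * BITMAP_FIRST_WORD_MASK(30) == 0xc0000000 to map[0] (bits 30-31),
 * then leaves the loop with nr == 2 and applies
 * ~0UL & BITMAP_LAST_WORD_MASK(34) == 0x00000003 to map[1], covering
 * bits 32-33 of the bitmap. If a competing user already holds one of
 * the requested bits, the number of bits not yet set is returned so
 * the caller can roll back with bitmap_clear_ll() and retry.
 */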
/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
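/*
 * For example (illustrative): gen_pool_create(12, -1) creates a pool on
 * any node whose bitmap tracks 4096-byte (1 << 12) granules, so every
 * later gen_pool_alloc() is rounded up to a multiple of 4096 bytes.
 */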
/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt,
		      phys_addr_t phys, size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool containing @addr
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
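/*
 * Example (an illustrative sketch): a driver that must hand a device the
 * physical address of a pool allocation can combine the two calls:
 *
 *	unsigned long vaddr = gen_pool_alloc(pool, 256);
 *	phys_addr_t paddr = gen_pool_virt_to_phys(pool, vaddr);
 *
 * paddr is -1 if vaddr lies in none of the pool's chunks.
 */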
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
	return;
}
EXPORT_SYMBOL(gen_pool_destroy);
/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm. Cannot be used in an NMI handler on
 * architectures without an NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
		start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit,
						       start_bit, nbits, 0);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
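/*
 * Worked example for the size rounding above (illustrative): with
 * min_alloc_order == 8, gen_pool_alloc(pool, 500) computes
 * nbits = (500 + 255) >> 8 == 2, claims two bitmap bits, and charges
 * nbits << 8 == 512 bytes against chunk->avail, not 500.
 */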
/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in an NMI handler on architectures without
 * an NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			BUG_ON(addr + size > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);
/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk->end_addr - chunk->start_addr;
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
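/*
 * Example callback for gen_pool_for_each_chunk() (an illustrative
 * sketch; count_chunk is hypothetical). @func runs under
 * rcu_read_lock, so it must not sleep:
 *
 *	static void count_chunk(struct gen_pool *pool,
 *				struct gen_pool_chunk *chunk, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int nr_chunks = 0;
 *	gen_pool_for_each_chunk(pool, count_chunk, &nr_chunks);
 */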