mm/slob.c

/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because slob_page()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_exact_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemtrace.h>
#include <linux/kmemleak.h>
#include <asm/atomic.h>

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
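
/*
 * Illustrative example of the size classes (not exercised by the code
 * here, see slob_alloc() below): a 100-byte request is served from a page
 * on free_slob_small, a 512-byte request from free_slob_medium, and a
 * 2000-byte request from free_slob_large.
 */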

/*
 * is_slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int is_slob_page(struct slob_page *sp)
{
	return PageSlab((struct page *)sp);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__SetPageSlab((struct page *)sp);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__ClearPageSlab((struct page *)sp);
}

static inline struct slob_page *slob_page(const void *addr)
{
	return (struct slob_page *)virt_to_page(addr);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return PageSlobFree((struct page *)sp);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree((struct page *)sp);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree((struct page *)sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
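
/*
 * Worked example (illustrative, assuming slobidx_t is s16): SLOB_UNIT is
 * then 2 bytes, so SLOB_UNITS(100) == 50 and SLOB_UNITS(3) == 2, i.e.
 * sizes are rounded up to a whole number of slob_t units.
 */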

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
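
/*
 * Illustrative example of the encoding above: with 'base' the start of the
 * page, set_slob(s, 3, base + 10) stores s[0].units == 3 and
 * s[1].units == 10, so slob_units(s) == 3 and slob_next(s) == base + 10.
 * A one-unit block, set_slob(s, 1, base + 10), instead stores only
 * s[0].units == -10.
 */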

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
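
/*
 * Alignment example (illustrative, assuming a 2-byte slob_t): for
 * align == 8, a free block starting at byte offset 4 within its page gives
 * delta == 2 units, so the "fragment head to align" branch above leaves a
 * 2-unit (4-byte) free head fragment and the allocation proceeds from the
 * 8-byte-aligned address.
 */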

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = slob_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);
		free_slob_page(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		set_slob_page_free(sp, &free_slob_small);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		if (b + units == sp->free) {
			units += slob_units(sp->free);
			sp->free = slob_next(sp->free);
		}
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
#endif
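
/*
 * Illustrative layout of a small kmalloc() allocation (example only,
 * assuming align == 4):
 *
 *	|<- align ->|<-------- size -------->|
 *	[ size word ][ payload given to caller ]
 *	^m           ^ret = (void *)m + align
 *
 * __kmalloc_node() stores the requested size in the word before the
 * payload; kfree() steps back 'align' bytes to recover it and then
 * releases size + align bytes via slob_free().
 */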

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(_RET_IP_, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}

		trace_kmalloc_node(_RET_IP_, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
EXPORT_SYMBOL(__kmalloc_node);

void kfree(const void *block)
{
	struct slob_page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
		return sp->page.private;
}
EXPORT_SYMBOL(ksize);

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
	return c;
}
EXPORT_SYMBOL(kmem_cache_create);
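
/*
 * Illustrative usage sketch of this emulated SLAB interface (hypothetical
 * 'foo' cache and object, for example purposes only):
 *
 *	struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);
 *	obj = kmem_cache_alloc_node(foo_cache, GFP_KERNEL, -1);
 *	...
 *	kmem_cache_free(foo_cache, obj);
 *	kmem_cache_destroy(foo_cache);
 *
 * Passing a node id of -1 lets slob_alloc()/slob_new_pages() fall back to
 * the current node, as described in the header comment.
 */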

void kmem_cache_destroy(struct kmem_cache *c)
{
	kmemleak_free(c);
	if (c->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}

void __init kmem_cache_init_late(void)
{
	/* Nothing to do */
}