mm/slob.c

// SPDX-License-Identifier: GPL-2.0
/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header with the kmalloc size.
 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked.
 * These objects are detected in kfree() because PageSlab()
 * is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, __alloc_pages_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */
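
/*
 * A small usage sketch of the layering described above (hypothetical
 * sizes, assuming a 4K PAGE_SIZE): kmalloc(100) goes through the slob
 * heap and carries a size header in front of the object, while
 * kmalloc(8192) bypasses the heap entirely and comes straight from
 * alloc_pages() as a compound page — which is why kfree() can tell the
 * two cases apart with PageSlab().
 */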

#include <linux/kernel.h>
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>

#include "slab.h"

/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;
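
/*
 * For illustration, a sketch assuming 4K pages (so slobidx_t is s16 and
 * SLOB_UNIT is 2 bytes): a free block of 3 units whose next free block
 * lives at unit offset 40 within the page is encoded as
 *
 *	s[0].units = 3;		(size in units)
 *	s[1].units = 40;	(offset of the next free block)
 *
 * while a 1-unit free block with the same successor is encoded in a
 * single unit as
 *
 *	s[0].units = -40;	(negated offset of the next free block)
 */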

/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->slab_list, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->slab_list);
	__ClearPageSlobFree(sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) DIV_ROUND_UP(size, SLOB_UNIT)
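
/*
 * A worked example of the unit arithmetic, assuming 2-byte units:
 * SLOB_UNITS(100) == 50 and SLOB_UNITS(101) == 51, i.e. requests are
 * rounded up to a whole number of SLOB_UNITs.
 */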

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_TYPESAFE_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};
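
/*
 * Sketch of the resulting layout: for a SLAB_TYPESAFE_BY_RCU cache,
 * __kmem_cache_create() below grows c->size by sizeof(struct slob_rcu),
 * so an object of original size S occupies [0, S) for the user and
 * [S, S + sizeof(struct slob_rcu)) for the deferred-free footer that
 * kmem_cache_free() fills in before calling call_rcu().
 */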

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
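
/*
 * The end-of-list convention implied above: the final free block's next
 * offset points one full page past the base, so slob_next() lands on a
 * page-aligned address and slob_last() sees no offset bits set.  See the
 * set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)) call in
 * slob_alloc() below, which seeds a fresh page this way.
 */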

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	struct page *page;

#ifdef CONFIG_NUMA
	if (node != NUMA_NO_NODE)
		page = __alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;
	mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
			    1 << order);
	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	struct page *sp = virt_to_page(b);

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;

	mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
			    -(1 << order));
	__free_pages(sp, order);
}

/*
 * slob_page_alloc() - Allocate a slob block within a given slob_page sp.
 * @sp: Page to look in.
 * @size: Size of the allocation.
 * @align: Allocation alignment.
 * @align_offset: Offset in the allocated block that will be aligned.
 * @page_removed_from_list: Return parameter.
 *
 * Tries to find a chunk of memory at least @size bytes big within @page.
 *
 * Return: Pointer to memory if allocated, %NULL otherwise.  If the
 *         allocation fills up @page then the page is removed from the
 *         freelist, in this case @page_removed_from_list will be set to
 *         true (set to false otherwise).
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align,
			     int align_offset, bool *page_removed_from_list)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	*page_removed_from_list = false;
	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		/*
		 * 'aligned' will hold the address of the slob block so that the
		 * address 'aligned'+'align_offset' is aligned according to the
		 * 'align' parameter. This is for kmalloc() which prepends the
		 * allocated block with its size, so that the block itself is
		 * aligned when needed.
		 */
		if (align) {
			aligned = (slob_t *)
				(ALIGN((unsigned long)cur + align_offset, align)
				 - align_offset);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units) {
				clear_slob_page_free(sp);
				*page_removed_from_list = true;
			}
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
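
/*
 * Alignment walkthrough, as a sketch with assumed numbers (2-byte units,
 * align = 8, align_offset = 8): if the candidate block cur sits at byte
 * offset 6 in its page, then ALIGN(cur + 8, 8) - 8 puts 'aligned' at byte
 * offset 8, so delta is 1 unit.  That one unit is split off as a small
 * free block, the allocation proceeds at offset 8, and the payload at
 * aligned + align_offset (byte offset 16) is 8-byte aligned — exactly
 * what __do_kmalloc_node() needs for its size header.
 */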

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
			int align_offset)
{
	struct page *sp;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;
	bool _unused;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, slab_list) {
		bool page_removed_from_list = false;
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list);
		if (!b)
			continue;

		/*
		 * If slob_page_alloc() removed sp from the list then we
		 * cannot call list functions on sp.  If so allocation
		 * did not fragment the page anyway so optimisation is
		 * unnecessary.
		 */
		if (!page_removed_from_list) {
			/*
			 * Improve fragment distribution and reduce our average
			 * search time by starting our next search here. (see
			 * Knuth vol 1, sec 2.5, pg 449)
			 */
			if (!list_is_first(&sp->slab_list, slob_list))
				list_rotate_to_front(&sp->slab_list, slob_list);
		}
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = virt_to_page(b);
		__SetPageSlab(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->slab_list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align, align_offset, &_unused);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely(gfp & __GFP_ZERO))
		memset(b, 0, size);
	return b;
}
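
/*
 * Size-class selection above follows the scheme described at the top of
 * the file: e.g. a 100-byte request scans free_slob_small, a 512-byte
 * request scans free_slob_medium, and a 2000-byte request scans
 * free_slob_large, before falling back to a fresh page.
 */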

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		__ClearPageSlab(sp);
		page_mapcount_reset(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
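
/*
 * Coalescing example for the reinsertion path above, with assumed unit
 * offsets: if the free list holds blocks at [10, 20) and [40, 50) and a
 * block at [20, 40) is freed, the first test merges it with [40, 50)
 * (b + units == next) and the second merges the result into [10, 20)
 * (prev + slob_units(prev) == b), leaving one free block [10, 50).
 */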

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */

static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
{
	unsigned int *m;
	int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	fs_reclaim_acquire(gfp);
	fs_reclaim_release(gfp);

	if (size < PAGE_SIZE - minalign) {
		int align = minalign;

		/*
		 * For power of two sizes, guarantee natural alignment for
		 * kmalloc()'d objects.
		 */
		if (is_power_of_2(size))
			align = max(minalign, (int) size);
		if (!size)
			return ZERO_SIZE_PTR;
		m = slob_alloc(size + minalign, gfp, align, node, minalign);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + minalign;

		trace_kmalloc_node(caller, ret,
				   size, size + minalign, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);

		trace_kmalloc_node(caller, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
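
/*
 * Worked example of the small-object path, assuming an 8-byte minalign:
 * kmalloc(100) asks slob_alloc() for 108 bytes, records 100 in the
 * 4-byte header at m, and hands back m + 8; kfree() later reads the
 * size back from ret - 8.  For power-of-two sizes the natural-alignment
 * rule kicks in, so e.g. kmalloc(512) passes align = 512 and the
 * returned pointer is 512-byte aligned.
 */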

void *__kmalloc(size_t size, gfp_t gfp)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node_track_caller(size_t size, gfp_t gfp,
				  int node, unsigned long caller)
{
	return __do_kmalloc_node(size, gfp, node, caller);
}
#endif

void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = virt_to_page(block);
	if (PageSlab(sp)) {
		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else {
		unsigned int order = compound_order(sp);

		mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
				    -(1 << order));
		__free_pages(sp, order);
	}
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t __ksize(const void *block)
{
	struct page *sp;
	int align;
	unsigned int *m;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = virt_to_page(block);
	if (unlikely(!PageSlab(sp)))
		return page_size(sp);

	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	m = (unsigned int *)(block - align);
	return SLOB_UNITS(*m) * SLOB_UNIT;
}
EXPORT_SYMBOL(__ksize);
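
/*
 * Example of the rounding above, assuming 2-byte units: a kmalloc(101)
 * object reports __ksize() == 102, since the stored size of 101 bytes
 * is rounded up to SLOB_UNITS(101) * SLOB_UNIT == 51 * 2.
 */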

int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
{
	if (flags & SLAB_TYPESAFE_BY_RCU) {
		/* leave room for rcu footer at the end of object */
		c->size += sizeof(struct slob_rcu);
	}
	c->flags = flags;
	return 0;
}

static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node, 0);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (b && c->ctor) {
		WARN_ON_ONCE(flags & __GFP_ZERO);
		c->ctor(b);
	}

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}

void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
	return slob_alloc_node(cachep, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	return __do_kmalloc_node(size, gfp, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node)
{
	return slob_alloc_node(cachep, gfp, node);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);
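
/*
 * The pointer arithmetic in kmem_rcu_free() simply inverts the footer
 * placement done in kmem_cache_free(): the footer lives at
 * b + (size - sizeof(struct slob_rcu)), so stepping back by
 * size - sizeof(struct slob_rcu) from the rcu_head recovers b.
 */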

void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
	__kmem_cache_free_bulk(s, size, p);
}
EXPORT_SYMBOL(kmem_cache_free_bulk);

int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
			  void **p)
{
	return __kmem_cache_alloc_bulk(s, flags, size, p);
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);

int __kmem_cache_shutdown(struct kmem_cache *c)
{
	/* No way to check for remaining objects */
	return 0;
}

void __kmem_cache_release(struct kmem_cache *c)
{
}

int __kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}

struct kmem_cache kmem_cache_boot = {
	.name = "kmem_cache",
	.size = sizeof(struct kmem_cache),
	.flags = SLAB_PANIC,
	.align = ARCH_KMALLOC_MINALIGN,
};

void __init kmem_cache_init(void)
{
	kmem_cache = &kmem_cache_boot;
	slab_state = UP;
}

void __init kmem_cache_init_late(void)
{
	slab_state = FULL;
}