mm/slub.c

/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>

#include <trace/events/kmem.h>

/*
 * Lock order:
 *   1. slub_lock (Global Semaphore)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slub_lock
 *
 *   The role of the slub_lock is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects the second
 *   double word in the page struct. Meaning
 *      A. page->freelist       -> List of objects free in a page
 *      B. page->counters       -> Counters of objects
 *      C. page->frozen         -> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the page. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve the objects from the page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists, nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive           The slab is frozen and exempt from list processing.
 *                      This means that the slab is dedicated to a purpose
 *                      such as satisfying allocations for a specific
 *                      processor. Objects may be freed in the slab while
 *                      it is frozen but slab_free will then skip the usual
 *                      list operations. It is up to the processor holding
 *                      the slab to integrate the slab into the slab lists
 *                      when the slab is no longer needed.
 *
 *                      One use of this flag is to mark slabs that are
 *                      used for allocations. Then such a slab becomes a cpu
 *                      slab. The cpu slab may be equipped with an additional
 *                      freelist that allows lockless access to
 *                      free objects in addition to the regular freelist
 *                      that requires the slab lock.
 *
 * PageError            Slab requires special handling due to debug
 *                      options set. This moves slab handling out of
 *                      the fast path and disables lockless freelists.
 */
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                SLAB_TRACE | SLAB_DEBUG_FREE)

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
        return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
        return 0;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in them.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
                                SLAB_POISON | SLAB_STORE_USER)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
                SLAB_FAILSLAB)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
                SLAB_CACHE_DMA | SLAB_NOTRACK)

#define OO_SHIFT        16
#define OO_MASK         ((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE       32767 /* since page.objects is u15 */

/* Internal SLUB flags */
#define __OBJECT_POISON         0x80000000UL /* Poison object */
#define __CMPXCHG_DOUBLE        0x40000000UL /* Use cmpxchg_double */

static int kmem_size = sizeof(struct kmem_cache);

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

static enum {
        DOWN,           /* No slab functionality available */
        PARTIAL,        /* Kmem_cache_node works */
        UP,             /* Everything works but does not show up in sysfs */
        SYSFS           /* Sysfs up */
} slab_state = DOWN;

/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
        unsigned long addr;     /* Called from address */
#ifdef CONFIG_STACKTRACE
        unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
#endif
        int cpu;                /* Was running on cpu */
        int pid;                /* Pid context */
        unsigned long when;     /* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);

#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
                                                        { return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
        kfree(s->name);
        kfree(s);
}

#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
        __this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
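
/*
 * Example (illustrative, not part of the original file): with
 * CONFIG_SLUB_STATS enabled these per cpu event counters surface
 * through sysfs, so something like
 *
 *      cat /sys/kernel/slab/kmalloc-64/cmpxchg_double_fail
 *
 * reports how often the cmpxchg_double update path had to be retried,
 * one count per cpu.
 */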

/********************************************************************
 *                      Core slab cache functions
 *******************************************************************/

int slab_is_available(void)
{
        return slab_state >= UP;
}

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
                                struct page *page, const void *object)
{
        void *base;

        if (!object)
                return 1;

        base = page_address(page);
        if (object < base || object >= base + page->objects * s->size ||
                (object - base) % s->size) {
                return 0;
        }

        return 1;
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
        return *(void **)(object + s->offset);
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
        void *p;

#ifdef CONFIG_DEBUG_PAGEALLOC
        probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
#else
        p = get_freepointer(s, object);
#endif
        return p;
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
        *(void **)(object + s->offset) = fp;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
        for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
                        __p += (__s)->size)

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
        return (p - addr) / s->size;
}
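
/*
 * Illustrative sketch (not part of the original file; the example_*
 * helper name is made up): how for_each_object() and slab_index()
 * cooperate. The walk visits every object slot of a slab in address
 * order and maps each slot back to its index. Assumes the page is
 * stable, e.g. because the node's list_lock is held.
 */
static void __maybe_unused example_walk_objects(struct kmem_cache *s,
                                                struct page *page)
{
        void *addr = page_address(page);
        void *p;

        for_each_object(p, s, addr, page->objects)
                printk(KERN_INFO "slot %d at 0x%p\n",
                        slab_index(p, s, addr), p);
}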

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->objsize;

#endif
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
}

static inline int order_objects(int order, unsigned long size, int reserved)
{
        return ((PAGE_SIZE << order) - reserved) / size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
                unsigned long size, int reserved)
{
        struct kmem_cache_order_objects x = {
                (order << OO_SHIFT) + order_objects(order, size, reserved)
        };

        return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
        return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
        return x.x & OO_MASK;
}
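
/*
 * Worked example (illustrative, not part of the original file; the
 * example_* helper name is made up): with order = 1, size = 256 and
 * reserved = 0 on a machine with 4K pages, order_objects() yields
 * (2 * PAGE_SIZE) / 256 = 32, so oo_make() stores
 * (1 << OO_SHIFT) + 32 and the two accessors unpack that word again:
 */
static inline void __maybe_unused example_oo_roundtrip(void)
{
        struct kmem_cache_order_objects oo = oo_make(1, 256, 0);

        BUG_ON(oo_order(oo) != 1);      /* top bits carry the page order */
        BUG_ON(oo_objects(oo) != 32);   /* low 16 bits carry the count */
}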

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
        bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
        __bit_spin_unlock(PG_locked, &page->flags);
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_old, unsigned long counters_old,
                void *freelist_new, unsigned long counters_new,
                const char *n)
{
        VM_BUG_ON(!irqs_disabled());
#ifdef CONFIG_CMPXCHG_DOUBLE
        if (s->flags & __CMPXCHG_DOUBLE) {
                if (cmpxchg_double(&page->freelist,
                        freelist_old, counters_old,
                        freelist_new, counters_new))
                        return 1;
        } else
#endif
        {
                slab_lock(page);
                if (page->freelist == freelist_old && page->counters == counters_old) {
                        page->freelist = freelist_new;
                        page->counters = counters_new;
                        slab_unlock(page);
                        return 1;
                }
                slab_unlock(page);
        }

        cpu_relax();
        stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
        printk(KERN_INFO "%s %s: cmpxchg double redo\n", n, s->name);
#endif

        return 0;
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_old, unsigned long counters_old,
                void *freelist_new, unsigned long counters_new,
                const char *n)
{
#ifdef CONFIG_CMPXCHG_DOUBLE
        if (s->flags & __CMPXCHG_DOUBLE) {
                if (cmpxchg_double(&page->freelist,
                        freelist_old, counters_old,
                        freelist_new, counters_new))
                        return 1;
        } else
#endif
        {
                unsigned long flags;

                local_irq_save(flags);
                slab_lock(page);
                if (page->freelist == freelist_old && page->counters == counters_old) {
                        page->freelist = freelist_new;
                        page->counters = counters_new;
                        slab_unlock(page);
                        local_irq_restore(flags);
                        return 1;
                }
                slab_unlock(page);
                local_irq_restore(flags);
        }

        cpu_relax();
        stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
        printk(KERN_INFO "%s %s: cmpxchg double redo\n", n, s->name);
#endif

        return 0;
}
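
/*
 * Illustrative sketch (not part of the original file; the example_*
 * helper name is made up): the usual calling pattern for
 * cmpxchg_double_slab(). A caller samples the freelist/counters pair,
 * derives new values and retries until no other processor has changed
 * the page in between. Here the "new" values simply equal the old
 * ones, so the loop only demonstrates the shape of real callers
 * elsewhere in SLUB.
 */
static inline void __maybe_unused example_cmpxchg_double_usage(
                struct kmem_cache *s, struct page *page)
{
        void *freelist;
        unsigned long counters;

        do {
                freelist = page->freelist;
                counters = page->counters;
                /* real callers compute new freelist/counters here */
        } while (!cmpxchg_double_slab(s, page,
                        freelist, counters,
                        freelist, counters,
                        "example_cmpxchg_double_usage"));
}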

#ifdef CONFIG_SLUB_DEBUG
/*
 * Determine a map of objects in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
        void *p;
        void *addr = page_address(page);

        for (p = page->freelist; p; p = get_freepointer(s, p))
                set_bit(slab_index(p, s, addr), map);
}
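
/*
 * Illustrative sketch (not part of the original file; the example_*
 * helper name is made up): a typical get_map() caller. The bitmap must
 * be cleared first; afterwards a set bit marks a free object, so clear
 * bits identify the allocated ones.
 */
static void __maybe_unused example_get_map_usage(struct kmem_cache *s,
                struct page *page, unsigned long *map)
{
        void *addr = page_address(page);
        void *p;

        bitmap_zero(map, page->objects);
        get_map(s, page, map);
        for_each_object(p, s, addr, page->objects)
                if (!test_bit(slab_index(p, s, addr), map))
                        printk(KERN_INFO "allocated object at 0x%p\n", p);
}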

/*
 * Debug settings:
 */
#ifdef CONFIG_SLUB_DEBUG_ON
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;
static int disable_higher_order_debug;

/*
 * Object debugging
 */
static void print_section(char *text, u8 *addr, unsigned int length)
{
        print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
                        length, 1);
}

static struct track *get_track(struct kmem_cache *s, void *object,
        enum track_item alloc)
{
        struct track *p;

        if (s->offset)
                p = object + s->offset + sizeof(void *);
        else
                p = object + s->inuse;

        return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
                        enum track_item alloc, unsigned long addr)
{
        struct track *p = get_track(s, object, alloc);

        if (addr) {
#ifdef CONFIG_STACKTRACE
                struct stack_trace trace;
                int i;

                trace.nr_entries = 0;
                trace.max_entries = TRACK_ADDRS_COUNT;
                trace.entries = p->addrs;
                trace.skip = 3;
                save_stack_trace(&trace);

                /* See rant in lockdep.c */
                if (trace.nr_entries != 0 &&
                    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
                        trace.nr_entries--;

                for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
                        p->addrs[i] = 0;
#endif
                p->addr = addr;
                p->cpu = smp_processor_id();
                p->pid = current->pid;
                p->when = jiffies;
        } else
                memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        set_track(s, object, TRACK_FREE, 0UL);
        set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t)
{
        if (!t->addr)
                return;

        printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
                s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
        {
                int i;
                for (i = 0; i < TRACK_ADDRS_COUNT; i++)
                        if (t->addrs[i])
                                printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
                        else
                                break;
        }
#endif
}

static void print_tracking(struct kmem_cache *s, void *object)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        print_track("Allocated", get_track(s, object, TRACK_ALLOC));
        print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
        printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
                page, page->objects, page->inuse, page->freelist, page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_ERR "========================================"
                        "=====================================\n");
        printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
        printk(KERN_ERR "----------------------------------------"
                        "-------------------------------------\n");
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
        unsigned int off;       /* Offset of last byte */
        u8 *addr = page_address(page);

        print_tracking(s, p);

        print_page_info(page);

        printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
                        p, p - addr, get_freepointer(s, p));

        if (p > addr + 16)
                print_section("Bytes b4 ", p - 16, 16);

        print_section("Object ", p, min_t(unsigned long, s->objsize,
                                PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
                print_section("Redzone ", p + s->objsize,
                        s->inuse - s->objsize);

        if (s->offset)
                off = s->offset + sizeof(void *);
        else
                off = s->inuse;

        if (s->flags & SLAB_STORE_USER)
                off += 2 * sizeof(struct track);

        if (off != s->size)
                /* Beginning of the filler is the free pointer */
                print_section("Padding ", p + off, s->size - off);

        dump_stack();
}

static void object_err(struct kmem_cache *s, struct page *page,
                        u8 *object, char *reason)
{
        slab_bug(s, "%s", reason);
        print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        slab_bug(s, "%s", buf);
        print_page_info(page);
        dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
        u8 *p = object;

        if (s->flags & __OBJECT_POISON) {
                memset(p, POISON_FREE, s->objsize - 1);
                p[s->objsize - 1] = POISON_END;
        }

        if (s->flags & SLAB_RED_ZONE)
                memset(p + s->objsize, val, s->inuse - s->objsize);
}
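
/*
 * Worked example (illustrative): for an 8 byte object in a cache with
 * __OBJECT_POISON set, init_object() on free writes
 *
 *      6b 6b 6b 6b 6b 6b 6b a5
 *
 * i.e. POISON_FREE in every byte except the last, which gets
 * POISON_END. check_object() later verifies exactly this pattern to
 * catch writes to freed objects.
 */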

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
                                                void *from, void *to)
{
        slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
        memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
                        u8 *object, char *what,
                        u8 *start, unsigned int value, unsigned int bytes)
{
        u8 *fault;
        u8 *end;

        fault = memchr_inv(start, value, bytes);
        if (!fault)
                return 1;

        end = start + bytes;
        while (end > fault && end[-1] == value)
                end--;

        slab_bug(s, "%s overwritten", what);
        printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
                                        fault, end - 1, fault[0], value);
        print_trailer(s, page, object);

        restore_bytes(s, what, value, fault, end);
        return 0;
}

/*
 * Object layout:
 *
 * object address
 *      Bytes of the object to be managed.
 *      If the freepointer may overlay the object then the free
 *      pointer is the first word of the object.
 *
 *      Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *      0xa5 (POISON_END)
 *
 * object + s->objsize
 *      Padding to reach word boundary. This is also used for Redzoning.
 *      Padding is extended by another word if Redzoning is enabled and
 *      objsize == inuse.
 *
 *      We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 *      0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *      Meta data starts here.
 *
 *      A. Free pointer (if we cannot overwrite object on free)
 *      B. Tracking data for SLAB_STORE_USER
 *      C. Padding to reach required alignment boundary or at minimum
 *              one word if debugging is on to be able to detect writes
 *              before the word boundary.
 *
 *      Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *      Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the objsize and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
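
/*
 * Worked example (illustrative): a 24 byte object on a 64 bit machine
 * in a cache with SLAB_RED_ZONE and SLAB_STORE_USER. The object ends
 * word aligned, so red zoning adds one extra word: bytes 0-23 hold the
 * object, bytes 24-31 the red zone, and s->inuse is 32. Nothing here
 * prevents the free pointer from overlaying the object, so s->offset
 * stays 0 and the first word of a free object doubles as its freelist
 * link. The two struct track records for alloc and free tracking start
 * at s->inuse, and POISON_INUSE padding fills anything left up to
 * s->size.
 */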

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
        unsigned long off = s->inuse;   /* The end of info */

        if (s->offset)
                /* Freepointer is placed after the object. */
                off += sizeof(void *);

        if (s->flags & SLAB_STORE_USER)
                /* We also have user information there */
                off += 2 * sizeof(struct track);

        if (s->size == off)
                return 1;

        return check_bytes_and_report(s, page, p, "Object padding",
                                p + off, POISON_INUSE, s->size - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
        u8 *start;
        u8 *fault;
        u8 *end;
        int length;
        int remainder;

        if (!(s->flags & SLAB_POISON))
                return 1;

        start = page_address(page);
        length = (PAGE_SIZE << compound_order(page)) - s->reserved;
        end = start + length;
        remainder = length % s->size;
        if (!remainder)
                return 1;

        fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
        if (!fault)
                return 1;
        while (end > fault && end[-1] == POISON_INUSE)
                end--;

        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
        print_section("Padding ", end - remainder, remainder);

        restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
        return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
                                        void *object, u8 val)
{
        u8 *p = object;
        u8 *endobject = object + s->objsize;

        if (s->flags & SLAB_RED_ZONE) {
                if (!check_bytes_and_report(s, page, object, "Redzone",
                        endobject, val, s->inuse - s->objsize))
                        return 0;
        } else {
                if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
                        check_bytes_and_report(s, page, p, "Alignment padding",
                                endobject, POISON_INUSE, s->inuse - s->objsize);
                }
        }

        if (s->flags & SLAB_POISON) {
                if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
                        (!check_bytes_and_report(s, page, p, "Poison", p,
                                        POISON_FREE, s->objsize - 1) ||
                         !check_bytes_and_report(s, page, p, "Poison",
                                p + s->objsize - 1, POISON_END, 1)))
                        return 0;
                /*
                 * check_pad_bytes cleans up on its own.
                 */
                check_pad_bytes(s, page, p);
        }

        if (!s->offset && val == SLUB_RED_ACTIVE)
                /*
                 * Object and freepointer overlap. Cannot check
                 * freepointer while object is allocated.
                 */
                return 1;

        /* Check free pointer validity */
        if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
                object_err(s, page, p, "Freepointer corrupt");
                /*
                 * No choice but to zap it and thus lose the remainder
                 * of the free objects in this slab. May cause
                 * another error because the object count is now wrong.
                 */
                set_freepointer(s, p, NULL);
                return 0;
        }
        return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
        int maxobj;

        VM_BUG_ON(!irqs_disabled());

        if (!PageSlab(page)) {
                slab_err(s, page, "Not a valid slab page");
                return 0;
        }

        maxobj = order_objects(compound_order(page), s->size, s->reserved);
        if (page->objects > maxobj) {
                slab_err(s, page, "objects %u > max %u",
                        page->objects, maxobj);
                return 0;
        }
        if (page->inuse > page->objects) {
                slab_err(s, page, "inuse %u > max %u",
                        page->inuse, page->objects);
                return 0;
        }
        /* Slab_pad_check fixes things up after itself */
        slab_pad_check(s, page);
        return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
        int nr = 0;
        void *fp;
        void *object = NULL;
        unsigned long max_objects;

        fp = page->freelist;
        while (fp && nr <= page->objects) {
                if (fp == search)
                        return 1;
                if (!check_valid_pointer(s, page, fp)) {
                        if (object) {
                                object_err(s, page, object,
                                        "Freechain corrupt");
                                set_freepointer(s, object, NULL);
                                break;
                        } else {
                                slab_err(s, page, "Freepointer corrupt");
                                page->freelist = NULL;
                                page->inuse = page->objects;
                                slab_fix(s, "Freelist cleared");
                                return 0;
                        }
                        break;
                }
                object = fp;
                fp = get_freepointer(s, object);
                nr++;
        }

        max_objects = order_objects(compound_order(page), s->size, s->reserved);
        if (max_objects > MAX_OBJS_PER_PAGE)
                max_objects = MAX_OBJS_PER_PAGE;

        if (page->objects != max_objects) {
                slab_err(s, page, "Wrong number of objects. Found %d but "
                        "should be %d", page->objects, max_objects);
                page->objects = max_objects;
                slab_fix(s, "Number of objects adjusted.");
        }
        if (page->inuse != page->objects - nr) {
                slab_err(s, page, "Wrong object count. Counter is %d but "
                        "counted were %d", page->inuse, page->objects - nr);
                page->inuse = page->objects - nr;
                slab_fix(s, "Object count adjusted.");
        }
        return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
                                                                int alloc)
{
        if (s->flags & SLAB_TRACE) {
                printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
                        s->name,
                        alloc ? "alloc" : "free",
                        object, page->inuse,
                        page->freelist);

                if (!alloc)
                        print_section("Object ", (void *)object, s->objsize);

                dump_stack();
        }
}

/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks all should produce no code at all.
 */
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
        flags &= gfp_allowed_mask;
        lockdep_trace_alloc(flags);
        might_sleep_if(flags & __GFP_WAIT);

        return should_failslab(s->objsize, flags, s->flags);
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
{
        flags &= gfp_allowed_mask;
        kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
        kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
}

static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
        kmemleak_free_recursive(x, s->flags);

        /*
         * Trouble is that we may no longer disable interrupts in the fast path
         * So in order to make the debug calls that expect irqs to be
         * disabled we need to disable interrupts temporarily.
         */
#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
        {
                unsigned long flags;

                local_irq_save(flags);
                kmemcheck_slab_free(s, x, s->objsize);
                debug_check_no_locks_freed(x, s->objsize);
                local_irq_restore(flags);
        }
#endif
        if (!(s->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(x, s->objsize);
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 *
 * list_lock must be held.
 */
static void add_full(struct kmem_cache *s,
        struct kmem_cache_node *n, struct page *page)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        list_add(&page->lru, &n->full);
}

/*
 * list_lock must be held.
 */
static void remove_full(struct kmem_cache *s, struct page *page)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        list_del(&page->lru);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
        struct kmem_cache_node *n = get_node(s, node);

        return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
        return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
        struct kmem_cache_node *n = get_node(s, node);

        /*
         * May be called early in order to allocate a slab for the
         * kmem_cache_node structure. Solve the chicken-egg
         * dilemma by deferring the increment of the count during
         * bootstrap (see early_kmem_cache_node_alloc).
         */
        if (n) {
                atomic_long_inc(&n->nr_slabs);
                atomic_long_add(objects, &n->total_objects);
        }
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
        struct kmem_cache_node *n = get_node(s, node);

        atomic_long_dec(&n->nr_slabs);
        atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
                                                                void *object)
{
        if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
                return;

        init_object(s, object, SLUB_RED_INACTIVE);
        init_tracking(s, object);
}

static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
                                        void *object, unsigned long addr)
{
        if (!check_slab(s, page))
                goto bad;

        if (!check_valid_pointer(s, page, object)) {
                object_err(s, page, object, "Freelist Pointer check fails");
                goto bad;
        }

        if (!check_object(s, page, object, SLUB_RED_INACTIVE))
                goto bad;

        /* Success. Perform special debug activities for allocs */
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_ALLOC, addr);
        trace(s, page, object, 1);
        init_object(s, object, SLUB_RED_ACTIVE);
        return 1;

bad:
        if (PageSlab(page)) {
                /*
                 * If this is a slab page then lets do the best we can
                 * to avoid issues in the future. Marking all objects
                 * as used avoids touching the remaining objects.
                 */
                slab_fix(s, "Marking all objects used");
                page->inuse = page->objects;
                page->freelist = NULL;
        }
        return 0;
}

static noinline int free_debug_processing(struct kmem_cache *s,
                 struct page *page, void *object, unsigned long addr)
{
        unsigned long flags;
        int rc = 0;

        local_irq_save(flags);
        slab_lock(page);

        if (!check_slab(s, page))
                goto fail;

        if (!check_valid_pointer(s, page, object)) {
                slab_err(s, page, "Invalid object pointer 0x%p", object);
                goto fail;
        }

        if (on_freelist(s, page, object)) {
                object_err(s, page, object, "Object already free");
                goto fail;
        }

        if (!check_object(s, page, object, SLUB_RED_ACTIVE))
                goto out;

        if (unlikely(s != page->slab)) {
                if (!PageSlab(page)) {
                        slab_err(s, page, "Attempt to free object(0x%p) "
                                "outside of slab", object);
                } else if (!page->slab) {
                        printk(KERN_ERR
                                "SLUB <none>: no slab for object 0x%p.\n",
                                                object);
                        dump_stack();
                } else
                        object_err(s, page, object,
                                        "page slab pointer corrupt.");
                goto fail;
        }

        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_FREE, addr);
        trace(s, page, object, 0);
        init_object(s, object, SLUB_RED_INACTIVE);
        rc = 1;
out:
        slab_unlock(page);
        local_irq_restore(flags);
        return rc;

fail:
        slab_fix(s, "Object at 0x%p not freed", object);
        goto out;
}

static int __init setup_slub_debug(char *str)
{
        slub_debug = DEBUG_DEFAULT_FLAGS;
        if (*str++ != '=' || !*str)
                /*
                 * No options specified. Switch on full debugging.
                 */
                goto out;

        if (*str == ',')
                /*
                 * No options but restriction on slabs. This means full
                 * debugging for slabs matching a pattern.
                 */
                goto check_slabs;

        if (tolower(*str) == 'o') {
                /*
                 * Avoid enabling debugging on caches if their minimum order
                 * would increase as a result.
                 */
                disable_higher_order_debug = 1;
                goto out;
        }

        slub_debug = 0;
        if (*str == '-')
                /*
                 * Switch off all debugging measures.
                 */
                goto out;

        /*
         * Determine which debug features should be switched on
         */
        for (; *str && *str != ','; str++) {
                switch (tolower(*str)) {
                case 'f':
                        slub_debug |= SLAB_DEBUG_FREE;
                        break;
                case 'z':
                        slub_debug |= SLAB_RED_ZONE;
                        break;
                case 'p':
                        slub_debug |= SLAB_POISON;
                        break;
                case 'u':
                        slub_debug |= SLAB_STORE_USER;
                        break;
                case 't':
                        slub_debug |= SLAB_TRACE;
                        break;
                case 'a':
                        slub_debug |= SLAB_FAILSLAB;
                        break;
                default:
                        printk(KERN_ERR "slub_debug option '%c' "
                                "unknown. skipped\n", *str);
                }
        }

check_slabs:
        if (*str == ',')
                slub_debug_slabs = str + 1;
out:
        return 1;
}

__setup("slub_debug", setup_slub_debug);
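
/*
 * Example usage (illustrative, matching the option letters parsed
 * above):
 *
 *      slub_debug              full debugging on all caches
 *      slub_debug=FZ           sanity checks and red zoning on all caches
 *      slub_debug=,dentry      full debugging, but only for the dentry cache
 *      slub_debug=U,dentry     user tracking, but only for the dentry cache
 *      slub_debug=O            full debugging, except on caches whose
 *                              minimum order would increase
 *      slub_debug=-            all debugging switched off
 */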

static unsigned long kmem_cache_flags(unsigned long objsize,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
{
        /*
         * Enable debugging if selected on the kernel commandline.
         */
        if (slub_debug && (!slub_debug_slabs ||
                !strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs))))
                flags |= slub_debug;

        return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
                        struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, unsigned long addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                        { return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
                        void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
                                        struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
{
        return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
                                                        { return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
                                                        { return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
                                                        int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
                                                        int objects) {}

static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
                                                        { return 0; }

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                gfp_t flags, void *object) {}

static inline void slab_free_hook(struct kmem_cache *s, void *x) {}

#endif /* CONFIG_SLUB_DEBUG */
205ab99dd slub: Update stat... |
1209 |
|
81819f0fc SLUB core |
1210 1211 1212 |
/* * Slab allocation and freeing */ |
65c3376aa slub: Fallback to... |
1213 1214 1215 1216 |
static inline struct page *alloc_slab_page(gfp_t flags, int node, struct kmem_cache_order_objects oo) { int order = oo_order(oo); |
b1eeab676 kmemcheck: add ho... |
1217 |
flags |= __GFP_NOTRACK; |
2154a3363 slub: Use a const... |
1218 |
if (node == NUMA_NO_NODE) |
65c3376aa slub: Fallback to... |
1219 1220 |
return alloc_pages(flags, order); else |
6b65aaf30 slub: Use alloc_p... |
1221 |
return alloc_pages_exact_node(node, flags, order); |
65c3376aa slub: Fallback to... |
1222 |
} |
81819f0fc SLUB core |
1223 1224 |
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) { |
064287807 SLUB: Fix coding ... |
1225 |
struct page *page; |
834f3d119 slub: Add kmem_ca... |
1226 |
struct kmem_cache_order_objects oo = s->oo; |
ba52270d1 SLUB: Don't pass ... |
1227 |
gfp_t alloc_gfp; |
81819f0fc SLUB core |
1228 |
|
7e0528dad slub: Push irq di... |
1229 1230 1231 1232 |
flags &= gfp_allowed_mask; if (flags & __GFP_WAIT) local_irq_enable(); |
b7a49f0d4 slub: Determine g... |
1233 |
flags |= s->allocflags; |
e12ba74d8 Group short-lived... |
1234 |
|
ba52270d1 SLUB: Don't pass ... |
1235 1236 1237 1238 1239 1240 1241 |
/* * Let the initial higher-order allocation fail under memory pressure * so we fall-back to the minimum order allocation. */ alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL; page = alloc_slab_page(alloc_gfp, node, oo); |
65c3376aa slub: Fallback to... |
1242 1243 1244 1245 1246 1247 1248 |
if (unlikely(!page)) { oo = s->min; /* * Allocation may have failed due to fragmentation. * Try a lower order alloc if possible */ page = alloc_slab_page(flags, node, oo); |
81819f0fc SLUB core |
1249 |
|
7e0528dad slub: Push irq di... |
1250 1251 |
if (page) stat(s, ORDER_FALLBACK); |
65c3376aa slub: Fallback to... |
1252 |
} |
5a896d9e7 slub: add hooks f... |
1253 |
|
7e0528dad slub: Push irq di... |
1254 1255 1256 1257 1258 |
if (flags & __GFP_WAIT) local_irq_disable(); if (!page) return NULL; |
5a896d9e7 slub: add hooks f... |
1259 |
if (kmemcheck_enabled |
5086c389c SLUB: Fix some co... |
1260 |
&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) { |
b1eeab676 kmemcheck: add ho... |
1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 |
int pages = 1 << oo_order(oo); kmemcheck_alloc_shadow(page, oo_order(oo), flags, node); /* * Objects from caches that have a constructor don't get * cleared when they're allocated, so we need to do it here. */ if (s->ctor) kmemcheck_mark_uninitialized_pages(page, pages); else kmemcheck_mark_unallocated_pages(page, pages); |
5a896d9e7 slub: add hooks f... |
1273 |
} |
834f3d119 slub: Add kmem_ca... |
1274 |
page->objects = oo_objects(oo); |
81819f0fc SLUB core |
1275 1276 1277 |
mod_zone_page_state(page_zone(page), (s->flags & SLAB_RECLAIM_ACCOUNT) ? NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, |
65c3376aa slub: Fallback to... |
1278 |
1 << oo_order(oo)); |
81819f0fc SLUB core |
1279 1280 1281 1282 1283 1284 1285 |
return page; } static void setup_object(struct kmem_cache *s, struct page *page, void *object) { |
3ec097421 SLUB: Simplify de... |
1286 |
setup_object_debug(s, page, object); |
4f1049345 slab allocators: ... |
1287 |
if (unlikely(s->ctor)) |
51cc50685 SL*B: drop kmem c... |
1288 |
s->ctor(object); |
81819f0fc SLUB core |
1289 1290 1291 1292 1293 |
} static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) { struct page *page; |
81819f0fc SLUB core |
1294 |
void *start; |
81819f0fc SLUB core |
1295 1296 |
void *last; void *p; |
6cb062296 Categorize GFP flags |
1297 |
BUG_ON(flags & GFP_SLAB_BUG_MASK); |
81819f0fc SLUB core |
1298 |
|
6cb062296 Categorize GFP flags |
1299 1300 |
page = allocate_slab(s, flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node); |
81819f0fc SLUB core |
1301 1302 |
if (!page) goto out; |
205ab99dd slub: Update stat... |
1303 |
inc_slabs_node(s, page_to_nid(page), page->objects); |
81819f0fc SLUB core |
1304 1305 |
page->slab = s; page->flags |= 1 << PG_slab; |
81819f0fc SLUB core |
1306 1307 |
start = page_address(page); |
81819f0fc SLUB core |
1308 1309 |
if (unlikely(s->flags & SLAB_POISON)) |
834f3d119 slub: Add kmem_ca... |
1310 |
memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page)); |
81819f0fc SLUB core |
1311 1312 |
last = start; |
224a88be4 slub: for_each_ob... |
1313 |
for_each_object(p, s, start, page->objects) { |
81819f0fc SLUB core |
1314 1315 1316 1317 1318 |
setup_object(s, page, last); set_freepointer(s, last, p); last = p; } setup_object(s, page, last); |
a973e9dd1 Revert "unique en... |
1319 |
set_freepointer(s, last, NULL); |
81819f0fc SLUB core |
1320 1321 |
page->freelist = start; |
e6e82ea11 slub: Prepare inu... |
1322 |
page->inuse = page->objects; |
8cb0a5068 slub: Move page->... |
1323 |
page->frozen = 1; |
81819f0fc SLUB core |
1324 |
out: |
81819f0fc SLUB core |
1325 1326 1327 1328 1329 |
return page; } static void __free_slab(struct kmem_cache *s, struct page *page) { |
834f3d119 slub: Add kmem_ca... |
1330 1331 |
int order = compound_order(page); int pages = 1 << order; |
81819f0fc SLUB core |
1332 |
|
af537b0a6 slub: Use kmem_ca... |
1333 |
if (kmem_cache_debug(s)) { |
81819f0fc SLUB core |
1334 1335 1336 |
void *p; slab_pad_check(s, page); |
224a88be4 slub: for_each_ob... |
1337 1338 |
for_each_object(p, s, page_address(page), page->objects) |
f7cb19336 SLUB: Pass active... |
1339 |
check_object(s, page, p, SLUB_RED_INACTIVE); |
81819f0fc SLUB core |
1340 |
} |
b1eeab676 kmemcheck: add ho... |
1341 |
kmemcheck_free_shadow(page, compound_order(page)); |
5a896d9e7 slub: add hooks f... |
1342 |
|
81819f0fc SLUB core |
1343 1344 1345 |
mod_zone_page_state(page_zone(page), (s->flags & SLAB_RECLAIM_ACCOUNT) ? NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE, |
064287807 SLUB: Fix coding ... |
1346 |
-pages); |
81819f0fc SLUB core |
1347 |
|
49bd5221c slub: Move map/fl... |
1348 1349 |
__ClearPageSlab(page); reset_page_mapcount(page); |
1eb5ac646 mm: SLUB fix recl... |
1350 1351 |
if (current->reclaim_state) current->reclaim_state->reclaimed_slab += pages; |
834f3d119 slub: Add kmem_ca... |
1352 |
__free_pages(page, order); |
81819f0fc SLUB core |
1353 |
} |
da9a638c6 slub,rcu: don't a... |
1354 1355 |
#define need_reserve_slab_rcu \ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) |
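/*
 * Illustrative note, assuming a common configuration: page->lru is a
 * struct list_head (two words) and struct rcu_head is also two words,
 * so need_reserve_slab_rcu evaluates to 0 and the rcu_head is simply
 * overlaid on page->lru. Only if rcu_head were ever to grow (e.g. with
 * debugging fields) would SLUB instead reserve sizeof(struct rcu_head)
 * bytes at the very end of the slab, as free_slab() below does.
 */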
81819f0fc SLUB core |
1356 1357 1358 |
static void rcu_free_slab(struct rcu_head *h) { struct page *page; |
da9a638c6 slub,rcu: don't a... |
1359 1360 1361 1362 |
if (need_reserve_slab_rcu) page = virt_to_head_page(h); else page = container_of((struct list_head *)h, struct page, lru); |
81819f0fc SLUB core |
1363 1364 1365 1366 1367 1368 |
__free_slab(page->slab, page); } static void free_slab(struct kmem_cache *s, struct page *page) { if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) { |
da9a638c6 slub,rcu: don't a... |
1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 |
struct rcu_head *head; if (need_reserve_slab_rcu) { int order = compound_order(page); int offset = (PAGE_SIZE << order) - s->reserved; VM_BUG_ON(s->reserved != sizeof(*head)); head = page_address(page) + offset; } else { /* * RCU free overloads the RCU head over the LRU */ head = (void *)&page->lru; } |
81819f0fc SLUB core |
1383 1384 1385 1386 1387 1388 1389 1390 |
call_rcu(head, rcu_free_slab); } else __free_slab(s, page); } static void discard_slab(struct kmem_cache *s, struct page *page) { |
205ab99dd slub: Update stat... |
1391 |
dec_slabs_node(s, page_to_nid(page), page->objects); |
81819f0fc SLUB core |
1392 1393 1394 1395 |
free_slab(s, page); } /* |
5cc6eee8a slub: explicit li... |
1396 1397 1398 |
* Management of partially allocated slabs. * * list_lock must be held. |
81819f0fc SLUB core |
1399 |
*/ |
5cc6eee8a slub: explicit li... |
1400 |
static inline void add_partial(struct kmem_cache_node *n, |
7c2e132c5 Add parameter to ... |
1401 |
struct page *page, int tail) |
81819f0fc SLUB core |
1402 |
{ |
e95eed571 SLUB: Add MIN_PAR... |
1403 |
n->nr_partial++; |
136333d10 slub: explicitly ... |
1404 |
if (tail == DEACTIVATE_TO_TAIL) |
7c2e132c5 Add parameter to ... |
1405 1406 1407 |
list_add_tail(&page->lru, &n->partial); else list_add(&page->lru, &n->partial); |
81819f0fc SLUB core |
1408 |
} |
5cc6eee8a slub: explicit li... |
1409 1410 1411 1412 |
/* * list_lock must be held. */ static inline void remove_partial(struct kmem_cache_node *n, |
62e346a83 slub: extract com... |
1413 1414 1415 1416 1417 |
struct page *page) { list_del(&page->lru); n->nr_partial--; } |
81819f0fc SLUB core |
1418 |
/* |
5cc6eee8a slub: explicit li... |
1419 1420 |
* Lock slab, remove from the partial list and put the object into the * per cpu freelist. |
81819f0fc SLUB core |
1421 |
* |
497b66f2e slub: return obje... |
1422 1423 |
* Returns a list of objects or NULL if it fails. * |
672bba3a4 SLUB: update comm... |
1424 |
* Must hold list_lock. |
81819f0fc SLUB core |
1425 |
*/ |
497b66f2e slub: return obje... |
1426 |
static inline void *acquire_slab(struct kmem_cache *s, |
acd19fd1a slub: pass kmem_c... |
1427 |
struct kmem_cache_node *n, struct page *page, |
49e225858 slub: per cpu cac... |
1428 |
int mode) |
81819f0fc SLUB core |
1429 |
{ |
2cfb7455d slub: Rework allo... |
1430 1431 1432 |
void *freelist; unsigned long counters; struct page new; |
2cfb7455d slub: Rework allo... |
1433 1434 1435 1436 1437 1438 1439 1440 1441 |
/* * Zap the freelist and set the frozen bit. * The old freelist is the list of objects for the * per cpu allocation list. */ do { freelist = page->freelist; counters = page->counters; new.counters = counters; |
49e225858 slub: per cpu cac... |
1442 1443 |
if (mode) new.inuse = page->objects; |
2cfb7455d slub: Rework allo... |
1444 1445 1446 |
VM_BUG_ON(new.frozen); new.frozen = 1; |
1d07171c5 slub: disable int... |
1447 |
} while (!__cmpxchg_double_slab(s, page, |
2cfb7455d slub: Rework allo... |
1448 1449 1450 1451 1452 |
freelist, counters, NULL, new.counters, "lock and freeze")); remove_partial(n, page); |
49e225858 slub: per cpu cac... |
1453 |
return freelist; |
81819f0fc SLUB core |
1454 |
} |
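/*
 * A sketch, for illustration only, of what the lockless
 * __cmpxchg_double_slab() in acquire_slab() amounts to, written as if
 * it ran under a lock:
 *
 *	if (page->freelist == freelist && page->counters == counters) {
 *		page->freelist = NULL;
 *		page->counters = new.counters;	(frozen bit now set)
 *		success
 *	} else
 *		re-read freelist/counters and retry
 *
 * Both words must still hold the values sampled at the top of the loop
 * for the update to apply, so the freelist is taken over and the slab
 * frozen in one atomic step.
 */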
49e225858 slub: per cpu cac... |
1455 |
static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain); |
81819f0fc SLUB core |
1456 |
/* |
672bba3a4 SLUB: update comm... |
1457 |
* Try to allocate a partial slab from a specific node. |
81819f0fc SLUB core |
1458 |
*/ |
497b66f2e slub: return obje... |
1459 |
static void *get_partial_node(struct kmem_cache *s, |
acd19fd1a slub: pass kmem_c... |
1460 |
struct kmem_cache_node *n, struct kmem_cache_cpu *c) |
81819f0fc SLUB core |
1461 |
{ |
49e225858 slub: per cpu cac... |
1462 1463 |
struct page *page, *page2; void *object = NULL; |
81819f0fc SLUB core |
1464 1465 1466 1467 |
/* * Racy check. If we mistakenly see no partial slabs then we * just allocate an empty slab. If we mistakenly try to get a |
672bba3a4 SLUB: update comm... |
1468 1469 |
* partial slab and there is none available then get_partials() * will return NULL. |
81819f0fc SLUB core |
1470 1471 1472 1473 1474 |
*/ if (!n || !n->nr_partial) return NULL; spin_lock(&n->list_lock); |
49e225858 slub: per cpu cac... |
1475 |
list_for_each_entry_safe(page, page2, &n->partial, lru) { |
12d79634f slub: Code optimi... |
1476 |
void *t = acquire_slab(s, n, page, object == NULL); |
49e225858 slub: per cpu cac... |
1477 1478 1479 1480 |
int available; if (!t) break; |
12d79634f slub: Code optimi... |
1481 |
if (!object) { |
49e225858 slub: per cpu cac... |
1482 1483 1484 |
c->page = page; c->node = page_to_nid(page); stat(s, ALLOC_FROM_PARTIAL); |
49e225858 slub: per cpu cac... |
1485 1486 1487 1488 1489 1490 1491 1492 |
object = t; available = page->objects - page->inuse; } else { page->freelist = t; available = put_cpu_partial(s, page, 0); } if (kmem_cache_debug(s) || available > s->cpu_partial / 2) break; |
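	/*
	 * Note on the heuristic above: the first slab acquired refills
	 * the per cpu slab itself; any further slabs are parked on the
	 * per cpu partial list until roughly s->cpu_partial / 2 free
	 * objects are cached. Debug caches always stop after the first
	 * slab since they never use the per cpu partial list.
	 */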
497b66f2e slub: return obje... |
1493 |
} |
81819f0fc SLUB core |
1494 |
spin_unlock(&n->list_lock); |
497b66f2e slub: return obje... |
1495 |
return object; |
81819f0fc SLUB core |
1496 1497 1498 |
} /* |
672bba3a4 SLUB: update comm... |
1499 |
* Get a page from somewhere. Search in increasing NUMA distances. |
81819f0fc SLUB core |
1500 |
*/ |
acd19fd1a slub: pass kmem_c... |
1501 1502 |
static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags, struct kmem_cache_cpu *c) |
81819f0fc SLUB core |
1503 1504 1505 |
{ #ifdef CONFIG_NUMA struct zonelist *zonelist; |
dd1a239f6 mm: have zonelist... |
1506 |
struct zoneref *z; |
54a6eb5c4 mm: use two zonel... |
1507 1508 |
struct zone *zone; enum zone_type high_zoneidx = gfp_zone(flags); |
497b66f2e slub: return obje... |
1509 |
void *object; |
81819f0fc SLUB core |
1510 1511 |
/* |
672bba3a4 SLUB: update comm... |
1512 1513 1514 1515 |
* The defrag ratio allows a configuration of the tradeoffs between * inter node defragmentation and node local allocations. A lower * defrag_ratio increases the tendency to do local allocations * instead of attempting to obtain partial slabs from other nodes. |
81819f0fc SLUB core |
1516 |
* |
672bba3a4 SLUB: update comm... |
1517 1518 1519 1520 |
* If the defrag_ratio is set to 0 then kmalloc() always * returns node local objects. If the ratio is higher then kmalloc() * may return off node objects because partial slabs are obtained * from other nodes and filled up. |
81819f0fc SLUB core |
1521 |
* |
6446faa2f slub: Fix up comm... |
1522 |
* If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes |
672bba3a4 SLUB: update comm... |
1523 1524 1525 1526 1527 |
* defrag_ratio = 1000) then every (well almost) allocation will * first attempt to defrag slab caches on other nodes. This means * scanning over all nodes to look for partial slabs which may be * expensive if we do it every time we are trying to find a slab * with available objects. |
81819f0fc SLUB core |
1528 |
*/ |
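/*
 * A worked example (assumed value): writing 20 to
 * /sys/kernel/slab/<cache>/remote_node_defrag_ratio stores 200 here, so
 * the cheap get_cycles() % 1024 test below lets roughly 200 out of 1024
 * calls (about 20%) go on to scan remote nodes; the rest return NULL
 * and a fresh local slab is allocated instead.
 */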
9824601ea SLUB: rename defr... |
1529 1530 |
if (!s->remote_node_defrag_ratio || get_cycles() % 1024 > s->remote_node_defrag_ratio) |
81819f0fc SLUB core |
1531 |
return NULL; |
c0ff7453b cpuset,mm: fix no... |
1532 |
get_mems_allowed(); |
0e88460da mm: introduce nod... |
1533 |
zonelist = node_zonelist(slab_node(current->mempolicy), flags); |
54a6eb5c4 mm: use two zonel... |
1534 |
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { |
81819f0fc SLUB core |
1535 |
struct kmem_cache_node *n; |
54a6eb5c4 mm: use two zonel... |
1536 |
n = get_node(s, zone_to_nid(zone)); |
81819f0fc SLUB core |
1537 |
|
54a6eb5c4 mm: use two zonel... |
1538 |
if (n && cpuset_zone_allowed_hardwall(zone, flags) && |
3b89d7d88 slub: move min_pa... |
1539 |
n->nr_partial > s->min_partial) { |
497b66f2e slub: return obje... |
1540 1541 |
object = get_partial_node(s, n, c); if (object) { |
c0ff7453b cpuset,mm: fix no... |
1542 |
put_mems_allowed(); |
497b66f2e slub: return obje... |
1543 |
return object; |
c0ff7453b cpuset,mm: fix no... |
1544 |
} |
81819f0fc SLUB core |
1545 1546 |
} } |
c0ff7453b cpuset,mm: fix no... |
1547 |
put_mems_allowed(); |
81819f0fc SLUB core |
1548 1549 1550 1551 1552 1553 1554 |
#endif return NULL; } /* * Get a partial page, lock it and return it. */ |
497b66f2e slub: return obje... |
1555 |
static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, |
acd19fd1a slub: pass kmem_c... |
1556 |
struct kmem_cache_cpu *c) |
81819f0fc SLUB core |
1557 |
{ |
497b66f2e slub: return obje... |
1558 |
void *object; |
2154a3363 slub: Use a const... |
1559 |
int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node; |
81819f0fc SLUB core |
1560 |
|
497b66f2e slub: return obje... |
1561 1562 1563 |
object = get_partial_node(s, get_node(s, searchnode), c); if (object || node != NUMA_NO_NODE) return object; |
81819f0fc SLUB core |
1564 |
|
acd19fd1a slub: pass kmem_c... |
1565 |
return get_any_partial(s, flags, c); |
81819f0fc SLUB core |
1566 |
} |
8a5ec0ba4 Lockless (and pre... |
1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 |
#ifdef CONFIG_PREEMPT
/*
 * Calculate the next globally unique transaction for disambiguation
 * during cmpxchg. The transactions start with the cpu number and are then
 * incremented in steps of TID_STEP (CONFIG_NR_CPUS rounded up to a
 * power of two).
 */
#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
#else
/*
 * No preemption supported therefore also no need to check for
 * different cpus.
 */
#define TID_STEP 1
#endif

static inline unsigned long next_tid(unsigned long tid)
{
	return tid + TID_STEP;
}

static inline unsigned int tid_to_cpu(unsigned long tid)
{
	return tid % TID_STEP;
}

static inline unsigned long tid_to_event(unsigned long tid)
{
	return tid / TID_STEP;
}

static inline unsigned int init_tid(int cpu)
{
	return cpu;
}

static inline void note_cmpxchg_failure(const char *n,
		const struct kmem_cache *s, unsigned long tid)
{
#ifdef SLUB_DEBUG_CMPXCHG
	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);

	printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);

#ifdef CONFIG_PREEMPT
	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
		printk("due to cpu change %d -> %d\n",
			tid_to_cpu(tid), tid_to_cpu(actual_tid));
	else
#endif
	if (tid_to_event(tid) != tid_to_event(actual_tid))
		printk("due to cpu running other code. Event %ld->%ld\n",
			tid_to_event(tid), tid_to_event(actual_tid));
	else
		printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
			actual_tid, tid, next_tid(tid));
#endif
4fdccdfbb slub: Add statist... |
1626 |
stat(s, CMPXCHG_DOUBLE_CPU_FAIL); |
8a5ec0ba4 Lockless (and pre... |
1627 |
} |
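/*
 * A worked example of the tid encoding (assuming CONFIG_PREEMPT and
 * CONFIG_NR_CPUS == 4, hence TID_STEP == 4):
 *
 *	init_tid(2)      == 2	cpu 2, event 0
 *	next_tid(2)      == 6	cpu 2, event 1
 *	next_tid(6)      == 10	cpu 2, event 2
 *	tid_to_cpu(10)   == 2
 *	tid_to_event(10) == 2
 *
 * Two reads of c->tid can only be equal if both happened on the same
 * cpu with no completed operation in between, which is exactly what the
 * lockless fastpaths rely on.
 */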
8a5ec0ba4 Lockless (and pre... |
1628 1629 |
void init_kmem_cache_cpus(struct kmem_cache *s) { |
8a5ec0ba4 Lockless (and pre... |
1630 1631 1632 1633 |
int cpu; for_each_possible_cpu(cpu) per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); |
8a5ec0ba4 Lockless (and pre... |
1634 |
} |
2cfb7455d slub: Rework allo... |
1635 1636 1637 1638 |
/* * Remove the cpu slab */ |
dfb4f0960 SLUB: Avoid page ... |
1639 |
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) |
81819f0fc SLUB core |
1640 |
{ |
2cfb7455d slub: Rework allo... |
1641 |
enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE }; |
dfb4f0960 SLUB: Avoid page ... |
1642 |
struct page *page = c->page; |
2cfb7455d slub: Rework allo... |
1643 1644 1645 1646 1647 |
struct kmem_cache_node *n = get_node(s, page_to_nid(page)); int lock = 0; enum slab_modes l = M_NONE, m = M_NONE; void *freelist; void *nextfree; |
136333d10 slub: explicitly ... |
1648 |
int tail = DEACTIVATE_TO_HEAD; |
2cfb7455d slub: Rework allo... |
1649 1650 1651 1652 |
struct page new; struct page old; if (page->freelist) { |
84e554e68 SLUB: Make slub s... |
1653 |
stat(s, DEACTIVATE_REMOTE_FREES); |
136333d10 slub: explicitly ... |
1654 |
tail = DEACTIVATE_TO_TAIL; |
2cfb7455d slub: Rework allo... |
1655 1656 1657 1658 1659 1660 |
} c->tid = next_tid(c->tid); c->page = NULL; freelist = c->freelist; c->freelist = NULL; |
894b8788d slub: support con... |
1661 |
/* |
2cfb7455d slub: Rework allo... |
1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 |
* Stage one: Free all available per cpu objects back * to the page freelist while it is still frozen. Leave the * last one. * * There is no need to take the list->lock because the page * is still frozen. */ while (freelist && (nextfree = get_freepointer(s, freelist))) { void *prior; unsigned long counters; do { prior = page->freelist; counters = page->counters; set_freepointer(s, freelist, prior); new.counters = counters; new.inuse--; VM_BUG_ON(!new.frozen); |
1d07171c5 slub: disable int... |
1680 |
} while (!__cmpxchg_double_slab(s, page, |
2cfb7455d slub: Rework allo... |
1681 1682 1683 1684 1685 1686 |
prior, counters, freelist, new.counters, "drain percpu freelist")); freelist = nextfree; } |
894b8788d slub: support con... |
1687 |
/* |
2cfb7455d slub: Rework allo... |
1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 |
* Stage two: Ensure that the page is unfrozen while the * list presence reflects the actual number of objects * during unfreeze. * * We setup the list membership and then perform a cmpxchg * with the count. If there is a mismatch then the page * is not unfrozen but the page is on the wrong list. * * Then we restart the process which may have to remove * the page from the list that we just put it on again * because the number of objects in the slab may have * changed. |
894b8788d slub: support con... |
1700 |
*/ |
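/*
 * In the loop below, l tracks the list the page is currently on and m
 * the list it must end up on: list membership is adjusted first, then
 * the cmpxchg publishes the new freelist/counters. On failure the page
 * may briefly sit on the wrong list and is moved again on the retry.
 */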
2cfb7455d slub: Rework allo... |
1701 |
redo: |
894b8788d slub: support con... |
1702 |
|
2cfb7455d slub: Rework allo... |
1703 1704 1705 |
old.freelist = page->freelist; old.counters = page->counters; VM_BUG_ON(!old.frozen); |
7c2e132c5 Add parameter to ... |
1706 |
|
2cfb7455d slub: Rework allo... |
1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 |
/* Determine target state of the slab */ new.counters = old.counters; if (freelist) { new.inuse--; set_freepointer(s, freelist, old.freelist); new.freelist = freelist; } else new.freelist = old.freelist; new.frozen = 0; |
81107188f slub: Fix partial... |
1717 |
if (!new.inuse && n->nr_partial > s->min_partial) |
2cfb7455d slub: Rework allo... |
1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 |
		m = M_FREE;
	else if (new.freelist) {
		m = M_PARTIAL;
		if (!lock) {
			lock = 1;
			/*
			 * Taking the spinlock removes the possibility
			 * that acquire_slab() will see a slab page that
			 * is frozen
			 */
			spin_lock(&n->list_lock);
		}
	} else {
		m = M_FULL;
		if (kmem_cache_debug(s) && !lock) {
			lock = 1;
			/*
			 * This also ensures that the scanning of full
			 * slabs from diagnostic functions will not see
			 * any frozen slabs.
			 */
			spin_lock(&n->list_lock);
		}
	}

	if (l != m) {
		if (l == M_PARTIAL)
			remove_partial(n, page);
		else if (l == M_FULL)
894b8788d slub: support con... |
1750 |
|
2cfb7455d slub: Rework allo... |
1751 1752 1753 1754 1755 |
remove_full(s, page); if (m == M_PARTIAL) { add_partial(n, page, tail); |
136333d10 slub: explicitly ... |
1756 |
stat(s, tail); |
2cfb7455d slub: Rework allo... |
1757 1758 |
} else if (m == M_FULL) { |
894b8788d slub: support con... |
1759 |
|
2cfb7455d slub: Rework allo... |
1760 1761 1762 1763 1764 1765 1766 |
stat(s, DEACTIVATE_FULL); add_full(s, n, page); } } l = m; |
1d07171c5 slub: disable int... |
1767 |
if (!__cmpxchg_double_slab(s, page, |
2cfb7455d slub: Rework allo... |
1768 1769 1770 1771 |
old.freelist, old.counters, new.freelist, new.counters, "unfreezing slab")) goto redo; |
2cfb7455d slub: Rework allo... |
1772 1773 1774 1775 1776 1777 1778 |
if (lock) spin_unlock(&n->list_lock); if (m == M_FREE) { stat(s, DEACTIVATE_EMPTY); discard_slab(s, page); stat(s, FREE_SLAB); |
894b8788d slub: support con... |
1779 |
} |
81819f0fc SLUB core |
1780 |
} |
49e225858 slub: per cpu cac... |
1781 1782 1783 1784 1785 |
/* Unfreeze all the cpu partial slabs */ static void unfreeze_partials(struct kmem_cache *s) { struct kmem_cache_node *n = NULL; struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab); |
9ada19342 slub: move discar... |
1786 |
struct page *page, *discard_page = NULL; |
49e225858 slub: per cpu cac... |
1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 |
while ((page = c->partial)) { enum slab_modes { M_PARTIAL, M_FREE }; enum slab_modes l, m; struct page new; struct page old; c->partial = page->next; l = M_FREE; do { old.freelist = page->freelist; old.counters = page->counters; VM_BUG_ON(!old.frozen); new.counters = old.counters; new.freelist = old.freelist; new.frozen = 0; |
dcc3be6a5 slub: Discard sla... |
1807 |
if (!new.inuse && (!n || n->nr_partial > s->min_partial)) |
49e225858 slub: per cpu cac... |
1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 |
m = M_FREE; else { struct kmem_cache_node *n2 = get_node(s, page_to_nid(page)); m = M_PARTIAL; if (n != n2) { if (n) spin_unlock(&n->list_lock); n = n2; spin_lock(&n->list_lock); } } if (l != m) { if (l == M_PARTIAL) remove_partial(n, page); else |
f64ae042d slub: use correct... |
1827 1828 |
add_partial(n, page, DEACTIVATE_TO_TAIL); |
49e225858 slub: per cpu cac... |
1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 |
l = m; } } while (!cmpxchg_double_slab(s, page, old.freelist, old.counters, new.freelist, new.counters, "unfreezing slab")); if (m == M_FREE) { |
9ada19342 slub: move discar... |
1839 1840 |
page->next = discard_page; discard_page = page; |
49e225858 slub: per cpu cac... |
1841 1842 1843 1844 1845 |
} } if (n) spin_unlock(&n->list_lock); |
9ada19342 slub: move discar... |
1846 1847 1848 1849 1850 1851 1852 1853 1854 |
while (discard_page) { page = discard_page; discard_page = discard_page->next; stat(s, DEACTIVATE_EMPTY); discard_slab(s, page); stat(s, FREE_SLAB); } |
49e225858 slub: per cpu cac... |
1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 |
}

/*
 * Put a page that was just frozen (in __slab_free) into a partial page
 * slot if available. This is done with neither interrupts nor
 * preemption disabled. The cmpxchg is racy and may put the partial page
 * onto a random cpu's partial slot.
 *
 * If we did not find a slot then simply move all the partials to the
 * per node partial list.
 */
int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
	struct page *oldpage;
	int pages;
	int pobjects;

	do {
		pages = 0;
		pobjects = 0;
		oldpage = this_cpu_read(s->cpu_slab->partial);

		if (oldpage) {
			pobjects = oldpage->pobjects;
			pages = oldpage->pages;
			if (drain && pobjects > s->cpu_partial) {
				unsigned long flags;
				/*
				 * partial array is full. Move the existing
				 * set to the per node partial list.
				 */
				local_irq_save(flags);
				unfreeze_partials(s);
				local_irq_restore(flags);
				pobjects = 0;
				pages = 0;
			}
		}

		pages++;
		pobjects += page->objects - page->inuse;

		page->pages = pages;
		page->pobjects = pobjects;
		page->next = oldpage;
42d623a8c slub: use irqsafe... |
1900 |
} while (irqsafe_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); |
49e225858 slub: per cpu cac... |
1901 1902 1903 |
stat(s, CPU_PARTIAL_FREE); return pobjects; } |
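/*
 * A worked example of the bookkeeping (assumed values): with
 * s->cpu_partial == 30 and three parked pages holding 10, 8 and 6 free
 * objects, the head page records pages == 3 and pobjects == 24. Adding
 * a fourth page with 8 free objects stores pobjects == 32; the next
 * call with drain set sees 32 > 30 and first flushes the whole set to
 * the node partial list via unfreeze_partials().
 */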
dfb4f0960 SLUB: Avoid page ... |
1904 |
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) |
81819f0fc SLUB core |
1905 |
{ |
84e554e68 SLUB: Make slub s... |
1906 |
stat(s, CPUSLAB_FLUSH); |
dfb4f0960 SLUB: Avoid page ... |
1907 |
deactivate_slab(s, c); |
81819f0fc SLUB core |
1908 1909 1910 1911 |
} /* * Flush cpu slab. |
6446faa2f slub: Fix up comm... |
1912 |
* |
81819f0fc SLUB core |
1913 1914 |
* Called from IPI handler with interrupts disabled. */ |
0c7100132 SLUB: add some mo... |
1915 |
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) |
81819f0fc SLUB core |
1916 |
{ |
9dfc6e68b SLUB: Use this_cp... |
1917 |
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); |
81819f0fc SLUB core |
1918 |
|
49e225858 slub: per cpu cac... |
1919 1920 1921 1922 1923 1924 |
if (likely(c)) { if (c->page) flush_slab(s, c); unfreeze_partials(s); } |
81819f0fc SLUB core |
1925 1926 1927 1928 1929 |
} static void flush_cpu_slab(void *d) { struct kmem_cache *s = d; |
81819f0fc SLUB core |
1930 |
|
dfb4f0960 SLUB: Avoid page ... |
1931 |
__flush_cpu_slab(s, smp_processor_id()); |
81819f0fc SLUB core |
1932 1933 1934 1935 |
} static void flush_all(struct kmem_cache *s) { |
15c8b6c1a on_each_cpu(): ki... |
1936 |
on_each_cpu(flush_cpu_slab, s, 1); |
81819f0fc SLUB core |
1937 1938 1939 |
} /* |
dfb4f0960 SLUB: Avoid page ... |
1940 1941 1942 1943 1944 1945 |
* Check if the objects in a per cpu structure fit numa * locality expectations. */ static inline int node_match(struct kmem_cache_cpu *c, int node) { #ifdef CONFIG_NUMA |
2154a3363 slub: Use a const... |
1946 |
if (node != NUMA_NO_NODE && c->node != node) |
dfb4f0960 SLUB: Avoid page ... |
1947 1948 1949 1950 |
return 0; #endif return 1; } |
781b2ba6e SLUB: Out-of-memo... |
1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 |
static int count_free(struct page *page) { return page->objects - page->inuse; } static unsigned long count_partial(struct kmem_cache_node *n, int (*get_count)(struct page *)) { unsigned long flags; unsigned long x = 0; struct page *page; spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) x += get_count(page); spin_unlock_irqrestore(&n->list_lock, flags); return x; } |
26c02cf05 SLUB: fix build w... |
1969 1970 1971 1972 1973 1974 1975 1976 |
static inline unsigned long node_nr_objs(struct kmem_cache_node *n) { #ifdef CONFIG_SLUB_DEBUG return atomic_long_read(&n->total_objects); #else return 0; #endif } |
781b2ba6e SLUB: Out-of-memo... |
1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 |
static noinline void
slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
{
	int node;

	printk(KERN_WARNING
		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
		nid, gfpflags);
	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
		"default order: %d, min order: %d\n", s->name, s->objsize,
		s->size, oo_order(s->oo), oo_order(s->min));
fa5ec8a1f slub: add option ... |
1990 1991 1992 1993 |
	if (oo_order(s->min) > get_order(s->objsize))
		printk(KERN_WARNING "  %s debugging increased min order, use "
		       "slub_debug=O to disable.\n", s->name);
781b2ba6e SLUB: Out-of-memo... |
1994 1995 1996 1997 1998 1999 2000 2001 |
for_each_online_node(node) { struct kmem_cache_node *n = get_node(s, node); unsigned long nr_slabs; unsigned long nr_objs; unsigned long nr_free; if (!n) continue; |
26c02cf05 SLUB: fix build w... |
2002 2003 2004 |
nr_free = count_partial(n, count_free); nr_slabs = node_nr_slabs(n); nr_objs = node_nr_objs(n); |
781b2ba6e SLUB: Out-of-memo... |
2005 2006 2007 2008 2009 2010 2011 |
		printk(KERN_WARNING
			"  node %d: slabs: %ld, objs: %ld, free: %ld\n",
			node, nr_slabs, nr_objs, nr_free);
	}
}
497b66f2e slub: return obje... |
2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 |
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags, int node, struct kmem_cache_cpu **pc) { void *object; struct kmem_cache_cpu *c; struct page *page = new_slab(s, flags, node); if (page) { c = __this_cpu_ptr(s->cpu_slab); if (c->page) flush_slab(s, c); /* * No other reference to the page yet so we can * muck around with it freely without cmpxchg */ object = page->freelist; page->freelist = NULL; stat(s, ALLOC_SLAB); c->node = page_to_nid(page); c->page = page; *pc = c; } else object = NULL; return object; } |
dfb4f0960 SLUB: Avoid page ... |
2040 |
/* |
894b8788d slub: support con... |
2041 2042 2043 |
* Slow path. The lockless freelist is empty or we need to perform * debugging duties. * |
894b8788d slub: support con... |
2044 2045 2046 |
* Processing is still very fast if new objects have been freed to the * regular freelist. In that case we simply take over the regular freelist * as the lockless freelist and zap the regular freelist. |
81819f0fc SLUB core |
2047 |
* |
894b8788d slub: support con... |
2048 2049 2050 |
* If that is not working then we fall back to the partial lists. We take the * first element of the freelist as the object to allocate now and move the * rest of the freelist to the lockless freelist. |
81819f0fc SLUB core |
2051 |
* |
894b8788d slub: support con... |
2052 |
* And if we were unable to get a new slab from the partial slab lists then |
6446faa2f slub: Fix up comm... |
2053 2054 |
* we need to allocate a new slab. This is the slowest path since it involves * a call to the page allocator and the setup of a new slab. |
81819f0fc SLUB core |
2055 |
*/ |
ce71e27c6 SLUB: Replace __b... |
2056 2057 |
static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, unsigned long addr, struct kmem_cache_cpu *c) |
81819f0fc SLUB core |
2058 |
{ |
81819f0fc SLUB core |
2059 |
void **object; |
8a5ec0ba4 Lockless (and pre... |
2060 |
unsigned long flags; |
2cfb7455d slub: Rework allo... |
2061 2062 |
struct page new; unsigned long counters; |
8a5ec0ba4 Lockless (and pre... |
2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 |
local_irq_save(flags); #ifdef CONFIG_PREEMPT /* * We may have been preempted and rescheduled on a different * cpu before disabling interrupts. Need to reload cpu area * pointer. */ c = this_cpu_ptr(s->cpu_slab); #endif |
81819f0fc SLUB core |
2073 |
|
497b66f2e slub: return obje... |
2074 |
if (!c->page) |
81819f0fc SLUB core |
2075 |
goto new_slab; |
49e225858 slub: per cpu cac... |
2076 |
redo: |
fc59c0530 slub: Get rid of ... |
2077 |
if (unlikely(!node_match(c, node))) { |
e36a2652d slub: Add statist... |
2078 |
stat(s, ALLOC_NODE_MISMATCH); |
fc59c0530 slub: Get rid of ... |
2079 2080 2081 |
deactivate_slab(s, c); goto new_slab; } |
6446faa2f slub: Fix up comm... |
2082 |
|
2cfb7455d slub: Rework allo... |
2083 2084 2085 |
stat(s, ALLOC_SLOWPATH); do { |
497b66f2e slub: return obje... |
2086 2087 |
object = c->page->freelist; counters = c->page->counters; |
2cfb7455d slub: Rework allo... |
2088 |
new.counters = counters; |
2cfb7455d slub: Rework allo... |
2089 |
VM_BUG_ON(!new.frozen); |
03e404af2 slub: fast releas... |
2090 2091 2092 2093 2094 2095 2096 2097 |
/* * If there is no object left then we use this loop to * deactivate the slab which is simple since no objects * are left in the slab and therefore we do not need to * put the page back onto the partial list. * * If there are objects left then we retrieve them * and use them to refill the per cpu queue. |
497b66f2e slub: return obje... |
2098 |
*/ |
03e404af2 slub: fast releas... |
2099 |
|
497b66f2e slub: return obje... |
2100 |
new.inuse = c->page->objects; |
03e404af2 slub: fast releas... |
2101 |
new.frozen = object != NULL; |
497b66f2e slub: return obje... |
2102 |
} while (!__cmpxchg_double_slab(s, c->page, |
2cfb7455d slub: Rework allo... |
2103 2104 2105 |
object, counters, NULL, new.counters, "__slab_alloc")); |
6446faa2f slub: Fix up comm... |
2106 |
|
49e225858 slub: per cpu cac... |
2107 |
if (!object) { |
03e404af2 slub: fast releas... |
2108 2109 |
c->page = NULL; stat(s, DEACTIVATE_BYPASS); |
fc59c0530 slub: Get rid of ... |
2110 |
goto new_slab; |
03e404af2 slub: fast releas... |
2111 |
} |
6446faa2f slub: Fix up comm... |
2112 |
|
84e554e68 SLUB: Make slub s... |
2113 |
stat(s, ALLOC_REFILL); |
6446faa2f slub: Fix up comm... |
2114 |
|
894b8788d slub: support con... |
2115 |
load_freelist: |
ff12059ed SLUB: this_cpu: R... |
2116 |
c->freelist = get_freepointer(s, object); |
8a5ec0ba4 Lockless (and pre... |
2117 2118 |
c->tid = next_tid(c->tid); local_irq_restore(flags); |
81819f0fc SLUB core |
2119 |
return object; |
81819f0fc SLUB core |
2120 |
new_slab: |
2cfb7455d slub: Rework allo... |
2121 |
|
49e225858 slub: per cpu cac... |
2122 2123 2124 2125 2126 2127 2128 |
if (c->partial) { c->page = c->partial; c->partial = c->page->next; c->node = page_to_nid(c->page); stat(s, CPU_PARTIAL_ALLOC); c->freelist = NULL; goto redo; |
81819f0fc SLUB core |
2129 |
} |
49e225858 slub: per cpu cac... |
2130 |
/* Then do expensive stuff like retrieving pages from the partial lists */ |
497b66f2e slub: return obje... |
2131 |
object = get_partial(s, gfpflags, node, c); |
b811c202a SLUB: simplify IR... |
2132 |
|
497b66f2e slub: return obje... |
2133 |
if (unlikely(!object)) { |
01ad8a7bc slub: Eliminate r... |
2134 |
|
497b66f2e slub: return obje... |
2135 |
object = new_slab_objects(s, gfpflags, node, &c); |
2cfb7455d slub: Rework allo... |
2136 |
|
497b66f2e slub: return obje... |
2137 2138 2139 |
if (unlikely(!object)) { if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit()) slab_out_of_memory(s, gfpflags, node); |
9e577e8b4 slub: When alloca... |
2140 |
|
497b66f2e slub: return obje... |
2141 2142 2143 |
local_irq_restore(flags); return NULL; } |
81819f0fc SLUB core |
2144 |
} |
2cfb7455d slub: Rework allo... |
2145 |
|
497b66f2e slub: return obje... |
2146 |
if (likely(!kmem_cache_debug(s))) |
4b6f07504 SLUB: Define func... |
2147 |
goto load_freelist; |
2cfb7455d slub: Rework allo... |
2148 |
|
497b66f2e slub: return obje... |
2149 2150 2151 |
/* Only entered in the debug case */ if (!alloc_debug_processing(s, c->page, object, addr)) goto new_slab; /* Slab failed checks. Next slab needed */ |
894b8788d slub: support con... |
2152 |
|
2cfb7455d slub: Rework allo... |
2153 |
c->freelist = get_freepointer(s, object); |
442b06bce slub: Remove node... |
2154 |
deactivate_slab(s, c); |
15b7c5142 SLUB: Optimize sl... |
2155 |
c->node = NUMA_NO_NODE; |
a71ae47a2 slub: Fix double ... |
2156 2157 |
local_irq_restore(flags); return object; |
894b8788d slub: support con... |
2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 |
} /* * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) * have the fastpath folded into their functions. So no function call * overhead for requests that can be satisfied on the fastpath. * * The fastpath works by first checking if the lockless freelist can be used. * If not then __slab_alloc is called for slow processing. * * Otherwise we can simply pick the next object from the lockless free list. */ |
064287807 SLUB: Fix coding ... |
2170 |
static __always_inline void *slab_alloc(struct kmem_cache *s, |
ce71e27c6 SLUB: Replace __b... |
2171 |
gfp_t gfpflags, int node, unsigned long addr) |
894b8788d slub: support con... |
2172 |
{ |
894b8788d slub: support con... |
2173 |
void **object; |
dfb4f0960 SLUB: Avoid page ... |
2174 |
struct kmem_cache_cpu *c; |
8a5ec0ba4 Lockless (and pre... |
2175 |
unsigned long tid; |
1f84260c8 SLUB: Alternate f... |
2176 |
|
c016b0bde slub: Extract hoo... |
2177 |
if (slab_pre_alloc_hook(s, gfpflags)) |
773ff60e8 SLUB: failslab su... |
2178 |
return NULL; |
1f84260c8 SLUB: Alternate f... |
2179 |
|
8a5ec0ba4 Lockless (and pre... |
2180 |
redo: |
8a5ec0ba4 Lockless (and pre... |
2181 2182 2183 2184 2185 2186 2187 |
/* * Must read kmem_cache cpu data via this cpu ptr. Preemption is * enabled. We may switch back and forth between cpus while * reading from one cpu area. That does not matter as long * as we end up on the original cpu again when doing the cmpxchg. */ |
9dfc6e68b SLUB: Use this_cp... |
2188 |
c = __this_cpu_ptr(s->cpu_slab); |
8a5ec0ba4 Lockless (and pre... |
2189 |
|
8a5ec0ba4 Lockless (and pre... |
2190 2191 2192 2193 2194 2195 2196 2197 |
	/*
	 * The transaction ids are globally unique per cpu and per operation on
	 * a per cpu queue. Thus we can guarantee that the cmpxchg_double
	 * occurs on the right processor and that there was no operation on the
	 * linked list in between.
	 */
	tid = c->tid;
	barrier();
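	/*
	 * The barrier() above keeps the compiler from reordering the
	 * c->freelist read below ahead of the tid read: tid must be
	 * sampled first so that any intervening operation on this cpu's
	 * freelist is caught by the cmpxchg further down.
	 */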
8a5ec0ba4 Lockless (and pre... |
2198 |
|
9dfc6e68b SLUB: Use this_cp... |
2199 |
object = c->freelist; |
9dfc6e68b SLUB: Use this_cp... |
2200 |
if (unlikely(!object || !node_match(c, node))) |
894b8788d slub: support con... |
2201 |
|
dfb4f0960 SLUB: Avoid page ... |
2202 |
object = __slab_alloc(s, gfpflags, node, addr, c); |
894b8788d slub: support con... |
2203 2204 |
else { |
8a5ec0ba4 Lockless (and pre... |
2205 |
/* |
25985edce Fix common misspe... |
2206 |
* The cmpxchg will only match if there was no additional |
8a5ec0ba4 Lockless (and pre... |
2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 |
* operation and if we are on the right processor. * * The cmpxchg does the following atomically (without lock semantics!) * 1. Relocate first pointer to the current per cpu area. * 2. Verify that tid and freelist have not been changed * 3. If they were not changed replace tid and freelist * * Since this is without lock semantics the protection is only against * code executing on this cpu *not* from access by other cpus. */ |
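		/*
		 * For illustration only, the cmpxchg below behaves like
		 * this locked sequence, performed as one atomic
		 * operation on the {freelist, tid} pair without
		 * disabling interrupts:
		 *
		 *	if (c->freelist == object && c->tid == tid) {
		 *		c->freelist = get_freepointer_safe(s, object);
		 *		c->tid = next_tid(tid);
		 *		succeed
		 *	} else
		 *		fail and redo
		 */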
30106b8ce slub: Fix the loc... |
2217 |
if (unlikely(!irqsafe_cpu_cmpxchg_double( |
8a5ec0ba4 Lockless (and pre... |
2218 2219 |
s->cpu_slab->freelist, s->cpu_slab->tid, object, tid, |
1393d9a18 slub: Make CONFIG... |
2220 |
get_freepointer_safe(s, object), next_tid(tid)))) { |
8a5ec0ba4 Lockless (and pre... |
2221 2222 2223 2224 |
note_cmpxchg_failure("slab_alloc", s, tid); goto redo; } |
84e554e68 SLUB: Make slub s... |
2225 |
stat(s, ALLOC_FASTPATH); |
894b8788d slub: support con... |
2226 |
} |
8a5ec0ba4 Lockless (and pre... |
2227 |
|
74e2134ff SLUB: Fix __GFP_Z... |
2228 |
if (unlikely(gfpflags & __GFP_ZERO) && object) |
ff12059ed SLUB: this_cpu: R... |
2229 |
memset(object, 0, s->objsize); |
d07dbea46 Slab allocators: ... |
2230 |
|
c016b0bde slub: Extract hoo... |
2231 |
slab_post_alloc_hook(s, gfpflags, object); |
5a896d9e7 slub: add hooks f... |
2232 |
|
894b8788d slub: support con... |
2233 |
return object; |
81819f0fc SLUB core |
2234 2235 2236 2237 |
} void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) { |
2154a3363 slub: Use a const... |
2238 |
void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); |
5b882be4e kmemtrace: SLUB h... |
2239 |
|
ca2b84cb3 kmemtrace: use tr... |
2240 |
trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags); |
5b882be4e kmemtrace: SLUB h... |
2241 2242 |
return ret; |
81819f0fc SLUB core |
2243 2244 |
} EXPORT_SYMBOL(kmem_cache_alloc); |
0f24f1287 tracing, slab: De... |
2245 |
#ifdef CONFIG_TRACING |
4a92379bd slub tracing: mov... |
2246 2247 2248 2249 2250 2251 2252 2253 2254 |
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) { void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_); trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags); return ret; } EXPORT_SYMBOL(kmem_cache_alloc_trace); void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) |
5b882be4e kmemtrace: SLUB h... |
2255 |
{ |
4a92379bd slub tracing: mov... |
2256 2257 2258 |
void *ret = kmalloc_order(size, flags, order); trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags); return ret; |
5b882be4e kmemtrace: SLUB h... |
2259 |
} |
4a92379bd slub tracing: mov... |
2260 |
EXPORT_SYMBOL(kmalloc_order_trace); |
5b882be4e kmemtrace: SLUB h... |
2261 |
#endif |
81819f0fc SLUB core |
2262 2263 2264 |
#ifdef CONFIG_NUMA void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) { |
5b882be4e kmemtrace: SLUB h... |
2265 |
void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); |
ca2b84cb3 kmemtrace: use tr... |
2266 2267 |
trace_kmem_cache_alloc_node(_RET_IP_, ret, s->objsize, s->size, gfpflags, node); |
5b882be4e kmemtrace: SLUB h... |
2268 2269 |
return ret; |
81819f0fc SLUB core |
2270 2271 |
} EXPORT_SYMBOL(kmem_cache_alloc_node); |
81819f0fc SLUB core |
2272 |
|
0f24f1287 tracing, slab: De... |
2273 |
#ifdef CONFIG_TRACING |
4a92379bd slub tracing: mov... |
2274 |
void *kmem_cache_alloc_node_trace(struct kmem_cache *s, |
5b882be4e kmemtrace: SLUB h... |
2275 |
gfp_t gfpflags, |
4a92379bd slub tracing: mov... |
2276 |
int node, size_t size) |
5b882be4e kmemtrace: SLUB h... |
2277 |
{ |
4a92379bd slub tracing: mov... |
2278 2279 2280 2281 2282 |
void *ret = slab_alloc(s, gfpflags, node, _RET_IP_); trace_kmalloc_node(_RET_IP_, ret, size, s->size, gfpflags, node); return ret; |
5b882be4e kmemtrace: SLUB h... |
2283 |
} |
4a92379bd slub tracing: mov... |
2284 |
EXPORT_SYMBOL(kmem_cache_alloc_node_trace); |
5b882be4e kmemtrace: SLUB h... |
2285 |
#endif |
5d1f57e4d slub: Move NUMA-r... |
2286 |
#endif |
5b882be4e kmemtrace: SLUB h... |
2287 |
|
81819f0fc SLUB core |
2288 |
/* |
894b8788d slub: support con... |
2289 2290 |
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the cpu slabs in most processing loads.
81819f0fc SLUB core |
2291 |
* |
894b8788d slub: support con... |
2292 2293 2294 |
* So we still attempt to reduce cache line usage. Just take the slab * lock and free the item. If there is no additional partial page * handling required then we can return immediately. |
81819f0fc SLUB core |
2295 |
*/ |
894b8788d slub: support con... |
2296 |
static void __slab_free(struct kmem_cache *s, struct page *page, |
ff12059ed SLUB: this_cpu: R... |
2297 |
void *x, unsigned long addr) |
81819f0fc SLUB core |
2298 2299 2300 |
{ void *prior; void **object = (void *)x; |
2cfb7455d slub: Rework allo... |
2301 2302 2303 2304 2305 |
int was_frozen; int inuse; struct page new; unsigned long counters; struct kmem_cache_node *n = NULL; |
61728d1ef slub: Pass kmem_c... |
2306 |
unsigned long uninitialized_var(flags); |
81819f0fc SLUB core |
2307 |
|
8a5ec0ba4 Lockless (and pre... |
2308 |
stat(s, FREE_SLOWPATH); |
81819f0fc SLUB core |
2309 |
|
8dc16c6c0 slub: Move debug ... |
2310 |
if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr)) |
80f08c191 slub: Avoid disab... |
2311 |
return; |
6446faa2f slub: Fix up comm... |
2312 |
|
2cfb7455d slub: Rework allo... |
2313 2314 2315 2316 2317 2318 2319 2320 |
do { prior = page->freelist; counters = page->counters; set_freepointer(s, object, prior); new.counters = counters; was_frozen = new.frozen; new.inuse--; if ((!new.inuse || !prior) && !was_frozen && !n) { |
49e225858 slub: per cpu cac... |
2321 2322 2323 2324 2325 2326 2327 2328 2329 2330 2331 2332 2333 2334 2335 2336 2337 2338 2339 2340 2341 2342 2343 |
if (!kmem_cache_debug(s) && !prior) /* * Slab was on no list before and will be partially empty * We can defer the list move and instead freeze it. */ new.frozen = 1; else { /* Needs to be taken off a list */ n = get_node(s, page_to_nid(page)); /* * Speculatively acquire the list_lock. * If the cmpxchg does not succeed then we may * drop the list_lock without any processing. * * Otherwise the list_lock will synchronize with * other processors updating the list of slabs. */ spin_lock_irqsave(&n->list_lock, flags); } |
2cfb7455d slub: Rework allo... |
2344 2345 |
} inuse = new.inuse; |
81819f0fc SLUB core |
2346 |
|
2cfb7455d slub: Rework allo... |
2347 2348 2349 2350 |
} while (!cmpxchg_double_slab(s, page, prior, counters, object, new.counters, "__slab_free")); |
81819f0fc SLUB core |
2351 |
|
2cfb7455d slub: Rework allo... |
2352 |
if (likely(!n)) { |
49e225858 slub: per cpu cac... |
2353 2354 2355 2356 2357 2358 2359 2360 2361 |
/* * If we just froze the page then put it onto the * per cpu partial list. */ if (new.frozen && !was_frozen) put_cpu_partial(s, page, 1); /* |
2cfb7455d slub: Rework allo... |
2362 2363 2364 2365 2366 |
* The list lock was not taken therefore no list * activity can be necessary. */ if (was_frozen) stat(s, FREE_FROZEN); |
80f08c191 slub: Avoid disab... |
2367 |
return; |
2cfb7455d slub: Rework allo... |
2368 |
} |
81819f0fc SLUB core |
2369 2370 |
/* |
2cfb7455d slub: Rework allo... |
2371 2372 |
* was_frozen may have been set after we acquired the list_lock in * an earlier loop. So we need to check it here again. |
81819f0fc SLUB core |
2373 |
*/ |
2cfb7455d slub: Rework allo... |
2374 2375 2376 2377 2378 |
if (was_frozen) stat(s, FREE_FROZEN); else { if (unlikely(!inuse && n->nr_partial > s->min_partial)) goto slab_empty; |
81819f0fc SLUB core |
2379 |
|
2cfb7455d slub: Rework allo... |
2380 2381 2382 2383 2384 2385 |
/* * Objects left in the slab. If it was not on the partial list before * then add it. */ if (unlikely(!prior)) { remove_full(s, page); |
136333d10 slub: explicitly ... |
2386 |
add_partial(n, page, DEACTIVATE_TO_TAIL); |
2cfb7455d slub: Rework allo... |
2387 2388 |
stat(s, FREE_ADD_PARTIAL); } |
8ff12cfc0 SLUB: Support for... |
2389 |
} |
80f08c191 slub: Avoid disab... |
2390 |
spin_unlock_irqrestore(&n->list_lock, flags); |
81819f0fc SLUB core |
2391 2392 2393 |
return; slab_empty: |
a973e9dd1 Revert "unique en... |
2394 |
if (prior) { |
81819f0fc SLUB core |
2395 |
/* |
6fbabb20f slub: Fix full li... |
2396 |
* Slab on the partial list. |
81819f0fc SLUB core |
2397 |
*/ |
5cc6eee8a slub: explicit li... |
2398 |
remove_partial(n, page); |
84e554e68 SLUB: Make slub s... |
2399 |
stat(s, FREE_REMOVE_PARTIAL); |
6fbabb20f slub: Fix full li... |
2400 2401 2402 |
} else /* Slab must be on the full list */ remove_full(s, page); |
2cfb7455d slub: Rework allo... |
2403 |
|
80f08c191 slub: Avoid disab... |
2404 |
spin_unlock_irqrestore(&n->list_lock, flags); |
84e554e68 SLUB: Make slub s... |
2405 |
stat(s, FREE_SLAB); |
81819f0fc SLUB core |
2406 |
discard_slab(s, page); |
81819f0fc SLUB core |
2407 |
} |
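/*
 * To summarize, __slab_free() ends in one of four ways: the slab was
 * frozen by some cpu and only its freelist grows, the slab is newly
 * frozen here and parked on this cpu's partial list, the slab stays on
 * (or is added to) the node partial list, or the slab became empty and
 * is discarded.
 */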
894b8788d slub: support con... |
2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 |
/*
 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
 * can perform fastpath freeing without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
 * the item before.
 *
 * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
064287807 SLUB: Fix coding ... |
2419 |
static __always_inline void slab_free(struct kmem_cache *s, |
ce71e27c6 SLUB: Replace __b... |
2420 |
struct page *page, void *x, unsigned long addr) |
894b8788d slub: support con... |
2421 2422 |
{ void **object = (void *)x; |
dfb4f0960 SLUB: Avoid page ... |
2423 |
struct kmem_cache_cpu *c; |
8a5ec0ba4 Lockless (and pre... |
2424 |
unsigned long tid; |
1f84260c8 SLUB: Alternate f... |
2425 |
|
c016b0bde slub: Extract hoo... |
2426 |
slab_free_hook(s, x); |
8a5ec0ba4 Lockless (and pre... |
2427 2428 2429 2430 2431 2432 2433 |
redo:
	/*
	 * Determine the current cpu's per cpu slab.
	 * The cpu may change afterward. However that does not matter since
	 * data is retrieved via this pointer. If we are on the same cpu
	 * during the cmpxchg then the free will succeed.
	 */
9dfc6e68b SLUB: Use this_cp... |
2434 |
c = __this_cpu_ptr(s->cpu_slab); |
c016b0bde slub: Extract hoo... |
2435 |
|
8a5ec0ba4 Lockless (and pre... |
2436 2437 |
tid = c->tid; barrier(); |
c016b0bde slub: Extract hoo... |
2438 |
|
442b06bce slub: Remove node... |
2439 |
if (likely(page == c->page)) { |
ff12059ed SLUB: this_cpu: R... |
2440 |
set_freepointer(s, object, c->freelist); |
8a5ec0ba4 Lockless (and pre... |
2441 |
|
30106b8ce slub: Fix the loc... |
2442 |
if (unlikely(!irqsafe_cpu_cmpxchg_double( |
8a5ec0ba4 Lockless (and pre... |
2443 2444 2445 2446 2447 2448 2449 |
s->cpu_slab->freelist, s->cpu_slab->tid, c->freelist, tid, object, next_tid(tid)))) { note_cmpxchg_failure("slab_free", s, tid); goto redo; } |
84e554e68 SLUB: Make slub s... |
2450 |
stat(s, FREE_FASTPATH); |
894b8788d slub: support con... |
2451 |
} else |
ff12059ed SLUB: this_cpu: R... |
2452 |
__slab_free(s, page, x, addr); |
894b8788d slub: support con... |
2453 |
|
894b8788d slub: support con... |
2454 |
} |
81819f0fc SLUB core |
2455 2456 |
void kmem_cache_free(struct kmem_cache *s, void *x) { |
77c5e2d01 slub: fix object ... |
2457 |
struct page *page; |
81819f0fc SLUB core |
2458 |
|
b49af68ff Add virt_to_head_... |
2459 |
page = virt_to_head_page(x); |
81819f0fc SLUB core |
2460 |
|
ce71e27c6 SLUB: Replace __b... |
2461 |
slab_free(s, page, x, _RET_IP_); |
5b882be4e kmemtrace: SLUB h... |
2462 |
|
ca2b84cb3 kmemtrace: use tr... |
2463 |
trace_kmem_cache_free(_RET_IP_, x); |
81819f0fc SLUB core |
2464 2465 |
} EXPORT_SYMBOL(kmem_cache_free); |
81819f0fc SLUB core |
2466 |
/* |
672bba3a4 SLUB: update comm... |
2467 2468 2469 2470 |
* Object placement in a slab is made very easy because we always start at * offset 0. If we tune the size of the object to the alignment then we can * get the required alignment by putting one properly sized object after * another. |
81819f0fc SLUB core |
2471 2472 2473 2474 |
* * Notice that the allocation order determines the sizes of the per cpu * caches. Each processor has always one slab available for allocations. * Increasing the allocation order reduces the number of times that slabs |
672bba3a4 SLUB: update comm... |
2475 |
* must be moved on and off the partial lists and is therefore a factor in |
81819f0fc SLUB core |
2476 |
* locking overhead. |
81819f0fc SLUB core |
2477 2478 2479 2480 2481 2482 2483 2484 2485 |
 */

/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static int slub_min_order;
114e9e89e slub: Drop DEFAUL... |
2486 |
static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER; |
9b2cd506e slub: Calculate m... |
2487 |
static int slub_min_objects; |
81819f0fc SLUB core |
2488 2489 2490 |
/* * Merge control. If this is set then no merging of slab caches will occur. |
672bba3a4 SLUB: update comm... |
2491 |
* (Could be removed. This was introduced to pacify the merge skeptics.) |
81819f0fc SLUB core |
2492 2493 2494 2495 |
*/ static int slub_nomerge; /* |
81819f0fc SLUB core |
2496 2497 |
* Calculate the order of allocation given an slab object size. * |
672bba3a4 SLUB: update comm... |
2498 2499 2500 2501 |
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
c124f5b54 slub: pack object... |
2502 |
* unused space left. We go to a higher order if more than 1/16th of the slab |
672bba3a4 SLUB: update comm... |
2503 2504 2505 2506 2507 2508 |
* would be wasted. * * In order to reach satisfactory performance we must ensure that a minimum * number of objects is in one slab. Otherwise we may generate too much * activity on the partial lists which requires taking the list_lock. This is * less a concern for large slabs though which are rarely used. |
81819f0fc SLUB core |
2509 |
* |
672bba3a4 SLUB: update comm... |
2510 2511 2512 2513 |
* slub_max_order specifies the order where we begin to stop considering the * number of objects in a slab as critical. If we reach slub_max_order then * we try to keep the page order as low as possible. So we accept more waste * of space in favor of a small page order. |
81819f0fc SLUB core |
2514 |
* |
672bba3a4 SLUB: update comm... |
2515 2516 2517 2518 |
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
81819f0fc SLUB core |
2519 |
*/ |
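/*
 * A worked example (assumed numbers): size == 192, reserved == 0,
 * min_objects == 16, max_order == 3, fract_leftover == 16 and
 * PAGE_SHIFT == 12. The scan starts at order
 * fls(16 * 192 - 1) - 12 == 0; a 4096 byte slab holds 21 such objects
 * with 4096 % 192 == 64 bytes left over, and since
 * 64 <= 4096 / 16 == 256 the loop accepts order 0.
 */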
5e6d444ea SLUB: rework slab... |
2520 |
static inline int slab_order(int size, int min_objects, |
ab9a0f196 slub: automatical... |
2521 |
int max_order, int fract_leftover, int reserved) |
81819f0fc SLUB core |
2522 2523 2524 |
{ int order; int rem; |
6300ea750 SLUB: ensure that... |
2525 |
int min_order = slub_min_order; |
81819f0fc SLUB core |
2526 |
|
ab9a0f196 slub: automatical... |
2527 |
if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE) |
210b5c061 SLUB: cleanup - d... |
2528 |
return get_order(size * MAX_OBJS_PER_PAGE) - 1; |
39b264641 slub: Store max n... |
2529 |
|
6300ea750 SLUB: ensure that... |
2530 |
for (order = max(min_order, |
5e6d444ea SLUB: rework slab... |
2531 2532 |
fls(min_objects * size - 1) - PAGE_SHIFT); order <= max_order; order++) { |
81819f0fc SLUB core |
2533 |
|
5e6d444ea SLUB: rework slab... |
2534 |
unsigned long slab_size = PAGE_SIZE << order; |
81819f0fc SLUB core |
2535 |
|
ab9a0f196 slub: automatical... |
2536 |
if (slab_size < min_objects * size + reserved) |
81819f0fc SLUB core |
2537 |
continue; |
ab9a0f196 slub: automatical... |
2538 |
rem = (slab_size - reserved) % size; |
81819f0fc SLUB core |
2539 |
|
5e6d444ea SLUB: rework slab... |
2540 |
if (rem <= slab_size / fract_leftover) |
81819f0fc SLUB core |
2541 2542 2543 |
break; } |
672bba3a4 SLUB: update comm... |
2544 |
|
81819f0fc SLUB core |
2545 2546 |
return order; } |
ab9a0f196 slub: automatical... |
2547 |
static inline int calculate_order(int size, int reserved) |
5e6d444ea SLUB: rework slab... |
2548 2549 2550 2551 |
{ int order; int min_objects; int fraction; |
e8120ff1f SLUB: Fix default... |
2552 |
int max_objects; |
5e6d444ea SLUB: rework slab... |
2553 2554 2555 2556 2557 2558 2559 2560 2561 2562 |
/* * Attempt to find best configuration for a slab. This * works by first attempting to generate a layout with * the best configuration and backing off gradually. * * First we reduce the acceptable waste in a slab. Then * we reduce the minimum objects required in a slab. */ min_objects = slub_min_objects; |
9b2cd506e slub: Calculate m... |
2563 2564 |
if (!min_objects) min_objects = 4 * (fls(nr_cpu_ids) + 1); |
ab9a0f196 slub: automatical... |
2565 |
max_objects = order_objects(slub_max_order, size, reserved); |
e8120ff1f SLUB: Fix default... |
2566 |
min_objects = min(min_objects, max_objects); |
5e6d444ea SLUB: rework slab... |
2567 |
while (min_objects > 1) { |
c124f5b54 slub: pack object... |
2568 |
fraction = 16; |
5e6d444ea SLUB: rework slab... |
2569 2570 |
while (fraction >= 4) { order = slab_order(size, min_objects, |
ab9a0f196 slub: automatical... |
2571 |
slub_max_order, fraction, reserved); |
5e6d444ea SLUB: rework slab... |
2572 2573 2574 2575 |
if (order <= slub_max_order) return order; fraction /= 2; } |
5086c389c SLUB: Fix some co... |
2576 |
min_objects--; |
5e6d444ea SLUB: rework slab... |
2577 2578 2579 2580 2581 2582 |
	}

	/*
	 * We were unable to place multiple objects in a slab. Now
	 * let's see if we can place a single object there.
	 */
ab9a0f196 slub: automatical... |
2583 |
order = slab_order(size, 1, slub_max_order, 1, reserved); |
5e6d444ea SLUB: rework slab... |
2584 2585 2586 2587 2588 2589 |
if (order <= slub_max_order) return order; /* * Doh this slab cannot be placed using slub_max_order. */ |
ab9a0f196 slub: automatical... |
2590 |
order = slab_order(size, 1, MAX_ORDER, 1, reserved); |
818cf5909 slub: enforce MAX... |
2591 |
if (order < MAX_ORDER) |
5e6d444ea SLUB: rework slab... |
2592 2593 2594 |
return order; return -ENOSYS; } |
81819f0fc SLUB core |
2595 |
/* |
672bba3a4 SLUB: update comm... |
2596 |
* Figure out what the alignment of the objects will be. |
81819f0fc SLUB core |
2597 2598 2599 2600 2601 |
*/ static unsigned long calculate_alignment(unsigned long flags, unsigned long align, unsigned long size) { /* |
6446faa2f slub: Fix up comm... |
2602 2603 |
* If the user wants hardware cache aligned objects then follow that * suggestion if the object is sufficiently large. |
81819f0fc SLUB core |
2604 |
* |
6446faa2f slub: Fix up comm... |
2605 2606 |
 * The hardware cache alignment cannot override the specified
 * alignment though. If that is greater, then use it.
81819f0fc SLUB core |
2607 |
*/ |
b62103867 slub: Do not cros... |
2608 2609 2610 2611 2612 2613 |
if (flags & SLAB_HWCACHE_ALIGN) { unsigned long ralign = cache_line_size(); while (size <= ralign / 2) ralign /= 2; align = max(align, ralign); } |
81819f0fc SLUB core |
2614 2615 |
if (align < ARCH_SLAB_MINALIGN) |
b62103867 slub: Do not cros... |
2616 |
align = ARCH_SLAB_MINALIGN; |
81819f0fc SLUB core |
2617 2618 2619 |
return ALIGN(align, sizeof(void *)); } |
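/*
 * A worked example (assumed values): with SLAB_HWCACHE_ALIGN, a 24 byte
 * object and cache_line_size() == 64, ralign halves from 64 to 32
 * (24 <= 32 holds but 24 <= 16 does not), so objects are placed at a
 * 32 byte stride: two fit per cache line instead of padding each object
 * out to a full 64 bytes.
 */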
5595cffc8 SLUB: dynamic per... |
2620 2621 |
static void init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) |
81819f0fc SLUB core |
2622 2623 |
{ n->nr_partial = 0; |
81819f0fc SLUB core |
2624 2625 |
spin_lock_init(&n->list_lock); INIT_LIST_HEAD(&n->partial); |
8ab1372fa SLUB: Fix CONFIG_... |
2626 |
#ifdef CONFIG_SLUB_DEBUG |
0f389ec63 slub: No need for... |
2627 |
atomic_long_set(&n->nr_slabs, 0); |
02b71b701 slub: fixed unini... |
2628 |
atomic_long_set(&n->total_objects, 0); |
643b11384 slub: enable trac... |
2629 |
INIT_LIST_HEAD(&n->full); |
8ab1372fa SLUB: Fix CONFIG_... |
2630 |
#endif |
81819f0fc SLUB core |
2631 |
} |
55136592f slub: Remove dyna... |
2632 |
static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) |
4c93c355d SLUB: Place kmem_... |
2633 |
{ |
6c182dc0d slub: Remove stat... |
2634 2635 |
BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); |
4c93c355d SLUB: Place kmem_... |
2636 |
|
8a5ec0ba4 Lockless (and pre... |
2637 |
/* |
d4d84fef6 slub: always alig... |
2638 2639 |
* Must align to double word boundary for the double cmpxchg * instructions to work; see __pcpu_double_call_return_bool(). |
8a5ec0ba4 Lockless (and pre... |
2640 |
*/ |
d4d84fef6 slub: always alig... |
2641 2642 |
s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *)); |
8a5ec0ba4 Lockless (and pre... |
2643 2644 2645 2646 2647 |
if (!s->cpu_slab) return 0; init_kmem_cache_cpus(s); |
4c93c355d SLUB: Place kmem_... |
2648 |
|
8a5ec0ba4 Lockless (and pre... |
2649 |
return 1; |
4c93c355d SLUB: Place kmem_... |
2650 |
} |
4c93c355d SLUB: Place kmem_... |
2651 |
|
51df11428 slub: Dynamically... |
2652 |
static struct kmem_cache *kmem_cache_node; |
81819f0fc SLUB core |
2653 2654 2655 2656 2657 2658 |
/* * No kmalloc_node yet so do it by hand. We know that this is the first * slab on the node for this slabcache. There are no concurrent accesses * possible. * * Note that this function only works on the kmalloc_node_cache |
4c93c355d SLUB: Place kmem_... |
2659 2660 |
* when allocating for the kmalloc_node_cache. This is used for bootstrapping * memory on a fresh node that has no slab structures yet. |
81819f0fc SLUB core |
2661 |
*/ |
55136592f slub: Remove dyna... |
2662 |
static void early_kmem_cache_node_alloc(int node) |
81819f0fc SLUB core |
2663 2664 2665 |
{ struct page *page; struct kmem_cache_node *n; |
51df11428 slub: Dynamically... |
2666 |
BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node)); |
81819f0fc SLUB core |
2667 |
|
51df11428 slub: Dynamically... |
2668 |
page = new_slab(kmem_cache_node, GFP_NOWAIT, node); |
81819f0fc SLUB core |
2669 2670 |
BUG_ON(!page); |
a2f92ee7e SLUB: do not fail... |
2671 2672 2673 2674 2675 2676 2677 2678 |
	if (page_to_nid(page) != node) {
		printk(KERN_ERR "SLUB: Unable to allocate memory from "
				"node %d\n", node);
		printk(KERN_ERR "SLUB: Allocating a useless per node structure "
				"in order to be able to continue\n");
	}
81819f0fc SLUB core |
2679 2680 |
	n = page->freelist;
	BUG_ON(!n);
51df11428 slub: Dynamically... |
2681 |
page->freelist = get_freepointer(kmem_cache_node, n); |
e6e82ea11 slub: Prepare inu... |
2682 |
page->inuse = 1; |
8cb0a5068 slub: Move page->... |
2683 |
page->frozen = 0; |
51df11428 slub: Dynamically... |
2684 |
kmem_cache_node->node[node] = n; |
8ab1372fa SLUB: Fix CONFIG_... |
2685 |
#ifdef CONFIG_SLUB_DEBUG |
f7cb19336 SLUB: Pass active... |
2686 |
init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); |
51df11428 slub: Dynamically... |
2687 |
init_tracking(kmem_cache_node, n); |
8ab1372fa SLUB: Fix CONFIG_... |
2688 |
#endif |
51df11428 slub: Dynamically... |
2689 2690 |
	init_kmem_cache_node(n, kmem_cache_node);
	inc_slabs_node(kmem_cache_node, node, page->objects);
6446faa2f slub: Fix up comm... |
2691 |
|
136333d10 slub: explicitly ... |
2692 |
add_partial(n, page, DEACTIVATE_TO_HEAD); |
81819f0fc SLUB core |
2693 2694 2695 2696 2697 |
}

static void free_kmem_cache_nodes(struct kmem_cache *s)
{
	int node;
f64dc58c5 Memoryless nodes:... |
2698 |
for_each_node_state(node, N_NORMAL_MEMORY) { |
81819f0fc SLUB core |
2699 |
struct kmem_cache_node *n = s->node[node]; |
51df11428 slub: Dynamically... |
2700 |
|
73367bd8e slub: move kmem_c... |
2701 |
if (n) |
51df11428 slub: Dynamically... |
2702 |
kmem_cache_free(kmem_cache_node, n); |
81819f0fc SLUB core |
2703 2704 2705 |
		s->node[node] = NULL;
	}
}
55136592f slub: Remove dyna... |
2706 |
static int init_kmem_cache_nodes(struct kmem_cache *s) |
81819f0fc SLUB core |
2707 2708 |
{
	int node;
81819f0fc SLUB core |
2709 |
|
f64dc58c5 Memoryless nodes:... |
2710 |
for_each_node_state(node, N_NORMAL_MEMORY) { |
81819f0fc SLUB core |
2711 |
struct kmem_cache_node *n; |
73367bd8e slub: move kmem_c... |
2712 |
if (slab_state == DOWN) { |
55136592f slub: Remove dyna... |
2713 |
early_kmem_cache_node_alloc(node); |
73367bd8e slub: move kmem_c... |
2714 2715 |
			continue;
		}
51df11428 slub: Dynamically... |
2716 |
n = kmem_cache_alloc_node(kmem_cache_node, |
55136592f slub: Remove dyna... |
2717 |
GFP_KERNEL, node); |
81819f0fc SLUB core |
2718 |
|
73367bd8e slub: move kmem_c... |
2719 2720 2721 |
		if (!n) {
			free_kmem_cache_nodes(s);
			return 0;
81819f0fc SLUB core |
2722 |
} |
73367bd8e slub: move kmem_c... |
2723 |
|
81819f0fc SLUB core |
2724 |
s->node[node] = n; |
5595cffc8 SLUB: dynamic per... |
2725 |
init_kmem_cache_node(n, s); |
81819f0fc SLUB core |
2726 2727 2728 |
	}
	return 1;
}
81819f0fc SLUB core |
2729 |
|
c0bdb232b slub: rename calc... |
2730 |
static void set_min_partial(struct kmem_cache *s, unsigned long min) |
3b89d7d88 slub: move min_pa... |
2731 2732 2733 2734 2735 2736 2737 |
{
	if (min < MIN_PARTIAL)
		min = MIN_PARTIAL;
	else if (min > MAX_PARTIAL)
		min = MAX_PARTIAL;
	s->min_partial = min;
}
81819f0fc SLUB core |
2738 2739 2740 2741 |
/*
 * calculate_sizes() determines the order and the distribution of data within
 * a slab object.
 */
06b285dc3 slub: Make the or... |
2742 |
static int calculate_sizes(struct kmem_cache *s, int forced_order) |
81819f0fc SLUB core |
2743 2744 2745 2746 |
{
	unsigned long flags = s->flags;
	unsigned long size = s->objsize;
	unsigned long align = s->align;
834f3d119 slub: Add kmem_ca... |
2747 |
int order; |
81819f0fc SLUB core |
2748 2749 |
/* |
d8b42bf54 slub: Rearrange #... |
2750 2751 2752 2753 2754 2755 2756 2757 |
	 * Round up object size to the next word boundary. We can only
	 * place the free pointer at word boundaries and this determines
	 * the possible location of the free pointer.
	 */
	size = ALIGN(size, sizeof(void *));

#ifdef CONFIG_SLUB_DEBUG
	/*
81819f0fc SLUB core |
2758 2759 2760 2761 2762 |
	 * Determine if we can poison the object itself. If the user of
	 * the slab may touch the object after free or before allocation
	 * then we should never poison the object itself.
	 */
	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
c59def9f2 Slab allocators: ... |
2763 |
!s->ctor) |
81819f0fc SLUB core |
2764 2765 2766 |
		s->flags |= __OBJECT_POISON;
	else
		s->flags &= ~__OBJECT_POISON;
81819f0fc SLUB core |
2767 2768 |
/* |
672bba3a4 SLUB: update comm... |
2769 |
* If we are Redzoning then check if there is some space between the |
81819f0fc SLUB core |
2770 |
* end of the object and the free pointer. If not then add an |
672bba3a4 SLUB: update comm... |
2771 |
* additional word to have some bytes to store Redzone information. |
81819f0fc SLUB core |
2772 2773 2774 |
	 */
	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
		size += sizeof(void *);
41ecc55b8 SLUB: add CONFIG_... |
2775 |
#endif |
81819f0fc SLUB core |
2776 2777 |
/* |
672bba3a4 SLUB: update comm... |
2778 2779 |
	 * With that we have determined the number of bytes in actual use
	 * by the object. This is the potential offset to the free pointer.
81819f0fc SLUB core |
2780 2781 2782 2783 |
	 */
	s->inuse = size;

	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
c59def9f2 Slab allocators: ... |
2784 |
s->ctor)) { |
81819f0fc SLUB core |
2785 2786 2787 2788 2789 2790 2791 2792 2793 2794 2795 |
		/*
		 * Relocate free pointer after the object if it is not
		 * permitted to overwrite the first word of the object on
		 * kmem_cache_free.
		 *
		 * This is the case if we do RCU, have a constructor or
		 * destructor or are poisoning the objects.
		 */
		s->offset = size;
		size += sizeof(void *);
	}
c12b3c625 SLUB Debug: Fix o... |
2796 |
#ifdef CONFIG_SLUB_DEBUG |
81819f0fc SLUB core |
2797 2798 2799 2800 2801 2802 |
	if (flags & SLAB_STORE_USER)
		/*
		 * Need to store information about allocs and frees after
		 * the object.
		 */
		size += 2 * sizeof(struct track);
be7b3fbce SLUB: after objec... |
2803 |
if (flags & SLAB_RED_ZONE) |
81819f0fc SLUB core |
2804 2805 2806 2807 |
		/*
		 * Add some empty padding so that we can catch
		 * overwrites from earlier objects rather than let
		 * tracking information or the free pointer be
0211a9c85 trivial: fix an -... |
2808 |
* corrupted if a user writes before the start |
81819f0fc SLUB core |
2809 2810 2811 |
		 * of the object.
		 */
		size += sizeof(void *);
41ecc55b8 SLUB: add CONFIG_... |
2812 |
#endif |
672bba3a4 SLUB: update comm... |
2813 |
|
81819f0fc SLUB core |
2814 2815 |
	/*
	 * Determine the alignment based on various parameters that the
65c02d4cf SLUB: add support... |
2816 2817 |
	 * user specified and the dynamic determination of cache line size
	 * on bootup.
81819f0fc SLUB core |
2818 2819 |
	 */
	align = calculate_alignment(flags, align, s->objsize);
dcb0ce1bd slub: change kmem... |
2820 |
s->align = align; |
81819f0fc SLUB core |
2821 2822 2823 2824 2825 2826 2827 2828 |
	/*
	 * SLUB stores one object immediately after another beginning from
	 * offset 0. In order to align the objects we have to simply size
	 * each object to conform to the alignment.
	 */
	size = ALIGN(size, align);
	s->size = size;
06b285dc3 slub: Make the or... |
2829 2830 2831 |
	if (forced_order >= 0)
		order = forced_order;
	else
ab9a0f196 slub: automatical... |
2832 |
order = calculate_order(size, s->reserved); |
81819f0fc SLUB core |
2833 |
|
834f3d119 slub: Add kmem_ca... |
2834 |
if (order < 0) |
81819f0fc SLUB core |
2835 |
return 0; |
b7a49f0d4 slub: Determine g... |
2836 |
s->allocflags = 0; |
834f3d119 slub: Add kmem_ca... |
2837 |
if (order) |
b7a49f0d4 slub: Determine g... |
2838 2839 2840 2841 2842 2843 2844 |
		s->allocflags |= __GFP_COMP;

	if (s->flags & SLAB_CACHE_DMA)
		s->allocflags |= SLUB_DMA;

	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		s->allocflags |= __GFP_RECLAIMABLE;
81819f0fc SLUB core |
2845 2846 2847 |
	/*
	 * Determine the number of objects per slab
	 */
ab9a0f196 slub: automatical... |
2848 2849 |
	s->oo = oo_make(order, size, s->reserved);
	s->min = oo_make(get_order(size), size, s->reserved);
205ab99dd slub: Update stat... |
2850 2851 |
	if (oo_objects(s->oo) > oo_objects(s->max))
		s->max = s->oo;
81819f0fc SLUB core |
2852 |
|
834f3d119 slub: Add kmem_ca... |
2853 |
return !!oo_objects(s->oo); |
81819f0fc SLUB core |
2854 2855 |
} |
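The net result of calculate_sizes() is a per-object layout of [object][free pointer?][two struct track?][red-zone pad?], rounded to the final alignment. A rough userspace sketch of the size/offset bookkeeping under assumed debug flags (struct track here is a stub; the real one records caller, cpu, pid and timestamp):

#include <stdio.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct track { void *addr; int cpu, pid; unsigned long when; };	/* stub */

struct layout {
	size_t inuse;	/* bytes actually used by the object */
	size_t offset;	/* free pointer offset, 0 = overlays the object */
	size_t size;	/* final per-object footprint */
};

static struct layout calc(size_t objsize, size_t align,
			  int rcu_poison_or_ctor, int store_user, int red_zone)
{
	struct layout l = { 0, 0, 0 };
	size_t size = ALIGN_UP(objsize, sizeof(void *));

	if (red_zone && size == objsize)
		size += sizeof(void *);		/* room for redzone bytes */
	l.inuse = size;
	if (rcu_poison_or_ctor) {
		l.offset = size;		/* free pointer after object */
		size += sizeof(void *);
	}
	if (store_user)
		size += 2 * sizeof(struct track);	/* alloc + free track */
	if (red_zone)
		size += sizeof(void *);		/* pad to catch overwrites */
	l.size = ALIGN_UP(size, align);
	return l;
}

int main(void)
{
	struct layout l = calc(40, 8, 1, 1, 1);
	printf("inuse=%zu offset=%zu size=%zu\n", l.inuse, l.offset, l.size);
	return 0;
}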
55136592f slub: Remove dyna... |
2856 |
static int kmem_cache_open(struct kmem_cache *s, |
81819f0fc SLUB core |
2857 2858 |
const char *name, size_t size, size_t align, unsigned long flags, |
51cc50685 SL*B: drop kmem c... |
2859 |
void (*ctor)(void *)) |
81819f0fc SLUB core |
2860 2861 2862 2863 |
{
	memset(s, 0, kmem_size);
	s->name = name;
	s->ctor = ctor;
81819f0fc SLUB core |
2864 |
s->objsize = size; |
81819f0fc SLUB core |
2865 |
s->align = align; |
ba0268a8b SLUB: accurately ... |
2866 |
s->flags = kmem_cache_flags(size, flags, name, ctor); |
ab9a0f196 slub: automatical... |
2867 |
s->reserved = 0; |
81819f0fc SLUB core |
2868 |
|
da9a638c6 slub,rcu: don't a... |
2869 2870 |
	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
		s->reserved = sizeof(struct rcu_head);
81819f0fc SLUB core |
2871 |
|
06b285dc3 slub: Make the or... |
2872 |
if (!calculate_sizes(s, -1)) |
81819f0fc SLUB core |
2873 |
goto error; |
3de472138 slub: use size an... |
2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 |
	if (disable_higher_order_debug) {
		/*
		 * Disable debugging flags that store metadata if the min slab
		 * order increased.
		 */
		if (get_order(s->size) > get_order(s->objsize)) {
			s->flags &= ~DEBUG_METADATA_FLAGS;
			s->offset = 0;
			if (!calculate_sizes(s, -1))
				goto error;
		}
	}
81819f0fc SLUB core |
2886 |
|
b789ef518 slub: Add cmpxchg... |
2887 2888 2889 2890 2891 |
#ifdef CONFIG_CMPXCHG_DOUBLE
	if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
		/* Enable fast mode */
		s->flags |= __CMPXCHG_DOUBLE;
#endif
3b89d7d88 slub: move min_pa... |
2892 2893 2894 2895 |
	/*
	 * The larger the object size is, the more pages we want on the partial
	 * list to avoid pounding the page allocator excessively.
	 */
49e225858 slub: per cpu cac... |
2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 |
	set_min_partial(s, ilog2(s->size) / 2);

	/*
	 * cpu_partial determines the maximum number of objects kept in the
	 * per cpu partial lists of a processor.
	 *
	 * Per cpu partial lists mainly contain slabs that just have one
	 * object freed. If they are used for allocation then they can be
	 * filled up again with minimal effort. The slab will never hit the
	 * per node partial lists and therefore no locking will be required.
	 *
	 * This setting also determines
	 *
	 * A) The number of objects from per cpu partial slabs dumped to the
	 *    per node list when we reach the limit.
9f2649041 slub: correct com... |
2911 |
	 * B) The number of objects in cpu partial slabs to extract from the
49e225858 slub: per cpu cac... |
2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 |
	 *    per node list when we run out of per cpu objects. We only fetch
	 *    50% to keep some capacity around for frees.
	 */
	if (s->size >= PAGE_SIZE)
		s->cpu_partial = 2;
	else if (s->size >= 1024)
		s->cpu_partial = 6;
	else if (s->size >= 256)
		s->cpu_partial = 13;
	else
		s->cpu_partial = 30;
81819f0fc SLUB core |
2923 2924 |
	s->refcount = 1;

#ifdef CONFIG_NUMA
e2cb96b7e slub: Disable NUM... |
2925 |
s->remote_node_defrag_ratio = 1000; |
81819f0fc SLUB core |
2926 |
#endif |
55136592f slub: Remove dyna... |
2927 |
if (!init_kmem_cache_nodes(s)) |
dfb4f0960 SLUB: Avoid page ... |
2928 |
goto error; |
81819f0fc SLUB core |
2929 |
|
55136592f slub: Remove dyna... |
2930 |
if (alloc_kmem_cache_cpus(s)) |
81819f0fc SLUB core |
2931 |
return 1; |
ff12059ed SLUB: this_cpu: R... |
2932 |
|
4c93c355d SLUB: Place kmem_... |
2933 |
free_kmem_cache_nodes(s); |
81819f0fc SLUB core |
2934 2935 2936 2937 2938 |
error:
	if (flags & SLAB_PANIC)
		panic("Cannot create slab %s size=%lu realsize=%u "
			"order=%u offset=%u flags=%lx\n",
834f3d119 slub: Add kmem_ca... |
2939 |
s->name, (unsigned long)size, s->size, oo_order(s->oo), |
81819f0fc SLUB core |
2940 2941 2942 |
			s->offset, flags);
	return 0;
}
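The cpu_partial tiers assigned above trade memory for lock avoidance: small objects get deep per-cpu partial lists, page-sized objects barely any. A small userspace mirror of that selection (4096 is an assumed stand-in for PAGE_SIZE):

#include <stdio.h>

#define PAGE_SZ 4096	/* assumed PAGE_SIZE */

/* Mirror of the tiering kmem_cache_open() applies to s->cpu_partial. */
static int cpu_partial_for(unsigned int size)
{
	if (size >= PAGE_SZ)
		return 2;
	else if (size >= 1024)
		return 6;
	else if (size >= 256)
		return 13;
	else
		return 30;
}

int main(void)
{
	unsigned int sizes[] = { 64, 256, 1024, 4096, 8192 };
	for (int i = 0; i < 5; i++)
		printf("size %4u -> cpu_partial %d\n",
		       sizes[i], cpu_partial_for(sizes[i]));
	return 0;
}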
81819f0fc SLUB core |
2943 2944 |
/* |
81819f0fc SLUB core |
2945 2946 2947 2948 2949 2950 2951 |
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->objsize;
}
EXPORT_SYMBOL(kmem_cache_size);
33b12c381 slub: Dump list o... |
2952 2953 2954 2955 2956 2957 |
static void list_slab_objects(struct kmem_cache *s, struct page *page,
							const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
	void *addr = page_address(page);
	void *p;
a5dd5c117 slub: Fix signedn... |
2958 2959 |
unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * sizeof(long), GFP_ATOMIC); |
bbd7d57bf slub: Potential s... |
2960 2961 |
	if (!map)
		return;
33b12c381 slub: Dump list o... |
2962 2963 |
	slab_err(s, page, "%s", text);
	slab_lock(page);
33b12c381 slub: Dump list o... |
2964 |
|
5f80b13ae slub: get_map() f... |
2965 |
get_map(s, page, map); |
33b12c381 slub: Dump list o... |
2966 2967 2968 2969 2970 2971 2972 2973 2974 2975 |
	for_each_object(p, s, addr, page->objects) {
		if (!test_bit(slab_index(p, s, addr), map)) {
			printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
							p, p - addr);
			print_tracking(s, p);
		}
	}
	slab_unlock(page);
bbd7d57bf slub: Potential s... |
2976 |
kfree(map); |
33b12c381 slub: Dump list o... |
2977 2978 |
#endif
}
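list_slab_objects() works by marking every object reachable from the freelist in a bitmap and reporting the holes, i.e. objects still in use. A small userspace sketch of that mark-and-sweep over an array-backed slab (the embedded next pointer at offset 0 is an assumption mirroring SLUB's default free-pointer placement):

#include <stdio.h>

#define NOBJ	8
#define OBJ_SZ	32

static unsigned char slab[NOBJ * OBJ_SZ];

/* Free pointer lives at offset 0 of each free object (SLUB default). */
static void *get_freepointer(void *obj) { return *(void **)obj; }

int main(void)
{
	unsigned char map[NOBJ] = { 0 };

	/* Build a freelist of objects 1, 3, 4; 0, 2, 5, 6, 7 stay "live". */
	void *head = &slab[1 * OBJ_SZ];
	*(void **)&slab[1 * OBJ_SZ] = &slab[3 * OBJ_SZ];
	*(void **)&slab[3 * OBJ_SZ] = &slab[4 * OBJ_SZ];
	*(void **)&slab[4 * OBJ_SZ] = NULL;

	for (void *p = head; p; p = get_freepointer(p))
		map[((unsigned char *)p - slab) / OBJ_SZ] = 1;

	for (int i = 0; i < NOBJ; i++)
		if (!map[i])
			printf("INFO: Object %d @offset=%d still allocated\n",
			       i, i * OBJ_SZ);
	return 0;
}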
81819f0fc SLUB core |
2979 |
/* |
599870b17 slub: free_list()... |
2980 |
* Attempt to free all partial slabs on a node. |
69cb8e6b7 slub: free slabs ... |
2981 2982 |
 * This is called from kmem_cache_close(). We must be the last thread
 * using the cache and therefore we do not need to lock anymore.
81819f0fc SLUB core |
2983 |
*/ |
599870b17 slub: free_list()... |
2984 |
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) |
81819f0fc SLUB core |
2985 |
{ |
81819f0fc SLUB core |
2986 |
struct page *page, *h; |
33b12c381 slub: Dump list o... |
2987 |
list_for_each_entry_safe(page, h, &n->partial, lru) { |
81819f0fc SLUB core |
2988 |
if (!page->inuse) { |
5cc6eee8a slub: explicit li... |
2989 |
remove_partial(n, page); |
81819f0fc SLUB core |
2990 |
discard_slab(s, page); |
33b12c381 slub: Dump list o... |
2991 2992 2993 |
		} else {
			list_slab_objects(s, page,
				"Objects remaining on kmem_cache_close()");
599870b17 slub: free_list()... |
2994 |
} |
33b12c381 slub: Dump list o... |
2995 |
} |
81819f0fc SLUB core |
2996 2997 2998 |
} /* |
672bba3a4 SLUB: update comm... |
2999 |
* Release all resources used by a slab cache. |
81819f0fc SLUB core |
3000 |
*/ |
0c7100132 SLUB: add some mo... |
3001 |
static inline int kmem_cache_close(struct kmem_cache *s) |
81819f0fc SLUB core |
3002 3003 3004 3005 |
{
	int node;

	flush_all(s);
9dfc6e68b SLUB: Use this_cp... |
3006 |
free_percpu(s->cpu_slab); |
81819f0fc SLUB core |
3007 |
/* Attempt to free all objects */ |
f64dc58c5 Memoryless nodes:... |
3008 |
for_each_node_state(node, N_NORMAL_MEMORY) { |
81819f0fc SLUB core |
3009 |
struct kmem_cache_node *n = get_node(s, node); |
599870b17 slub: free_list()... |
3010 3011 |
		free_partial(s, n);
		if (n->nr_partial || slabs_node(s, node))
81819f0fc SLUB core |
3012 3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 |
			return 1;
	}
	free_kmem_cache_nodes(s);
	return 0;
}

/*
 * Close a cache and release the kmem_cache structure
 * (must be used for caches created using kmem_cache_create)
 */
void kmem_cache_destroy(struct kmem_cache *s)
{
	down_write(&slub_lock);
	s->refcount--;
	if (!s->refcount) {
		list_del(&s->list);
69cb8e6b7 slub: free slabs ... |
3028 |
up_write(&slub_lock); |
d629d8195 slub: improve kme... |
3029 3030 3031 3032 3033 3034 |
		if (kmem_cache_close(s)) {
			printk(KERN_ERR "SLUB %s: %s called for cache that "
				"still has objects.\n", s->name, __func__);
			dump_stack();
		}
d76b1590e slub: Fix kmem_ca... |
3035 3036 |
		if (s->flags & SLAB_DESTROY_BY_RCU)
			rcu_barrier();
81819f0fc SLUB core |
3037 |
sysfs_slab_remove(s); |
69cb8e6b7 slub: free slabs ... |
3038 3039 |
	} else
		up_write(&slub_lock);
81819f0fc SLUB core |
3040 3041 3042 3043 3044 3045 |
}
EXPORT_SYMBOL(kmem_cache_destroy);

/********************************************************************
 *		Kmalloc subsystem
 *******************************************************************/
51df11428 slub: Dynamically... |
3046 |
struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; |
81819f0fc SLUB core |
3047 |
EXPORT_SYMBOL(kmalloc_caches); |
51df11428 slub: Dynamically... |
3048 |
static struct kmem_cache *kmem_cache; |
55136592f slub: Remove dyna... |
3049 |
#ifdef CONFIG_ZONE_DMA |
51df11428 slub: Dynamically... |
3050 |
static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT]; |
55136592f slub: Remove dyna... |
3051 |
#endif |
81819f0fc SLUB core |
3052 3053 |
static int __init setup_slub_min_order(char *str)
{
064287807 SLUB: Fix coding ... |
3054 |
get_option(&str, &slub_min_order); |
81819f0fc SLUB core |
3055 3056 3057 3058 3059 3060 3061 3062 |
	return 1;
}

__setup("slub_min_order=", setup_slub_min_order);

static int __init setup_slub_max_order(char *str)
{
064287807 SLUB: Fix coding ... |
3063 |
get_option(&str, &slub_max_order); |
818cf5909 slub: enforce MAX... |
3064 |
slub_max_order = min(slub_max_order, MAX_ORDER - 1); |
81819f0fc SLUB core |
3065 3066 3067 3068 3069 3070 3071 3072 |
	return 1;
}

__setup("slub_max_order=", setup_slub_max_order);

static int __init setup_slub_min_objects(char *str)
{
064287807 SLUB: Fix coding ... |
3073 |
get_option(&str, &slub_min_objects); |
81819f0fc SLUB core |
3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 |
	return 1;
}

__setup("slub_min_objects=", setup_slub_min_objects);

static int __init setup_slub_nomerge(char *str)
{
	slub_nomerge = 1;
	return 1;
}

__setup("slub_nomerge", setup_slub_nomerge);
51df11428 slub: Dynamically... |
3087 3088 |
static struct kmem_cache *__init create_kmalloc_cache(const char *name, int size, unsigned int flags) |
81819f0fc SLUB core |
3089 |
{ |
51df11428 slub: Dynamically... |
3090 3091 3092 |
	struct kmem_cache *s;

	s = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
83b519e8b slab: setup alloc... |
3093 3094 3095 3096 |
	/*
	 * This function is called with IRQs disabled during early-boot on
	 * single CPU so there's no need to take slub_lock here.
	 */
55136592f slub: Remove dyna... |
3097 |
if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN, |
319d1e240 slub: Drop fallba... |
3098 |
flags, NULL)) |
81819f0fc SLUB core |
3099 3100 3101 |
		goto panic;

	list_add(&s->list, &slab_caches);
51df11428 slub: Dynamically... |
3102 |
return s; |
81819f0fc SLUB core |
3103 3104 3105 3106 |
panic:
	panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
51df11428 slub: Dynamically... |
3107 |
return NULL; |
81819f0fc SLUB core |
3108 |
} |
f1b263393 SLUB: faster more... |
3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 |
/*
 * Conversion table for small slabs sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};
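So a 72..96 byte request lands in index 1 (the 96-byte cache) and anything above 192 falls through to fls(size - 1), i.e. the next power of two. A userspace sketch of the lookup get_slab() performs below (fls_() is a portable stand-in for the kernel's fls()):

#include <stdio.h>

static const signed char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1,
	7, 7, 7, 7, 2, 2, 2, 2, 2, 2, 2, 2
};

/* fls(x): index of the highest set bit, 1-based; 0 for x == 0. */
static int fls_(unsigned long x)
{
	int r = 0;
	while (x) { r++; x >>= 1; }
	return r;
}

static int kmalloc_index(unsigned long size)
{
	if (size <= 192)
		return size ? size_index[(size - 1) / 8] : -1;
	return fls_(size - 1);
}

int main(void)
{
	unsigned long sizes[] = { 8, 33, 96, 192, 193, 4096 };
	for (int i = 0; i < 6; i++)
		printf("kmalloc(%4lu) -> cache index %d\n",
		       sizes[i], kmalloc_index(sizes[i]));
	return 0;
}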
acdfcd04d SLUB: fix ARCH_KM... |
3141 3142 3143 3144 |
static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}
81819f0fc SLUB core |
3145 3146 |
static struct kmem_cache *get_slab(size_t size, gfp_t flags)
{
f1b263393 SLUB: faster more... |
3147 |
int index; |
81819f0fc SLUB core |
3148 |
|
f1b263393 SLUB: faster more... |
3149 3150 3151 |
	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;
81819f0fc SLUB core |
3152 |
|
acdfcd04d SLUB: fix ARCH_KM... |
3153 |
index = size_index[size_index_elem(size)]; |
aadb4bc4a SLUB: direct pass... |
3154 |
} else |
f1b263393 SLUB: faster more... |
3155 |
index = fls(size - 1); |
81819f0fc SLUB core |
3156 3157 |
#ifdef CONFIG_ZONE_DMA |
f1b263393 SLUB: faster more... |
3158 |
if (unlikely((flags & SLUB_DMA))) |
51df11428 slub: Dynamically... |
3159 |
return kmalloc_dma_caches[index]; |
f1b263393 SLUB: faster more... |
3160 |
|
81819f0fc SLUB core |
3161 |
#endif |
51df11428 slub: Dynamically... |
3162 |
return kmalloc_caches[index]; |
81819f0fc SLUB core |
3163 3164 3165 3166 |
}

void *__kmalloc(size_t size, gfp_t flags)
{
aadb4bc4a SLUB: direct pass... |
3167 |
struct kmem_cache *s; |
5b882be4e kmemtrace: SLUB h... |
3168 |
void *ret; |
81819f0fc SLUB core |
3169 |
|
ffadd4d0f SLUB: Introduce a... |
3170 |
if (unlikely(size > SLUB_MAX_SIZE)) |
eada35efc slub: kmalloc pag... |
3171 |
return kmalloc_large(size, flags); |
aadb4bc4a SLUB: direct pass... |
3172 3173 3174 3175 |
	s = get_slab(size, flags);

	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132 Slab allocators: ... |
3176 |
return s; |
2154a3363 slub: Use a const... |
3177 |
ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_); |
5b882be4e kmemtrace: SLUB h... |
3178 |
|
ca2b84cb3 kmemtrace: use tr... |
3179 |
trace_kmalloc(_RET_IP_, ret, size, s->size, flags); |
5b882be4e kmemtrace: SLUB h... |
3180 3181 |
return ret; |
81819f0fc SLUB core |
3182 3183 |
}
EXPORT_SYMBOL(__kmalloc);
5d1f57e4d slub: Move NUMA-r... |
3184 |
#ifdef CONFIG_NUMA |
f619cfe1b slub: Add kmalloc... |
3185 3186 |
static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
b1eeab676 kmemcheck: add ho... |
3187 |
struct page *page; |
e4f7c0b44 kmemleak: Trace t... |
3188 |
void *ptr = NULL; |
f619cfe1b slub: Add kmalloc... |
3189 |
|
b1eeab676 kmemcheck: add ho... |
3190 3191 |
	flags |= __GFP_COMP | __GFP_NOTRACK;
	page = alloc_pages_node(node, flags, get_order(size));
f619cfe1b slub: Add kmalloc... |
3192 |
if (page) |
e4f7c0b44 kmemleak: Trace t... |
3193 3194 3195 3196 |
		ptr = page_address(page);

	kmemleak_alloc(ptr, size, 1, flags);
	return ptr;
f619cfe1b slub: Add kmalloc... |
3197 |
} |
81819f0fc SLUB core |
3198 3199 |
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
aadb4bc4a SLUB: direct pass... |
3200 |
struct kmem_cache *s; |
5b882be4e kmemtrace: SLUB h... |
3201 |
void *ret; |
81819f0fc SLUB core |
3202 |
|
057685cf5 Merge branch 'for... |
3203 |
if (unlikely(size > SLUB_MAX_SIZE)) { |
5b882be4e kmemtrace: SLUB h... |
3204 |
ret = kmalloc_large_node(size, flags, node); |
ca2b84cb3 kmemtrace: use tr... |
3205 3206 3207 |
trace_kmalloc_node(_RET_IP_, ret, size, PAGE_SIZE << get_order(size), flags, node); |
5b882be4e kmemtrace: SLUB h... |
3208 3209 3210 |
		return ret;
	}
aadb4bc4a SLUB: direct pass... |
3211 3212 3213 3214 |
	s = get_slab(size, flags);

	if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f9132 Slab allocators: ... |
3215 |
return s; |
5b882be4e kmemtrace: SLUB h... |
3216 |
ret = slab_alloc(s, flags, node, _RET_IP_); |
ca2b84cb3 kmemtrace: use tr... |
3217 |
trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node); |
5b882be4e kmemtrace: SLUB h... |
3218 3219 |
return ret; |
81819f0fc SLUB core |
3220 3221 3222 3223 3224 3225 |
}
EXPORT_SYMBOL(__kmalloc_node);
#endif

size_t ksize(const void *object)
{
272c1d21d SLUB: return ZERO... |
3226 |
struct page *page; |
81819f0fc SLUB core |
3227 |
|
ef8b4520b Slab allocators: ... |
3228 |
if (unlikely(object == ZERO_SIZE_PTR)) |
272c1d21d SLUB: return ZERO... |
3229 |
return 0; |
294a80a8e SLUB's ksize() fa... |
3230 |
page = virt_to_head_page(object); |
294a80a8e SLUB's ksize() fa... |
3231 |
|
76994412f slub: ksize() abu... |
3232 3233 |
	if (unlikely(!PageSlab(page))) {
		WARN_ON(!PageCompound(page));
294a80a8e SLUB's ksize() fa... |
3234 |
return PAGE_SIZE << compound_order(page); |
76994412f slub: ksize() abu... |
3235 |
} |
81819f0fc SLUB core |
3236 |
|
b3d41885d slub: fix kmemche... |
3237 |
return slab_ksize(page->slab); |
81819f0fc SLUB core |
3238 |
} |
b1aabecd5 mm: Export symbol... |
3239 |
EXPORT_SYMBOL(ksize); |
81819f0fc SLUB core |
3240 |
|
d18a90dd8 slub: Add method ... |
3241 3242 3243 3244 3245 3246 3247 3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258 3259 3260 3261 3262 3263 3264 3265 3266 3267 3268 3269 3270 3271 3272 3273 3274 3275 |
#ifdef CONFIG_SLUB_DEBUG
bool verify_mem_not_deleted(const void *x)
{
	struct page *page;
	void *object = (void *)x;
	unsigned long flags;
	bool rv;

	if (unlikely(ZERO_OR_NULL_PTR(x)))
		return false;

	local_irq_save(flags);

	page = virt_to_head_page(x);
	if (unlikely(!PageSlab(page))) {
		/* maybe it was from stack? */
		rv = true;
		goto out_unlock;
	}

	slab_lock(page);
	if (on_freelist(page->slab, page, object)) {
		object_err(page->slab, page, object, "Object is on free-list");
		rv = false;
	} else {
		rv = true;
	}
	slab_unlock(page);

out_unlock:
	local_irq_restore(flags);
	return rv;
}
EXPORT_SYMBOL(verify_mem_not_deleted);
#endif
81819f0fc SLUB core |
3276 3277 |
void kfree(const void *x)
{
81819f0fc SLUB core |
3278 |
struct page *page; |
5bb983b0c SLUB: Deal with a... |
3279 |
void *object = (void *)x; |
81819f0fc SLUB core |
3280 |
|
2121db74b kmemtrace: trace ... |
3281 |
trace_kfree(_RET_IP_, x); |
2408c5503 {slub, slob}: use... |
3282 |
if (unlikely(ZERO_OR_NULL_PTR(x))) |
81819f0fc SLUB core |
3283 |
return; |
b49af68ff Add virt_to_head_... |
3284 |
page = virt_to_head_page(x); |
aadb4bc4a SLUB: direct pass... |
3285 |
if (unlikely(!PageSlab(page))) { |
0937502af slub: Add check f... |
3286 |
BUG_ON(!PageCompound(page)); |
e4f7c0b44 kmemleak: Trace t... |
3287 |
kmemleak_free(x); |
aadb4bc4a SLUB: direct pass... |
3288 3289 3290 |
		put_page(page);
		return;
	}
ce71e27c6 SLUB: Replace __b... |
3291 |
slab_free(page->slab, page, object, _RET_IP_); |
81819f0fc SLUB core |
3292 3293 |
}
EXPORT_SYMBOL(kfree);
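Like ksize() above, kfree() keys everything off the head page: slab-backed objects are routed to slab_free(), anything else must be a compound page from the large-kmalloc path and goes straight back to the page allocator. A toy userspace sketch of that dispatch shape — the page structure and predicates here are stand-ins, not the kernel's:

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in for the struct page state kfree() consults. */
struct toy_page {
	bool slab;		/* PageSlab() */
	bool compound;		/* PageCompound() */
	const char *cache_name;
};

static void toy_kfree(const void *x, struct toy_page *page)
{
	if (!x)			/* ZERO_OR_NULL_PTR() short-circuit */
		return;
	if (!page->slab) {
		/* Large kmalloc: pages came straight from the page
		 * allocator, so they go straight back. */
		printf("put_page (compound=%d)\n", page->compound);
		return;
	}
	printf("slab_free into cache %s\n", page->cache_name);
}

int main(void)
{
	struct toy_page slab_page = { true, false, "kmalloc-192" };
	struct toy_page big_page = { false, true, NULL };
	int obj;

	toy_kfree(&obj, &slab_page);	/* object from a slab cache */
	toy_kfree(&obj, &big_page);	/* object from kmalloc_large() */
	return 0;
}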
2086d26a0 SLUB: Free slabs ... |
3294 |
/* |
672bba3a4 SLUB: update comm... |
3295 3296 3297 3298 3299 3300 3301 3302 |
 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
 * the remaining slabs by the number of items in use. The slabs with the
 * most items in use come first. New allocations will then fill those up
 * and thus they can be removed from the partial lists.
 *
 * The slabs with the least items are placed last. This results in them
 * being allocated from last increasing the chance that the last objects
 * are freed in them.
2086d26a0 SLUB: Free slabs ... |
3303 3304 3305 3306 3307 3308 3309 3310 |
 */
int kmem_cache_shrink(struct kmem_cache *s)
{
	int node;
	int i;
	struct kmem_cache_node *n;
	struct page *page;
	struct page *t;
205ab99dd slub: Update stat... |
3311 |
int objects = oo_objects(s->max); |
2086d26a0 SLUB: Free slabs ... |
3312 |
struct list_head *slabs_by_inuse = |
834f3d119 slub: Add kmem_ca... |
3313 |
kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL); |
2086d26a0 SLUB: Free slabs ... |
3314 3315 3316 3317 3318 3319 |
	unsigned long flags;

	if (!slabs_by_inuse)
		return -ENOMEM;

	flush_all(s);
f64dc58c5 Memoryless nodes:... |
3320 |
for_each_node_state(node, N_NORMAL_MEMORY) { |
2086d26a0 SLUB: Free slabs ... |
3321 3322 3323 3324 |
		n = get_node(s, node);

		if (!n->nr_partial)
			continue;
834f3d119 slub: Add kmem_ca... |
3325 |
for (i = 0; i < objects; i++) |
2086d26a0 SLUB: Free slabs ... |
3326 3327 3328 3329 3330 |
			INIT_LIST_HEAD(slabs_by_inuse + i);

		spin_lock_irqsave(&n->list_lock, flags);

		/*
672bba3a4 SLUB: update comm... |
3331 |
* Build lists indexed by the items in use in each slab. |
2086d26a0 SLUB: Free slabs ... |
3332 |
* |
672bba3a4 SLUB: update comm... |
3333 3334 |
		 * Note that concurrent frees may occur while we hold the
		 * list_lock. page->inuse here is the upper limit.
2086d26a0 SLUB: Free slabs ... |
3335 3336 |
		 */
		list_for_each_entry_safe(page, t, &n->partial, lru) {
69cb8e6b7 slub: free slabs ... |
3337 3338 3339 |
			list_move(&page->lru, slabs_by_inuse + page->inuse);
			if (!page->inuse)
				n->nr_partial--;
2086d26a0 SLUB: Free slabs ... |
3340 |
} |
2086d26a0 SLUB: Free slabs ... |
3341 |
/* |
672bba3a4 SLUB: update comm... |
3342 3343 |
		 * Rebuild the partial list with the slabs filled up most
		 * first and the least used slabs at the end.
2086d26a0 SLUB: Free slabs ... |
3344 |
*/ |
69cb8e6b7 slub: free slabs ... |
3345 |
for (i = objects - 1; i > 0; i--) |
2086d26a0 SLUB: Free slabs ... |
3346 |
list_splice(slabs_by_inuse + i, n->partial.prev); |
2086d26a0 SLUB: Free slabs ... |
3347 |
spin_unlock_irqrestore(&n->list_lock, flags); |
69cb8e6b7 slub: free slabs ... |
3348 3349 3350 3351 |
		/* Release empty slabs */
		list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
			discard_slab(s, page);
2086d26a0 SLUB: Free slabs ... |
3352 3353 3354 3355 3356 3357 |
	}

	kfree(slabs_by_inuse);
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);
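The sort above is a single-pass bucket sort: one list head per possible inuse value, a move of each slab into its bucket, then a back-to-front splice, with bucket 0 (the now-empty slabs) discarded. A compact userspace sketch of the same idea over plain arrays:

#include <stdio.h>

#define MAX_OBJECTS	8	/* objects per slab, i.e. number of buckets */
#define NSLABS		6

int main(void)
{
	/* inuse counts of six partial slabs; 0 means the slab is empty. */
	int inuse[NSLABS] = { 3, 0, 7, 1, 0, 3 };
	int buckets[MAX_OBJECTS][NSLABS];
	int bucket_len[MAX_OBJECTS] = { 0 };

	for (int i = 0; i < NSLABS; i++)
		buckets[inuse[i]][bucket_len[inuse[i]]++] = i;

	/* Rebuild: fullest slabs first, like the reverse splice loop. */
	printf("new partial order:");
	for (int b = MAX_OBJECTS - 1; b > 0; b--)
		for (int j = 0; j < bucket_len[b]; j++)
			printf(" slab%d(inuse=%d)", buckets[b][j], b);
	printf("\nfreed:");
	for (int j = 0; j < bucket_len[0]; j++)
		printf(" slab%d", buckets[0][j]);
	printf("\n");
	return 0;
}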
92a5bbc11 SLUB: Fix memory ... |
3358 |
#if defined(CONFIG_MEMORY_HOTPLUG) |
b9049e234 memory hotplug: m... |
3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393 |
static int slab_mem_going_offline_callback(void *arg)
{
	struct kmem_cache *s;

	down_read(&slub_lock);
	list_for_each_entry(s, &slab_caches, list)
		kmem_cache_shrink(s);
	up_read(&slub_lock);

	return 0;
}

static void slab_mem_offline_callback(void *arg)
{
	struct kmem_cache_node *n;
	struct kmem_cache *s;
	struct memory_notify *marg = arg;
	int offline_node;

	offline_node = marg->status_change_nid;

	/*
	 * If the node still has available memory, we still need the
	 * kmem_cache_node for it. Nothing to tear down.
	 */
	if (offline_node < 0)
		return;

	down_read(&slub_lock);
	list_for_each_entry(s, &slab_caches, list) {
		n = get_node(s, offline_node);
		if (n) {
			/*
			 * if n->nr_slabs > 0, slabs still exist on the node
			 * that is going down. We were unable to free them,
c9404c9c3 Fix misspelling o... |
3394 |
* and offline_pages() function shouldn't call this |
b9049e234 memory hotplug: m... |
3395 3396 |
			 * callback. So, we must fail.
			 */
0f389ec63 slub: No need for... |
3397 |
BUG_ON(slabs_node(s, offline_node)); |
b9049e234 memory hotplug: m... |
3398 3399 |
s->node[offline_node] = NULL; |
8de66a0c0 slub: Fix up miss... |
3400 |
kmem_cache_free(kmem_cache_node, n); |
b9049e234 memory hotplug: m... |
3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415 3416 3417 3418 3419 3420 3421 |
		}
	}
	up_read(&slub_lock);
}

static int slab_mem_going_online_callback(void *arg)
{
	struct kmem_cache_node *n;
	struct kmem_cache *s;
	struct memory_notify *marg = arg;
	int nid = marg->status_change_nid;
	int ret = 0;

	/*
	 * If the node's memory is already available, then kmem_cache_node is
	 * already created. Nothing to do.
	 */
	if (nid < 0)
		return 0;

	/*
0121c619d slub: Whitespace ... |
3422 |
* We are bringing a node online. No memory is available yet. We must |
b9049e234 memory hotplug: m... |
3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 |
	 * allocate a kmem_cache_node structure in order to bring the node
	 * online.
	 */
	down_read(&slub_lock);
	list_for_each_entry(s, &slab_caches, list) {
		/*
		 * XXX: kmem_cache_alloc_node will fallback to other nodes
		 * since memory is not yet available from the node that
		 * is brought up.
		 */
8de66a0c0 slub: Fix up miss... |
3433 |
n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL); |
b9049e234 memory hotplug: m... |
3434 3435 3436 3437 |
		if (!n) {
			ret = -ENOMEM;
			goto out;
		}
5595cffc8 SLUB: dynamic per... |
3438 |
init_kmem_cache_node(n, s); |
b9049e234 memory hotplug: m... |
3439 3440 3441 3442 3443 3444 3445 3446 3447 3448 3449 3450 3451 3452 3453 3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 |
		s->node[nid] = n;
	}
out:
	up_read(&slub_lock);
	return ret;
}

static int slab_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		ret = slab_mem_going_online_callback(arg);
		break;
	case MEM_GOING_OFFLINE:
		ret = slab_mem_going_offline_callback(arg);
		break;
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
		slab_mem_offline_callback(arg);
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
dc19f9db3 memcg: memory hot... |
3466 3467 3468 3469 |
	if (ret)
		ret = notifier_from_errno(ret);
	else
		ret = NOTIFY_OK;
b9049e234 memory hotplug: m... |
3470 3471 3472 3473 |
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
81819f0fc SLUB core |
3474 3475 3476 |
/********************************************************************
 *			Basic setup of slabs
 *******************************************************************/
51df11428 slub: Dynamically... |
3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 |
/*
 * Used for early kmem_cache structures that were allocated using
 * the page allocator
 */

static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
{
	int node;

	list_add(&s->list, &slab_caches);
	s->refcount = -1;

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);
		struct page *p;

		if (n) {
			list_for_each_entry(p, &n->partial, lru)
				p->slab = s;
607bf324a slub: Fix a typo ... |
3496 |
#ifdef CONFIG_SLUB_DEBUG |
51df11428 slub: Dynamically... |
3497 3498 3499 3500 3501 3502 |
			list_for_each_entry(p, &n->full, lru)
				p->slab = s;
#endif
		}
	}
}
81819f0fc SLUB core |
3503 3504 3505 |
void __init kmem_cache_init(void)
{
	int i;
4b356be01 SLUB: minimum ali... |
3506 |
int caches = 0; |
51df11428 slub: Dynamically... |
3507 3508 |
	struct kmem_cache *temp_kmem_cache;
	int order;
51df11428 slub: Dynamically... |
3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 |
	struct kmem_cache *temp_kmem_cache_node;
	unsigned long kmalloc_size;

	kmem_size = offsetof(struct kmem_cache, node) +
				nr_node_ids * sizeof(struct kmem_cache_node *);

	/* Allocate two kmem_caches from the page allocator */
	kmalloc_size = ALIGN(kmem_size, cache_line_size());
	order = get_order(2 * kmalloc_size);
	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
81819f0fc SLUB core |
3519 3520 |
	/*
	 * Must first have the slab cache available for the allocations of the
672bba3a4 SLUB: update comm... |
3521 |
* struct kmem_cache_node's. There is special bootstrap code in |
81819f0fc SLUB core |
3522 3523 |
	 * kmem_cache_open for slab_state == DOWN.
	 */
51df11428 slub: Dynamically... |
3524 3525 3526 3527 3528 |
	kmem_cache_node = (void *)kmem_cache + kmalloc_size;

	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
		sizeof(struct kmem_cache_node),
		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
b9049e234 memory hotplug: m... |
3529 |
|
0c40ba4fd ipc: define the s... |
3530 |
hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); |
81819f0fc SLUB core |
3531 3532 3533 |
	/* Able to allocate the per node structures */
	slab_state = PARTIAL;
51df11428 slub: Dynamically... |
3534 3535 3536 3537 3538 |
	temp_kmem_cache = kmem_cache;
	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
81819f0fc SLUB core |
3539 |
|
51df11428 slub: Dynamically... |
3540 3541 3542 3543 3544 3545 |
	/*
	 * Allocate kmem_cache_node properly from the kmem_cache slab.
	 * kmem_cache_node is separately allocated so no need to
	 * update any list pointers.
	 */
	temp_kmem_cache_node = kmem_cache_node;
81819f0fc SLUB core |
3546 |
|
51df11428 slub: Dynamically... |
3547 3548 3549 3550 3551 3552 |
	kmem_cache_node = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
	memcpy(kmem_cache_node, temp_kmem_cache_node, kmem_size);

	kmem_cache_bootstrap_fixup(kmem_cache_node);

	caches++;
51df11428 slub: Dynamically... |
3553 3554 3555 3556 3557 3558 |
	kmem_cache_bootstrap_fixup(kmem_cache);
	caches++;

	/* Free temporary boot structure */
	free_pages((unsigned long)temp_kmem_cache, order);

	/* Now we can use the kmem_cache to allocate kmalloc slabs */
f1b263393 SLUB: faster more... |
3559 3560 3561 3562 |
	/*
	 * Patch up the size_index table if we have strange large alignment
	 * requirements for the kmalloc array. This is only the case for
6446faa2f slub: Fix up comm... |
3563 |
* MIPS it seems. The standard arches will not generate any code here. |
f1b263393 SLUB: faster more... |
3564 3565 3566 3567 3568 3569 3570 3571 3572 |
	 *
	 * Largest permitted alignment is 256 bytes due to the way we
	 * handle the index determination for the smaller caches.
	 *
	 * Make sure that nothing crazy happens if someone starts tinkering
	 * around with ARCH_KMALLOC_MINALIGN
	 */
	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
acdfcd04d SLUB: fix ARCH_KM... |
3573 3574 3575 3576 3577 3578 |
	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}
f1b263393 SLUB: faster more... |
3579 |
|
acdfcd04d SLUB: fix ARCH_KM... |
3580 3581 3582 3583 3584 3585 3586 3587 |
	if (KMALLOC_MIN_SIZE == 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;
	} else if (KMALLOC_MIN_SIZE == 128) {
41d54d3bf slub: Do not use ... |
3588 3589 3590 3591 3592 3593 |
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
acdfcd04d SLUB: fix ARCH_KM... |
3594 |
size_index[size_index_elem(i)] = 8; |
41d54d3bf slub: Do not use ... |
3595 |
} |
51df11428 slub: Dynamically... |
3596 3597 3598 3599 3600 3601 3602 3603 3604 3605 3606 3607 3608 3609 3610 |
	/* Caches that are not of the two-to-the-power-of size */
	if (KMALLOC_MIN_SIZE <= 32) {
		kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
		caches++;
	}

	if (KMALLOC_MIN_SIZE <= 64) {
		kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
		caches++;
	}

	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
		kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
		caches++;
	}
81819f0fc SLUB core |
3611 3612 3613 |
	slab_state = UP;

	/* Provide the correct kmalloc names now that the caches are up */
84c1cf624 SLUB: Fix merged ... |
3614 3615 3616 3617 3618 3619 3620 3621 3622 |
	if (KMALLOC_MIN_SIZE <= 32) {
		kmalloc_caches[1]->name = kstrdup(kmalloc_caches[1]->name, GFP_NOWAIT);
		BUG_ON(!kmalloc_caches[1]->name);
	}

	if (KMALLOC_MIN_SIZE <= 64) {
		kmalloc_caches[2]->name = kstrdup(kmalloc_caches[2]->name, GFP_NOWAIT);
		BUG_ON(!kmalloc_caches[2]->name);
	}
d7278bd7d slub: Check kaspr... |
3623 3624 3625 3626 |
	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);

		BUG_ON(!s);
51df11428 slub: Dynamically... |
3627 |
kmalloc_caches[i]->name = s; |
d7278bd7d slub: Check kaspr... |
3628 |
} |
81819f0fc SLUB core |
3629 3630 3631 |
#ifdef CONFIG_SMP
	register_cpu_notifier(&slab_notifier);
9dfc6e68b SLUB: Use this_cp... |
3632 |
#endif |
81819f0fc SLUB core |
3633 |
|
55136592f slub: Remove dyna... |
3634 |
#ifdef CONFIG_ZONE_DMA |
51df11428 slub: Dynamically... |
3635 3636 |
	for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
		struct kmem_cache *s = kmalloc_caches[i];
55136592f slub: Remove dyna... |
3637 |
|
51df11428 slub: Dynamically... |
3638 |
if (s && s->size) { |
55136592f slub: Remove dyna... |
3639 3640 3641 3642 |
			char *name = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", s->objsize);

			BUG_ON(!name);
51df11428 slub: Dynamically... |
3643 3644 |
kmalloc_dma_caches[i] = create_kmalloc_cache(name, s->objsize, SLAB_CACHE_DMA); |
55136592f slub: Remove dyna... |
3645 3646 3647 |
		}
	}
#endif
3adbefee6 SLUB: fix checkpa... |
3648 3649 |
printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d," |
4b356be01 SLUB: minimum ali... |
3650 3651 3652 |
" CPUs=%d, Nodes=%d ", caches, cache_line_size(), |
81819f0fc SLUB core |
3653 3654 3655 |
		slub_min_order, slub_max_order, slub_min_objects,
		nr_cpu_ids, nr_node_ids);
}
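When ARCH_KMALLOC_MINALIGN pushes KMALLOC_MIN_SIZE to 64 or 128, the patch-up loops above retarget the table slots of the then-unusable 96- or 192-byte caches. A userspace sketch, reusing the table from the earlier example, showing kmalloc(96) being redirected to the 128-byte cache once the minimum is 64 (the two constants are assumptions for illustration):

#include <stdio.h>

static signed char size_index[24] = {
	3, 4, 5, 5, 6, 6, 6, 6, 1, 1, 1, 1,
	7, 7, 7, 7, 2, 2, 2, 2, 2, 2, 2, 2
};

static int size_index_elem(unsigned long bytes)
{
	return (bytes - 1) / 8;
}

int main(void)
{
	int kmalloc_min_size = 64;	/* assumed arch constraint */
	int kmalloc_shift_low = 6;	/* log2(64) */

	/* Sizes below the minimum all use the smallest cache. */
	for (int i = 8; i < kmalloc_min_size; i += 8)
		size_index[size_index_elem(i)] = kmalloc_shift_low;

	/* With 64-byte minimum alignment the 96-byte cache is unusable. */
	if (kmalloc_min_size == 64)
		for (int i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	printf("kmalloc(96) -> index %d (128-byte cache)\n",
	       size_index[size_index_elem(96)]);
	return 0;
}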
7e85ee0c1 slab,slub: don't ... |
3656 3657 |
void __init kmem_cache_init_late(void)
{
7e85ee0c1 slab,slub: don't ... |
3658 |
} |
81819f0fc SLUB core |
3659 3660 3661 3662 3663 3664 3665 |
/*
 * Find a mergeable slab cache
 */
static int slab_unmergeable(struct kmem_cache *s)
{
	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
		return 1;
c59def9f2 Slab allocators: ... |
3666 |
if (s->ctor) |
81819f0fc SLUB core |
3667 |
return 1; |
8ffa68755 SLUB: Fix NUMA / ... |
3668 3669 3670 3671 3672 |
	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;
81819f0fc SLUB core |
3673 3674 3675 3676 |
	return 0;
}

static struct kmem_cache *find_mergeable(size_t size,
ba0268a8b SLUB: accurately ... |
3677 |
size_t align, unsigned long flags, const char *name, |
51cc50685 SL*B: drop kmem c... |
3678 |
void (*ctor)(void *)) |
81819f0fc SLUB core |
3679 |
{ |
5b95a4acf SLUB: use list_fo... |
3680 |
struct kmem_cache *s; |
81819f0fc SLUB core |
3681 3682 3683 |
	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
		return NULL;
c59def9f2 Slab allocators: ... |
3684 |
if (ctor) |
81819f0fc SLUB core |
3685 3686 3687 3688 3689 |
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
ba0268a8b SLUB: accurately ... |
3690 |
flags = kmem_cache_flags(size, flags, name, NULL); |
81819f0fc SLUB core |
3691 |
|
5b95a4acf SLUB: use list_fo... |
3692 |
list_for_each_entry(s, &slab_caches, list) { |
81819f0fc SLUB core |
3693 3694 3695 3696 3697 |
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;
ba0268a8b SLUB: accurately ... |
3698 |
if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME)) |
81819f0fc SLUB core |
3699 3700 3701 3702 3703 |
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
064287807 SLUB: Fix coding ... |
3704 |
if ((s->size & ~(align - 1)) != s->size) |
81819f0fc SLUB core |
3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 |
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		return s;
	}
	return NULL;
}

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
51cc50685 SL*B: drop kmem c... |
3716 |
size_t align, unsigned long flags, void (*ctor)(void *)) |
81819f0fc SLUB core |
3717 3718 |
{
	struct kmem_cache *s;
84c1cf624 SLUB: Fix merged ... |
3719 |
char *n; |
81819f0fc SLUB core |
3720 |
|
fe1ff49d0 mm: kmem_cache_cr... |
3721 3722 |
	if (WARN_ON(!name))
		return NULL;
81819f0fc SLUB core |
3723 |
down_write(&slub_lock); |
ba0268a8b SLUB: accurately ... |
3724 |
s = find_mergeable(size, align, flags, name, ctor); |
81819f0fc SLUB core |
3725 3726 3727 3728 3729 3730 3731 3732 |
	if (s) {
		s->refcount++;
		/*
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
		s->objsize = max(s->objsize, (int)size);
		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
6446faa2f slub: Fix up comm... |
3733 |
|
7b8f3b66d slub: avoid leaki... |
3734 |
if (sysfs_slab_alias(s, name)) { |
7b8f3b66d slub: avoid leaki... |
3735 |
s->refcount--; |
81819f0fc SLUB core |
3736 |
goto err; |
7b8f3b66d slub: avoid leaki... |
3737 |
} |
2bce64858 slub: Allow remov... |
3738 |
up_write(&slub_lock); |
a0e1d1be2 SLUB: Move sysfs ... |
3739 3740 |
		return s;
	}
6446faa2f slub: Fix up comm... |
3741 |
|
84c1cf624 SLUB: Fix merged ... |
3742 3743 3744 |
	n = kstrdup(name, GFP_KERNEL);
	if (!n)
		goto err;
a0e1d1be2 SLUB: Move sysfs ... |
3745 3746 |
	s = kmalloc(kmem_size, GFP_KERNEL);
	if (s) {
84c1cf624 SLUB: Fix merged ... |
3747 |
if (kmem_cache_open(s, n, |
c59def9f2 Slab allocators: ... |
3748 |
size, align, flags, ctor)) { |
81819f0fc SLUB core |
3749 |
list_add(&s->list, &slab_caches); |
7b8f3b66d slub: avoid leaki... |
3750 |
if (sysfs_slab_add(s)) { |
7b8f3b66d slub: avoid leaki... |
3751 |
list_del(&s->list); |
84c1cf624 SLUB: Fix merged ... |
3752 |
kfree(n); |
7b8f3b66d slub: avoid leaki... |
3753 |
kfree(s); |
a0e1d1be2 SLUB: Move sysfs ... |
3754 |
goto err; |
7b8f3b66d slub: avoid leaki... |
3755 |
} |
2bce64858 slub: Allow remov... |
3756 |
up_write(&slub_lock); |
a0e1d1be2 SLUB: Move sysfs ... |
3757 3758 |
			return s;
		}
84c1cf624 SLUB: Fix merged ... |
3759 |
kfree(n); |
a0e1d1be2 SLUB: Move sysfs ... |
3760 |
kfree(s); |
81819f0fc SLUB core |
3761 |
} |
68cee4f11 slub: Fix slub_lo... |
3762 |
err: |
81819f0fc SLUB core |
3763 |
up_write(&slub_lock); |
81819f0fc SLUB core |
3764 |
|
81819f0fc SLUB core |
3765 3766 3767 3768 3769 3770 3771 3772 |
	if (flags & SLAB_PANIC)
		panic("Cannot create slabcache %s\n", name);
	else
		s = NULL;
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);
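Merging hinges on four cheap tests per existing cache: the request must fit, the merge-relevant flags must match, the existing size must already satisfy the requested alignment, and no more than one word may be wasted. A hedged userspace sketch of find_mergeable()'s candidate test (MERGE_SAME is a stand-in for the SLUB_MERGE_SAME mask):

#include <stdio.h>
#include <stdbool.h>

struct cache { const char *name; unsigned long size, flags; };

#define MERGE_SAME 0x1UL	/* stand-in for the SLUB_MERGE_SAME mask */

static bool mergeable(const struct cache *s, unsigned long size,
		      unsigned long align, unsigned long flags)
{
	if (size > s->size)			/* must fit */
		return false;
	if ((flags & MERGE_SAME) != (s->flags & MERGE_SAME))
		return false;			/* debug/poison must match */
	if ((s->size & ~(align - 1)) != s->size)
		return false;			/* alignment compatible */
	if (s->size - size >= sizeof(void *))
		return false;			/* would waste > one word */
	return true;
}

int main(void)
{
	struct cache k192 = { "kmalloc-192", 192, 0 };

	printf("188 bytes -> %s\n",
	       mergeable(&k192, 188, 8, 0) ? "merge" : "new cache");
	printf("100 bytes -> %s\n",
	       mergeable(&k192, 100, 8, 0) ? "merge" : "new cache");
	return 0;
}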
81819f0fc SLUB core |
3773 |
#ifdef CONFIG_SMP |
27390bc33 SLUB: fix locking... |
3774 |
/* |
672bba3a4 SLUB: update comm... |
3775 3776 |
 * Use the cpu notifier to ensure that the cpu slabs are flushed when
 * necessary.
81819f0fc SLUB core |
3777 3778 3779 3780 3781 |
 */
static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
5b95a4acf SLUB: use list_fo... |
3782 3783 |
	struct kmem_cache *s;
	unsigned long flags;
81819f0fc SLUB core |
3784 3785 3786 |
	switch (action) {
	case CPU_UP_CANCELED:
8bb784428 Add suspend-relat... |
3787 |
case CPU_UP_CANCELED_FROZEN: |
81819f0fc SLUB core |
3788 |
case CPU_DEAD: |
8bb784428 Add suspend-relat... |
3789 |
case CPU_DEAD_FROZEN: |
5b95a4acf SLUB: use list_fo... |
3790 3791 3792 3793 3794 3795 3796 |
		down_read(&slub_lock);
		list_for_each_entry(s, &slab_caches, list) {
			local_irq_save(flags);
			__flush_cpu_slab(s, cpu);
			local_irq_restore(flags);
		}
		up_read(&slub_lock);
81819f0fc SLUB core |
3797 3798 3799 3800 3801 3802 |
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
064287807 SLUB: Fix coding ... |
3803 |
static struct notifier_block __cpuinitdata slab_notifier = { |
3adbefee6 SLUB: fix checkpa... |
3804 |
.notifier_call = slab_cpuup_callback |
064287807 SLUB: Fix coding ... |
3805 |
}; |
81819f0fc SLUB core |
3806 3807 |
#endif |
ce71e27c6 SLUB: Replace __b... |
3808 |
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller) |
81819f0fc SLUB core |
3809 |
{ |
aadb4bc4a SLUB: direct pass... |
3810 |
struct kmem_cache *s; |
94b528d05 kmemtrace: SLUB h... |
3811 |
void *ret; |
aadb4bc4a SLUB: direct pass... |
3812 |
|
ffadd4d0f SLUB: Introduce a... |
3813 |
if (unlikely(size > SLUB_MAX_SIZE)) |
eada35efc slub: kmalloc pag... |
3814 |
return kmalloc_large(size, gfpflags); |
aadb4bc4a SLUB: direct pass... |
3815 |
s = get_slab(size, gfpflags); |
81819f0fc SLUB core |
3816 |
|
2408c5503 {slub, slob}: use... |
3817 |
if (unlikely(ZERO_OR_NULL_PTR(s))) |
6cb8f9132 Slab allocators: ... |
3818 |
return s; |
81819f0fc SLUB core |
3819 |
|
2154a3363 slub: Use a const... |
3820 |
ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller); |
94b528d05 kmemtrace: SLUB h... |
3821 |
|
25985edce Fix common misspe... |
3822 |
/* Honor the call site pointer we received. */ |
ca2b84cb3 kmemtrace: use tr... |
3823 |
trace_kmalloc(caller, ret, size, s->size, gfpflags); |
94b528d05 kmemtrace: SLUB h... |
3824 3825 |
return ret; |
81819f0fc SLUB core |
3826 |
} |
5d1f57e4d slub: Move NUMA-r... |
3827 |
#ifdef CONFIG_NUMA |
81819f0fc SLUB core |
3828 |
void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags, |
ce71e27c6 SLUB: Replace __b... |
3829 |
int node, unsigned long caller) |
81819f0fc SLUB core |
3830 |
{ |
aadb4bc4a SLUB: direct pass... |
3831 |
struct kmem_cache *s; |
94b528d05 kmemtrace: SLUB h... |
3832 |
void *ret; |
aadb4bc4a SLUB: direct pass... |
3833 |
|
d3e14aa33 slub: __kmalloc_n... |
3834 3835 3836 3837 3838 3839 3840 3841 3842 |
	if (unlikely(size > SLUB_MAX_SIZE)) {
		ret = kmalloc_large_node(size, gfpflags, node);

		trace_kmalloc_node(caller, ret, size,
			PAGE_SIZE << get_order(size),
			gfpflags, node);

		return ret;
	}
eada35efc slub: kmalloc pag... |
3843 |
|
aadb4bc4a SLUB: direct pass... |
3844 |
s = get_slab(size, gfpflags); |
81819f0fc SLUB core |
3845 |
|
2408c5503 {slub, slob}: use... |
3846 |
if (unlikely(ZERO_OR_NULL_PTR(s))) |
6cb8f9132 Slab allocators: ... |
3847 |
return s; |
81819f0fc SLUB core |
3848 |
|
94b528d05 kmemtrace: SLUB h... |
3849 |
ret = slab_alloc(s, gfpflags, node, caller); |
25985edce Fix common misspe... |
3850 |
/* Honor the call site pointer we received. */ |
ca2b84cb3 kmemtrace: use tr... |
3851 |
trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node); |
94b528d05 kmemtrace: SLUB h... |
3852 3853 |
return ret; |
81819f0fc SLUB core |
3854 |
} |
5d1f57e4d slub: Move NUMA-r... |
3855 |
#endif |
81819f0fc SLUB core |
3856 |
|
ab4d5ed5e slub: Enable sysf... |
3857 |
#ifdef CONFIG_SYSFS |
205ab99dd slub: Update stat... |
3858 3859 3860 3861 3862 3863 3864 3865 3866 |
static int count_inuse(struct page *page)
{
	return page->inuse;
}

static int count_total(struct page *page)
{
	return page->objects;
}
ab4d5ed5e slub: Enable sysf... |
3867 |
#endif |
205ab99dd slub: Update stat... |
3868 |
|
ab4d5ed5e slub: Enable sysf... |
3869 |
#ifdef CONFIG_SLUB_DEBUG |
434e245dd SLUB: Do not allo... |
3870 3871 |
static int validate_slab(struct kmem_cache *s, struct page *page, unsigned long *map) |
53e15af03 slub: validation ... |
3872 3873 |
{
	void *p;
a973e9dd1 Revert "unique en... |
3874 |
void *addr = page_address(page); |
53e15af03 slub: validation ... |
3875 3876 3877 3878 3879 3880 |
	if (!check_slab(s, page) ||
			!on_freelist(s, page, NULL))
		return 0;

	/* Now we know that a valid freelist exists */
39b264641 slub: Store max n... |
3881 |
bitmap_zero(map, page->objects); |
53e15af03 slub: validation ... |
3882 |
|
5f80b13ae slub: get_map() f... |
3883 3884 3885 3886 3887 |
	get_map(s, page, map);
	for_each_object(p, s, addr, page->objects) {
		if (test_bit(slab_index(p, s, addr), map))
			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
				return 0;
53e15af03 slub: validation ... |
3888 |
} |
224a88be4 slub: for_each_ob... |
3889 |
for_each_object(p, s, addr, page->objects) |
7656c72b5 SLUB: add macros ... |
3890 |
if (!test_bit(slab_index(p, s, addr), map)) |
37d57443d slub: Fix a crash... |
3891 |
if (!check_object(s, page, p, SLUB_RED_ACTIVE)) |
53e15af03 slub: validation ... |
3892 3893 3894 |
				return 0;

	return 1;
}
434e245dd SLUB: Do not allo... |
3895 3896 |
static void validate_slab_slab(struct kmem_cache *s, struct page *page, unsigned long *map) |
53e15af03 slub: validation ... |
3897 |
{ |
881db7fb0 slub: Invert lock... |
3898 3899 3900 |
	slab_lock(page);
	validate_slab(s, page, map);
	slab_unlock(page);
53e15af03 slub: validation ... |
3901 |
} |
434e245dd SLUB: Do not allo... |
3902 3903 |
static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n, unsigned long *map) |
53e15af03 slub: validation ... |
3904 3905 3906 3907 3908 3909 3910 3911 |
{
	unsigned long count = 0;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);

	list_for_each_entry(page, &n->partial, lru) {
434e245dd SLUB: Do not allo... |
3912 |
validate_slab_slab(s, page, map); |
53e15af03 slub: validation ... |
3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 |
		count++;
	}
	if (count != n->nr_partial)
		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
			"counter=%ld\n", s->name, count, n->nr_partial);

	if (!(s->flags & SLAB_STORE_USER))
		goto out;

	list_for_each_entry(page, &n->full, lru) {
434e245dd SLUB: Do not allo... |
3924 |
validate_slab_slab(s, page, map); |
53e15af03 slub: validation ... |
3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 |
		count++;
	}
	if (count != atomic_long_read(&n->nr_slabs))
		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
			"counter=%ld\n", s->name, count,
			atomic_long_read(&n->nr_slabs));

out:
	spin_unlock_irqrestore(&n->list_lock, flags);
	return count;
}
434e245dd SLUB: Do not allo... |
3937 |
static long validate_slab_cache(struct kmem_cache *s) |
53e15af03 slub: validation ... |
3938 3939 3940 |
{
	int node;
	unsigned long count = 0;
205ab99dd slub: Update stat... |
3941 |
unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * |
434e245dd SLUB: Do not allo... |
3942 3943 3944 3945 |
				sizeof(unsigned long), GFP_KERNEL);

	if (!map)
		return -ENOMEM;
53e15af03 slub: validation ... |
3946 3947 |
flush_all(s); |
f64dc58c5 Memoryless nodes:... |
3948 |
for_each_node_state(node, N_NORMAL_MEMORY) { |
53e15af03 slub: validation ... |
3949 |
struct kmem_cache_node *n = get_node(s, node); |
434e245dd SLUB: Do not allo... |
3950 |
count += validate_slab_node(s, n, map); |
53e15af03 slub: validation ... |
3951 |
} |
434e245dd SLUB: Do not allo... |
3952 |
kfree(map); |
53e15af03 slub: validation ... |
3953 3954 |
	return count;
}
88a420e4e slub: add ability... |
3955 |
/* |
672bba3a4 SLUB: update comm... |
3956 |
* Generate lists of code addresses where slabcache objects are allocated |
88a420e4e slub: add ability... |
3957 3958 3959 3960 3961 |
* and freed. */ struct location { unsigned long count; |
ce71e27c6 SLUB: Replace __b... |
3962 |
unsigned long addr; |
45edfa580 SLUB: include lif... |
3963 3964 3965 3966 3967 |
long long sum_time; long min_time; long max_time; long min_pid; long max_pid; |
174596a0b cpumask: convert mm/ |
3968 |
DECLARE_BITMAP(cpus, NR_CPUS); |
45edfa580 SLUB: include lif... |
3969 |
nodemask_t nodes; |
88a420e4e slub: add ability... |
3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 |
}; struct loc_track { unsigned long max; unsigned long count; struct location *loc; }; static void free_loc_track(struct loc_track *t) { if (t->max) free_pages((unsigned long)t->loc, get_order(sizeof(struct location) * t->max)); } |
68dff6a9a SLUB slab validat... |
3984 |
static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags) |
88a420e4e slub: add ability... |
3985 3986 3987 |
{ struct location *l; int order; |
88a420e4e slub: add ability... |
3988 |
order = get_order(sizeof(struct location) * max); |
68dff6a9a SLUB slab validat... |
3989 |
l = (void *)__get_free_pages(flags, order); |
88a420e4e slub: add ability... |
3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 |
	if (!l)
		return 0;

	if (t->count) {
		memcpy(l, t->loc, sizeof(struct location) * t->count);
		free_loc_track(t);
	}
	t->max = max;
	t->loc = l;
	return 1;
}

static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa580 SLUB: include lif... |
4003 |
const struct track *track) |
88a420e4e slub: add ability... |
4004 4005 4006 |
{
	long start, end, pos;
	struct location *l;
ce71e27c6 SLUB: Replace __b... |
4007 |
unsigned long caddr; |
45edfa580 SLUB: include lif... |
4008 |
unsigned long age = jiffies - track->when; |
88a420e4e slub: add ability... |
4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 |
	start = -1;
	end = t->count;

	for ( ; ; ) {
		pos = start + (end - start + 1) / 2;

		/*
		 * There is nothing at "end". If we end up there
		 * we need to add something to before end.
		 */
		if (pos == end)
			break;

		caddr = t->loc[pos].addr;
45edfa580 SLUB: include lif... |
4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 |
		if (track->addr == caddr) {

			l = &t->loc[pos];
			l->count++;
			if (track->when) {
				l->sum_time += age;
				if (age < l->min_time)
					l->min_time = age;
				if (age > l->max_time)
					l->max_time = age;

				if (track->pid < l->min_pid)
					l->min_pid = track->pid;
				if (track->pid > l->max_pid)
					l->max_pid = track->pid;
174596a0b cpumask: convert mm/ |
4039 4040 |
cpumask_set_cpu(track->cpu, to_cpumask(l->cpus)); |
45edfa580 SLUB: include lif... |
4041 4042 |
			}
			node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4e slub: add ability... |
4043 4044 |
			return 1;
		}
45edfa580 SLUB: include lif... |
4045 |
if (track->addr < caddr) |
88a420e4e slub: add ability... |
4046 4047 4048 4049 4050 4051 |
			end = pos;
		else
			start = pos;
	}

	/*
672bba3a4 SLUB: update comm... |
4052 |
* Not found. Insert new tracking element. |
88a420e4e slub: add ability... |
4053 |
*/ |
68dff6a9a SLUB slab validat... |
4054 |
if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC)) |
88a420e4e slub: add ability... |
4055 4056 4057 4058 4059 4060 4061 4062 |
		return 0;

	l = t->loc + pos;
	if (pos < t->count)
		memmove(l + 1, l,
			(t->count - pos) * sizeof(struct location));
	t->count++;
	l->count = 1;
45edfa580 SLUB: include lif... |
4063 4064 4065 4066 4067 4068 |
	l->addr = track->addr;
	l->sum_time = age;
	l->min_time = age;
	l->max_time = age;
	l->min_pid = track->pid;
	l->max_pid = track->pid;
174596a0b cpumask: convert mm/ |
4069 4070 |
	cpumask_clear(to_cpumask(l->cpus));
	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
45edfa580 SLUB: include lif... |
4071 4072 |
	nodes_clear(l->nodes);
	node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4e slub: add ability... |
4073 4074 4075 4076 |
	return 1;
}

static void process_slab(struct loc_track *t, struct kmem_cache *s,
bbd7d57bf slub: Potential s... |
4077 |
struct page *page, enum track_item alloc, |
a5dd5c117 slub: Fix signedn... |
4078 |
unsigned long *map) |
88a420e4e slub: add ability... |
4079 |
{ |
a973e9dd1 Revert "unique en... |
4080 |
void *addr = page_address(page); |
88a420e4e slub: add ability... |
4081 |
void *p; |
39b264641 slub: Store max n... |
4082 |
bitmap_zero(map, page->objects); |
5f80b13ae slub: get_map() f... |
4083 |
get_map(s, page, map); |
88a420e4e slub: add ability... |
4084 |
|
224a88be4 slub: for_each_ob... |
4085 |
for_each_object(p, s, addr, page->objects) |
45edfa580 SLUB: include lif... |
4086 4087 |
		if (!test_bit(slab_index(p, s, addr), map))
			add_location(t, s, get_track(s, p, alloc)); |
88a420e4e slub: add ability... |
4088 4089 4090 4091 4092 |
}

static int list_locations(struct kmem_cache *s, char *buf,
					enum track_item alloc)
{ |
e374d4835 slub: fix shadowe... |
4093 |
int len = 0; |
88a420e4e slub: add ability... |
4094 |
unsigned long i; |
68dff6a9a SLUB slab validat... |
4095 |
struct loc_track t = { 0, 0, NULL }; |
88a420e4e slub: add ability... |
4096 |
int node; |
bbd7d57bf slub: Potential s... |
4097 4098 |
unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) * sizeof(unsigned long), GFP_KERNEL); |
88a420e4e slub: add ability... |
4099 |
|
bbd7d57bf slub: Potential s... |
4100 4101 4102 |
	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
				     GFP_TEMPORARY)) {
		kfree(map); |
68dff6a9a SLUB slab validat... |
4103 4104 |
		return sprintf(buf, "Out of memory\n"); |
bbd7d57bf slub: Potential s... |
4105 |
} |
88a420e4e slub: add ability... |
4106 4107 |
	/* Push back cpu slabs */
	flush_all(s); |
f64dc58c5 Memoryless nodes:... |
4108 |
for_each_node_state(node, N_NORMAL_MEMORY) { |
88a420e4e slub: add ability... |
4109 4110 4111 |
		struct kmem_cache_node *n = get_node(s, node);
		unsigned long flags;
		struct page *page; |
9e86943b6 SLUB: use atomic_... |
4112 |
if (!atomic_long_read(&n->nr_slabs)) |
88a420e4e slub: add ability... |
4113 4114 4115 4116 |
			continue;

		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->partial, lru) |
bbd7d57bf slub: Potential s... |
4117 |
process_slab(&t, s, page, alloc, map); |
88a420e4e slub: add ability... |
4118 |
list_for_each_entry(page, &n->full, lru) |
bbd7d57bf slub: Potential s... |
4119 |
process_slab(&t, s, page, alloc, map); |
88a420e4e slub: add ability... |
4120 4121 4122 4123 |
		spin_unlock_irqrestore(&n->list_lock, flags);
	}

	for (i = 0; i < t.count; i++) { |
45edfa580 SLUB: include lif... |
4124 |
struct location *l = &t.loc[i]; |
88a420e4e slub: add ability... |
4125 |
|
9c2462472 KSYM_SYMBOL_LEN f... |
4126 |
if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100) |
88a420e4e slub: add ability... |
4127 |
break; |
e374d4835 slub: fix shadowe... |
4128 |
len += sprintf(buf + len, "%7ld ", l->count); |
45edfa580 SLUB: include lif... |
4129 4130 |
if (l->addr) |
62c70bce8 mm: convert sprin... |
4131 |
len += sprintf(buf + len, "%pS", (void *)l->addr); |
88a420e4e slub: add ability... |
4132 |
else |
e374d4835 slub: fix shadowe... |
4133 |
len += sprintf(buf + len, "<not-available>"); |
45edfa580 SLUB: include lif... |
4134 4135 |
if (l->sum_time != l->min_time) { |
e374d4835 slub: fix shadowe... |
4136 |
len += sprintf(buf + len, " age=%ld/%ld/%ld", |
f8bd2258e remove div_long_l... |
4137 4138 4139 |
				l->min_time,
				(long)div_u64(l->sum_time, l->count),
				l->max_time); |
45edfa580 SLUB: include lif... |
4140 |
} else |
e374d4835 slub: fix shadowe... |
4141 |
len += sprintf(buf + len, " age=%ld", |
45edfa580 SLUB: include lif... |
4142 4143 4144 |
				l->min_time);

		if (l->min_pid != l->max_pid) |
e374d4835 slub: fix shadowe... |
4145 |
len += sprintf(buf + len, " pid=%ld-%ld", |
45edfa580 SLUB: include lif... |
4146 4147 |
					l->min_pid, l->max_pid);
		else |
e374d4835 slub: fix shadowe... |
4148 |
len += sprintf(buf + len, " pid=%ld", |
45edfa580 SLUB: include lif... |
4149 |
l->min_pid); |
174596a0b cpumask: convert mm/ |
4150 4151 |
if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)) && |
e374d4835 slub: fix shadowe... |
4152 4153 4154 |
				len < PAGE_SIZE - 60) {
			len += sprintf(buf + len, " cpus=");
			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, |
174596a0b cpumask: convert mm/ |
4155 |
to_cpumask(l->cpus)); |
45edfa580 SLUB: include lif... |
4156 |
} |
62bc62a87 page allocator: u... |
4157 |
if (nr_online_nodes > 1 && !nodes_empty(l->nodes) && |
e374d4835 slub: fix shadowe... |
4158 4159 4160 |
				len < PAGE_SIZE - 60) {
			len += sprintf(buf + len, " nodes=");
			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, |
45edfa580 SLUB: include lif... |
4161 4162 |
						  l->nodes);
		} |
e374d4835 slub: fix shadowe... |
4163 4164 |
		len += sprintf(buf + len, "\n"); |
88a420e4e slub: add ability... |
4165 4166 4167 |
	}

	free_loc_track(&t); |
bbd7d57bf slub: Potential s... |
4168 |
kfree(map); |
88a420e4e slub: add ability... |
4169 |
if (!t.count) |
e374d4835 slub: fix shadowe... |
4170 4171 4172 |
		len += sprintf(buf, "No data\n");

	return len; |
88a420e4e slub: add ability... |
4173 |
} |
ab4d5ed5e slub: Enable sysf... |
4174 |
#endif |
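The loc_track machinery above backs the alloc_calls and free_calls files wired up later in this file; each output line is one unique call site. A minimal sketch of a userspace reader (the cache name kmalloc-64 is an assumption about the running system; the kernel needs CONFIG_SLUB_DEBUG and the cache must have SLAB_STORE_USER set, e.g. via the slub_debug=U boot option, or alloc_calls_show() below returns -ENOSYS):

	/* hypothetical userspace reader for /sys/kernel/slab/<cache>/alloc_calls */
	#include <stdio.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/sys/kernel/slab/kmalloc-64/alloc_calls", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* each line: "<count> <symbol> age=<min>/<avg>/<max> pid=... cpus=..." */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}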
88a420e4e slub: add ability... |
4175 |
|
a5a84755c slub: Move functi... |
4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 |
#ifdef SLUB_RESILIENCY_TEST
static void resiliency_test(void)
{
	u8 *p;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || SLUB_PAGE_SHIFT < 10);

	printk(KERN_ERR "SLUB resiliency testing\n");
	printk(KERN_ERR "-----------------------\n");
	printk(KERN_ERR "A. Corruption after allocation\n");

	p = kzalloc(16, GFP_KERNEL);
	p[16] = 0x12;
	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
			" 0x12->0x%p\n\n", p + 16);

	validate_slab_cache(kmalloc_caches[4]);

	/* Hmmm... The next two are dangerous */
	p = kzalloc(32, GFP_KERNEL);
	p[32 + sizeof(void *)] = 0x34;
	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
			" 0x34 -> -0x%p\n", p);
	printk(KERN_ERR
		"If allocated object is overwritten then not detectable\n");

	validate_slab_cache(kmalloc_caches[5]);
	p = kzalloc(64, GFP_KERNEL);
	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
	*p = 0x56;
	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
			p);
	printk(KERN_ERR
		"If allocated object is overwritten then not detectable\n");
	validate_slab_cache(kmalloc_caches[6]);

	printk(KERN_ERR "\nB. Corruption after free\n");
	p = kzalloc(128, GFP_KERNEL);
	kfree(p);
	*p = 0x78;
	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n", p);
	validate_slab_cache(kmalloc_caches[7]);

	p = kzalloc(256, GFP_KERNEL);
	kfree(p);
	p[50] = 0x9a;
	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n", p);
	validate_slab_cache(kmalloc_caches[8]);

	p = kzalloc(512, GFP_KERNEL);
	kfree(p);
	p[512] = 0xab;
	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n", p);
	validate_slab_cache(kmalloc_caches[9]);
}
#else
#ifdef CONFIG_SYSFS
static void resiliency_test(void) {};
#endif
#endif |
ab4d5ed5e slub: Enable sysf... |
4261 |
#ifdef CONFIG_SYSFS |
81819f0fc SLUB core |
4262 |
enum slab_stat_type { |
205ab99dd slub: Update stat... |
4263 4264 4265 4266 4267 |
	SL_ALL,			/* All slabs */
	SL_PARTIAL,		/* Only partially allocated slabs */
	SL_CPU,			/* Only slabs used for cpu caches */
	SL_OBJECTS,		/* Determine allocated objects not slabs */
	SL_TOTAL		/* Determine object capacity not slabs */ |
81819f0fc SLUB core |
4268 |
}; |
205ab99dd slub: Update stat... |
4269 |
#define SO_ALL (1 << SL_ALL) |
81819f0fc SLUB core |
4270 4271 4272 |
#define SO_PARTIAL	(1 << SL_PARTIAL)
#define SO_CPU		(1 << SL_CPU)
#define SO_OBJECTS	(1 << SL_OBJECTS) |
205ab99dd slub: Update stat... |
4273 |
#define SO_TOTAL (1 << SL_TOTAL) |
81819f0fc SLUB core |
4274 |
|
62e5c4b4d slub: fix possibl... |
4275 4276 |
static ssize_t show_slab_objects(struct kmem_cache *s, char *buf, unsigned long flags) |
81819f0fc SLUB core |
4277 4278 |
{
	unsigned long total = 0; |
81819f0fc SLUB core |
4279 4280 4281 4282 4283 4284 |
	int node;
	int x;
	unsigned long *nodes;
	unsigned long *per_cpu;

	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); |
62e5c4b4d slub: fix possibl... |
4285 4286 |
	if (!nodes)
		return -ENOMEM; |
81819f0fc SLUB core |
4287 |
per_cpu = nodes + nr_node_ids; |
205ab99dd slub: Update stat... |
4288 4289 |
	if (flags & SO_CPU) {
		int cpu; |
81819f0fc SLUB core |
4290 |
|
205ab99dd slub: Update stat... |
4291 |
for_each_possible_cpu(cpu) { |
9dfc6e68b SLUB: Use this_cp... |
4292 |
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); |
bc6697d8a slub: avoid poten... |
4293 |
int node = ACCESS_ONCE(c->node); |
49e225858 slub: per cpu cac... |
4294 |
struct page *page; |
dfb4f0960 SLUB: Avoid page ... |
4295 |
|
bc6697d8a slub: avoid poten... |
4296 |
if (node < 0) |
205ab99dd slub: Update stat... |
4297 |
continue; |
bc6697d8a slub: avoid poten... |
4298 4299 4300 4301 |
			page = ACCESS_ONCE(c->page);
			if (page) {
				if (flags & SO_TOTAL)
					x = page->objects; |
205ab99dd slub: Update stat... |
4302 |
else if (flags & SO_OBJECTS) |
bc6697d8a slub: avoid poten... |
4303 |
x = page->inuse; |
81819f0fc SLUB core |
4304 4305 |
				else
					x = 1; |
205ab99dd slub: Update stat... |
4306 |
|
81819f0fc SLUB core |
4307 |
total += x; |
bc6697d8a slub: avoid poten... |
4308 |
nodes[node] += x; |
81819f0fc SLUB core |
4309 |
} |
49e225858 slub: per cpu cac... |
4310 4311 4312 4313 |
			page = c->partial;

			if (page) {
				x = page->pobjects; |
bc6697d8a slub: avoid poten... |
4314 4315 |
				total += x;
				nodes[node] += x; |
49e225858 slub: per cpu cac... |
4316 |
} |
bc6697d8a slub: avoid poten... |
4317 |
per_cpu[node]++; |
81819f0fc SLUB core |
4318 4319 |
		}
	} |
04d94879c slub: Avoid use o... |
4320 |
lock_memory_hotplug(); |
ab4d5ed5e slub: Enable sysf... |
4321 |
#ifdef CONFIG_SLUB_DEBUG |
205ab99dd slub: Update stat... |
4322 4323 4324 4325 4326 4327 4328 4329 4330 |
	if (flags & SO_ALL) {
		for_each_node_state(node, N_NORMAL_MEMORY) {
			struct kmem_cache_node *n = get_node(s, node);

			if (flags & SO_TOTAL)
				x = atomic_long_read(&n->total_objects);
			else if (flags & SO_OBJECTS)
				x = atomic_long_read(&n->total_objects) -
					count_partial(n, count_free); |
81819f0fc SLUB core |
4331 |
|
81819f0fc SLUB core |
4332 |
else |
205ab99dd slub: Update stat... |
4333 |
x = atomic_long_read(&n->nr_slabs); |
81819f0fc SLUB core |
4334 4335 4336 |
			total += x;
			nodes[node] += x;
		} |
ab4d5ed5e slub: Enable sysf... |
4337 4338 4339 |
	} else
#endif
		if (flags & SO_PARTIAL) { |
205ab99dd slub: Update stat... |
4340 4341 |
		for_each_node_state(node, N_NORMAL_MEMORY) {
			struct kmem_cache_node *n = get_node(s, node); |
81819f0fc SLUB core |
4342 |
|
205ab99dd slub: Update stat... |
4343 4344 4345 4346 |
			if (flags & SO_TOTAL)
				x = count_partial(n, count_total);
			else if (flags & SO_OBJECTS)
				x = count_partial(n, count_inuse); |
81819f0fc SLUB core |
4347 |
else |
205ab99dd slub: Update stat... |
4348 |
x = n->nr_partial; |
81819f0fc SLUB core |
4349 4350 4351 4352 |
			total += x;
			nodes[node] += x;
		}
	} |
81819f0fc SLUB core |
4353 4354 |
	x = sprintf(buf, "%lu", total);
#ifdef CONFIG_NUMA |
f64dc58c5 Memoryless nodes:... |
4355 |
for_each_node_state(node, N_NORMAL_MEMORY) |
81819f0fc SLUB core |
4356 4357 4358 4359 |
		if (nodes[node])
			x += sprintf(buf + x, " N%d=%lu",
					node, nodes[node]);
#endif |
04d94879c slub: Avoid use o... |
4360 |
unlock_memory_hotplug(); |
81819f0fc SLUB core |
4361 4362 4363 4364 |
	kfree(nodes);
	return x + sprintf(buf + x, "\n");
} |
ab4d5ed5e slub: Enable sysf... |
4365 |
#ifdef CONFIG_SLUB_DEBUG |
81819f0fc SLUB core |
4366 4367 4368 |
static int any_slab_objects(struct kmem_cache *s)
{
	int node; |
81819f0fc SLUB core |
4369 |
|
dfb4f0960 SLUB: Avoid page ... |
4370 |
for_each_online_node(node) { |
81819f0fc SLUB core |
4371 |
struct kmem_cache_node *n = get_node(s, node); |
dfb4f0960 SLUB: Avoid page ... |
4372 4373 |
		if (!n)
			continue; |
4ea33e2dc slub: fix atomic ... |
4374 |
if (atomic_long_read(&n->total_objects)) |
81819f0fc SLUB core |
4375 4376 4377 4378 |
			return 1;
	}
	return 0;
} |
ab4d5ed5e slub: Enable sysf... |
4379 |
#endif |
81819f0fc SLUB core |
4380 4381 |
#define to_slab_attr(n) container_of(n, struct slab_attribute, attr) |
497888cf6 treewide: fix pot... |
4382 |
#define to_slab(n) container_of(n, struct kmem_cache, kobj) |
81819f0fc SLUB core |
4383 4384 4385 4386 4387 4388 4389 4390 |
struct slab_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kmem_cache *s, char *buf);
	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
};

#define SLAB_ATTR_RO(_name) \ |
ab067e99d mm: restrict acce... |
4391 4392 |
	static struct slab_attribute _name##_attr = \
	__ATTR(_name, 0400, _name##_show, NULL) |
81819f0fc SLUB core |
4393 4394 4395 |
#define SLAB_ATTR(_name) \
	static struct slab_attribute _name##_attr =  \ |
ab067e99d mm: restrict acce... |
4396 |
__ATTR(_name, 0600, _name##_show, _name##_store) |
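For reference, a hand expansion of one SLAB_ATTR() invocation used below; this is just the macro written out, not additional source:

	SLAB_ATTR(order);

	/* expands to: */
	static struct slab_attribute order_attr =
		__ATTR(order, 0600, order_show, order_store);

Each attribute thus becomes a 0600 file (0400 for SLAB_ATTR_RO) under /sys/kernel/slab/<cache>/, with show/store callbacks that take the kmem_cache directly.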
81819f0fc SLUB core |
4397 |
|
81819f0fc SLUB core |
4398 4399 4400 4401 4402 4403 4404 4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419 4420 |
static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->size);
}
SLAB_ATTR_RO(slab_size);

static ssize_t align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->align);
}
SLAB_ATTR_RO(align);

static ssize_t object_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->objsize);
}
SLAB_ATTR_RO(object_size);

static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
{ |
834f3d119 slub: Add kmem_ca... |
4421 4422 |
	return sprintf(buf, "%d\n", oo_objects(s->oo)); |
81819f0fc SLUB core |
4423 4424 |
}
SLAB_ATTR_RO(objs_per_slab); |
06b285dc3 slub: Make the or... |
4425 4426 4427 |
static ssize_t order_store(struct kmem_cache *s,
				const char *buf, size_t length)
{ |
0121c619d slub: Whitespace ... |
4428 4429 4430 4431 4432 4433 |
	unsigned long order;
	int err;

	err = strict_strtoul(buf, 10, &order);
	if (err)
		return err; |
06b285dc3 slub: Make the or... |
4434 4435 4436 4437 4438 4439 4440 |
	if (order > slub_max_order || order < slub_min_order)
		return -EINVAL;

	calculate_sizes(s, order);
	return length;
} |
81819f0fc SLUB core |
4441 4442 |
static ssize_t order_show(struct kmem_cache *s, char *buf)
{ |
834f3d119 slub: Add kmem_ca... |
4443 4444 |
	return sprintf(buf, "%d\n", oo_order(s->oo)); |
81819f0fc SLUB core |
4445 |
} |
06b285dc3 slub: Make the or... |
4446 |
SLAB_ATTR(order); |
81819f0fc SLUB core |
4447 |
|
73d342b16 slub: add min_par... |
4448 4449 4450 4451 4452 4453 4454 4455 4456 4457 4458 4459 4460 4461 4462 |
static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%lu\n", s->min_partial);
}

static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
				 size_t length)
{
	unsigned long min;
	int err;

	err = strict_strtoul(buf, 10, &min);
	if (err)
		return err; |
c0bdb232b slub: rename calc... |
4463 |
set_min_partial(s, min); |
73d342b16 slub: add min_par... |
4464 4465 4466 |
	return length;
}
SLAB_ATTR(min_partial); |
49e225858 slub: per cpu cac... |
4467 4468 4469 4470 4471 4472 4473 4474 4475 4476 4477 4478 4479 4480 4481 4482 4483 4484 4485 4486 4487 |
static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%u\n", s->cpu_partial);
}

static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
				 size_t length)
{
	unsigned long objects;
	int err;

	err = strict_strtoul(buf, 10, &objects);
	if (err)
		return err;

	s->cpu_partial = objects;
	flush_all(s);
	return length;
}
SLAB_ATTR(cpu_partial); |
81819f0fc SLUB core |
4488 4489 |
static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{ |
62c70bce8 mm: convert sprin... |
4490 4491 4492 4493 |
	if (!s->ctor)
		return 0;

	return sprintf(buf, "%pS\n", s->ctor); |
81819f0fc SLUB core |
4494 4495 |
}
SLAB_ATTR_RO(ctor); |
81819f0fc SLUB core |
4496 4497 4498 4499 4500 4501 |
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->refcount - 1);
}
SLAB_ATTR_RO(aliases); |
81819f0fc SLUB core |
4502 4503 |
static ssize_t partial_show(struct kmem_cache *s, char *buf)
{ |
d9acf4b7b slub: rename slab... |
4504 |
return show_slab_objects(s, buf, SO_PARTIAL); |
81819f0fc SLUB core |
4505 4506 4507 4508 4509 |
}
SLAB_ATTR_RO(partial);

static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
{ |
d9acf4b7b slub: rename slab... |
4510 |
return show_slab_objects(s, buf, SO_CPU); |
81819f0fc SLUB core |
4511 4512 4513 4514 4515 |
}
SLAB_ATTR_RO(cpu_slabs);

static ssize_t objects_show(struct kmem_cache *s, char *buf)
{ |
205ab99dd slub: Update stat... |
4516 |
return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); |
81819f0fc SLUB core |
4517 4518 |
}
SLAB_ATTR_RO(objects); |
205ab99dd slub: Update stat... |
4519 4520 4521 4522 4523 |
static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
}
SLAB_ATTR_RO(objects_partial); |
49e225858 slub: per cpu cac... |
4524 4525 4526 4527 4528 4529 4530 4531 4532 4533 4534 4535 4536 4537 4538 4539 4540 4541 4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554 |
static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
{
	int objects = 0;
	int pages = 0;
	int cpu;
	int len;

	for_each_online_cpu(cpu) {
		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;

		if (page) {
			pages += page->pages;
			objects += page->pobjects;
		}
	}

	len = sprintf(buf, "%d(%d)", objects, pages);

#ifdef CONFIG_SMP
	for_each_online_cpu(cpu) {
		struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;

		if (page && len < PAGE_SIZE - 20)
			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
				page->pobjects, page->pages);
	}
#endif
	return len + sprintf(buf + len, "\n");
}
SLAB_ATTR_RO(slabs_cpu_partial); |
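For illustration only (the values are hypothetical), reading the resulting slabs_cpu_partial file on a two-CPU machine would produce something like:

	22(2) C0=10(1) C1=12(1)

i.e. 22 objects on 2 per-cpu partial pages in total, followed by the per-cpu breakdown emitted by the CONFIG_SMP loop.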
a5a84755c slub: Move functi... |
4555 4556 4557 4558 4559 4560 4561 4562 4563 4564 4565 4566 4567 4568 4569 4570 4571 4572 4573 4574 4575 4576 4577 4578 4579 4580 4581 4582 4583 4584 4585 4586 4587 4588 4589 4590 4591 4592 |
static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
}

static ssize_t reclaim_account_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
	if (buf[0] == '1')
		s->flags |= SLAB_RECLAIM_ACCOUNT;
	return length;
}
SLAB_ATTR(reclaim_account);

static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
}
SLAB_ATTR_RO(hwcache_align);

#ifdef CONFIG_ZONE_DMA
static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
}
SLAB_ATTR_RO(cache_dma);
#endif

static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
}
SLAB_ATTR_RO(destroy_by_rcu); |
ab9a0f196 slub: automatical... |
4593 4594 4595 4596 4597 4598 |
static ssize_t reserved_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->reserved);
}
SLAB_ATTR_RO(reserved); |
ab4d5ed5e slub: Enable sysf... |
4599 |
#ifdef CONFIG_SLUB_DEBUG |
a5a84755c slub: Move functi... |
4600 4601 4602 4603 4604 |
static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL);
}
SLAB_ATTR_RO(slabs); |
205ab99dd slub: Update stat... |
4605 4606 4607 4608 4609 |
static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
}
SLAB_ATTR_RO(total_objects); |
81819f0fc SLUB core |
4610 4611 4612 4613 4614 4615 4616 4617 4618 4619 |
static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
}

static ssize_t sanity_checks_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_DEBUG_FREE; |
b789ef518 slub: Add cmpxchg... |
4620 4621 |
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE; |
81819f0fc SLUB core |
4622 |
s->flags |= SLAB_DEBUG_FREE; |
b789ef518 slub: Add cmpxchg... |
4623 |
} |
81819f0fc SLUB core |
4624 4625 4626 4627 4628 4629 4630 4631 4632 4633 4634 4635 4636 4637 |
	return length;
}
SLAB_ATTR(sanity_checks);

static ssize_t trace_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
}

static ssize_t trace_store(struct kmem_cache *s, const char *buf,
							size_t length)
{
	s->flags &= ~SLAB_TRACE; |
b789ef518 slub: Add cmpxchg... |
4638 4639 |
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE; |
81819f0fc SLUB core |
4640 |
s->flags |= SLAB_TRACE; |
b789ef518 slub: Add cmpxchg... |
4641 |
} |
81819f0fc SLUB core |
4642 4643 4644 |
	return length;
}
SLAB_ATTR(trace); |
81819f0fc SLUB core |
4645 4646 4647 4648 4649 4650 4651 4652 4653 4654 4655 4656 4657 |
static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
}

static ssize_t red_zone_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_RED_ZONE; |
b789ef518 slub: Add cmpxchg... |
4658 4659 |
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE; |
81819f0fc SLUB core |
4660 |
s->flags |= SLAB_RED_ZONE; |
b789ef518 slub: Add cmpxchg... |
4661 |
} |
06b285dc3 slub: Make the or... |
4662 |
calculate_sizes(s, -1); |
81819f0fc SLUB core |
4663 4664 4665 4666 4667 4668 4669 4670 4671 4672 4673 4674 4675 4676 4677 4678 4679 |
	return length;
}
SLAB_ATTR(red_zone);

static ssize_t poison_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
}

static ssize_t poison_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_POISON; |
b789ef518 slub: Add cmpxchg... |
4680 4681 |
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE; |
81819f0fc SLUB core |
4682 |
s->flags |= SLAB_POISON; |
b789ef518 slub: Add cmpxchg... |
4683 |
} |
06b285dc3 slub: Make the or... |
4684 |
calculate_sizes(s, -1); |
81819f0fc SLUB core |
4685 4686 4687 4688 4689 4690 4691 4692 4693 4694 4695 4696 4697 4698 4699 4700 4701 |
	return length;
}
SLAB_ATTR(poison);

static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
}

static ssize_t store_user_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_STORE_USER; |
b789ef518 slub: Add cmpxchg... |
4702 4703 |
	if (buf[0] == '1') {
		s->flags &= ~__CMPXCHG_DOUBLE; |
81819f0fc SLUB core |
4704 |
s->flags |= SLAB_STORE_USER; |
b789ef518 slub: Add cmpxchg... |
4705 |
} |
06b285dc3 slub: Make the or... |
4706 |
calculate_sizes(s, -1); |
81819f0fc SLUB core |
4707 4708 4709 |
	return length;
}
SLAB_ATTR(store_user); |
53e15af03 slub: validation ... |
4710 4711 4712 4713 4714 4715 4716 4717 |
static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t validate_store(struct kmem_cache *s,
			const char *buf, size_t length)
{ |
434e245dd SLUB: Do not allo... |
4718 4719 4720 4721 4722 4723 4724 4725 |
	int ret = -EINVAL;

	if (buf[0] == '1') {
		ret = validate_slab_cache(s);
		if (ret >= 0)
			ret = length;
	}
	return ret; |
53e15af03 slub: validation ... |
4726 4727 |
}
SLAB_ATTR(validate); |
a5a84755c slub: Move functi... |
4728 4729 4730 4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741 4742 4743 4744 4745 4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758 4759 4760 4761 |
static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);

static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);
#endif /* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_FAILSLAB
static ssize_t failslab_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
}

static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
							size_t length)
{
	s->flags &= ~SLAB_FAILSLAB;
	if (buf[0] == '1')
		s->flags |= SLAB_FAILSLAB;
	return length;
}
SLAB_ATTR(failslab); |
ab4d5ed5e slub: Enable sysf... |
4762 |
#endif |
53e15af03 slub: validation ... |
4763 |
|
2086d26a0 SLUB: Free slabs ... |
4764 4765 4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 |
static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t shrink_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	if (buf[0] == '1') {
		int rc = kmem_cache_shrink(s);

		if (rc)
			return rc;
	} else
		return -EINVAL;
	return length;
}
SLAB_ATTR(shrink); |
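shrink_store() is the backend of the shrink attribute file; writing '1' calls kmem_cache_shrink(). A hedged userspace sketch (the kmalloc-64 cache name is an assumption, and writing normally requires root):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* hypothetical target; any cache directory under /sys/kernel/slab works */
		int fd = open("/sys/kernel/slab/kmalloc-64/shrink", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* shrink_store() accepts "1"; anything else yields -EINVAL */
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}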
81819f0fc SLUB core |
4782 |
#ifdef CONFIG_NUMA |
9824601ea SLUB: rename defr... |
4783 |
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) |
81819f0fc SLUB core |
4784 |
{ |
9824601ea SLUB: rename defr... |
4785 4786 |
	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10); |
81819f0fc SLUB core |
4787 |
} |
9824601ea SLUB: rename defr... |
4788 |
static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, |
81819f0fc SLUB core |
4789 4790 |
				const char *buf, size_t length)
{ |
0121c619d slub: Whitespace ... |
4791 4792 4793 4794 4795 4796 |
	unsigned long ratio;
	int err;

	err = strict_strtoul(buf, 10, &ratio);
	if (err)
		return err; |
e2cb96b7e slub: Disable NUM... |
4797 |
if (ratio <= 100) |
0121c619d slub: Whitespace ... |
4798 |
s->remote_node_defrag_ratio = ratio * 10; |
81819f0fc SLUB core |
4799 |
|
81819f0fc SLUB core |
4800 4801 |
	return length;
} |
9824601ea SLUB: rename defr... |
4802 |
SLAB_ATTR(remote_node_defrag_ratio); |
81819f0fc SLUB core |
4803 |
#endif |
8ff12cfc0 SLUB: Support for... |
4804 |
#ifdef CONFIG_SLUB_STATS |
8ff12cfc0 SLUB: Support for... |
4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815 |
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum = 0;
	int cpu;
	int len;
	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	for_each_online_cpu(cpu) { |
9dfc6e68b SLUB: Use this_cp... |
4816 |
unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; |
8ff12cfc0 SLUB: Support for... |
4817 4818 4819 4820 4821 4822 |
		data[cpu] = x;
		sum += x;
	}

	len = sprintf(buf, "%lu", sum); |
50ef37b96 slub: Fixes to pe... |
4823 |
#ifdef CONFIG_SMP |
8ff12cfc0 SLUB: Support for... |
4824 4825 |
	for_each_online_cpu(cpu) {
		if (data[cpu] && len < PAGE_SIZE - 20) |
50ef37b96 slub: Fixes to pe... |
4826 |
len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]); |
8ff12cfc0 SLUB: Support for... |
4827 |
} |
50ef37b96 slub: Fixes to pe... |
4828 |
#endif |
8ff12cfc0 SLUB: Support for... |
4829 4830 4831 4832 |
	kfree(data);
	return len + sprintf(buf + len, "\n");
} |
78eb00cc5 slub: allow stats... |
4833 4834 4835 4836 4837 |
static void clear_stat(struct kmem_cache *s, enum stat_item si)
{
	int cpu;

	for_each_online_cpu(cpu) |
9dfc6e68b SLUB: Use this_cp... |
4838 |
per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; |
78eb00cc5 slub: allow stats... |
4839 |
} |
8ff12cfc0 SLUB: Support for... |
4840 4841 4842 4843 4844 |
#define STAT_ATTR(si, text) 					\
static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
{								\
	return show_stat(s, buf, si);				\
}								\ |
78eb00cc5 slub: allow stats... |
4845 4846 4847 4848 4849 4850 4851 4852 4853 |
static ssize_t text##_store(struct kmem_cache *s,		\
				const char *buf, size_t length)	\
{								\
	if (buf[0] != '0')					\
		return -EINVAL;					\
	clear_stat(s, si);					\
	return length;						\
}								\
SLAB_ATTR(text);						\ |
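By hand, the first invocation in the list below, STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);, expands to (shown only for clarity, not extra source):

	static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
	{
		return show_stat(s, buf, ALLOC_FASTPATH);
	}
	static ssize_t alloc_fastpath_store(struct kmem_cache *s,
					const char *buf, size_t length)
	{
		if (buf[0] != '0')
			return -EINVAL;
		clear_stat(s, ALLOC_FASTPATH);
		return length;
	}
	SLAB_ATTR(alloc_fastpath);

so every statistics file reads as a total plus per-cpu breakdown, and writing '0' resets the counter on all cpus.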
8ff12cfc0 SLUB: Support for... |
4854 4855 4856 4857 4858 4859 4860 4861 4862 4863 4864 |
STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
STAT_ATTR(FREE_FASTPATH, free_fastpath);
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
STAT_ATTR(FREE_FROZEN, free_frozen);
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill); |
e36a2652d slub: Add statist... |
4865 |
STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch); |
8ff12cfc0 SLUB: Support for... |
4866 4867 4868 4869 4870 4871 4872 |
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees); |
03e404af2 slub: fast releas... |
4873 |
STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass); |
65c3376aa slub: Fallback to... |
4874 |
STAT_ATTR(ORDER_FALLBACK, order_fallback); |
b789ef518 slub: Add cmpxchg... |
4875 4876 |
STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail); |
49e225858 slub: per cpu cac... |
4877 4878 |
STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free); |
8ff12cfc0 SLUB: Support for... |
4879 |
#endif |
064287807 SLUB: Fix coding ... |
4880 |
static struct attribute *slab_attrs[] = { |
81819f0fc SLUB core |
4881 4882 4883 4884 |
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr, |
73d342b16 slub: add min_par... |
4885 |
&min_partial_attr.attr, |
49e225858 slub: per cpu cac... |
4886 |
&cpu_partial_attr.attr, |
81819f0fc SLUB core |
4887 |
&objects_attr.attr, |
205ab99dd slub: Update stat... |
4888 |
&objects_partial_attr.attr, |
81819f0fc SLUB core |
4889 4890 4891 |
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr, |
81819f0fc SLUB core |
4892 4893 |
	&aliases_attr.attr,
	&align_attr.attr, |
81819f0fc SLUB core |
4894 4895 4896 |
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr, |
a5a84755c slub: Move functi... |
4897 |
&shrink_attr.attr, |
ab9a0f196 slub: automatical... |
4898 |
&reserved_attr.attr, |
49e225858 slub: per cpu cac... |
4899 |
&slabs_cpu_partial_attr.attr, |
ab4d5ed5e slub: Enable sysf... |
4900 |
#ifdef CONFIG_SLUB_DEBUG |
a5a84755c slub: Move functi... |
4901 4902 4903 4904 |
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr, |
81819f0fc SLUB core |
4905 4906 4907 |
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr, |
53e15af03 slub: validation ... |
4908 |
&validate_attr.attr, |
88a420e4e slub: add ability... |
4909 4910 |
	&alloc_calls_attr.attr,
	&free_calls_attr.attr, |
ab4d5ed5e slub: Enable sysf... |
4911 |
#endif |
81819f0fc SLUB core |
4912 4913 4914 4915 |
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA |
9824601ea SLUB: rename defr... |
4916 |
&remote_node_defrag_ratio_attr.attr, |
81819f0fc SLUB core |
4917 |
#endif |
8ff12cfc0 SLUB: Support for... |
4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 |
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr, |
e36a2652d slub: Add statist... |
4929 |
&alloc_node_mismatch_attr.attr, |
8ff12cfc0 SLUB: Support for... |
4930 4931 4932 4933 4934 4935 4936 |
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr, |
03e404af2 slub: fast releas... |
4937 |
&deactivate_bypass_attr.attr, |
65c3376aa slub: Fallback to... |
4938 |
&order_fallback_attr.attr, |
b789ef518 slub: Add cmpxchg... |
4939 4940 |
	&cmpxchg_double_fail_attr.attr,
	&cmpxchg_double_cpu_fail_attr.attr, |
49e225858 slub: per cpu cac... |
4941 4942 |
	&cpu_partial_alloc_attr.attr,
	&cpu_partial_free_attr.attr, |
8ff12cfc0 SLUB: Support for... |
4943 |
#endif |
4c13dd3b4 failslab: add abi... |
4944 4945 4946 |
#ifdef CONFIG_FAILSLAB
	&failslab_attr.attr,
#endif |
81819f0fc SLUB core |
4947 4948 4949 4950 4951 4952 4953 4954 4955 4956 4957 4958 4959 4960 4961 4962 4963 4964 4965 4966 4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977 4978 4979 4980 4981 4982 4983 4984 4985 4986 4987 4988 4989 4990 |
	NULL
};

static struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};

static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);

	return err;
} |
151c602f7 SLUB: Fix sysfs r... |
4991 4992 4993 |
static void kmem_cache_release(struct kobject *kobj)
{
	struct kmem_cache *s = to_slab(kobj); |
84c1cf624 SLUB: Fix merged ... |
4994 |
kfree(s->name); |
151c602f7 SLUB: Fix sysfs r... |
4995 4996 |
	kfree(s);
} |
52cf25d0a Driver core: Cons... |
4997 |
static const struct sysfs_ops slab_sysfs_ops = { |
81819f0fc SLUB core |
4998 4999 5000 5001 5002 5003 |
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops, |
151c602f7 SLUB: Fix sysfs r... |
5004 |
.release = kmem_cache_release |
81819f0fc SLUB core |
5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 |
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
} |
9cd43611c kobject: Constify... |
5015 |
static const struct kset_uevent_ops slab_uevent_ops = { |
81819f0fc SLUB core |
5016 5017 |
	.filter = uevent_filter,
}; |
27c3a314d kset: convert slu... |
5018 |
static struct kset *slab_kset; |
81819f0fc SLUB core |
5019 5020 5021 5022 |
#define ID_STR_LENGTH 64

/* Create a unique string id for a slab cache: |
6446faa2f slub: Fix up comm... |
5023 5024 |
 *
 * Format	:[flags-]size |
81819f0fc SLUB core |
5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 |
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_DEBUG_FREE)
		*p++ = 'F'; |
5a896d9e7 slub: add hooks f... |
5047 5048 |
	if (!(s->flags & SLAB_NOTRACK))
		*p++ = 't'; |
81819f0fc SLUB core |
5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 |
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);
	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}

static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */ |
27c3a314d kset: convert slu... |
5073 |
sysfs_remove_link(&slab_kset->kobj, s->name); |
81819f0fc SLUB core |
5074 5075 5076 5077 5078 5079 5080 5081 |
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	} |
27c3a314d kset: convert slu... |
5082 |
s->kobj.kset = slab_kset; |
1eada11c8 Kobject: convert ... |
5083 5084 5085 |
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		kobject_put(&s->kobj); |
81819f0fc SLUB core |
5086 |
return err; |
1eada11c8 Kobject: convert ... |
5087 |
} |
81819f0fc SLUB core |
5088 5089 |
err = sysfs_create_group(&s->kobj, &slab_attr_group); |
5788d8ad6 slub: release kob... |
5090 5091 5092 |
	if (err) {
		kobject_del(&s->kobj);
		kobject_put(&s->kobj); |
81819f0fc SLUB core |
5093 |
return err; |
5788d8ad6 slub: release kob... |
5094 |
} |
81819f0fc SLUB core |
5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 |
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}

static void sysfs_slab_remove(struct kmem_cache *s)
{ |
2bce64858 slub: Allow remov... |
5106 5107 5108 5109 5110 5111 |
	if (slab_state < SYSFS)
		/*
		 * Sysfs has not been setup yet so no need to remove the
		 * cache from sysfs.
		 */
		return; |
81819f0fc SLUB core |
5112 5113 |
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj); |
151c602f7 SLUB: Fix sysfs r... |
5114 |
kobject_put(&s->kobj); |
81819f0fc SLUB core |
5115 5116 5117 5118 |
}

/*
 * Need to buffer aliases during bootup until sysfs becomes |
9f6c708e5 slub: Fix incorre... |
5119 |
* available lest we lose that information. |
81819f0fc SLUB core |
5120 5121 5122 5123 5124 5125 |
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
}; |
5af328a51 mm/slub.c: make c... |
5126 |
static struct saved_alias *alias_list; |
81819f0fc SLUB core |
5127 5128 5129 5130 5131 5132 5133 5134 5135 |
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */ |
27c3a314d kset: convert slu... |
5136 5137 |
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); |
81819f0fc SLUB core |
5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 |
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{ |
5b95a4acf SLUB: use list_fo... |
5153 |
struct kmem_cache *s; |
81819f0fc SLUB core |
5154 |
int err; |
2bce64858 slub: Allow remov... |
5155 |
down_write(&slub_lock); |
0ff21e466 kobject: convert ... |
5156 |
slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj); |
27c3a314d kset: convert slu... |
5157 |
if (!slab_kset) { |
2bce64858 slub: Allow remov... |
5158 |
up_write(&slub_lock); |
81819f0fc SLUB core |
5159 5160 5161 5162 |
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	} |
26a7bd030 SLUB: get rid of ... |
5163 |
slab_state = SYSFS; |
5b95a4acf SLUB: use list_fo... |
5164 |
list_for_each_entry(s, &slab_caches, list) { |
26a7bd030 SLUB: get rid of ... |
5165 |
err = sysfs_slab_add(s); |
5d540fb71 slub: do not fail... |
5166 5167 5168 5169 |
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name); |
26a7bd030 SLUB: get rid of ... |
5170 |
} |
81819f0fc SLUB core |
5171 5172 5173 5174 5175 5176 |
	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name); |
5d540fb71 slub: do not fail... |
5177 5178 5179 5180 |
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name); |
81819f0fc SLUB core |
5181 5182 |
		kfree(al);
	} |
2bce64858 slub: Allow remov... |
5183 |
up_write(&slub_lock); |
81819f0fc SLUB core |
5184 5185 5186 5187 5188 |
	resiliency_test();
	return 0;
}
__initcall(slab_sysfs_init); |
ab4d5ed5e slub: Enable sysf... |
5189 |
#endif /* CONFIG_SYSFS */ |
57ed3eda9 slub: provide /pr... |
5190 5191 5192 5193 |
/*
 * The /proc/slabinfo ABI
 */ |
158a96242 Unify /proc/slabi... |
5194 |
#ifdef CONFIG_SLABINFO |
57ed3eda9 slub: provide /pr... |
5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 |
static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	down_read(&slub_lock);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	up_read(&slub_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0; |
205ab99dd slub: Update stat... |
5233 5234 |
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0; |
57ed3eda9 slub: provide /pr... |
5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 |
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs); |
205ab99dd slub: Update stat... |
5248 5249 |
		nr_objs += atomic_long_read(&n->total_objects);
		nr_free += count_partial(n, count_free); |
57ed3eda9 slub: provide /pr... |
5250 |
} |
205ab99dd slub: Update stat... |
5251 |
nr_inuse = nr_objs - nr_free; |
57ed3eda9 slub: provide /pr... |
5252 5253 |
seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse, |
834f3d119 slub: Add kmem_ca... |
5254 5255 |
nr_objs, s->size, oo_objects(s->oo), (1 << oo_order(s->oo))); |
57ed3eda9 slub: provide /pr... |
5256 5257 5258 5259 5260 5261 5262 |
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
} |
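Taken together with print_slabinfo_header(), each s_show() line comes out in this shape (the values here are hypothetical, shown only to illustrate the format):

	kmalloc-64         12800  12992     64   64    1 : tunables 0 0 0 : slabdata 203 203 0

SLUB has no per-cache tunables and no shared array caches, hence the hardwired zeroes in the tunables column and the repeated nr_slabs in the slabdata column.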
7b3c3a50a proc: move /proc/... |
5263 |
static const struct seq_operations slabinfo_op = { |
57ed3eda9 slub: provide /pr... |
5264 5265 5266 5267 5268 |
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
}; |
7b3c3a50a proc: move /proc/... |
5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 |
static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{ |
ab067e99d mm: restrict acce... |
5283 |
proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations); |
7b3c3a50a proc: move /proc/... |
5284 5285 5286 |
	return 0;
}
module_init(slab_proc_init); |
158a96242 Unify /proc/slabi... |
5287 |
#endif /* CONFIG_SLABINFO */ |