Commit 039363f38bfe5f6281e9eae5e0518b11577d9d50

Authored by Christoph Lameter
Committed by Pekka Enberg
1 parent 068ce415be

mm, sl[aou]b: Extract common code for kmem_cache_create()

Kmem_cache_create() does a variety of sanity checks but those
vary depending on the allocator. Use the strictest tests and put them into
a slab_common file. Make the tests conditional on CONFIG_DEBUG_VM.

This patch has the effect of adding sanity checks for SLUB and SLOB
under CONFIG_DEBUG_VM and removes the checks in SLAB for !CONFIG_DEBUG_VM.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>

Showing 6 changed files with 87 additions and 31 deletions

include/linux/slab.h
... ... @@ -130,6 +130,10 @@
130 130 void kmem_cache_free(struct kmem_cache *, void *);
131 131 unsigned int kmem_cache_size(struct kmem_cache *);
132 132  
  133 +/* Slab internal function */
  134 +struct kmem_cache *__kmem_cache_create(const char *, size_t, size_t,
  135 + unsigned long,
  136 + void (*)(void *));
133 137 /*
134 138 * Please use this macro to create slab caches. Simply specify the
135 139 * name of the structure and maybe some flags that are listed above.
mm/Makefile
... ... @@ -16,7 +16,8 @@
16 16 readahead.o swap.o truncate.o vmscan.o shmem.o \
17 17 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
18 18 page_isolation.o mm_init.o mmu_context.o percpu.o \
19   - compaction.o $(mmu-y)
  19 + compaction.o slab_common.o $(mmu-y)
  20 +
20 21 obj-y += init-mm.o
21 22  
22 23 ifdef CONFIG_NO_BOOTMEM
mm/slab.c
... ... @@ -1558,7 +1558,7 @@
1558 1558 * bug.
1559 1559 */
1560 1560  
1561   - sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
  1561 + sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
1562 1562 sizes[INDEX_AC].cs_size,
1563 1563 ARCH_KMALLOC_MINALIGN,
1564 1564 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
... ... @@ -1566,7 +1566,7 @@
1566 1566  
1567 1567 if (INDEX_AC != INDEX_L3) {
1568 1568 sizes[INDEX_L3].cs_cachep =
1569   - kmem_cache_create(names[INDEX_L3].name,
  1569 + __kmem_cache_create(names[INDEX_L3].name,
1570 1570 sizes[INDEX_L3].cs_size,
1571 1571 ARCH_KMALLOC_MINALIGN,
1572 1572 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1573 1573  
... ... @@ -1584,14 +1584,14 @@
1584 1584 * allow tighter packing of the smaller caches.
1585 1585 */
1586 1586 if (!sizes->cs_cachep) {
1587   - sizes->cs_cachep = kmem_cache_create(names->name,
  1587 + sizes->cs_cachep = __kmem_cache_create(names->name,
1588 1588 sizes->cs_size,
1589 1589 ARCH_KMALLOC_MINALIGN,
1590 1590 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1591 1591 NULL);
1592 1592 }
1593 1593 #ifdef CONFIG_ZONE_DMA
1594   - sizes->cs_dmacachep = kmem_cache_create(
  1594 + sizes->cs_dmacachep = __kmem_cache_create(
1595 1595 names->name_dma,
1596 1596 sizes->cs_size,
1597 1597 ARCH_KMALLOC_MINALIGN,
... ... @@ -2220,7 +2220,7 @@
2220 2220 }
2221 2221  
2222 2222 /**
2223   - * kmem_cache_create - Create a cache.
  2223 + * __kmem_cache_create - Create a cache.
2224 2224 * @name: A string which is used in /proc/slabinfo to identify this cache.
2225 2225 * @size: The size of objects to be created in this cache.
2226 2226 * @align: The required alignment for the objects.
... ... @@ -2247,7 +2247,7 @@
2247 2247 * as davem.
2248 2248 */
2249 2249 struct kmem_cache *
2250   -kmem_cache_create (const char *name, size_t size, size_t align,
  2250 +__kmem_cache_create (const char *name, size_t size, size_t align,
2251 2251 unsigned long flags, void (*ctor)(void *))
2252 2252 {
2253 2253 size_t left_over, slab_size, ralign;
... ... @@ -2388,7 +2388,7 @@
2388 2388 /* Get cache's description obj. */
2389 2389 cachep = kmem_cache_zalloc(&cache_cache, gfp);
2390 2390 if (!cachep)
2391   - goto oops;
  2391 + return NULL;
2392 2392  
2393 2393 cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
2394 2394 cachep->object_size = size;
... ... @@ -2445,8 +2445,7 @@
2445 2445 printk(KERN_ERR
2446 2446 "kmem_cache_create: couldn't create cache %s.\n", name);
2447 2447 kmem_cache_free(&cache_cache, cachep);
2448   - cachep = NULL;
2449   - goto oops;
  2448 + return NULL;
2450 2449 }
2451 2450 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2452 2451 + sizeof(struct slab), align);
... ... @@ -2504,8 +2503,7 @@
2504 2503  
2505 2504 if (setup_cpu_cache(cachep, gfp)) {
2506 2505 __kmem_cache_destroy(cachep);
2507   - cachep = NULL;
2508   - goto oops;
  2506 + return NULL;
2509 2507 }
2510 2508  
2511 2509 if (flags & SLAB_DEBUG_OBJECTS) {
2512 2510  
... ... @@ -2521,16 +2519,12 @@
2521 2519 /* cache setup completed, link it into the list */
2522 2520 list_add(&cachep->list, &cache_chain);
2523 2521 oops:
2524   - if (!cachep && (flags & SLAB_PANIC))
2525   - panic("kmem_cache_create(): failed to create slab `%s'\n",
2526   - name);
2527 2522 if (slab_is_available()) {
2528 2523 mutex_unlock(&cache_chain_mutex);
2529 2524 put_online_cpus();
2530 2525 }
2531 2526 return cachep;
2532 2527 }
2533   -EXPORT_SYMBOL(kmem_cache_create);
2534 2528  
2535 2529 #if DEBUG
2536 2530 static void check_irq_off(void)
mm/slab_common.c
  1 +/*
  2 + * Slab allocator functions that are independent of the allocator strategy
  3 + *
  4 + * (C) 2012 Christoph Lameter <cl@linux.com>
  5 + */
  6 +#include <linux/slab.h>
  7 +
  8 +#include <linux/mm.h>
  9 +#include <linux/poison.h>
  10 +#include <linux/interrupt.h>
  11 +#include <linux/memory.h>
  12 +#include <linux/compiler.h>
  13 +#include <linux/module.h>
  14 +
  15 +#include <asm/cacheflush.h>
  16 +#include <asm/tlbflush.h>
  17 +#include <asm/page.h>
  18 +
  19 +/*
  20 + * kmem_cache_create - Create a cache.
  21 + * @name: A string which is used in /proc/slabinfo to identify this cache.
  22 + * @size: The size of objects to be created in this cache.
  23 + * @align: The required alignment for the objects.
  24 + * @flags: SLAB flags
  25 + * @ctor: A constructor for the objects.
  26 + *
  27 + * Returns a ptr to the cache on success, NULL on failure.
  28 + * Cannot be called within a interrupt, but can be interrupted.
  29 + * The @ctor is run when new pages are allocated by the cache.
  30 + *
  31 + * The flags are
  32 + *
  33 + * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
  34 + * to catch references to uninitialised memory.
  35 + *
  36 + * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
  37 + * for buffer overruns.
  38 + *
  39 + * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
  40 + * cacheline. This can be beneficial if you're counting cycles as closely
  41 + * as davem.
  42 + */
  43 +
  44 +struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
  45 + unsigned long flags, void (*ctor)(void *))
  46 +{
  47 + struct kmem_cache *s = NULL;
  48 +
  49 +#ifdef CONFIG_DEBUG_VM
  50 + if (!name || in_interrupt() || size < sizeof(void *) ||
  51 + size > KMALLOC_MAX_SIZE) {
  52 + printk(KERN_ERR "kmem_cache_create(%s) integrity check"
  53 + " failed\n", name);
  54 + goto out;
  55 + }
  56 +#endif
  57 +
  58 + s = __kmem_cache_create(name, size, align, flags, ctor);
  59 +
  60 +#ifdef CONFIG_DEBUG_VM
  61 +out:
  62 +#endif
  63 + if (!s && (flags & SLAB_PANIC))
  64 + panic("kmem_cache_create: Failed to create slab '%s'\n", name);
  65 +
  66 + return s;
  67 +}
  68 +EXPORT_SYMBOL(kmem_cache_create);
mm/slob.c
... ... @@ -506,7 +506,7 @@
506 506 }
507 507 EXPORT_SYMBOL(ksize);
508 508  
509   -struct kmem_cache *kmem_cache_create(const char *name, size_t size,
  509 +struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
510 510 size_t align, unsigned long flags, void (*ctor)(void *))
511 511 {
512 512 struct kmem_cache *c;
513 513  
514 514  
... ... @@ -529,13 +529,11 @@
529 529 c->align = ARCH_SLAB_MINALIGN;
530 530 if (c->align < align)
531 531 c->align = align;
532   - } else if (flags & SLAB_PANIC)
533   - panic("Cannot create slab cache %s\n", name);
534 532  
535   - kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
  533 + kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
  534 + }
536 535 return c;
537 536 }
538   -EXPORT_SYMBOL(kmem_cache_create);
539 537  
540 538 void kmem_cache_destroy(struct kmem_cache *c)
541 539 {
mm/slub.c
... ... @@ -3920,15 +3920,12 @@
3920 3920 return NULL;
3921 3921 }
3922 3922  
3923   -struct kmem_cache *kmem_cache_create(const char *name, size_t size,
  3923 +struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
3924 3924 size_t align, unsigned long flags, void (*ctor)(void *))
3925 3925 {
3926 3926 struct kmem_cache *s;
3927 3927 char *n;
3928 3928  
3929   - if (WARN_ON(!name))
3930   - return NULL;
3931   -
3932 3929 down_write(&slub_lock);
3933 3930 s = find_mergeable(size, align, flags, name, ctor);
3934 3931 if (s) {
3935 3932  
... ... @@ -3972,14 +3969,8 @@
3972 3969 kfree(n);
3973 3970 err:
3974 3971 up_write(&slub_lock);
3975   -
3976   - if (flags & SLAB_PANIC)
3977   - panic("Cannot create slabcache %s\n", name);
3978   - else
3979   - s = NULL;
3980 3972 return s;
3981 3973 }
3982   -EXPORT_SYMBOL(kmem_cache_create);
3983 3974  
3984 3975 #ifdef CONFIG_SMP
3985 3976 /*