Commit 3ec0974210fe1b7c0618ad6e39a882a4237d7de2

Authored by Christoph Lameter
Committed by Linus Torvalds
1 parent a35afb830f

SLUB: Simplify debug code

Consolidate functionality into the #ifdef section.

Extract tracing into one subroutine.

Move object debug processing into the #ifdef section so that the
code in __slab_alloc and __slab_free becomes minimal.

Reduce the number of functions we need to provide stubs for in the !SLUB_DEBUG case.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file with 57 additions and 55 deletions Side-by-side Diff

... ... @@ -742,6 +742,22 @@
742 742 return search == NULL;
743 743 }
744 744  
  745 +static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
  746 +{
  747 + if (s->flags & SLAB_TRACE) {
  748 + printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
  749 + s->name,
  750 + alloc ? "alloc" : "free",
  751 + object, page->inuse,
  752 + page->freelist);
  753 +
  754 + if (!alloc)
  755 + print_section("Object", (void *)object, s->objsize);
  756 +
  757 + dump_stack();
  758 + }
  759 +}
  760 +
745 761 /*
746 762 * Tracking of fully allocated slabs for debugging purposes.
747 763 */
748 764  
... ... @@ -766,9 +782,19 @@
766 782 spin_unlock(&n->list_lock);
767 783 }
768 784  
769   -static int alloc_object_checks(struct kmem_cache *s, struct page *page,
770   - void *object)
  785 +static void setup_object_debug(struct kmem_cache *s, struct page *page,
  786 + void *object)
771 787 {
  788 + if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
  789 + return;
  790 +
  791 + init_object(s, object, 0);
  792 + init_tracking(s, object);
  793 +}
  794 +
  795 +static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
  796 + void *object, void *addr)
  797 +{
772 798 if (!check_slab(s, page))
773 799 goto bad;
774 800  
775 801  
776 802  
... ... @@ -782,13 +808,16 @@
782 808 goto bad;
783 809 }
784 810  
785   - if (!object)
786   - return 1;
787   -
788   - if (!check_object(s, page, object, 0))
  811 + if (object && !check_object(s, page, object, 0))
789 812 goto bad;
790 813  
  814 + /* Success. Perform special debug activities for allocs */
  815 + if (s->flags & SLAB_STORE_USER)
  816 + set_track(s, object, TRACK_ALLOC, addr);
  817 + trace(s, page, object, 1);
  818 + init_object(s, object, 1);
791 819 return 1;
  820 +
792 821 bad:
793 822 if (PageSlab(page)) {
794 823 /*
... ... @@ -806,8 +835,8 @@
806 835 return 0;
807 836 }
808 837  
809   -static int free_object_checks(struct kmem_cache *s, struct page *page,
810   - void *object)
  838 +static int free_debug_processing(struct kmem_cache *s, struct page *page,
  839 + void *object, void *addr)
811 840 {
812 841 if (!check_slab(s, page))
813 842 goto fail;
814 843  
815 844  
... ... @@ -841,29 +870,22 @@
841 870 "to slab %s", object, page->slab->name);
842 871 goto fail;
843 872 }
  873 +
  874 + /* Special debug activities for freeing objects */
  875 + if (!SlabFrozen(page) && !page->freelist)
  876 + remove_full(s, page);
  877 + if (s->flags & SLAB_STORE_USER)
  878 + set_track(s, object, TRACK_FREE, addr);
  879 + trace(s, page, object, 0);
  880 + init_object(s, object, 0);
844 881 return 1;
  882 +
845 883 fail:
846 884 printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
847 885 s->name, page, object);
848 886 return 0;
849 887 }
850 888  
851   -static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
852   -{
853   - if (s->flags & SLAB_TRACE) {
854   - printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
855   - s->name,
856   - alloc ? "alloc" : "free",
857   - object, page->inuse,
858   - page->freelist);
859   -
860   - if (!alloc)
861   - print_section("Object", (void *)object, s->objsize);
862   -
863   - dump_stack();
864   - }
865   -}
866   -
867 889 static int __init setup_slub_debug(char *str)
868 890 {
869 891 if (!str || *str != '=')
870 892  
871 893  
872 894  
873 895  
... ... @@ -932,26 +954,20 @@
932 954 s->flags |= slub_debug;
933 955 }
934 956 #else
  957 +static inline void setup_object_debug(struct kmem_cache *s,
  958 + struct page *page, void *object) {}
935 959  
936   -static inline int alloc_object_checks(struct kmem_cache *s,
937   - struct page *page, void *object) { return 0; }
  960 +static inline int alloc_debug_processing(struct kmem_cache *s,
  961 + struct page *page, void *object, void *addr) { return 0; }
938 962  
939   -static inline int free_object_checks(struct kmem_cache *s,
940   - struct page *page, void *object) { return 0; }
  963 +static inline int free_debug_processing(struct kmem_cache *s,
  964 + struct page *page, void *object, void *addr) { return 0; }
941 965  
942   -static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
943   -static inline void remove_full(struct kmem_cache *s, struct page *page) {}
944   -static inline void trace(struct kmem_cache *s, struct page *page,
945   - void *object, int alloc) {}
946   -static inline void init_object(struct kmem_cache *s,
947   - void *object, int active) {}
948   -static inline void init_tracking(struct kmem_cache *s, void *object) {}
949 966 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
950 967 { return 1; }
951 968 static inline int check_object(struct kmem_cache *s, struct page *page,
952 969 void *object, int active) { return 1; }
953   -static inline void set_track(struct kmem_cache *s, void *object,
954   - enum track_item alloc, void *addr) {}
  970 +static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
955 971 static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
956 972 #define slub_debug 0
957 973 #endif
... ... @@ -988,11 +1004,7 @@
988 1004 static void setup_object(struct kmem_cache *s, struct page *page,
989 1005 void *object)
990 1006 {
991   - if (SlabDebug(page)) {
992   - init_object(s, object, 0);
993   - init_tracking(s, object);
994   - }
995   -
  1007 + setup_object_debug(s, page, object);
996 1008 if (unlikely(s->ctor))
997 1009 s->ctor(object, s, 0);
998 1010 }
999 1011  
... ... @@ -1449,12 +1461,8 @@
1449 1461 return NULL;
1450 1462 debug:
1451 1463 object = page->freelist;
1452   - if (!alloc_object_checks(s, page, object))
  1464 + if (!alloc_debug_processing(s, page, object, addr))
1453 1465 goto another_slab;
1454   - if (s->flags & SLAB_STORE_USER)
1455   - set_track(s, object, TRACK_ALLOC, addr);
1456   - trace(s, page, object, 1);
1457   - init_object(s, object, 1);
1458 1466  
1459 1467 page->inuse++;
1460 1468 page->freelist = object[page->offset];
1461 1469  
... ... @@ -1561,14 +1569,8 @@
1561 1569 return;
1562 1570  
1563 1571 debug:
1564   - if (!free_object_checks(s, page, x))
  1572 + if (!free_debug_processing(s, page, x, addr))
1565 1573 goto out_unlock;
1566   - if (!SlabFrozen(page) && !page->freelist)
1567   - remove_full(s, page);
1568   - if (s->flags & SLAB_STORE_USER)
1569   - set_track(s, x, TRACK_FREE, addr);
1570   - trace(s, page, object, 0);
1571   - init_object(s, object, 0);
1572 1574 goto checks_ok;
1573 1575 }
1574 1576  
... ... @@ -1805,7 +1807,7 @@
1805 1807 page->freelist = get_freepointer(kmalloc_caches, n);
1806 1808 page->inuse++;
1807 1809 kmalloc_caches->node[node] = n;
1808   - init_object(kmalloc_caches, n, 1);
  1810 + setup_object_debug(kmalloc_caches, page, n);
1809 1811 init_kmem_cache_node(n);
1810 1812 atomic_long_inc(&n->nr_slabs);
1811 1813 add_partial(n, page);