Commit 1be1cb7b47f0744141ed61cdb25648819ae1a56f

Authored by Thomas Gleixner
1 parent 3e8ebb5c43

debugobjects: replace static objects when slab cache becomes available

Impact: refactor/consolidate object management, prepare for delayed free

debugobjects allocates static reference objects to track objects which
are initialized or activated before the slab cache becomes
available. These static reference objects have to be handled
separately in free_object(). This special-case handling gets in the
way of implementing delayed free functionality, which is required to
avoid callbacks into the mm code from debug_check_no_obj_freed().
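
Before this change free_object() had to detect whether an object came
from the static pool before deciding how to release it. A rough sketch
of that kind of special case (paraphrased, not the exact code removed
from lib/debugobjects.c) looks like this:

/*
 * Sketch only: objects that live inside obj_static_pool must never be
 * handed to kmem_cache_free(), so free_object() has to tell them apart
 * from slab-allocated ones.
 */
static void free_object(struct debug_obj *obj)
{
	unsigned long idx = (unsigned long)(obj - obj_static_pool);
	unsigned long flags;

	if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
		/* static object (or pool refill): put it back on obj_pool */
		spin_lock_irqsave(&pool_lock, flags);
		hlist_add_head(&obj->node, &obj_pool);
		obj_pool_free++;
		obj_pool_used--;
		spin_unlock_irqrestore(&pool_lock, flags);
	} else {
		/* dynamic object: return it to the slab cache */
		spin_lock_irqsave(&pool_lock, flags);
		obj_pool_used--;
		spin_unlock_irqrestore(&pool_lock, flags);
		kmem_cache_free(obj_cache, obj);
	}
}

With all live references converted to slab-allocated objects, the
pointer comparison against obj_static_pool disappears and free_object()
can treat every object the same way.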

Replace the static object references with dynamic ones after the slab
cache has been initialized. The static objects are now marked __initdata.
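
For context, __initdata only places the static pool in the kernel's
.init.data section, which is discarded when init memory is freed at the
end of boot. The definition (paraphrased from include/linux/init.h; the
exact form varies between kernel versions) is essentially:

#define __initdata	__section(.init.data)

static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

Because of that, no reference to a static object may survive once init
memory is freed, which the replacement routine below guarantees by
swapping them all out in debug_objects_mem_init().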

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <200903162049.58058.nickpiggin@yahoo.com.au>

Showing 1 changed file with 63 additions and 3 deletions

... ... @@ -30,7 +30,7 @@
30 30  
31 31 static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
32 32  
33   -static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE];
  33 +static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
34 34  
35 35 static DEFINE_SPINLOCK(pool_lock);
36 36  
... ... @@ -884,6 +884,63 @@
884 884 }
885 885  
886 886 /*
  887 + * Convert the statically allocated objects to dynamic ones:
  888 + */
  889 +static int debug_objects_replace_static_objects(void)
  890 +{
  891 + struct debug_bucket *db = obj_hash;
  892 + struct hlist_node *node, *tmp;
  893 + struct debug_obj *obj, *new;
  894 + HLIST_HEAD(objects);
  895 + int i, cnt = 0;
  896 +
  897 + for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
  898 + obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
  899 + if (!obj)
  900 + goto free;
  901 + hlist_add_head(&obj->node, &objects);
  902 + }
  903 +
  904 + /*
  905 + * When debug_objects_mem_init() is called we know that only
  906 + * one CPU is up, so disabling interrupts is enough
  907 + * protection. This avoids the lockdep hell of lock ordering.
  908 + */
  909 + local_irq_disable();
  910 +
  911 + /* Remove the statically allocated objects from the pool */
  912 + hlist_for_each_entry_safe(obj, node, tmp, &obj_pool, node)
  913 + hlist_del(&obj->node);
  914 + /* Move the allocated objects to the pool */
  915 + hlist_move_list(&objects, &obj_pool);
  916 +
  917 + /* Replace the active object references */
  918 + for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
  919 + hlist_move_list(&db->list, &objects);
  920 +
  921 + hlist_for_each_entry(obj, node, &objects, node) {
  922 + new = hlist_entry(obj_pool.first, typeof(*obj), node);
  923 + hlist_del(&new->node);
  924 + /* copy object data */
  925 + *new = *obj;
  926 + hlist_add_head(&new->node, &db->list);
  927 + cnt++;
  928 + }
  929 + }
  930 +
  931 + printk(KERN_DEBUG "ODEBUG: %d of %d active objects replaced\n", cnt,
  932 + obj_pool_used);
  933 + local_irq_enable();
  934 + return 0;
  935 +free:
  936 + hlist_for_each_entry_safe(obj, node, tmp, &objects, node) {
  937 + hlist_del(&obj->node);
  938 + kmem_cache_free(obj_cache, obj);
  939 + }
  940 + return -ENOMEM;
  941 +}
  942 +
  943 +/*
887 944 * Called after the kmem_caches are functional to setup a dedicated
888 945 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
889 946 * prevents that the debug code is called on kmem_cache_free() for the
890 947  
... ... @@ -898,9 +955,12 @@
898 955 sizeof (struct debug_obj), 0,
899 956 SLAB_DEBUG_OBJECTS, NULL);
900 957  
901   - if (!obj_cache)
  958 + if (!obj_cache || debug_objects_replace_static_objects()) {
902 959 debug_objects_enabled = 0;
903   - else
  960 + if (obj_cache)
  961 + kmem_cache_destroy(obj_cache);
  962 + printk(KERN_WARNING "ODEBUG: out of memory.\n");
  963 + } else
904 964 debug_objects_selftest();
905 965 }
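
Both the pool swap and the per-bucket replacement above rely on
hlist_move_list() to hand a whole chain from one list head to another
without walking the nodes. A minimal model of its semantics (paraphrased
from include/linux/list.h) is:

static inline void hlist_move_list(struct hlist_head *old,
				   struct hlist_head *new)
{
	/* Take over the chain and fix up the back-pointer of the
	 * first node; the old head is left empty. */
	new->first = old->first;
	if (new->first)
		new->first->pprev = &new->first;
	old->first = NULL;
}

On a successful boot the replacement reports itself via the
"ODEBUG: %d of %d active objects replaced" printk with the number of
converted references; if the allocations fail, debug_objects_mem_init()
disables the debug code, destroys the cache and prints
"ODEBUG: out of memory." instead.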