Commit ab4d5ed5eeda4f57c50d14131ce1b1da75d0c938

Authored by Christoph Lameter
Committed by Pekka Enberg
1 parent 15b7c51420

slub: Enable sysfs support for !CONFIG_SLUB_DEBUG

Currently, disabling CONFIG_SLUB_DEBUG also disables SYSFS support, meaning
that slabs cannot be tuned without DEBUG.

Make SYSFS support independent of CONFIG_SLUB_DEBUG.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>

Showing 3 changed files with 37 additions and 7 deletions
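In practical terms, the change keeps /sys/kernel/slab/ populated on CONFIG_SYSFS=y kernels even when CONFIG_SLUB_DEBUG is disabled, so the tunables the changelog mentions stay reachable. A minimal userspace sketch of such tuning, assuming a SLUB kernel and using the kmalloc-64 cache purely as an example name:

#include <stdio.h>

int main(void)
{
	/* Writing "1" to a cache's "shrink" attribute asks the
	 * allocator to release empty slabs from its partial lists;
	 * shrink_attr appears in the mm/slub.c diff below. */
	FILE *f = fopen("/sys/kernel/slab/kmalloc-64/shrink", "w");

	if (!f)
		return 1;	/* needs root and CONFIG_SYSFS=y */
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}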

include/linux/slub_def.h
... ... @@ -87,7 +87,7 @@
87 87 unsigned long min_partial;
88 88 const char *name; /* Name (only for display!) */
89 89 struct list_head list; /* List of slab caches */
90   -#ifdef CONFIG_SLUB_DEBUG
  90 +#ifdef CONFIG_SYSFS
91 91 struct kobject kobj; /* For sysfs */
92 92 #endif
93 93  
init/Kconfig
... ... @@ -353,7 +353,7 @@
353 353 config SLUB_STATS
354 354 default n
355 355 bool "Enable SLUB performance statistics"
356   - depends on SLUB && SLUB_DEBUG && SYSFS
  356 + depends on SLUB && SYSFS
357 357 help
358 358 SLUB statistics are useful to debug SLUBs allocation behavior in
359 359 order find ways to optimize the allocator. This should never be
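With the relaxed dependency, CONFIG_SLUB_STATS no longer drags in SLUB_DEBUG just to expose its counters. A hedged sketch of reading one of them, assuming the alloc_fastpath attribute that SLUB_STATS provides (the cache name is again only an example):

#include <stdio.h>

int main(void)
{
	unsigned long total;
	FILE *f = fopen("/sys/kernel/slab/kmalloc-64/alloc_fastpath", "r");

	if (!f)
		return 1;	/* kernel likely built without CONFIG_SLUB_STATS */
	/* The attribute begins with the summed count, optionally
	 * followed by per-cpu breakdowns; read only the total. */
	if (fscanf(f, "%lu", &total) == 1)
		printf("kmalloc-64 fastpath allocations: %lu\n", total);
	fclose(f);
	return 0;
}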
mm/slub.c
... ... @@ -198,7 +198,7 @@
198 198  
199 199 enum track_item { TRACK_ALLOC, TRACK_FREE };
200 200  
201   -#ifdef CONFIG_SLUB_DEBUG
  201 +#ifdef CONFIG_SYSFS
202 202 static int sysfs_slab_add(struct kmem_cache *);
203 203 static int sysfs_slab_alias(struct kmem_cache *, const char *);
204 204 static void sysfs_slab_remove(struct kmem_cache *);
... ... @@ -1102,7 +1102,7 @@
1102 1102 static inline void slab_free_hook_irq(struct kmem_cache *s,
1103 1103 void *object) {}
1104 1104  
1105   -#endif
  1105 +#endif /* CONFIG_SLUB_DEBUG */
1106 1106  
1107 1107 /*
1108 1108 * Slab allocation and freeing
... ... @@ -3373,7 +3373,7 @@
3373 3373 }
3374 3374 #endif
3375 3375  
3376   -#ifdef CONFIG_SLUB_DEBUG
  3376 +#ifdef CONFIG_SYSFS
3377 3377 static int count_inuse(struct page *page)
3378 3378 {
3379 3379 return page->inuse;
3380 3380  
... ... @@ -3383,7 +3383,9 @@
3383 3383 {
3384 3384 return page->objects;
3385 3385 }
  3386 +#endif
3386 3387  
  3388 +#ifdef CONFIG_SLUB_DEBUG
3387 3389 static int validate_slab(struct kmem_cache *s, struct page *page,
3388 3390 unsigned long *map)
3389 3391 {
... ... @@ -3474,6 +3476,7 @@
3474 3476 kfree(map);
3475 3477 return count;
3476 3478 }
  3479 +#endif
3477 3480  
3478 3481 #ifdef SLUB_RESILIENCY_TEST
3479 3482 static void resiliency_test(void)
... ... @@ -3532,9 +3535,12 @@
3532 3535 validate_slab_cache(kmalloc_caches[9]);
3533 3536 }
3534 3537 #else
  3538 +#ifdef CONFIG_SYSFS
3535 3539 static void resiliency_test(void) {};
3536 3540 #endif
  3541 +#endif
3537 3542  
  3543 +#ifdef CONFIG_SLUB_DEBUG
3538 3544 /*
3539 3545 * Generate lists of code addresses where slabcache objects are allocated
3540 3546 * and freed.
3541 3547  
... ... @@ -3763,7 +3769,9 @@
3763 3769 len += sprintf(buf, "No data\n");
3764 3770 return len;
3765 3771 }
  3772 +#endif
3766 3773  
  3774 +#ifdef CONFIG_SYSFS
3767 3775 enum slab_stat_type {
3768 3776 SL_ALL, /* All slabs */
3769 3777 SL_PARTIAL, /* Only partially allocated slabs */
... ... @@ -3816,6 +3824,8 @@
3816 3824 }
3817 3825 }
3818 3826  
  3827 + down_read(&slub_lock);
  3828 +#ifdef CONFIG_SLUB_DEBUG
3819 3829 if (flags & SO_ALL) {
3820 3830 for_each_node_state(node, N_NORMAL_MEMORY) {
3821 3831 struct kmem_cache_node *n = get_node(s, node);
... ... @@ -3832,7 +3842,9 @@
3832 3842 nodes[node] += x;
3833 3843 }
3834 3844  
3835   - } else if (flags & SO_PARTIAL) {
  3845 + } else
  3846 +#endif
  3847 + if (flags & SO_PARTIAL) {
3836 3848 for_each_node_state(node, N_NORMAL_MEMORY) {
3837 3849 struct kmem_cache_node *n = get_node(s, node);
3838 3850  
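The hunk above relies on a slightly unusual preprocessor trick: the debug-only SO_ALL branch ends with a dangling "} else" before the #endif, so with CONFIG_SLUB_DEBUG the SO_PARTIAL test becomes its else-branch, while without it the test compiles standalone. A self-contained sketch of the same pattern (names illustrative, not the kernel's):

#include <stdio.h>

#define SO_ALL     0x1
#define SO_PARTIAL 0x2

static void count_objects(unsigned int flags)
{
#ifdef CONFIG_SLUB_DEBUG
	if (flags & SO_ALL) {
		puts("walking all slabs (debug kernels only)");
	} else
#endif
	if (flags & SO_PARTIAL) {
		puts("walking partial lists (any sysfs kernel)");
	}
}

int main(void)
{
	/* Build with -DCONFIG_SLUB_DEBUG to exercise both branches. */
	count_objects(SO_ALL);
	count_objects(SO_PARTIAL);
	return 0;
}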
... ... @@ -3857,6 +3869,7 @@
3857 3869 return x + sprintf(buf + x, "\n");
3858 3870 }
3859 3871  
  3872 +#ifdef CONFIG_SLUB_DEBUG
3860 3873 static int any_slab_objects(struct kmem_cache *s)
3861 3874 {
3862 3875 int node;
... ... @@ -3872,6 +3885,7 @@
3872 3885 }
3873 3886 return 0;
3874 3887 }
  3888 +#endif
3875 3889  
3876 3890 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3877 3891 #define to_slab(n) container_of(n, struct kmem_cache, kobj);
3878 3892  
... ... @@ -3973,11 +3987,13 @@
3973 3987 }
3974 3988 SLAB_ATTR_RO(aliases);
3975 3989  
  3990 +#ifdef CONFIG_SLUB_DEBUG
3976 3991 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3977 3992 {
3978 3993 return show_slab_objects(s, buf, SO_ALL);
3979 3994 }
3980 3995 SLAB_ATTR_RO(slabs);
  3996 +#endif
3981 3997  
3982 3998 static ssize_t partial_show(struct kmem_cache *s, char *buf)
3983 3999 {
... ... @@ -4003,6 +4019,7 @@
4003 4019 }
4004 4020 SLAB_ATTR_RO(objects_partial);
4005 4021  
  4022 +#ifdef CONFIG_SLUB_DEBUG
4006 4023 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4007 4024 {
4008 4025 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
... ... @@ -4055,6 +4072,7 @@
4055 4072 }
4056 4073 SLAB_ATTR(failslab);
4057 4074 #endif
  4075 +#endif
4058 4076  
4059 4077 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4060 4078 {
... ... @@ -4091,6 +4109,7 @@
4091 4109 }
4092 4110 SLAB_ATTR_RO(destroy_by_rcu);
4093 4111  
  4112 +#ifdef CONFIG_SLUB_DEBUG
4094 4113 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4095 4114 {
4096 4115 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
... ... @@ -4166,6 +4185,7 @@
4166 4185 return ret;
4167 4186 }
4168 4187 SLAB_ATTR(validate);
  4188 +#endif
4169 4189  
4170 4190 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4171 4191 {
... ... @@ -4186,6 +4206,7 @@
4186 4206 }
4187 4207 SLAB_ATTR(shrink);
4188 4208  
  4209 +#ifdef CONFIG_SLUB_DEBUG
4189 4210 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4190 4211 {
4191 4212 if (!(s->flags & SLAB_STORE_USER))
... ... @@ -4201,6 +4222,7 @@
4201 4222 return list_locations(s, buf, TRACK_FREE);
4202 4223 }
4203 4224 SLAB_ATTR_RO(free_calls);
  4225 +#endif
4204 4226  
4205 4227 #ifdef CONFIG_NUMA
4206 4228 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
... ... @@ -4307,25 +4329,33 @@
4307 4329 &min_partial_attr.attr,
4308 4330 &objects_attr.attr,
4309 4331 &objects_partial_attr.attr,
  4332 +#ifdef CONFIG_SLUB_DEBUG
4310 4333 &total_objects_attr.attr,
4311 4334 &slabs_attr.attr,
  4335 +#endif
4312 4336 &partial_attr.attr,
4313 4337 &cpu_slabs_attr.attr,
4314 4338 &ctor_attr.attr,
4315 4339 &aliases_attr.attr,
4316 4340 &align_attr.attr,
  4341 +#ifdef CONFIG_SLUB_DEBUG
4317 4342 &sanity_checks_attr.attr,
4318 4343 &trace_attr.attr,
  4344 +#endif
4319 4345 &hwcache_align_attr.attr,
4320 4346 &reclaim_account_attr.attr,
4321 4347 &destroy_by_rcu_attr.attr,
  4348 +#ifdef CONFIG_SLUB_DEBUG
4322 4349 &red_zone_attr.attr,
4323 4350 &poison_attr.attr,
4324 4351 &store_user_attr.attr,
4325 4352 &validate_attr.attr,
  4353 +#endif
4326 4354 &shrink_attr.attr,
  4355 +#ifdef CONFIG_SLUB_DEBUG
4327 4356 &alloc_calls_attr.attr,
4328 4357 &free_calls_attr.attr,
  4358 +#endif
4329 4359 #ifdef CONFIG_ZONE_DMA
4330 4360 &cache_dma_attr.attr,
4331 4361 #endif
... ... @@ -4608,7 +4638,7 @@
4608 4638 }
4609 4639  
4610 4640 __initcall(slab_sysfs_init);
4611   -#endif
  4641 +#endif /* CONFIG_SYSFS */
4612 4642  
4613 4643 /*
4614 4644 * The /proc/slabinfo ABI