Commit 40779859de0f73b40390c6401a024d06cf024290

Authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  SLAB: Record actual last user of freed objects.
  slub: always align cpu_slab to honor cmpxchg_double requirement

Showing 3 changed files

include/linux/percpu.h
... ... @@ -259,6 +259,9 @@
259 259 * Special handling for cmpxchg_double. cmpxchg_double is passed two
260 260 * percpu variables. The first has to be aligned to a double word
261 261 * boundary and the second has to follow directly thereafter.
  262 + * We enforce this on all architectures even if they don't support
  263 + * a double cmpxchg instruction, since it's a cheap requirement, and it
  264 + * avoids breaking the requirement for architectures with the instruction.
262 265 */
263 266 #define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
264 267 ({ \
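
The percpu.h comment above documents the contract the slub fix further down relies on: the two percpu variables passed to cmpxchg_double must sit on a double-word boundary, back to back. Below is a minimal userspace sketch of that idea (illustrative, not kernel code), using GCC's generic __atomic_compare_exchange on a two-field struct aligned to 2 * sizeof(void *); on x86-64, compile with -mcx16 (and -latomic if needed) so the compiler can use cmpxchg16b, which faults on a misaligned operand.

#include <stdio.h>

/* Two adjacent pointer-sized fields updated atomically as one unit,
 * mirroring what cmpxchg_double does on a percpu pair. The alignment
 * is mandatory for the double-width instruction. */
struct pair {
	void *first;		/* e.g. a freelist pointer */
	unsigned long second;	/* e.g. a transaction id */
} __attribute__((aligned(2 * sizeof(void *))));

static struct pair p;

int main(void)
{
	struct pair expected = p;	/* snapshot of both fields */
	struct pair desired = { &p, expected.second + 1 };

	/* Double-width CAS: succeeds only if both fields still match. */
	int ok = __atomic_compare_exchange(&p, &expected, &desired, 0,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	printf("cmpxchg_double-style update %s\n", ok ? "succeeded" : "failed");
	return 0;
}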
mm/slab.c
... ... @@ -3604,13 +3604,14 @@
3604 3604 * Release an obj back to its cache. If the obj has a constructed state, it must
3605 3605 * be in this state _before_ it is released. Called with disabled ints.
3606 3606 */
3607   -static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  3607 +static inline void __cache_free(struct kmem_cache *cachep, void *objp,
  3608 + void *caller)
3608 3609 {
3609 3610 struct array_cache *ac = cpu_cache_get(cachep);
3610 3611  
3611 3612 check_irq_off();
3612 3613 kmemleak_free_recursive(objp, cachep->flags);
3613   - objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
  3614 + objp = cache_free_debugcheck(cachep, objp, caller);
3614 3615  
3615 3616 kmemcheck_slab_free(cachep, objp, obj_size(cachep));
3616 3617  
... ... @@ -3801,7 +3802,7 @@
3801 3802 debug_check_no_locks_freed(objp, obj_size(cachep));
3802 3803 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3803 3804 debug_check_no_obj_freed(objp, obj_size(cachep));
3804   - __cache_free(cachep, objp);
  3805 + __cache_free(cachep, objp, __builtin_return_address(0));
3805 3806 local_irq_restore(flags);
3806 3807  
3807 3808 trace_kmem_cache_free(_RET_IP_, objp);
... ... @@ -3831,7 +3832,7 @@
3831 3832 c = virt_to_cache(objp);
3832 3833 debug_check_no_locks_freed(objp, obj_size(c));
3833 3834 debug_check_no_obj_freed(objp, obj_size(c));
3834   - __cache_free(c, (void *)objp);
  3835 + __cache_free(c, (void *)objp, __builtin_return_address(0));
3835 3836 local_irq_restore(flags);
3836 3837 }
3837 3838 EXPORT_SYMBOL(kfree);
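
The slab.c hunks above implement the first commit in the merge: __cache_free() now takes the caller as a parameter instead of sampling __builtin_return_address(0) itself, so the debug record names the actual last user of a freed object rather than kfree()/kmem_cache_free(). A hedged userspace sketch of the same pattern (my_free, __my_free and debug_record are hypothetical stand-ins, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

static void debug_record(void *obj, void *caller)
{
	printf("object %p last freed by %p\n", obj, caller);
}

/* Internal helper: receives the caller explicitly rather than
 * sampling __builtin_return_address(0) in its own frame. */
static inline void __my_free(void *obj, void *caller)
{
	debug_record(obj, caller);
	free(obj);
}

/* Public entry point, mirroring kfree(): record *its* caller. */
__attribute__((noinline))
void my_free(void *obj)
{
	__my_free(obj, __builtin_return_address(0));
}

int main(void)
{
	my_free(malloc(32));	/* record points at this call site */
	return 0;
}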
mm/slub.c
... ... @@ -2320,16 +2320,12 @@
2320 2320 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2321 2321 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
2322 2322  
2323   -#ifdef CONFIG_CMPXCHG_LOCAL
2324 2323 /*
2325   - * Must align to double word boundary for the double cmpxchg instructions
2326   - * to work.
  2324 + * Must align to double word boundary for the double cmpxchg
  2325 + * instructions to work; see __pcpu_double_call_return_bool().
2327 2326 */
2328   - s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
2329   -#else
2330   - /* Regular alignment is sufficient */
2331   - s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
2332   -#endif
  2327 + s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
  2328 + 2 * sizeof(void *));
2333 2329  
2334 2330 if (!s->cpu_slab)
2335 2331 return 0;
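
With the #ifdef CONFIG_CMPXCHG_LOCAL branch removed, the cpu_slab allocation always requests 2 * sizeof(void *) alignment, whether or not the architecture has a double-width cmpxchg: the extra alignment is essentially free, and it keeps the cmpxchg_double contract intact everywhere. A small userspace analogue of that unconditional guarantee (struct name illustrative; C11 aligned_alloc stands in for the kernel's __alloc_percpu):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cpu_slab_like {
	void *freelist;
	unsigned long tid;
};

int main(void)
{
	size_t align = 2 * sizeof(void *);
	/* aligned_alloc() requires size to be a multiple of alignment. */
	size_t size = (sizeof(struct cpu_slab_like) + align - 1) / align * align;
	struct cpu_slab_like *c = aligned_alloc(align, size);

	assert(c && (uintptr_t)c % align == 0);
	printf("cpu_slab-like object at %p, aligned to %zu\n", (void *)c, align);
	free(c);
	return 0;
}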