Commit ddc2e812d592457747c4367fb73edcaa8e1e49ff
Committed by: Linus Torvalds
Parent: 8d3c138b77
Exists in: master and 7 other branches
[PATCH] slab: verify pointers before free
Passing an invalid pointer to kfree() and kmem_cache_free() is likely to cause bad memory corruption or even take down the whole system, because the bad pointer is likely reused immediately due to the per-CPU caches. Until now, we have not done any verification for this if CONFIG_DEBUG_SLAB is disabled.

As suggested by Linus, add a PageSlab check to page_to_cache() and page_to_slab() to verify pointers passed to kfree(). Also, move the stronger check from cache_free_debugcheck() to kmem_cache_free() to ensure that the passed pointer actually belongs to the cache we are about to free the object to.

For page_to_cache() and page_to_slab(), the assertions should have virtually no extra cost (two instructions, no data cache pressure), and for kmem_cache_free() the overhead should be minimal.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
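As an illustration of what the new assertions catch, here is a minimal, hypothetical sketch. It is not part of the patch: demo_bad_frees(), my_cachep and other_cachep are made-up names, and the two caches are assumed to have been created elsewhere with kmem_cache_create().

/* Hypothetical sketch only -- the two misuses that the new checks turn
 * into an immediate BUG() instead of silent memory corruption. */
#include <linux/slab.h>

static char not_slab_memory[64];	/* static data, never a slab page */

static void demo_bad_frees(struct kmem_cache *my_cachep,
			   struct kmem_cache *other_cachep)
{
	void *obj = kmem_cache_alloc(my_cachep, GFP_KERNEL);

	/* Case 1: kfree() on memory that was never slab-allocated.
	 * virt_to_page() yields a page without PG_slab set, so the new
	 * BUG_ON(!PageSlab(page)) fires instead of a bogus cache pointer
	 * being followed and the object landing on a per-CPU array. */
	kfree(not_slab_memory);

	/* Case 2: returning a valid object to the wrong cache.  The new
	 * BUG_ON(virt_to_cache(objp) != cachep) in kmem_cache_free()
	 * trips before the object is pushed onto other_cachep's
	 * per-CPU array. */
	kmem_cache_free(other_cachep, obj);
}

Previously, the second case only produced printk noise and a WARN_ON() when CONFIG_DEBUG_SLAB was enabled, and the first case went entirely undetected; with this patch both stop the offender at the call site.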
Showing 1 changed file with 4 additions and 9 deletions
mm/slab.c
@@ -592,6 +592,7 @@
 {
 	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
+	BUG_ON(!PageSlab(page));
 	return (struct kmem_cache *)page->lru.next;
 }
 
@@ -604,6 +605,7 @@
 {
 	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
+	BUG_ON(!PageSlab(page));
 	return (struct slab *)page->lru.prev;
 }
 
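Not part of the diff, but for context: the two hunks above do not show the enclosing declarations. Judging by the return types and by the page_get_cache()/page_get_slab() callers further down in mm/slab.c, the helpers presumably read as follows after the patch:

static inline struct kmem_cache *page_get_cache(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page_private(page);
	BUG_ON(!PageSlab(page));	/* new: pointer must map to a slab page */
	return (struct kmem_cache *)page->lru.next;
}

static inline struct slab *page_get_slab(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page_private(page);
	BUG_ON(!PageSlab(page));	/* new: same check before returning the slab descriptor */
	return (struct slab *)page->lru.prev;
}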
@@ -2669,15 +2671,6 @@
 	kfree_debugcheck(objp);
 	page = virt_to_page(objp);
 
-	if (page_get_cache(page) != cachep) {
-		printk(KERN_ERR "mismatch in kmem_cache_free: expected "
-				"cache %p, got %p\n",
-				page_get_cache(page), cachep);
-		printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
-		printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
-				page_get_cache(page)->name);
-		WARN_ON(1);
-	}
 	slabp = page_get_slab(page);
 
 	if (cachep->flags & SLAB_RED_ZONE) {
@@ -3392,6 +3385,8 @@
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	unsigned long flags;
+
+	BUG_ON(virt_to_cache(objp) != cachep);
 
 	local_irq_save(flags);
 	__cache_free(cachep, objp);
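Also not shown in this diff is virt_to_cache(), used by the new assertion in kmem_cache_free(). In mm/slab.c of this era it is presumably the small existing helper that maps an object back to its owning cache via its struct page, roughly:

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_page(obj);
	return page_get_cache(page);
}

So the cheap PageSlab check in page_get_cache() also guards this path, and the cache-mismatch check itself costs only the virt_to_page() lookup plus one comparison, which matches the "overhead should be minimal" claim in the commit message.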