Commit 53238a60dd4a679f6fe5613a7ed46899587205cf
1 parent
e4f7c0b44a
Exists in
master
and in
4 other branches
kmemleak: Allow partial freeing of memory blocks
Functions like free_bootmem() are allowed to free only part of a memory block. This patch adds support for this via the kmemleak_free_part() callback which removes the original object and creates one or two additional objects as a result of the memory block split.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Showing 2 changed files with 85 additions and 14 deletions. Side-by-side diff.
include/linux/kmemleak.h
... | ... | @@ -27,6 +27,7 @@ |
27 | 27 | extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, |
28 | 28 | gfp_t gfp); |
29 | 29 | extern void kmemleak_free(const void *ptr); |
30 | +extern void kmemleak_free_part(const void *ptr, size_t size); | |
30 | 31 | extern void kmemleak_padding(const void *ptr, unsigned long offset, |
31 | 32 | size_t size); |
32 | 33 | extern void kmemleak_not_leak(const void *ptr); |
... | ... | @@ -69,6 +70,9 @@ |
69 | 70 | { |
70 | 71 | } |
71 | 72 | static inline void kmemleak_free(const void *ptr) |
73 | +{ | |
74 | +} | |
75 | +static inline void kmemleak_free_part(const void *ptr, size_t size) | |
72 | 76 | { |
73 | 77 | } |
74 | 78 | static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) |
mm/kmemleak.c
... | ... | @@ -210,6 +210,7 @@ |
210 | 210 | enum { |
211 | 211 | KMEMLEAK_ALLOC, |
212 | 212 | KMEMLEAK_FREE, |
213 | + KMEMLEAK_FREE_PART, | |
213 | 214 | KMEMLEAK_NOT_LEAK, |
214 | 215 | KMEMLEAK_IGNORE, |
215 | 216 | KMEMLEAK_SCAN_AREA, |
216 | 217 | |
217 | 218 | |
218 | 219 | |
... | ... | @@ -523,27 +524,17 @@ |
523 | 524 | * Remove the metadata (struct kmemleak_object) for a memory block from the |
524 | 525 | * object_list and object_tree_root and decrement its use_count. |
525 | 526 | */ |
526 | -static void delete_object(unsigned long ptr) | |
527 | +static void __delete_object(struct kmemleak_object *object) | |
527 | 528 | { |
528 | 529 | unsigned long flags; |
529 | - struct kmemleak_object *object; | |
530 | 530 | |
531 | 531 | write_lock_irqsave(&kmemleak_lock, flags); |
532 | - object = lookup_object(ptr, 0); | |
533 | - if (!object) { | |
534 | -#ifdef DEBUG | |
535 | - kmemleak_warn("Freeing unknown object at 0x%08lx\n", | |
536 | - ptr); | |
537 | -#endif | |
538 | - write_unlock_irqrestore(&kmemleak_lock, flags); | |
539 | - return; | |
540 | - } | |
541 | 532 | prio_tree_remove(&object_tree_root, &object->tree_node); |
542 | 533 | list_del_rcu(&object->object_list); |
543 | 534 | write_unlock_irqrestore(&kmemleak_lock, flags); |
544 | 535 | |
545 | 536 | WARN_ON(!(object->flags & OBJECT_ALLOCATED)); |
546 | - WARN_ON(atomic_read(&object->use_count) < 1); | |
537 | + WARN_ON(atomic_read(&object->use_count) < 2); | |
547 | 538 | |
548 | 539 | /* |
549 | 540 | * Locking here also ensures that the corresponding memory block |
... | ... | @@ -556,6 +547,64 @@ |
556 | 547 | } |
557 | 548 | |
558 | 549 | /* |
550 | + * Look up the metadata (struct kmemleak_object) corresponding to ptr and | |
551 | + * delete it. | |
552 | + */ | |
553 | +static void delete_object_full(unsigned long ptr) | |
554 | +{ | |
555 | + struct kmemleak_object *object; | |
556 | + | |
557 | + object = find_and_get_object(ptr, 0); | |
558 | + if (!object) { | |
559 | +#ifdef DEBUG | |
560 | + kmemleak_warn("Freeing unknown object at 0x%08lx\n", | |
561 | + ptr); | |
562 | +#endif | |
563 | + return; | |
564 | + } | |
565 | + __delete_object(object); | |
566 | + put_object(object); | |
567 | +} | |
568 | + | |
569 | +/* | |
570 | + * Look up the metadata (struct kmemleak_object) corresponding to ptr and | |
571 | + * delete it. If the memory block is partially freed, the function may create | |
572 | + * additional metadata for the remaining parts of the block. | |
573 | + */ | |
574 | +static void delete_object_part(unsigned long ptr, size_t size) | |
575 | +{ | |
576 | + struct kmemleak_object *object; | |
577 | + unsigned long start, end; | |
578 | + | |
579 | + object = find_and_get_object(ptr, 1); | |
580 | + if (!object) { | |
581 | +#ifdef DEBUG | |
582 | + kmemleak_warn("Partially freeing unknown object at 0x%08lx " | |
583 | + "(size %zu)\n", ptr, size); | |
584 | +#endif | |
585 | + return; | |
586 | + } | |
587 | + __delete_object(object); | |
588 | + | |
589 | + /* | |
590 | + * Create one or two objects that may result from the memory block | |
591 | + * split. Note that partial freeing is only done by free_bootmem() and | |
592 | + * this happens before kmemleak_init() is called. The path below is | |
593 | + * only executed during early log recording in kmemleak_init(), so | |
594 | + * GFP_KERNEL is enough. | |
595 | + */ | |
596 | + start = object->pointer; | |
597 | + end = object->pointer + object->size; | |
598 | + if (ptr > start) | |
599 | + create_object(start, ptr - start, object->min_count, | |
600 | + GFP_KERNEL); | |
601 | + if (ptr + size < end) | |
602 | + create_object(ptr + size, end - ptr - size, object->min_count, | |
603 | + GFP_KERNEL); | |
604 | + | |
605 | + put_object(object); | |
606 | +} | |
607 | +/* | |
559 | 608 | * Make a object permanently as gray-colored so that it can no longer be |
560 | 609 | * reported as a leak. This is used in general to mark a false positive. |
561 | 610 | */ |
562 | 611 | |
... | ... | @@ -719,13 +768,28 @@ |
719 | 768 | pr_debug("%s(0x%p)\n", __func__, ptr); |
720 | 769 | |
721 | 770 | if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) |
722 | - delete_object((unsigned long)ptr); | |
771 | + delete_object_full((unsigned long)ptr); | |
723 | 772 | else if (atomic_read(&kmemleak_early_log)) |
724 | 773 | log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0); |
725 | 774 | } |
726 | 775 | EXPORT_SYMBOL_GPL(kmemleak_free); |
727 | 776 | |
728 | 777 | /* |
778 | + * Partial memory freeing function callback. This function is usually called | |
779 | + * from bootmem allocator when (part of) a memory block is freed. | |
780 | + */ | |
781 | +void kmemleak_free_part(const void *ptr, size_t size) | |
782 | +{ | |
783 | + pr_debug("%s(0x%p)\n", __func__, ptr); | |
784 | + | |
785 | + if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) | |
786 | + delete_object_part((unsigned long)ptr, size); | |
787 | + else if (atomic_read(&kmemleak_early_log)) | |
788 | + log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0); | |
789 | +} | |
790 | +EXPORT_SYMBOL_GPL(kmemleak_free_part); | |
791 | + | |
792 | +/* | |
729 | 793 | * Mark an already allocated memory block as a false positive. This will cause |
730 | 794 | * the block to no longer be reported as leak and always be scanned. |
731 | 795 | */ |
... | ... | @@ -1318,7 +1382,7 @@ |
1318 | 1382 | |
1319 | 1383 | rcu_read_lock(); |
1320 | 1384 | list_for_each_entry_rcu(object, &object_list, object_list) |
1321 | - delete_object(object->pointer); | |
1385 | + delete_object_full(object->pointer); | |
1322 | 1386 | rcu_read_unlock(); |
1323 | 1387 | mutex_unlock(&scan_mutex); |
1324 | 1388 | |
... | ... | @@ -1412,6 +1476,9 @@ |
1412 | 1476 | break; |
1413 | 1477 | case KMEMLEAK_FREE: |
1414 | 1478 | kmemleak_free(log->ptr); |
1479 | + break; | |
1480 | + case KMEMLEAK_FREE_PART: | |
1481 | + kmemleak_free_part(log->ptr, log->size); | |
1415 | 1482 | break; |
1416 | 1483 | case KMEMLEAK_NOT_LEAK: |
1417 | 1484 | kmemleak_not_leak(log->ptr); |