Commit c017b4be3e84176cab10eca5e6c4faeb8cfc6f3e

Authored by Catalin Marinas
1 parent e7cb55b946

kmemleak: Simplify the kmemleak_scan_area() function prototype

This function was taking unnecessary arguments which can be determined
by kmemleak. The patch also modifies the call sites.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>

Showing 4 changed files with 27 additions and 39 deletions Side-by-side Diff

include/linux/kmemleak.h
... ... @@ -32,8 +32,7 @@
32 32 size_t size) __ref;
33 33 extern void kmemleak_not_leak(const void *ptr) __ref;
34 34 extern void kmemleak_ignore(const void *ptr) __ref;
35   -extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
36   - size_t length, gfp_t gfp) __ref;
  35 +extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
37 36 extern void kmemleak_no_scan(const void *ptr) __ref;
38 37  
39 38 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
... ... @@ -84,8 +83,7 @@
84 83 static inline void kmemleak_ignore(const void *ptr)
85 84 {
86 85 }
87   -static inline void kmemleak_scan_area(const void *ptr, unsigned long offset,
88   - size_t length, gfp_t gfp)
  86 +static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
89 87 {
90 88 }
91 89 static inline void kmemleak_erase(void **ptr)
... ... @@ -2043,9 +2043,7 @@
2043 2043 unsigned int i;
2044 2044  
2045 2045 /* only scan the sections containing data */
2046   - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
2047   - (unsigned long)mod->module_core,
2048   - sizeof(struct module), GFP_KERNEL);
  2046 + kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2049 2047  
2050 2048 for (i = 1; i < hdr->e_shnum; i++) {
2051 2049 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
... ... @@ -2054,8 +2052,7 @@
2054 2052 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
2055 2053 continue;
2056 2054  
2057   - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
2058   - (unsigned long)mod->module_core,
  2055 + kmemleak_scan_area((void *)sechdrs[i].sh_addr,
2059 2056 sechdrs[i].sh_size, GFP_KERNEL);
2060 2057 }
2061 2058 }
... ... @@ -119,8 +119,8 @@
119 119 /* scanning area inside a memory block */
120 120 struct kmemleak_scan_area {
121 121 struct hlist_node node;
122   - unsigned long offset;
123   - size_t length;
  122 + unsigned long start;
  123 + size_t size;
124 124 };
125 125  
126 126 #define KMEMLEAK_GREY 0
... ... @@ -241,8 +241,6 @@
241 241 const void *ptr; /* allocated/freed memory block */
242 242 size_t size; /* memory block size */
243 243 int min_count; /* minimum reference count */
244   - unsigned long offset; /* scan area offset */
245   - size_t length; /* scan area length */
246 244 unsigned long trace[MAX_TRACE]; /* stack trace */
247 245 unsigned int trace_len; /* stack trace length */
248 246 };
249 247  
... ... @@ -720,14 +718,13 @@
720 718 * Add a scanning area to the object. If at least one such area is added,
721 719 * kmemleak will only scan these ranges rather than the whole memory block.
722 720 */
723   -static void add_scan_area(unsigned long ptr, unsigned long offset,
724   - size_t length, gfp_t gfp)
  721 +static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
725 722 {
726 723 unsigned long flags;
727 724 struct kmemleak_object *object;
728 725 struct kmemleak_scan_area *area;
729 726  
730   - object = find_and_get_object(ptr, 0);
  727 + object = find_and_get_object(ptr, 1);
731 728 if (!object) {
732 729 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
733 730 ptr);
... ... @@ -741,7 +738,7 @@
741 738 }
742 739  
743 740 spin_lock_irqsave(&object->lock, flags);
744   - if (offset + length > object->size) {
  741 + if (ptr + size > object->pointer + object->size) {
745 742 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
746 743 dump_object_info(object);
747 744 kmem_cache_free(scan_area_cache, area);
... ... @@ -749,8 +746,8 @@
749 746 }
750 747  
751 748 INIT_HLIST_NODE(&area->node);
752   - area->offset = offset;
753   - area->length = length;
  749 + area->start = ptr;
  750 + area->size = size;
754 751  
755 752 hlist_add_head(&area->node, &object->area_list);
756 753 out_unlock:
... ... @@ -786,7 +783,7 @@
786 783 * processed later once kmemleak is fully initialized.
787 784 */
788 785 static void __init log_early(int op_type, const void *ptr, size_t size,
789   - int min_count, unsigned long offset, size_t length)
  786 + int min_count)
790 787 {
791 788 unsigned long flags;
792 789 struct early_log *log;
... ... @@ -808,8 +805,6 @@
808 805 log->ptr = ptr;
809 806 log->size = size;
810 807 log->min_count = min_count;
811   - log->offset = offset;
812   - log->length = length;
813 808 if (op_type == KMEMLEAK_ALLOC)
814 809 log->trace_len = __save_stack_trace(log->trace);
815 810 crt_early_log++;
... ... @@ -858,7 +853,7 @@
858 853 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
859 854 create_object((unsigned long)ptr, size, min_count, gfp);
860 855 else if (atomic_read(&kmemleak_early_log))
861   - log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
  856 + log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
862 857 }
863 858 EXPORT_SYMBOL_GPL(kmemleak_alloc);
864 859  
... ... @@ -873,7 +868,7 @@
873 868 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
874 869 delete_object_full((unsigned long)ptr);
875 870 else if (atomic_read(&kmemleak_early_log))
876   - log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
  871 + log_early(KMEMLEAK_FREE, ptr, 0, 0);
877 872 }
878 873 EXPORT_SYMBOL_GPL(kmemleak_free);
879 874  
... ... @@ -888,7 +883,7 @@
888 883 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
889 884 delete_object_part((unsigned long)ptr, size);
890 885 else if (atomic_read(&kmemleak_early_log))
891   - log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
  886 + log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
892 887 }
893 888 EXPORT_SYMBOL_GPL(kmemleak_free_part);
894 889  
... ... @@ -903,7 +898,7 @@
903 898 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
904 899 make_gray_object((unsigned long)ptr);
905 900 else if (atomic_read(&kmemleak_early_log))
906   - log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
  901 + log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
907 902 }
908 903 EXPORT_SYMBOL(kmemleak_not_leak);
909 904  
910 905  
911 906  
912 907  
... ... @@ -919,22 +914,21 @@
919 914 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
920 915 make_black_object((unsigned long)ptr);
921 916 else if (atomic_read(&kmemleak_early_log))
922   - log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
  917 + log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
923 918 }
924 919 EXPORT_SYMBOL(kmemleak_ignore);
925 920  
926 921 /*
927 922 * Limit the range to be scanned in an allocated memory block.
928 923 */
929   -void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
930   - size_t length, gfp_t gfp)
  924 +void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
931 925 {
932 926 pr_debug("%s(0x%p)\n", __func__, ptr);
933 927  
934 928 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
935   - add_scan_area((unsigned long)ptr, offset, length, gfp);
  929 + add_scan_area((unsigned long)ptr, size, gfp);
936 930 else if (atomic_read(&kmemleak_early_log))
937   - log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
  931 + log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
938 932 }
939 933 EXPORT_SYMBOL(kmemleak_scan_area);
940 934  
... ... @@ -948,7 +942,7 @@
948 942 if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
949 943 object_no_scan((unsigned long)ptr);
950 944 else if (atomic_read(&kmemleak_early_log))
951   - log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
  945 + log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
952 946 }
953 947 EXPORT_SYMBOL(kmemleak_no_scan);
954 948  
... ... @@ -1075,9 +1069,9 @@
1075 1069 }
1076 1070 } else
1077 1071 hlist_for_each_entry(area, elem, &object->area_list, node)
1078   - scan_block((void *)(object->pointer + area->offset),
1079   - (void *)(object->pointer + area->offset
1080   - + area->length), object, 0);
  1072 + scan_block((void *)area->start,
  1073 + (void *)(area->start + area->size),
  1074 + object, 0);
1081 1075 out:
1082 1076 spin_unlock_irqrestore(&object->lock, flags);
1083 1077 }
... ... @@ -1642,8 +1636,7 @@
1642 1636 kmemleak_ignore(log->ptr);
1643 1637 break;
1644 1638 case KMEMLEAK_SCAN_AREA:
1645   - kmemleak_scan_area(log->ptr, log->offset, log->length,
1646   - GFP_KERNEL);
  1639 + kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
1647 1640 break;
1648 1641 case KMEMLEAK_NO_SCAN:
1649 1642 kmemleak_no_scan(log->ptr);
... ... @@ -2584,8 +2584,8 @@
2584 2584 * kmemleak does not treat the ->s_mem pointer as a reference
2585 2585 * to the object. Otherwise we will not report the leak.
2586 2586 */
2587   - kmemleak_scan_area(slabp, offsetof(struct slab, list),
2588   - sizeof(struct list_head), local_flags);
  2587 + kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
  2588 + local_flags);
2589 2589 if (!slabp)
2590 2590 return NULL;
2591 2591 } else {