Commit 8910ae896c8c961ef9c7d309262730bd2859e747

Authored by Li Zefan
Committed by Linus Torvalds
1 parent 5f3bf19aeb

kmemleak: change some global variables to int

These don't have to be atomic_t, because they are simple boolean toggles: ordinary int loads and stores suffice. The one check-and-set that must stay atomic, in kmemleak_disable(), is switched from atomic_cmpxchg() to cmpxchg() on the plain int (a standalone sketch of that idiom follows the diffstat below).

Signed-off-by: Li Zefan <lizefan@huawei.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 1 changed file (mm/kmemleak.c) with 40 additions and 40 deletions
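
Most accesses to these flags need no atomicity: they are single-word boolean reads and writes. The exception is kmemleak_disable(), which must atomically check-and-set kmemleak_error so that only the first caller performs the shutdown; the patch therefore keeps an atomic primitive there, moving from atomic_cmpxchg() to cmpxchg() on the plain int. Below is a minimal user-space sketch of the same one-shot idiom, using C11 atomic_compare_exchange_strong() to stand in for the kernel's cmpxchg(); the names and the printf are illustrative, not kmemleak code:

#include <stdatomic.h>
#include <stdio.h>

static int enabled = 1;		/* plain int toggle, like kmemleak_enabled */
static atomic_int error_flag;	/* C11 requires atomic_int here; the kernel's
				 * cmpxchg() operates on the plain int directly */

/* May be entered concurrently from several error paths; only the first
 * caller that flips error_flag from 0 to 1 does the shutdown work. */
static void disable_once(void)
{
	int expected = 0;

	/* analogue of: if (cmpxchg(&kmemleak_error, 0, 1)) return; */
	if (!atomic_compare_exchange_strong(&error_flag, &expected, 1))
		return;			/* already disabled by another caller */

	enabled = 0;			/* plain store; readers only test 0/1 */
	printf("disabled\n");
}

int main(void)
{
	disable_once();
	disable_once();			/* second call is a no-op */
	return 0;
}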

@@ -192,15 +192,15 @@
 static struct kmem_cache *scan_area_cache;
 
 /* set if tracing memory operations is enabled */
-static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
+static int kmemleak_enabled;
 /* set in the late_initcall if there were no errors */
-static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
+static int kmemleak_initialized;
 /* enables or disables early logging of the memory operations */
-static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
+static int kmemleak_early_log = 1;
 /* set if a kmemleak warning was issued */
-static atomic_t kmemleak_warning = ATOMIC_INIT(0);
+static int kmemleak_warning;
 /* set if a fatal kmemleak error has occurred */
-static atomic_t kmemleak_error = ATOMIC_INIT(0);
+static int kmemleak_error;
 
 /* minimum and maximum address that may be valid pointers */
 static unsigned long min_addr = ULONG_MAX;
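
An aside on the hunk above: the converted flags can drop their explicit initializers because C zero-initializes objects with static storage duration, so a bare "static int" already starts at 0; only kmemleak_early_log, which must start at 1, keeps one. Illustrated with hypothetical names:

static int flag;		/* static storage duration: implicitly starts at 0 */
static int early_flag = 1;	/* a nonzero start still needs an explicit initializer */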
@@ -268,7 +268,7 @@
 #define kmemleak_warn(x...)	do {		\
 	pr_warning(x);				\
 	dump_stack();				\
-	atomic_set(&kmemleak_warning, 1);	\
+	kmemleak_warning = 1;			\
 } while (0)
 
 /*
@@ -806,7 +806,7 @@
 	unsigned long flags;
 	struct early_log *log;
 
-	if (atomic_read(&kmemleak_error)) {
+	if (kmemleak_error) {
 		/* kmemleak stopped recording, just count the requests */
 		crt_early_log++;
 		return;
@@ -841,7 +841,7 @@
 	unsigned long flags;
 	int i;
 
-	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
+	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
 		return;
 
 	/*
@@ -894,9 +894,9 @@
 {
 	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 
-	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		create_object((unsigned long)ptr, size, min_count, gfp);
-	else if (atomic_read(&kmemleak_early_log))
+	else if (kmemleak_early_log)
 		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
@@ -920,11 +920,11 @@
 	 * Percpu allocations are only scanned and not reported as leaks
 	 * (min_count is set to 0).
 	 */
-	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		for_each_possible_cpu(cpu)
 			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
 				      size, 0, GFP_KERNEL);
-	else if (atomic_read(&kmemleak_early_log))
+	else if (kmemleak_early_log)
 		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
@@ -940,9 +940,9 @@
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
-	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		delete_object_full((unsigned long)ptr);
-	else if (atomic_read(&kmemleak_early_log))
+	else if (kmemleak_early_log)
 		log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
@@ -960,9 +960,9 @@
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
-	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		delete_object_part((unsigned long)ptr, size);
-	else if (atomic_read(&kmemleak_early_log))
+	else if (kmemleak_early_log)
 		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
@@ -980,11 +980,11 @@
 
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
-	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		for_each_possible_cpu(cpu)
 			delete_object_full((unsigned long)per_cpu_ptr(ptr,
 								      cpu));
-	else if (atomic_read(&kmemleak_early_log))
+	else if (kmemleak_early_log)
 		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
@@ -1000,9 +1000,9 @@
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
-	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		make_gray_object((unsigned long)ptr);
-	else if (atomic_read(&kmemleak_early_log))
+	else if (kmemleak_early_log)
 		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
@@ -1020,9 +1020,9 @@
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
-	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		make_black_object((unsigned long)ptr);
-	else if (atomic_read(&kmemleak_early_log))
+	else if (kmemleak_early_log)
 		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
@@ -1042,9 +1042,9 @@
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
-	if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
+	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
 		add_scan_area((unsigned long)ptr, size, gfp);
-	else if (atomic_read(&kmemleak_early_log))
+	else if (kmemleak_early_log)
 		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
@@ -1062,9 +1062,9 @@
 {
 	pr_debug("%s(0x%p)\n", __func__, ptr);
 
-	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 		object_no_scan((unsigned long)ptr);
-	else if (atomic_read(&kmemleak_early_log))
+	else if (kmemleak_early_log)
 		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
@@ -1089,7 +1089,7 @@
  */
 static int scan_should_stop(void)
 {
-	if (!atomic_read(&kmemleak_enabled))
+	if (!kmemleak_enabled)
 		return 1;
 
 	/*
@@ -1630,14 +1630,14 @@
 		return ret;
 
 	if (strncmp(buf, "clear", 5) == 0) {
-		if (atomic_read(&kmemleak_enabled))
+		if (kmemleak_enabled)
 			kmemleak_clear();
 		else
 			__kmemleak_do_cleanup();
 		goto out;
 	}
 
-	if (!atomic_read(&kmemleak_enabled)) {
+	if (!kmemleak_enabled) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -1726,14 +1726,14 @@
 static void kmemleak_disable(void)
 {
 	/* atomically check whether it was already invoked */
-	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
+	if (cmpxchg(&kmemleak_error, 0, 1))
 		return;
 
 	/* stop any memory operation tracing */
-	atomic_set(&kmemleak_enabled, 0);
+	kmemleak_enabled = 0;
 
 	/* check whether it is too early for a kernel thread */
-	if (atomic_read(&kmemleak_initialized))
+	if (kmemleak_initialized)
 		schedule_work(&cleanup_work);
 
 	pr_info("Kernel memory leak detector disabled\n");
@@ -1775,9 +1775,10 @@
 	int i;
 	unsigned long flags;
 
+	kmemleak_early_log = 0;
+
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 	if (!kmemleak_skip_disable) {
-		atomic_set(&kmemleak_early_log, 0);
 		kmemleak_disable();
 		return;
 	}
@@ -1795,12 +1796,11 @@
 
 	/* the kernel is still in UP mode, so disabling the IRQs is enough */
 	local_irq_save(flags);
-	atomic_set(&kmemleak_early_log, 0);
-	if (atomic_read(&kmemleak_error)) {
+	if (kmemleak_error) {
 		local_irq_restore(flags);
 		return;
 	} else
-		atomic_set(&kmemleak_enabled, 1);
+		kmemleak_enabled = 1;
 	local_irq_restore(flags);
 
 	/*
@@ -1844,9 +1844,9 @@
 				      log->op_type);
 		}
 
-		if (atomic_read(&kmemleak_warning)) {
+		if (kmemleak_warning) {
 			print_log_trace(log);
-			atomic_set(&kmemleak_warning, 0);
+			kmemleak_warning = 0;
 		}
 	}
 }
@@ -1858,9 +1858,9 @@
 {
 	struct dentry *dentry;
 
-	atomic_set(&kmemleak_initialized, 1);
+	kmemleak_initialized = 1;
 
-	if (atomic_read(&kmemleak_error)) {
+	if (kmemleak_error) {
 		/*
 		 * Some error occurred and kmemleak was disabled. There is a
 		 * small chance that kmemleak_disable() was called immediately