Commit e30825f1869a75b29a69dc8e0aaaaccc492092cf

Authored by Joonsoo Kim
Committed by Linus Torvalds
1 parent eefa864b70

mm/debug-pagealloc: prepare boottime configurable on/off

Until now, debug-pagealloc needs extra flags in struct page, so we need to
recompile whole source code when we decide to use it.  This is really
painful, because it takes some time to recompile and sometimes rebuild is
not possible due to third party module depending on struct page.  So, we
can't use this good feature in many cases.

Now, we have the page extension feature that allows us to insert extra
flags outside of struct page.  This gets rid of the third party module
issue mentioned above.  And, this allows us to determine if we need extra
memory for this page extension at boot time.  With these properties, we can
avoid using debug-pagealloc at boot time with low computational overhead in
the kernel built with CONFIG_DEBUG_PAGEALLOC.  This will help our
development process greatly.

This patch is the preparation step to achieve the above goal.  debug-pagealloc
originally uses an extra field of struct page, but, after this patch, it will
use a field of struct page_ext.  Because memory for page_ext is allocated
later than the initialization of the page allocator in CONFIG_SPARSEMEM, we
should disable the debug-pagealloc feature temporarily until page_ext is
initialized.  This patch implements this.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Jungsoo Son <jungsoo.son@lge.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 8 changed files with 106 additions and 44 deletions Side-by-side Diff

... ... @@ -19,6 +19,7 @@
19 19 #include <linux/bit_spinlock.h>
20 20 #include <linux/shrinker.h>
21 21 #include <linux/resource.h>
  22 +#include <linux/page_ext.h>
22 23  
23 24 struct mempolicy;
24 25 struct anon_vma;
25 26  
26 27  
27 28  
28 29  
... ... @@ -2155,20 +2156,36 @@
2155 2156 unsigned int pages_per_huge_page);
2156 2157 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
2157 2158  
  2159 +extern struct page_ext_operations debug_guardpage_ops;
  2160 +extern struct page_ext_operations page_poisoning_ops;
  2161 +
2158 2162 #ifdef CONFIG_DEBUG_PAGEALLOC
2159 2163 extern unsigned int _debug_guardpage_minorder;
  2164 +extern bool _debug_guardpage_enabled;
2160 2165  
2161 2166 static inline unsigned int debug_guardpage_minorder(void)
2162 2167 {
2163 2168 return _debug_guardpage_minorder;
2164 2169 }
2165 2170  
  2171 +static inline bool debug_guardpage_enabled(void)
  2172 +{
  2173 + return _debug_guardpage_enabled;
  2174 +}
  2175 +
2166 2176 static inline bool page_is_guard(struct page *page)
2167 2177 {
2168   - return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
  2178 + struct page_ext *page_ext;
  2179 +
  2180 + if (!debug_guardpage_enabled())
  2181 + return false;
  2182 +
  2183 + page_ext = lookup_page_ext(page);
  2184 + return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
2169 2185 }
2170 2186 #else
2171 2187 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
  2188 +static inline bool debug_guardpage_enabled(void) { return false; }
2172 2189 static inline bool page_is_guard(struct page *page) { return false; }
2173 2190 #endif /* CONFIG_DEBUG_PAGEALLOC */
2174 2191  
include/linux/mm_types.h
... ... @@ -10,7 +10,6 @@
10 10 #include <linux/rwsem.h>
11 11 #include <linux/completion.h>
12 12 #include <linux/cpumask.h>
13   -#include <linux/page-debug-flags.h>
14 13 #include <linux/uprobes.h>
15 14 #include <linux/page-flags-layout.h>
16 15 #include <asm/page.h>
... ... @@ -186,9 +185,6 @@
186 185 void *virtual; /* Kernel virtual address (NULL if
187 186 not kmapped, ie. highmem) */
188 187 #endif /* WANT_PAGE_VIRTUAL */
189   -#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
190   - unsigned long debug_flags; /* Use atomic bitops on this */
191   -#endif
192 188  
193 189 #ifdef CONFIG_KMEMCHECK
194 190 /*
include/linux/page-debug-flags.h
1   -#ifndef LINUX_PAGE_DEBUG_FLAGS_H
2   -#define LINUX_PAGE_DEBUG_FLAGS_H
3   -
4   -/*
5   - * page->debug_flags bits:
6   - *
7   - * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to
8   - * implement generic debug pagealloc feature. The pages are filled with
9   - * poison patterns and set this flag after free_pages(). The poisoned
10   - * pages are verified whether the patterns are not corrupted and clear
11   - * the flag before alloc_pages().
12   - */
13   -
14   -enum page_debug_flags {
15   - PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */
16   - PAGE_DEBUG_FLAG_GUARD,
17   -};
18   -
19   -/*
20   - * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably
21   - * gets turned off when no debug features are enabling it!
22   - */
23   -
24   -#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
25   -#if !defined(CONFIG_PAGE_POISONING) && \
26   - !defined(CONFIG_PAGE_GUARD) \
27   -/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
28   -#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
29   -#endif
30   -#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */
31   -
32   -#endif /* LINUX_PAGE_DEBUG_FLAGS_H */
include/linux/page_ext.h
... ... @@ -10,6 +10,21 @@
10 10 #ifdef CONFIG_PAGE_EXTENSION
11 11  
12 12 /*
  13 + * page_ext->flags bits:
  14 + *
  15 + * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
  16 + * implement generic debug pagealloc feature. The pages are filled with
  17 + * poison patterns and set this flag after free_pages(). The poisoned
  18 + * pages are verified whether the patterns are not corrupted and clear
  19 + * the flag before alloc_pages().
  20 + */
  21 +
  22 +enum page_ext_flags {
  23 + PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
  24 + PAGE_EXT_DEBUG_GUARD,
  25 +};
  26 +
  27 +/*
13 28 * Page Extension can be considered as an extended mem_map.
14 29 * A page_ext page is associated with every page descriptor. The
15 30 * page_ext helps us add more information about the page.
... ... @@ -12,6 +12,7 @@
12 12 depends on DEBUG_KERNEL
13 13 depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
14 14 depends on !KMEMCHECK
  15 + select PAGE_EXTENSION
15 16 select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
16 17 select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
17 18 ---help---
mm/debug-pagealloc.c
... ... @@ -2,23 +2,49 @@
2 2 #include <linux/string.h>
3 3 #include <linux/mm.h>
4 4 #include <linux/highmem.h>
5   -#include <linux/page-debug-flags.h>
  5 +#include <linux/page_ext.h>
6 6 #include <linux/poison.h>
7 7 #include <linux/ratelimit.h>
8 8  
  9 +static bool page_poisoning_enabled __read_mostly;
  10 +
  11 +static bool need_page_poisoning(void)
  12 +{
  13 + return true;
  14 +}
  15 +
  16 +static void init_page_poisoning(void)
  17 +{
  18 + page_poisoning_enabled = true;
  19 +}
  20 +
  21 +struct page_ext_operations page_poisoning_ops = {
  22 + .need = need_page_poisoning,
  23 + .init = init_page_poisoning,
  24 +};
  25 +
9 26 static inline void set_page_poison(struct page *page)
10 27 {
11   - __set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
  28 + struct page_ext *page_ext;
  29 +
  30 + page_ext = lookup_page_ext(page);
  31 + __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
12 32 }
13 33  
14 34 static inline void clear_page_poison(struct page *page)
15 35 {
16   - __clear_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
  36 + struct page_ext *page_ext;
  37 +
  38 + page_ext = lookup_page_ext(page);
  39 + __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
17 40 }
18 41  
19 42 static inline bool page_poison(struct page *page)
20 43 {
21   - return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
  44 + struct page_ext *page_ext;
  45 +
  46 + page_ext = lookup_page_ext(page);
  47 + return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
22 48 }
23 49  
24 50 static void poison_page(struct page *page)
... ... @@ -95,6 +121,9 @@
95 121  
96 122 void kernel_map_pages(struct page *page, int numpages, int enable)
97 123 {
  124 + if (!page_poisoning_enabled)
  125 + return;
  126 +
98 127 if (enable)
99 128 unpoison_pages(page, numpages);
100 129 else
... ... @@ -56,7 +56,7 @@
56 56 #include <linux/prefetch.h>
57 57 #include <linux/mm_inline.h>
58 58 #include <linux/migrate.h>
59   -#include <linux/page-debug-flags.h>
  59 +#include <linux/page_ext.h>
60 60 #include <linux/hugetlb.h>
61 61 #include <linux/sched/rt.h>
62 62  
63 63  
... ... @@ -425,7 +425,23 @@
425 425  
426 426 #ifdef CONFIG_DEBUG_PAGEALLOC
427 427 unsigned int _debug_guardpage_minorder;
  428 +bool _debug_guardpage_enabled __read_mostly;
428 429  
  430 +static bool need_debug_guardpage(void)
  431 +{
  432 + return true;
  433 +}
  434 +
  435 +static void init_debug_guardpage(void)
  436 +{
  437 + _debug_guardpage_enabled = true;
  438 +}
  439 +
  440 +struct page_ext_operations debug_guardpage_ops = {
  441 + .need = need_debug_guardpage,
  442 + .init = init_debug_guardpage,
  443 +};
  444 +
429 445 static int __init debug_guardpage_minorder_setup(char *buf)
430 446 {
431 447 unsigned long res;
... ... @@ -443,7 +459,14 @@
443 459 static inline void set_page_guard(struct zone *zone, struct page *page,
444 460 unsigned int order, int migratetype)
445 461 {
446   - __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
  462 + struct page_ext *page_ext;
  463 +
  464 + if (!debug_guardpage_enabled())
  465 + return;
  466 +
  467 + page_ext = lookup_page_ext(page);
  468 + __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
  469 +
447 470 INIT_LIST_HEAD(&page->lru);
448 471 set_page_private(page, order);
449 472 /* Guard pages are not available for any usage */
450 473  
... ... @@ -453,12 +476,20 @@
453 476 static inline void clear_page_guard(struct zone *zone, struct page *page,
454 477 unsigned int order, int migratetype)
455 478 {
456   - __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
  479 + struct page_ext *page_ext;
  480 +
  481 + if (!debug_guardpage_enabled())
  482 + return;
  483 +
  484 + page_ext = lookup_page_ext(page);
  485 + __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
  486 +
457 487 set_page_private(page, 0);
458 488 if (!is_migrate_isolate(migratetype))
459 489 __mod_zone_freepage_state(zone, (1 << order), migratetype);
460 490 }
461 491 #else
  492 +struct page_ext_operations debug_guardpage_ops = { NULL, };
462 493 static inline void set_page_guard(struct zone *zone, struct page *page,
463 494 unsigned int order, int migratetype) {}
464 495 static inline void clear_page_guard(struct zone *zone, struct page *page,
... ... @@ -869,6 +900,7 @@
869 900 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
870 901  
871 902 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
  903 + debug_guardpage_enabled() &&
872 904 high < debug_guardpage_minorder()) {
873 905 /*
874 906 * Mark as guard pages (or page), that will allow to
... ... @@ -51,6 +51,10 @@
51 51 */
52 52  
53 53 static struct page_ext_operations *page_ext_ops[] = {
  54 + &debug_guardpage_ops,
  55 +#ifdef CONFIG_PAGE_POISONING
  56 + &page_poisoning_ops,
  57 +#endif
54 58 };
55 59  
56 60 static unsigned long total_usage;