Commit aff622495c9a0b56148192e53bdec539f5e147f2

Authored by Rik van Riel
Committed by Linus Torvalds
Parent: 7be62de99a

vmscan: only defer compaction for failed order and higher

Currently a failed order-9 (transparent hugepage) compaction can lead to
memory compaction being temporarily disabled for a memory zone, even if
we only need compaction for an order-2 allocation, e.g. for jumbo frame
networking.

The fix is relatively straightforward: keep track of the highest order at
which compaction is succeeding, and only defer compaction for orders at
which compaction is failing.
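
To make that concrete, here is a minimal userspace model of the per-order
deferral logic (a sketch only: struct zone_model and note_compaction_success()
are names invented for this example, standing in for struct zone and the
allocator's success path; the two helpers mirror defer_compaction() and
compaction_deferred() from the patch below):

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* cap on the backoff */

struct zone_model {			/* stands in for struct zone */
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;	/* lowest order known to fail */
};

/* Mirror of defer_compaction(): back off, but only for order and up. */
static void defer_compaction(struct zone_model *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Mirror of compaction_deferred(): true if this attempt should be skipped. */
static bool compaction_deferred(struct zone_model *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;		/* lower orders still get to try */

	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

/* Invented helper modelling the allocator's success path. */
static void note_compaction_success(struct zone_model *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift = 0;
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;
}

int main(void)
{
	struct zone_model zone = { 0, 0, 0 };

	note_compaction_success(&zone, 2);	/* order 2 works: failed order becomes 3 */
	defer_compaction(&zone, 9);		/* a THP-sized attempt fails */

	printf("order-9 deferred: %d\n", compaction_deferred(&zone, 9));	/* 1 */
	printf("order-2 deferred: %d\n", compaction_deferred(&zone, 2));	/* 0 */
	return 0;
}

Running the model prints "order-9 deferred: 1" and "order-2 deferred: 0":
the failed order backs off while lower orders keep compacting.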

Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 5 changed files with 27 additions and 8 deletions

include/linux/compaction.h
@@ -34,20 +34,26 @@
  * allocation success. 1 << compact_defer_limit compactions are skipped up
  * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
  */
-static inline void defer_compaction(struct zone *zone)
+static inline void defer_compaction(struct zone *zone, int order)
 {
 	zone->compact_considered = 0;
 	zone->compact_defer_shift++;

+	if (order < zone->compact_order_failed)
+		zone->compact_order_failed = order;
+
 	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
 		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
 }

 /* Returns true if compaction should be skipped this time */
-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
 {
 	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

+	if (order < zone->compact_order_failed)
+		return false;
+
 	/* Avoid possible overflow */
 	if (++zone->compact_considered > defer_limit)
 		zone->compact_considered = defer_limit;

@@ -73,11 +79,11 @@
 	return COMPACT_SKIPPED;
 }

-static inline void defer_compaction(struct zone *zone)
+static inline void defer_compaction(struct zone *zone, int order)
 {
 }

-static inline bool compaction_deferred(struct zone *zone)
+static inline bool compaction_deferred(struct zone *zone, int order)
 {
 	return 1;
 }
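
As the header comment above says, 1 << compact_defer_shift compactions are
skipped after a failure, up to a limit of 1 << COMPACT_MAX_DEFER_SHIFT. A
minimal standalone sketch of that backoff schedule (assuming the kernel's
COMPACT_MAX_DEFER_SHIFT value of 6; not kernel code):

#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6

int main(void)
{
	unsigned int compact_defer_shift = 0;
	int failure;

	/* Each consecutive failure bumps the shift, capped at the maximum. */
	for (failure = 1; failure <= 8; failure++) {
		compact_defer_shift++;
		if (compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
			compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
		printf("failure %d: defer window is now %lu attempts\n",
		       failure, 1UL << compact_defer_shift);
	}
	return 0;
}

The window doubles per failure (2, 4, 8, ... capped at 64 attempts); with
this patch, requests below compact_order_failed bypass the window entirely.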
include/linux/mmzone.h
@@ -365,6 +365,7 @@
 	 */
 	unsigned int		compact_considered;
 	unsigned int		compact_defer_shift;
+	int			compact_order_failed;
 #endif

 	ZONE_PADDING(_pad1_)
mm/compaction.c
@@ -695,8 +695,18 @@
 	INIT_LIST_HEAD(&cc->freepages);
 	INIT_LIST_HEAD(&cc->migratepages);

-	if (cc->order < 0 || !compaction_deferred(zone))
+	if (cc->order < 0 || !compaction_deferred(zone, cc->order))
 		compact_zone(zone, cc);
+
+	if (cc->order > 0) {
+		int ok = zone_watermark_ok(zone, cc->order,
+					low_wmark_pages(zone), 0, 0);
+		if (ok && cc->order > zone->compact_order_failed)
+			zone->compact_order_failed = cc->order + 1;
+		/* Currently async compaction is never deferred. */
+		else if (!ok && cc->sync)
+			defer_compaction(zone, cc->order);
+	}

 	VM_BUG_ON(!list_empty(&cc->freepages));
 	VM_BUG_ON(!list_empty(&cc->migratepages));
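
The hunk above adds success/failure bookkeeping after each compact_zone()
run in the kswapd path. Reusing struct zone_model and defer_compaction()
from the sketch near the top, the decision reduces to roughly this
(watermark_ok stands in for the zone_watermark_ok() test against
low_wmark_pages(zone), sync for cc->sync):

/*
 * Model of the bookkeeping above; builds on the earlier zone_model sketch.
 */
static void account_compaction_result(struct zone_model *zone, int order,
				      bool watermark_ok, bool sync)
{
	if (order <= 0)
		return;		/* whole-node (order < 0) and order-0 runs track nothing */

	if (watermark_ok && order > zone->compact_order_failed)
		/* the zone now satisfies this order: raise the failure bar */
		zone->compact_order_failed = order + 1;
	else if (!watermark_ok && sync)
		/* only sync failures defer; async compaction never does */
		defer_compaction(zone, order);
}

Note the asymmetry: success raises compact_order_failed past the order that
just worked, while only synchronous failures defer, matching the comment
that async compaction is never deferred.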
mm/page_alloc.c
@@ -1990,7 +1990,7 @@
 	if (!order)
 		return NULL;

-	if (compaction_deferred(preferred_zone)) {
+	if (compaction_deferred(preferred_zone, order)) {
 		*deferred_compaction = true;
 		return NULL;
 	}
@@ -2012,6 +2012,8 @@
 	if (page) {
 		preferred_zone->compact_considered = 0;
 		preferred_zone->compact_defer_shift = 0;
+		if (order >= preferred_zone->compact_order_failed)
+			preferred_zone->compact_order_failed = order + 1;
 		count_vm_event(COMPACTSUCCESS);
 		return page;
 	}
@@ -2028,7 +2030,7 @@
 	 * defer if the failure was a sync compaction failure.
 	 */
 	if (sync_migration)
-		defer_compaction(preferred_zone);
+		defer_compaction(preferred_zone, order);

 	cond_resched();
 }
mm/vmscan.c
@@ -2198,7 +2198,7 @@
 	 * If compaction is deferred, reclaim up to a point where
 	 * compaction will have a chance of success when re-enabled
 	 */
-	if (compaction_deferred(zone))
+	if (compaction_deferred(zone, sc->order))
 		return watermark_ok;

 	/* If compaction is not ready to start, keep reclaiming */