Commit 85aa125f001f87f96a72e9e6ee515490843b1202

Authored by Michal Nazarewicz
Committed by Marek Szyprowski
1 parent 03d44192f6

mm: compaction: introduce isolate_freepages_range()

This commit introduces isolate_freepages_range() function which
generalises isolate_freepages_block() so that it can be used on
arbitrary PFN ranges.

isolate_freepages_block() is left with only minor changes.

Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>

Showing 1 changed file with 93 additions and 18 deletions Side-by-side Diff

... ... @@ -54,24 +54,20 @@
54 54 return count;
55 55 }
56 56  
57   -/* Isolate free pages onto a private freelist. Must hold zone->lock */
58   -static unsigned long isolate_freepages_block(struct zone *zone,
59   - unsigned long blockpfn,
60   - struct list_head *freelist)
  57 +/*
  58 + * Isolate free pages onto a private freelist. Caller must hold zone->lock.
  59 + * If @strict is true, the function aborts and returns 0 on any invalid
  60 + * PFNs or non-free pages inside the pageblock (even though it may
  61 + * still end up isolating some pages).
  62 + */
  63 +static unsigned long isolate_freepages_block(unsigned long blockpfn,
  64 + unsigned long end_pfn,
  65 + struct list_head *freelist,
  66 + bool strict)
61 67 {
62   - unsigned long zone_end_pfn, end_pfn;
63 68 int nr_scanned = 0, total_isolated = 0;
64 69 struct page *cursor;
65 70  
66   - /* Get the last PFN we should scan for free pages at */
67   - zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
68   - end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
69   -
70   - /* Find the first usable PFN in the block to initialse page cursor */
71   - for (; blockpfn < end_pfn; blockpfn++) {
72   - if (pfn_valid_within(blockpfn))
73   - break;
74   - }
75 71 cursor = pfn_to_page(blockpfn);
76 72  
77 73 /* Isolate free pages. This assumes the block is valid */
78 74  
79 75  
80 76  
81 77  
... ... @@ -79,15 +75,23 @@
79 75 int isolated, i;
80 76 struct page *page = cursor;
81 77  
82   - if (!pfn_valid_within(blockpfn))
  78 + if (!pfn_valid_within(blockpfn)) {
  79 + if (strict)
  80 + return 0;
83 81 continue;
  82 + }
84 83 nr_scanned++;
85 84  
86   - if (!PageBuddy(page))
  85 + if (!PageBuddy(page)) {
  86 + if (strict)
  87 + return 0;
87 88 continue;
  89 + }
88 90  
89 91 /* Found a free page, break it into order-0 pages */
90 92 isolated = split_free_page(page);
  93 + if (!isolated && strict)
  94 + return 0;
91 95 total_isolated += isolated;
92 96 for (i = 0; i < isolated; i++) {
93 97 list_add(&page->lru, freelist);
... ... @@ -105,6 +109,73 @@
105 109 return total_isolated;
106 110 }
107 111  
  112 +/**
  113 + * isolate_freepages_range() - isolate free pages.
  114 + * @start_pfn: The first PFN to start isolating.
  115 + * @end_pfn: The one-past-last PFN.
  116 + *
  117 + * Non-free pages, invalid PFNs, or zone boundaries within the
  118 + * [start_pfn, end_pfn) range are considered errors and cause the
  119 + * function to undo its actions and return zero.
  120 + *
  121 + * Otherwise, the function returns the one-past-the-last PFN of the
  122 + * isolated page (which may be greater than end_pfn if the end fell in
  123 + * the middle of a free page).
  124 + */
  125 +static unsigned long
  126 +isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
  127 +{
  128 + unsigned long isolated, pfn, block_end_pfn, flags;
  129 + struct zone *zone = NULL;
  130 + LIST_HEAD(freelist);
  131 +
  132 + if (pfn_valid(start_pfn))
  133 + zone = page_zone(pfn_to_page(start_pfn));
  134 +
  135 + for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
  136 + if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
  137 + break;
  138 +
  139 + /*
  140 + * On subsequent iterations ALIGN() is actually not needed,
  141 + * but we keep it so as not to complicate the code.
  142 + */
  143 + block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
  144 + block_end_pfn = min(block_end_pfn, end_pfn);
  145 +
  146 + spin_lock_irqsave(&zone->lock, flags);
  147 + isolated = isolate_freepages_block(pfn, block_end_pfn,
  148 + &freelist, true);
  149 + spin_unlock_irqrestore(&zone->lock, flags);
  150 +
  151 + /*
  152 + * In strict mode, isolate_freepages_block() returns 0 if
  153 + * there are any holes in the block (ie. invalid PFNs or
  154 + * non-free pages).
  155 + */
  156 + if (!isolated)
  157 + break;
  158 +
  159 + /*
  160 + * If we managed to isolate pages, it is always (1 << n) *
  161 + * pageblock_nr_pages for some non-negative n. (Max order
  162 + * page may span two pageblocks).
  163 + */
  164 + }
  165 +
  166 + /* split_free_page does not map the pages */
  167 + map_pages(&freelist);
  168 +
  169 + if (pfn < end_pfn) {
  170 + /* Loop terminated early, cleanup. */
  171 + release_freepages(&freelist);
  172 + return 0;
  173 + }
  174 +
  175 + /* We don't use freelists for anything. */
  176 + return pfn;
  177 +}
  178 +
108 179 /* Returns true if the page is within a block suitable for migration to */
109 180 static bool suitable_migration_target(struct page *page)
110 181 {
... ... @@ -145,7 +216,7 @@
145 216 struct compact_control *cc)
146 217 {
147 218 struct page *page;
148   - unsigned long high_pfn, low_pfn, pfn;
  219 + unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
149 220 unsigned long flags;
150 221 int nr_freepages = cc->nr_freepages;
151 222 struct list_head *freelist = &cc->freepages;
... ... @@ -165,6 +236,8 @@
165 236 */
166 237 high_pfn = min(low_pfn, pfn);
167 238  
  239 + zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
  240 +
168 241 /*
169 242 * Isolate free pages until enough are available to migrate the
170 243 * pages on cc->migratepages. We stop searching if the migrate
... ... @@ -201,7 +274,9 @@
201 274 isolated = 0;
202 275 spin_lock_irqsave(&zone->lock, flags);
203 276 if (suitable_migration_target(page)) {
204   - isolated = isolate_freepages_block(zone, pfn, freelist);
  277 + end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
  278 + isolated = isolate_freepages_block(pfn, end_pfn,
  279 + freelist, false);
205 280 nr_freepages += isolated;
206 281 }
207 282 spin_unlock_irqrestore(&zone->lock, flags);