// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/page_isolation.h>
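
/*
 * Try to mark the pageblock containing @page as MIGRATE_ISOLATE and move
 * its free pages onto the isolate freelist. Returns 0 on success, or
 * -EBUSY if the pageblock is already isolated, the isolation notifier
 * objects, or the block contains unmovable pages.
 */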
static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
{
	struct zone *zone;
	unsigned long flags, pfn;
	struct memory_isolate_notify arg;
	int notifier_ret;
	int ret = -EBUSY;

	zone = page_zone(page);

	spin_lock_irqsave(&zone->lock, flags);

	/*
	 * We assume the caller intended to SET the migrate type to isolate.
	 * If it is already set, then someone else must have raced and
	 * set it before us. Return -EBUSY.
	 */
	if (is_migrate_isolate_page(page))
		goto out;

	pfn = page_to_pfn(page);
	arg.start_pfn = pfn;
	arg.nr_pages = pageblock_nr_pages;
	arg.pages_found = 0;

	/*
	 * It may be possible to isolate a pageblock even if the
	 * migratetype is not MIGRATE_MOVABLE. The memory isolation
	 * notifier chain is used by balloon drivers to return the
	 * number of pages in a range that are held by the balloon
	 * driver to shrink memory. If all the pages are accounted for
	 * by balloons, are free, or on the LRU, isolation can continue.
	 * Later, for example, when the memory hotplug notifier runs,
	 * these pages reported as "can be isolated" should be isolated
	 * (freed) by the balloon driver through the memory notifier chain.
	 */
	notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
	notifier_ret = notifier_to_errno(notifier_ret);
	if (notifier_ret)
		goto out;
	/*
	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
	 * We just check MOVABLE pages.
	 */
	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
				 isol_flags))
		ret = 0;

	/*
	 * "immobile" means not-on-LRU pages. If the immobile count is
	 * larger than the removable-by-driver pages reported by the
	 * notifier, we'll fail.
	 */

out:
	if (!ret) {
		unsigned long nr_pages;
		int mt = get_pageblock_migratetype(page);

		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
		zone->nr_isolate_pageblock++;
		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
						NULL);

		__mod_zone_freepage_state(zone, -nr_pages, mt);
	}

	spin_unlock_irqrestore(&zone->lock, flags);
	if (!ret)
		drain_all_pages(zone);
	return ret;
}
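
/*
 * Undo set_migratetype_isolate(): return the pageblock to @migratetype,
 * moving its free pages back off the isolate freelist (or, for a fully
 * free high-order buddy, isolating and re-freeing it so it can merge)
 * and fixing up the freepage accounting.
 */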
static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
	struct zone *zone;
	unsigned long flags, nr_pages;
	bool isolated_page = false;
	unsigned int order;
	unsigned long pfn, buddy_pfn;
	struct page *buddy;

	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	if (!is_migrate_isolate_page(page))
		goto out;

	/*
	 * Because a free page with order greater than pageblock_order is
	 * restricted from merging while its pageblock is isolated (due to
	 * the freepage counting problem), it is possible that a free buddy
	 * page exists here. move_freepages_block() does not handle merging,
	 * so we need another approach: isolating the page and freeing it
	 * again lets the buddies merge.
	 */
	if (PageBuddy(page)) {
		order = page_order(page);
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);
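			/*
			 * __find_buddy_pfn() simply flips bit 'order' of the
			 * pfn: buddy_pfn == pfn ^ (1 << order). At order 10,
			 * for example, the buddy of pfn 0x1000 is pfn 0x1400.
			 */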

			if (pfn_valid_within(buddy_pfn) &&
			    !is_migrate_isolate_page(buddy)) {
				__isolate_free_page(page, order);
				isolated_page = true;
			}
		}
	}

	/*
	 * If we isolated a free page of more than pageblock_order, there
	 * should be no free pages left in the range, so we can avoid the
	 * costly pageblock scan for moving free pages.
	 */
	if (!isolated_page) {
		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
		__mod_zone_freepage_state(zone, nr_pages, migratetype);
	}
	set_pageblock_migratetype(page, migratetype);
	zone->nr_isolate_pageblock--;
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	if (isolated_page) {
		post_alloc_hook(page, order, __GFP_MOVABLE);
		__free_pages(page, order);
	}
}
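
/*
 * Return the first online page in the pfn range [pfn, pfn + nr_pages),
 * or NULL if no pfn in the range maps to an online page.
 */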
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;

		page = pfn_to_online_page(pfn + i);
		if (!page)
			continue;
		return page;
	}
	return NULL;
}

/**
 * start_isolate_page_range() - make the page-allocation-type of a range of
 * pages MIGRATE_ISOLATE.
 * @start_pfn:		The lower PFN of the range to be isolated.
 * @end_pfn:		The upper PFN of the range to be isolated.
 *			start_pfn/end_pfn must be aligned to pageblock_order.
 * @migratetype:	Migrate type to set in error recovery.
 * @flags:		The following flags are allowed (they can be combined
 *			in a bit mask):
 *			SKIP_HWPOISON - ignore hwpoison pages
 *			REPORT_FAILURE - report details about the failure to
 *			isolate the range
 *
 * Making the page-allocation-type of the range MIGRATE_ISOLATE means free
 * pages in the range will never be allocated. Any free pages and pages freed
 * in the future will not be allocated again. If the specified range includes
 * migrate types other than MOVABLE or CMA, this will fail with -EBUSY. To
 * finally isolate all pages in the range, the caller has to free all pages in
 * the range; test_pages_isolated() can be used to test for that.
 *
 * There is no high level synchronization mechanism that prevents two threads
 * from trying to isolate overlapping ranges. If this happens, one thread
 * will notice pageblocks in the overlapping range already set to isolate.
 * This happens in set_migratetype_isolate, and set_migratetype_isolate
 * returns an error. We then clean up by restoring the migration type on
 * pageblocks we may have modified and return -EBUSY to the caller. This
 * prevents two threads from simultaneously working on overlapping ranges.
 *
 * Please note that there is no strong synchronization with the page allocator
 * either. Pages might be freed while their page blocks are marked ISOLATED.
 * In some cases pages might still end up on pcp lists and that would allow
 * for their allocation even when they are in fact isolated already. Depending
 * on how strong of a guarantee the caller needs, drain_all_pages() might be
 * needed (e.g. __offline_pages() will need to call it after checking for an
 * isolated range before the next retry).
 *
 * Return: the number of isolated pageblocks on success and -EBUSY if any part
 * of the range cannot be isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype, int flags)
{
	unsigned long pfn;
	unsigned long undo_pfn;
	struct page *page;
	int nr_isolate_pageblock = 0;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}
			nr_isolate_pageblock++;
		}
	}
	return nr_isolate_pageblock;
undo:
	for (pfn = start_pfn;
	     pfn < undo_pfn;
	     pfn += pageblock_nr_pages) {
		struct page *page = pfn_to_online_page(pfn);
		if (!page)
			continue;
		unset_migratetype_isolate(page, migratetype);
	}

	return -EBUSY;
}
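
/*
 * Illustrative sketch (not a caller in this file): users such as
 * alloc_contig_range() drive the functions in this file roughly like
 * this, assuming a pageblock-aligned pfn range within a single zone:
 *
 *	ret = start_isolate_page_range(start_pfn, end_pfn,
 *				       MIGRATE_MOVABLE, 0);
 *	if (ret < 0)
 *		return ret;
 *	...migrate or free any in-use pages left in the range...
 *	if (test_pages_isolated(start_pfn, end_pfn, false))
 *		ret = -EBUSY;
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */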

/*
 * Make isolated pages available again.
 */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     unsigned migratetype)
{
	unsigned long pfn;
	struct page *page;

	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));

	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (!page || !is_migrate_isolate_page(page))
			continue;
		unset_migratetype_isolate(page, migratetype);
	}
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns the last tested pfn.
 */
static unsigned long
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
				  bool skip_hwpoisoned_pages)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			/*
			 * If the page is on a free list, it has to be on
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			pfn += 1 << page_order(page);
		else if (skip_hwpoisoned_pages && PageHWPoison(page))
			/* A HWPoisoned page cannot also be PageBuddy */
			pfn++;
		else
			break;
	}

	return pfn;
}

/* Caller should ensure that the requested range is in a single zone */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			bool skip_hwpoisoned_pages)
{
	unsigned long pfn, flags;
	struct page *page;
	struct zone *zone;

	/*
	 * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
	 * are not necessarily aligned to pageblock_nr_pages.
	 * Hence, just check the migratetype first.
	 */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page && !is_migrate_isolate_page(page))
			break;
	}
	page = __first_valid_page(start_pfn, end_pfn - start_pfn);
	if ((pfn < end_pfn) || !page)
		return -EBUSY;
	/* Check that all pages are free or marked as ISOLATED */
	zone = page_zone(page);
	spin_lock_irqsave(&zone->lock, flags);
	pfn = __test_page_isolated_in_pageblock(start_pfn, end_pfn,
						skip_hwpoisoned_pages);
	spin_unlock_irqrestore(&zone->lock, flags);

	trace_test_pages_isolated(start_pfn, end_pfn, pfn);

	return pfn < end_pfn ? -EBUSY : 0;
}
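
/*
 * Migration-target allocation callback, suitable for passing to
 * migrate_pages() when moving pages out of an isolated range (as
 * alloc_contig_range()-style callers do): allocate a replacement page,
 * preferring the current node.
 */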
struct page *alloc_migrate_target(struct page *page, unsigned long private)
{
	return new_page_nodemask(page, numa_node_id(), &node_states[N_MEMORY]);
}