mm/migrate.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
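
/*
 * Illustrative sketch (not part of the original file): the comment above
 * migrate_prep() describes a calling protocol. A caller that builds a list
 * of isolated pages, such as the migrate_pages() users elsewhere in mm/,
 * follows roughly this sequence; new_page and private are placeholders:
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	for each candidate page:
 *		if (!isolate_lru_page(page))
 *			list_add_tail(&page->lru, &pagelist);
 *	err = migrate_pages(&pagelist, new_page, NULL, private,
 *			    MIGRATE_SYNC, MR_SYSCALL);
 *	if (err)
 *		putback_movable_pages(&pagelist);
 */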

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because page's owner
	 * assumes anybody doesn't touch PG_lock of newly allocated page
	 * so unconditionally grabbing the lock ruins page's owner side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

/* It should be called on page which is PG_movable */
void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated non-lru movable page so here we can use
		 * __PageMovable because LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_write_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);

		if (unlikely(is_zone_device_page(new))) {
			if (is_device_private_page(new)) {
				entry = make_device_private_entry(new, pte_write(pte));
				pte = swp_entry_to_pte(entry);
			} else if (is_device_public_page(new)) {
				pte = pte_mkdevmap(pte);
			}
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, vma, new, 0);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
		} else
#endif
		{
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		if (PageTransHuge(page) && PageMlocked(page))
			clear_page_mlock(page);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero(), here. Even failed, page fault
	 * will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
	if (!get_page_unless_zero(page))
		goto unlock;
	spin_unlock(ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
unlock:
	spin_unlock(ptl);
}
#endif

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = 1 + extra_count;
	void **pslot;

	/*
	 * Device public or private pages have an extra refcount as they are
	 * ZONE_DEVICE pages.
	 */
	expected_count += is_device_private_page(page);
	expected_count += is_device_public_page(page);

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			__SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = page_zone(page);
	newzone = page_zone(newpage);

	xa_lock_irq(&mapping->i_pages);

	pslot = radix_tree_lookup_slot(&mapping->i_pages,
					page_index(page));

	expected_count += hpage_nr_pages(page) + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot,
					&mapping->i_pages.xa_lock) != page) {
		xa_unlock_irq(&mapping->i_pages);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xa_unlock_irq(&mapping->i_pages);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. Otherwise, if the
	 * mapping were moved but we later failed to lock the buffers, we could
	 * not move the mapping back due to an elevated page count and would
	 * have to block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_ref_unfreeze(page, expected_count);
		xa_unlock_irq(&mapping->i_pages);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
	if (PageSwapBacked(page)) {
		__SetPageSwapBacked(newpage);
		if (PageSwapCache(page)) {
			SetPageSwapCache(newpage);
			set_page_private(newpage, page_private(page));
		}
	} else {
		VM_BUG_ON_PAGE(PageSwapCache(page), page);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}

	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
	if (PageTransHuge(page)) {
		int i;
		int index = page_index(page);

		for (i = 1; i < HPAGE_PMD_NR; i++) {
			pslot = radix_tree_lookup_slot(&mapping->i_pages,
						       index + i);
			radix_tree_replace_slot(&mapping->i_pages, pslot,
						newpage + i);
		}
	}

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));

	xa_unlock(&mapping->i_pages);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
		if (PageSwapBacked(page) && !PageSwapCache(page)) {
			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
		}
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	xa_lock_irq(&mapping->i_pages);

	pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) {
		xa_unlock_irq(&mapping->i_pages);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xa_unlock_irq(&mapping->i_pages);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	xa_unlock_irq(&mapping->i_pages);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page.  We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the page to its new location
 */
void migrate_page_states(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	/* Move dirty on pages not done by migrate_page_move_mapping() */
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);

	copy_page_owner(page, newpage);

	mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_states);

void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	migrate_page_states(newpage, page);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * In case of a non-lru page, it could be released after
		 * the isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			__ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			__ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare so don't reset it here, to keep type
		 * checks such as PageAnon working.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;

		if (unlikely(is_zone_device_page(newpage))) {
			if (is_device_public_page(newpage))
				flush_dcache_page(newpage);
		} else
			flush_dcache_page(newpage);
	}
out:
	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much
		 */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock().
	 * File caches may use write_page() or lock_page() in migration, so we
	 * only need to care about the anon page here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_unmap(page,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease refcount of the newpage
	 * which will not free the page because new page owner increased
	 * refcounter. As well, if it is LRU page, add the page to LRU
	 * list in here. Use the old state of the isolated source page to
	 * determine if we migrated a LRU page. newpage was already unlocked
	 * and possibly modified by its owner - don't rely on the page
	 * state.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(!is_lru))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

/*
 * gcc 4.7 and 4.8 on arm get ICEs when inlining unmap_and_move().  Work
 * around it.
 */
#if defined(CONFIG_ARM) && \
	defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif
  
/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason)
{
	int rc = MIGRATEPAGE_SUCCESS;
	struct page *newpage;

	if (!thp_migration_supported() && PageTransHuge(page))
		return -ENOMEM;

	newpage = get_new_page(page, private);
	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (unlikely(__PageMovable(page))) {
			lock_page(page);
			if (!PageMovable(page))
				__ClearPageIsolated(page);
			unlock_page(page);
		}
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
		goto out;
	}

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);

		/*
		 * Compaction can migrate also non-LRU pages which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * as __PageMovable
		 */
		if (likely(!__PageMovable(page)))
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
	}

	/*
	 * If migration is successful, releases reference grabbed during
	 * isolation. Otherwise, restore the page to the right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		put_page(page);
		if (reason == MR_MEMORY_FAILURE) {
			/*
			 * Set PG_HWPoison on just freed page
			 * intentionally. Although it's rather weird,
			 * it's how HWPoison flag works at the moment.
			 */
			if (set_hwpoison_free_buddy_page(page))
				num_poisoned_pages_inc();
		}
	} else {
		if (rc != -EAGAIN) {
			if (likely(!__PageMovable(page))) {
				putback_lru_page(page);
				goto put_new;
			}

			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		}
put_new:
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	return rc;
}
  
/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;

	/*
	 * Movability of hugepages depends on architectures and hugepage size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage))) {
		putback_active_hugepage(hpage);
		return -ENOSYS;
	}

	new_hpage = get_new_page(hpage, private);
	if (!new_hpage)
		return -ENOMEM;

	if (!trylock_page(hpage)) {
		if (!force)
			goto out;
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			goto out;
		}
		lock_page(hpage);
	}

	/*
	 * Check for pages which are in the process of being freed.  Without
	 * page_mapping() set, hugetlbfs specific move page routine will not
	 * be called and we could leak usage counts for subpools.
	 */
	if (page_private(hpage) && !page_mapping(hpage)) {
		rc = -EBUSY;
		goto out_unlock;
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	if (unlikely(!trylock_page(new_hpage)))
		goto put_anon;

	if (page_mapped(hpage)) {
		try_to_unmap(hpage,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, mode);

	if (page_was_mapped)
		remove_migration_ptes(hpage,
			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);

	unlock_page(new_hpage);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		move_hugetlb_state(hpage, new_hpage, reason);
		put_new_page = NULL;
	}

out_unlock:
	unlock_page(hpage);
out:
	if (rc != -EAGAIN)
		putback_active_hugepage(hpage);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it.  Otherwise, put_page() will drop the reference grabbed during
	 * isolation.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	return rc;
}
  
/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 *
 * The function returns after 10 attempts or earlier if there is nothing left
 * to do, because the list has become empty or no retryable pages remain.
 * The caller should call putback_movable_pages() to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Returns the number of pages that were not migrated, or an error code.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_succeeded = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
retry:
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode, reason);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode,
						reason);

			switch (rc) {
			case -ENOMEM:
				/*
				 * THP migration might be unsupported or the
				 * allocation could've failed so we should
				 * retry on the same page with the THP split
				 * to base pages.
				 *
				 * Head page is retried immediately and tail
				 * pages are added to the tail of the list so
				 * we encounter them after the rest of the list
				 * is processed.
				 */
				if (PageTransHuge(page) && !PageHuge(page)) {
					lock_page(page);
					rc = split_huge_page_to_list(page, from);
					unlock_page(page);
					if (!rc) {
						list_safe_reset_next(page, page2, lru);
						goto retry;
					}
				}
				nr_failed++;
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded++;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, -ENOSYS, etc.):
				 * unlike -EAGAIN case, the failed page is
				 * removed from migration page list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				break;
			}
		}
	}
	nr_failed += retry;
	rc = nr_failed;
out:
	if (nr_succeeded)
		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	if (nr_failed)
		count_vm_events(PGMIGRATE_FAIL, nr_failed);
	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return rc;
}
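
/*
 * Illustrative sketch only (kept out of the build with #if 0): the calling
 * convention described in the kernel-doc above.  The caller isolates pages
 * onto a private list, hands migrate_pages() an allocation callback such as
 * alloc_new_node_page(), and puts any leftover pages back itself when the
 * return value is non-zero.  This condenses the do_move_pages_to_node() /
 * add_page_for_migration() flow implemented below; the function name is
 * made up for the example.
 */
#if 0
static int example_migrate_one_page_to_node(struct mm_struct *mm,
					    unsigned long addr, int node)
{
	LIST_HEAD(pagelist);
	int err;

	/* Resolve @addr, isolate the page and queue it on @pagelist. */
	err = add_page_for_migration(mm, addr, node, &pagelist, true);
	if (err || list_empty(&pagelist))
		return err;

	/* Returns the number of pages that could not be migrated. */
	err = migrate_pages(&pagelist, alloc_new_node_page, NULL, node,
			    MIGRATE_SYNC, MR_SYSCALL);
	if (err)
		putback_movable_pages(&pagelist);	/* restore leftovers */
	return err;
}
#endif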

#ifdef CONFIG_NUMA

static int store_status(int __user *status, int start, int value, int nr)
{
	while (nr-- > 0) {
		if (put_user(value, status + start))
			return -EFAULT;
		start++;
	}

	return 0;
}

static int do_move_pages_to_node(struct mm_struct *mm,
		struct list_head *pagelist, int node)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
			MIGRATE_SYNC, MR_SYSCALL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}
  
/*
 * Resolves the given address to a struct page, isolates it from the LRU and
 * puts it on the given pagelist.
 * Returns -errno if the page cannot be found/isolated or 0 when it has been
 * queued or the page doesn't need to be migrated because it is already on
 * the target node.
 */
static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
		int node, struct list_head *pagelist, bool migrate_all)
{
	struct vm_area_struct *vma;
	struct page *page;
	unsigned int follflags;
	int err;

	down_read(&mm->mmap_sem);
	err = -EFAULT;
	vma = find_vma(mm, addr);
	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
		goto out;

	/* FOLL_DUMP to ignore special (like zero) pages */
	follflags = FOLL_GET | FOLL_DUMP;
	page = follow_page(vma, addr, follflags);

	err = PTR_ERR(page);
	if (IS_ERR(page))
		goto out;

	err = -ENOENT;
	if (!page)
		goto out;

	err = 0;
	if (page_to_nid(page) == node)
		goto out_putpage;

	err = -EACCES;
	if (page_mapcount(page) > 1 && !migrate_all)
		goto out_putpage;

	if (PageHuge(page)) {
		if (PageHead(page)) {
			isolate_huge_page(page, pagelist);
			err = 0;
		}
	} else {
		struct page *head;

		head = compound_head(page);
		err = isolate_lru_page(head);
		if (err)
			goto out_putpage;

		err = 0;
		list_add_tail(&head->lru, pagelist);
		mod_node_page_state(page_pgdat(head),
			NR_ISOLATED_ANON + page_is_file_cache(head),
			hpage_nr_pages(head));
	}
out_putpage:
	/*
	 * Either remove the duplicate refcount from
	 * isolate_lru_page() or drop the page ref if it was
	 * not isolated.
	 */
	put_page(page);
out:
	up_read(&mm->mmap_sem);
	return err;
}
  
/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	int current_node = NUMA_NO_NODE;
	LIST_HEAD(pagelist);
	int start, i;
	int err = 0, err1;

	migrate_prep();

	for (i = start = 0; i < nr_pages; i++) {
		const void __user *p;
		unsigned long addr;
		int node;

		err = -EFAULT;
		if (get_user(p, pages + i))
			goto out_flush;
		if (get_user(node, nodes + i))
			goto out_flush;
		addr = (unsigned long)p;

		err = -ENODEV;
		if (node < 0 || node >= MAX_NUMNODES)
			goto out_flush;
		if (!node_state(node, N_MEMORY))
			goto out_flush;

		err = -EACCES;
		if (!node_isset(node, task_nodes))
			goto out_flush;

		if (current_node == NUMA_NO_NODE) {
			current_node = node;
			start = i;
		} else if (node != current_node) {
			err = do_move_pages_to_node(mm, &pagelist, current_node);
			if (err)
				goto out;
			err = store_status(status, start, current_node, i - start);
			if (err)
				goto out;
			start = i;
			current_node = node;
		}

		/*
		 * Errors in the page lookup or isolation are not fatal and we simply
		 * report them via status
		 */
		err = add_page_for_migration(mm, addr, current_node,
				&pagelist, flags & MPOL_MF_MOVE_ALL);
		if (!err)
			continue;

		err = store_status(status, i, err, 1);
		if (err)
			goto out_flush;

		err = do_move_pages_to_node(mm, &pagelist, current_node);
		if (err)
			goto out;
		if (i > start) {
			err = store_status(status, start, current_node, i - start);
			if (err)
				goto out;
		}
		current_node = NUMA_NO_NODE;
	}
out_flush:
	if (list_empty(&pagelist))
		return err;

	/* Make sure we do not overwrite the existing error */
	err1 = do_move_pages_to_node(mm, &pagelist, current_node);
	if (!err1)
		err1 = store_status(status, start, current_node, i - start);
	if (!err)
		err = err1;
out:
	return err;
}
  
/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = page ? page_to_nid(page) : -ENOENT;
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}
  
  /*
   * Determine the nodes of a user array of pages and store it in
   * a user array of status.
   */
  static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
  			 const void __user * __user *pages,
  			 int __user *status)
  {
  #define DO_PAGES_STAT_CHUNK_NR 16
  	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
  	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}
  
  /*
   * Move a list of pages in the address space of the currently executing
   * process.
   */
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
			     const void __user * __user *pages,
			     const int __user *nodes,
			     int __user *status, int flags)
{
	struct task_struct *task;
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	task_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm)
		return -EINVAL;

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;

out:
	put_task_struct(task);
	return err;
}

  SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
  		const void __user * __user *, pages,
  		const int __user *, nodes,
  		int __user *, status, int, flags)
  {
  	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
  }
  
  #ifdef CONFIG_COMPAT
  COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
  		       compat_uptr_t __user *, pages32,
  		       const int __user *, nodes,
  		       int __user *, status,
  		       int, flags)
  {
  	const void __user * __user *pages;
  	int i;
  
  	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
  	for (i = 0; i < nr_pages; i++) {
  		compat_uptr_t p;
  
  		if (get_user(p, pages32 + i) ||
  			put_user(compat_ptr(p), pages + i))
  			return -EFAULT;
  	}
  	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
  }
  #endif /* CONFIG_COMPAT */
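
/*
 * Illustrative userspace sketch only (kept out of the build with #if 0):
 * how the move_pages(2) entry points above are typically driven.  A non-NULL
 * nodes array takes the do_pages_move() path and moves the pages; a NULL
 * nodes array takes the do_pages_stat() path and only reports the node each
 * page currently resides on.  Assumes the libnuma wrapper from <numaif.h>
 * (link with -lnuma); a raw syscall(__NR_move_pages, ...) works the same way.
 */
#if 0
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *pages[1];
	int nodes[1] = { 0 };	/* requested target node */
	int status[1];

	pages[0] = aligned_alloc(psz, psz);
	*(char *)pages[0] = 1;	/* touch it so the page is actually present */

	/* pid 0 == current process; try to move the page to node 0 */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == -1)
		perror("move_pages");
	printf("after move: status[0] = %d\n", status[0]);

	/* nodes == NULL: only query which node the page sits on */
	if (move_pages(0, 1, pages, NULL, status, 0) == -1)
		perror("move_pages (stat)");
	printf("query: page is on node %d\n", status[0]);

	free(pages[0]);
	return 0;
}
#endif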

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is a crude check.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       0, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					   unsigned long data)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = __alloc_pages_node(nid,
					 (GFP_HIGHUSER_MOVABLE |
					  __GFP_THISNODE | __GFP_NOMEMALLOC |
					  __GFP_NORETRY | __GFP_NOWARN) &
					 ~__GFP_RECLAIM, 0);

	return newpage;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;

	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
		return 0;

	if (isolate_lru_page(page))
		return 0;

	/*
	 * migrate_misplaced_transhuge_page() skips page migration's usual
	 * check on page_count(), so we must do it here, now that the page
	 * has been isolated: a GUP pin, or any other pin, prevents migration.
	 * The expected page count is 3: 1 for page's mapcount and 1 for the
	 * caller's pin and 1 for the reference taken by isolate_lru_page().
	 */
	if (PageTransHuge(page) && page_count(page) != 3) {
		putback_lru_page(page);
		return 0;
	}

	page_lru = page_is_file_cache(page);
	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
				hpage_nr_pages(page));

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

bool pmd_trans_migrating(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	return PageLocked(page);
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	LIST_HEAD(migratepages);

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Also do not migrate dirty pages as not all filesystems can move
	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
	 */
	if (page_is_file_cache(page) && PageDirty(page))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&page->lru);
			dec_node_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
			putback_lru_page(page);
		}
		isolated = 0;
	} else
		count_vm_numa_event(NUMA_PAGE_MIGRATE);
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * Migrates a THP to a given target node. page must be locked and is unlocked
 * before returning.
 */
int migrate_misplaced_transhuge_page(struct mm_struct *mm,
				struct vm_area_struct *vma,
				pmd_t *pmd, pmd_t entry,
				unsigned long address,
				struct page *page, int node)
{
	spinlock_t *ptl;
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated = 0;
	struct page *new_page = NULL;
	int page_lru = page_is_file_cache(page);
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;

	new_page = alloc_pages_node(node,
		(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
		HPAGE_PMD_ORDER);
	if (!new_page)
		goto out_fail;
	prep_transhuge_page(new_page);

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated) {
		put_page(new_page);
		goto out_fail;
	}

	/* Prepare a page as a migration target */
	__SetPageLocked(new_page);
	if (PageSwapBacked(page))
		__SetPageSwapBacked(new_page);

	/* anon mapping, we can simply copy page->mapping to the new page: */
	new_page->mapping = page->mapping;
	new_page->index = page->index;
	migrate_page_copy(new_page, page);
	WARN_ON(PageLRU(new_page));

	/* Recheck the target PMD */
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
		spin_unlock(ptl);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

		/* Reverse changes made by migrate_page_copy() */
		if (TestClearPageActive(new_page))
			SetPageActive(page);
		if (TestClearPageUnevictable(new_page))
			SetPageUnevictable(page);

		unlock_page(new_page);
		put_page(new_page);		/* Free it */

		/* Retake the callers reference and putback on LRU */
		get_page(page);
		putback_lru_page(page);
		mod_node_page_state(page_pgdat(page),
			 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);

		goto out_unlock;
	}

	entry = mk_huge_pmd(new_page, vma->vm_page_prot);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

	/*
	 * Clear the old entry under pagetable lock and establish the new PTE.
	 * Any parallel GUP will either observe the old page blocking on the
	 * page lock, block on the page table lock or observe the new page.
	 * The SetPageUptodate on the new page and page_add_new_anon_rmap
	 * guarantee the copy is visible before the pagetable update.
	 */
	flush_cache_range(vma, mmun_start, mmun_end);
	page_add_anon_rmap(new_page, vma, mmun_start, true);
	pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
	set_pmd_at(mm, mmun_start, pmd, entry);
	update_mmu_cache_pmd(vma, address, &entry);

	page_ref_unfreeze(page, 2);
	mlock_migrate_page(new_page, page);
	page_remove_rmap(page, true);
	set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);

	spin_unlock(ptl);
	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above pmdp_huge_clear_flush_notify() did already call it.
	 */
	mmu_notifier_invalidate_range_only_end(mm, mmun_start, mmun_end);

	/* Take an "isolate" reference and put new page on the LRU. */
	get_page(new_page);
	putback_lru_page(new_page);

	unlock_page(new_page);
	unlock_page(page);
	put_page(page);			/* Drop the rmap reference */
	put_page(page);			/* Drop the LRU isolation reference */

	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);

	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_lru,
			-HPAGE_PMD_NR);
	return isolated;

out_fail:
	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
	ptl = pmd_lock(mm, pmd);
	if (pmd_same(*pmd, entry)) {
		entry = pmd_modify(entry, vma->vm_page_prot);
		set_pmd_at(mm, mmun_start, pmd, entry);
		update_mmu_cache_pmd(vma, address, &entry);
	}
	spin_unlock(ptl);

out_unlock:
	unlock_page(page);
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#endif /* CONFIG_NUMA */

#if defined(CONFIG_MIGRATE_VMA_HELPER)
struct migrate_vma {
	struct vm_area_struct	*vma;
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;
};

static int migrate_vma_collect_hole(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
		migrate->dst[migrate->npages] = 0;
		migrate->npages++;
		migrate->cpages++;
	}

	return 0;
}

static int migrate_vma_collect_skip(unsigned long start,
				    unsigned long end,
				    struct mm_walk *walk)
{
	struct migrate_vma *migrate = walk->private;
	unsigned long addr;

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		migrate->dst[migrate->npages] = 0;
		migrate->src[migrate->npages++] = 0;
	}

	return 0;
}
  
  static int migrate_vma_collect_pmd(pmd_t *pmdp,
  				   unsigned long start,
  				   unsigned long end,
  				   struct mm_walk *walk)
  {
  	struct migrate_vma *migrate = walk->private;
  	struct vm_area_struct *vma = walk->vma;
  	struct mm_struct *mm = vma->vm_mm;
8c3328f1f   Jérôme Glisse   mm/migrate: migra...
2077
  	unsigned long addr = start, unmapped = 0;
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
  	spinlock_t *ptl;
  	pte_t *ptep;
  
  again:
  	if (pmd_none(*pmdp))
  		return migrate_vma_collect_hole(start, end, walk);
  
  	if (pmd_trans_huge(*pmdp)) {
  		struct page *page;
  
  		ptl = pmd_lock(mm, pmdp);
  		if (unlikely(!pmd_trans_huge(*pmdp))) {
  			spin_unlock(ptl);
  			goto again;
  		}
  
  		page = pmd_page(*pmdp);
  		if (is_huge_zero_page(page)) {
  			spin_unlock(ptl);
  			split_huge_pmd(vma, pmdp, addr);
  			if (pmd_trans_unstable(pmdp))
8315ada7f   Jérôme Glisse   mm/migrate: allow...
2099
  				return migrate_vma_collect_skip(start, end,
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2100
2101
2102
2103
2104
2105
2106
  								walk);
  		} else {
  			int ret;
  
  			get_page(page);
  			spin_unlock(ptl);
  			if (unlikely(!trylock_page(page)))
8315ada7f   Jérôme Glisse   mm/migrate: allow...
2107
  				return migrate_vma_collect_skip(start, end,
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2108
2109
2110
2111
  								walk);
  			ret = split_huge_page(page);
  			unlock_page(page);
  			put_page(page);
8315ada7f   Jérôme Glisse   mm/migrate: allow...
2112
2113
2114
2115
  			if (ret)
  				return migrate_vma_collect_skip(start, end,
  								walk);
  			if (pmd_none(*pmdp))
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2116
2117
2118
2119
2120
2121
  				return migrate_vma_collect_hole(start, end,
  								walk);
  		}
  	}
  
  	if (unlikely(pmd_bad(*pmdp)))
8315ada7f   Jérôme Glisse   mm/migrate: allow...
2122
  		return migrate_vma_collect_skip(start, end, walk);
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2123
2124
  
  	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
8c3328f1f   Jérôme Glisse   mm/migrate: migra...
2125
  	arch_enter_lazy_mmu_mode();
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2126
2127
2128
  	for (; addr < end; addr += PAGE_SIZE, ptep++) {
  		unsigned long mpfn, pfn;
  		struct page *page;
8c3328f1f   Jérôme Glisse   mm/migrate: migra...
2129
  		swp_entry_t entry;
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2130
2131
2132
2133
  		pte_t pte;
  
  		pte = *ptep;
  		pfn = pte_pfn(pte);
a5430dda8   Jérôme Glisse   mm/migrate: suppo...
2134
  		if (pte_none(pte)) {
8315ada7f   Jérôme Glisse   mm/migrate: allow...
2135
2136
2137
  			mpfn = MIGRATE_PFN_MIGRATE;
  			migrate->cpages++;
  			pfn = 0;
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2138
2139
  			goto next;
  		}
a5430dda8   Jérôme Glisse   mm/migrate: suppo...
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
  		if (!pte_present(pte)) {
  			mpfn = pfn = 0;
  
  			/*
  			 * Only care about unaddressable device page special
  			 * page table entry. Other special swap entries are not
  			 * migratable, and we ignore regular swapped page.
  			 */
  			entry = pte_to_swp_entry(pte);
  			if (!is_device_private_entry(entry))
  				goto next;
  
  			page = device_private_entry_to_page(entry);
  			mpfn = migrate_pfn(page_to_pfn(page))|
  				MIGRATE_PFN_DEVICE | MIGRATE_PFN_MIGRATE;
  			if (is_write_device_private_entry(entry))
  				mpfn |= MIGRATE_PFN_WRITE;
  		} else {
  			if (is_zero_pfn(pfn)) {
  				mpfn = MIGRATE_PFN_MIGRATE;
  				migrate->cpages++;
  				pfn = 0;
  				goto next;
  			}
  			page = _vm_normal_page(migrate->vma, addr, pte, true);
  			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
  			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
  		}
  		/* FIXME support THP */
  		if (!page || !page->mapping || PageTransCompound(page)) {
  			mpfn = pfn = 0;
  			goto next;
  		}
  		pfn = page_to_pfn(page);
  
  		/*
  		 * By getting a reference on the page we pin it and that blocks
  		 * any kind of migration. A side effect is that it "freezes"
  		 * the pte.
  		 *
  		 * We drop this reference after isolating the page from the lru
  		 * for non-device pages (device pages are not on the lru and
  		 * thus can't be dropped from it).
  		 */
  		get_page(page);
  		migrate->cpages++;

  		/*
  		 * Optimize for the common case where page is only mapped once
  		 * in one process. If we can lock the page, then we can safely
  		 * set up a special migration page table entry now.
  		 */
  		if (trylock_page(page)) {
  			pte_t swp_pte;
  
  			mpfn |= MIGRATE_PFN_LOCKED;
  			ptep_get_and_clear(mm, addr, ptep);
  
  			/* Setup special migration page table entry */
  			entry = make_migration_entry(page, mpfn &
  						     MIGRATE_PFN_WRITE);
  			swp_pte = swp_entry_to_pte(entry);
  			if (pte_soft_dirty(pte))
  				swp_pte = pte_swp_mksoft_dirty(swp_pte);
  			set_pte_at(mm, addr, ptep, swp_pte);
  
  			/*
  			 * This is like regular unmap: we remove the rmap and
  			 * drop page refcount. Page won't be freed, as we took
  			 * a reference just above.
  			 */
  			page_remove_rmap(page, false);
  			put_page(page);
  
  			if (pte_present(pte))
  				unmapped++;
  		}
  next:
  		migrate->dst[migrate->npages] = 0;
  		migrate->src[migrate->npages++] = mpfn;
  	}
  	arch_leave_lazy_mmu_mode();
  	pte_unmap_unlock(ptep - 1, ptl);
  	/* Only flush the TLB if we actually modified any entries */
  	if (unmapped)
  		flush_tlb_range(walk->vma, start, end);
  	return 0;
  }
  
  /*
   * migrate_vma_collect() - collect pages over a range of virtual addresses
   * @migrate: migrate struct containing all migration information
   *
   * This will walk the CPU page table. For each virtual address backed by a
   * valid page, it updates the src array and takes a reference on the page, in
   * order to pin the page until we lock it and unmap it.
   */
  static void migrate_vma_collect(struct migrate_vma *migrate)
  {
  	struct mm_walk mm_walk;
  
  	mm_walk.pmd_entry = migrate_vma_collect_pmd;
  	mm_walk.pte_entry = NULL;
  	mm_walk.pte_hole = migrate_vma_collect_hole;
  	mm_walk.hugetlb_entry = NULL;
  	mm_walk.test_walk = NULL;
  	mm_walk.vma = migrate->vma;
  	mm_walk.mm = migrate->vma->vm_mm;
  	mm_walk.private = migrate;
  	mmu_notifier_invalidate_range_start(mm_walk.mm,
  					    migrate->start,
  					    migrate->end);
  	walk_page_range(migrate->start, migrate->end, &mm_walk);
  	mmu_notifier_invalidate_range_end(mm_walk.mm,
  					  migrate->start,
  					  migrate->end);
  
  	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
  }
  
  /*
   * migrate_vma_check_page() - check if page is pinned or not
   * @page: struct page to check
   *
   * Pinned pages cannot be migrated. This is the same test as in
   * migrate_page_move_mapping(), except that here we allow migration of a
   * ZONE_DEVICE page.
   */
  static bool migrate_vma_check_page(struct page *page)
  {
  	/*
  	 * One extra ref because caller holds an extra reference, either from
  	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
  	 * a device page.
  	 */
  	int extra = 1;
  
  	/*
  	 * FIXME support THP (transparent huge page), it is a bit more complex to
  	 * check them than regular pages, because they can be mapped with a pmd
  	 * or with a pte (split pte mapping).
  	 */
  	if (PageCompound(page))
  		return false;
  	/* Pages from ZONE_DEVICE have one extra reference */
  	if (is_zone_device_page(page)) {
  		/*
  		 * Private pages can never be pinned as they have no valid pte
  		 * and GUP will fail for those. Yet if there is a pending
  		 * migration a thread might try to wait on the pte migration
  		 * entry and will bump the page reference count. Sadly there is
  		 * no way to differentiate a regular pin from a migration wait.
  		 * Hence, to avoid two racing threads trying to migrate back to
  		 * the CPU entering an infinite loop (one stopping the
  		 * migration because the other is waiting on the pte migration
  		 * entry), we always return true here.
  		 *
  		 * FIXME the proper solution is to rework migration_entry_wait()
  		 * so it does not need to take a reference on the page.
  		 */
  		if (is_device_private_page(page))
  			return true;
  		/*
  		 * Only allow device public pages to be migrated and account
  		 * for the extra reference count implied by ZONE_DEVICE pages.
  		 */
  		if (!is_device_public_page(page))
  			return false;
  		extra++;
  	}
  	/* For file-backed pages */
  	if (page_mapping(page))
  		extra += 1 + page_has_private(page);
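  
  	/*
  	 * Any reference beyond one per mapping plus those accounted in
  	 * "extra" above means someone else holds a pin on the page.  For
  	 * example, an anonymous page mapped by a single process reaches this
  	 * point with page_count() == 2 (one for its mapping, one held by our
  	 * caller), extra == 1 and page_mapcount() == 1, so 2 - 1 is not
  	 * greater than 1 and the page is allowed to migrate.
  	 */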
  	if ((page_count(page) - extra) > page_mapcount(page))
  		return false;
  
  	return true;
  }
  
  /*
   * migrate_vma_prepare() - lock pages and isolate them from the lru
   * @migrate: migrate struct containing all migration information
   *
   * This locks pages that have been collected by migrate_vma_collect(). Once each
   * page is locked it is isolated from the lru (for non-device pages). Finally,
   * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
   * migrated by concurrent kernel threads.
   */
  static void migrate_vma_prepare(struct migrate_vma *migrate)
  {
  	const unsigned long npages = migrate->npages;
  	const unsigned long start = migrate->start;
  	unsigned long addr, i, restore = 0;
  	bool allow_drain = true;
  
  	lru_add_drain();
  
  	for (i = 0; (i < npages) && migrate->cpages; i++) {
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  		bool remap = true;
  
  		if (!page)
  			continue;
  		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
  			/*
  			 * Because we are migrating several pages there can be
  			 * a deadlock between two concurrent migrations where
  			 * each is waiting on the other's page lock.
  			 *
  			 * Make migrate_vma() a best-effort thing and back off
  			 * for any page we cannot lock right away.
  			 */
  			if (!trylock_page(page)) {
  				migrate->src[i] = 0;
  				migrate->cpages--;
  				put_page(page);
  				continue;
  			}
  			remap = false;
  			migrate->src[i] |= MIGRATE_PFN_LOCKED;
  		}

  		/* ZONE_DEVICE pages are not on LRU */
  		if (!is_zone_device_page(page)) {
  			if (!PageLRU(page) && allow_drain) {
  				/* Drain CPU's pagevec */
  				lru_add_drain_all();
  				allow_drain = false;
  			}

  			if (isolate_lru_page(page)) {
  				if (remap) {
  					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  					migrate->cpages--;
  					restore++;
  				} else {
  					migrate->src[i] = 0;
  					unlock_page(page);
  					migrate->cpages--;
  					put_page(page);
  				}
  				continue;
  			}
  
  			/* Drop the reference we took in collect */
  			put_page(page);
  		}
  
  		if (!migrate_vma_check_page(page)) {
  			if (remap) {
  				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  				migrate->cpages--;
  				restore++;

  				if (!is_zone_device_page(page)) {
  					get_page(page);
  					putback_lru_page(page);
  				}
  			} else {
  				migrate->src[i] = 0;
  				unlock_page(page);
  				migrate->cpages--;
  				if (!is_zone_device_page(page))
  					putback_lru_page(page);
  				else
  					put_page(page);
  			}
  		}
  	}
  
  	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  
  		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
  			continue;
  
  		remove_migration_pte(page, migrate->vma, addr, page);
  
  		migrate->src[i] = 0;
  		unlock_page(page);
  		put_page(page);
  		restore--;
  	}
  }
  
  /*
   * migrate_vma_unmap() - replace page mapping with special migration pte entry
   * @migrate: migrate struct containing all migration information
   *
   * Replace page mapping (CPU page table pte) with a special migration pte entry
   * and check again if it has been pinned. Pinned pages are restored because we
   * cannot migrate them.
   *
   * This is the last step before we call the device driver callback to allocate
   * destination memory and copy contents of original page over to new page.
   */
  static void migrate_vma_unmap(struct migrate_vma *migrate)
  {
  	int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
  	const unsigned long npages = migrate->npages;
  	const unsigned long start = migrate->start;
  	unsigned long addr, i, restore = 0;
  
  	for (i = 0; i < npages; i++) {
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  
  		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
  			continue;
  		if (page_mapped(page)) {
  			try_to_unmap(page, flags);
  			if (page_mapped(page))
  				goto restore;
  		}
  
  		if (migrate_vma_check_page(page))
  			continue;
  
  restore:
  		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  		migrate->cpages--;
  		restore++;
  	}
  
  	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  
  		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
  			continue;
  
  		remove_migration_ptes(page, page, false);
  
  		migrate->src[i] = 0;
  		unlock_page(page);
  		restore--;
  		if (is_zone_device_page(page))
  			put_page(page);
  		else
  			putback_lru_page(page);
  	}
  }
  static void migrate_vma_insert_page(struct migrate_vma *migrate,
  				    unsigned long addr,
  				    struct page *page,
  				    unsigned long *src,
  				    unsigned long *dst)
  {
  	struct vm_area_struct *vma = migrate->vma;
  	struct mm_struct *mm = vma->vm_mm;
  	struct mem_cgroup *memcg;
  	bool flush = false;
  	spinlock_t *ptl;
  	pte_t entry;
  	pgd_t *pgdp;
  	p4d_t *p4dp;
  	pud_t *pudp;
  	pmd_t *pmdp;
  	pte_t *ptep;
  
  	/* Only allow populating anonymous memory */
  	if (!vma_is_anonymous(vma))
  		goto abort;
  
  	pgdp = pgd_offset(mm, addr);
  	p4dp = p4d_alloc(mm, pgdp, addr);
  	if (!p4dp)
  		goto abort;
  	pudp = pud_alloc(mm, p4dp, addr);
  	if (!pudp)
  		goto abort;
  	pmdp = pmd_alloc(mm, pudp, addr);
  	if (!pmdp)
  		goto abort;
  
  	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
  		goto abort;
  
  	/*
  	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
  	 * pte_offset_map() on pmds where a huge pmd might be created
  	 * from a different thread.
  	 *
  	 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
  	 * parallel threads are excluded by other means.
  	 *
  	 * Here we only have down_read(mmap_sem).
  	 */
  	if (pte_alloc(mm, pmdp, addr))
  		goto abort;
  
  	/* See the comment in pte_alloc_one_map() */
  	if (unlikely(pmd_trans_unstable(pmdp)))
  		goto abort;
  
  	if (unlikely(anon_vma_prepare(vma)))
  		goto abort;
  	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
  		goto abort;
  
  	/*
  	 * The memory barrier inside __SetPageUptodate makes sure that
  	 * preceding stores to the page contents become visible before
  	 * the set_pte_at() write.
  	 */
  	__SetPageUptodate(page);
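  
  	/*
  	 * Build the new pte: device private pages are mapped through a
  	 * special swap entry, device public pages through a devmap pte, and
  	 * ordinary pages through a normal pte, with write permission taken
  	 * from VM_WRITE on the vma.
  	 */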
  	if (is_zone_device_page(page)) {
  		if (is_device_private_page(page)) {
  			swp_entry_t swp_entry;
  
  			swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
  			entry = swp_entry_to_pte(swp_entry);
  		} else if (is_device_public_page(page)) {
  			entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
  			if (vma->vm_flags & VM_WRITE)
  				entry = pte_mkwrite(pte_mkdirty(entry));
  			entry = pte_mkdevmap(entry);
  		}
  	} else {
  		entry = mk_pte(page, vma->vm_page_prot);
  		if (vma->vm_flags & VM_WRITE)
  			entry = pte_mkwrite(pte_mkdirty(entry));
  	}
  
  	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
  
  	if (pte_present(*ptep)) {
  		unsigned long pfn = pte_pfn(*ptep);
  
  		if (!is_zero_pfn(pfn)) {
  			pte_unmap_unlock(ptep, ptl);
  			mem_cgroup_cancel_charge(page, memcg, false);
  			goto abort;
  		}
  		flush = true;
  	} else if (!pte_none(*ptep)) {
  		pte_unmap_unlock(ptep, ptl);
  		mem_cgroup_cancel_charge(page, memcg, false);
  		goto abort;
  	}
  
  	/*
  	 * Check for userfaultfd but do not deliver the fault. Instead,
  	 * just back off.
  	 */
  	if (userfaultfd_missing(vma)) {
  		pte_unmap_unlock(ptep, ptl);
  		mem_cgroup_cancel_charge(page, memcg, false);
  		goto abort;
  	}
  
  	inc_mm_counter(mm, MM_ANONPAGES);
  	page_add_new_anon_rmap(page, vma, addr, false);
  	mem_cgroup_commit_charge(page, memcg, false, false);
  	if (!is_zone_device_page(page))
  		lru_cache_add_active_or_unevictable(page, vma);
  	get_page(page);
  
  	if (flush) {
  		flush_cache_page(vma, addr, pte_pfn(*ptep));
  		ptep_clear_flush_notify(vma, addr, ptep);
  		set_pte_at_notify(mm, addr, ptep, entry);
  		update_mmu_cache(vma, addr, ptep);
  	} else {
  		/* No need to invalidate - it was non-present before */
  		set_pte_at(mm, addr, ptep, entry);
  		update_mmu_cache(vma, addr, ptep);
  	}
  
  	pte_unmap_unlock(ptep, ptl);
  	*src = MIGRATE_PFN_MIGRATE;
  	return;
  
  abort:
  	*src &= ~MIGRATE_PFN_MIGRATE;
  }
  /*
   * migrate_vma_pages() - migrate meta-data from src page to dst page
   * @migrate: migrate struct containing all migration information
   *
   * This migrates struct page meta-data from source struct page to destination
   * struct page. This effectively finishes the migration from source page to the
   * destination page.
   */
  static void migrate_vma_pages(struct migrate_vma *migrate)
  {
  	const unsigned long npages = migrate->npages;
  	const unsigned long start = migrate->start;
  	struct vm_area_struct *vma = migrate->vma;
  	struct mm_struct *mm = vma->vm_mm;
  	unsigned long addr, i, mmu_start;
  	bool notified = false;
  
  	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
  		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  		struct address_space *mapping;
  		int r;
  		if (!newpage) {
  			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  			continue;
  		}
  
  		if (!page) {
  			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
  				continue;
  			if (!notified) {
  				mmu_start = addr;
  				notified = true;
  				mmu_notifier_invalidate_range_start(mm,
  								mmu_start,
  								migrate->end);
  			}
  			migrate_vma_insert_page(migrate, addr, newpage,
  						&migrate->src[i],
  						&migrate->dst[i]);
  			continue;
  		}
  
  		mapping = page_mapping(page);
  		if (is_zone_device_page(newpage)) {
  			if (is_device_private_page(newpage)) {
  				/*
  				 * For now only support private anonymous memory when
  				 * migrating to un-addressable device memory.
  				 */
  				if (mapping) {
  					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  					continue;
  				}
  			} else if (!is_device_public_page(newpage)) {
  				/*
  				 * Other types of ZONE_DEVICE page are not
  				 * supported.
  				 */
  				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  				continue;
  			}
  		}
  		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
  		if (r != MIGRATEPAGE_SUCCESS)
  			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  	}

  	/*
  	 * No need to call the mmu_notifier->invalidate_range() callback again,
  	 * as ptep_clear_flush_notify() inside migrate_vma_insert_page() above
  	 * has already called it.
  	 */
  	if (notified)
  		mmu_notifier_invalidate_range_only_end(mm, mmu_start,
  						       migrate->end);
  }
  
  /*
   * migrate_vma_finalize() - restore CPU page table entry
   * @migrate: migrate struct containing all migration information
   *
   * This replaces the special migration pte entry with either a mapping to the
   * new page if migration was successful for that page, or to the original page
   * otherwise.
   *
   * This also unlocks the pages and puts them back on the lru, or drops the extra
   * refcount, for device pages.
   */
  static void migrate_vma_finalize(struct migrate_vma *migrate)
  {
  	const unsigned long npages = migrate->npages;
  	unsigned long i;
  
  	for (i = 0; i < npages; i++) {
  		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  		if (!page) {
  			if (newpage) {
  				unlock_page(newpage);
  				put_page(newpage);
  			}
  			continue;
  		}
  		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
  			if (newpage) {
  				unlock_page(newpage);
  				put_page(newpage);
  			}
  			newpage = page;
  		}
  
  		remove_migration_ptes(page, newpage, false);
  		unlock_page(page);
  		migrate->cpages--;
  		if (is_zone_device_page(page))
  			put_page(page);
  		else
  			putback_lru_page(page);
  
  		if (newpage != page) {
  			unlock_page(newpage);
  			if (is_zone_device_page(newpage))
  				put_page(newpage);
  			else
  				putback_lru_page(newpage);
  		}
  	}
  }
  
  /*
   * migrate_vma() - migrate a range of memory inside vma
   *
   * @ops: migration callback for allocating destination memory and copying
   * @vma: virtual memory area containing the range to be migrated
   * @start: start address of the range to migrate (inclusive)
   * @end: end address of the range to migrate (exclusive)
   * @src: array of hmm_pfn_t containing source pfns
   * @dst: array of hmm_pfn_t containing destination pfns
   * @private: pointer passed back to each of the callbacks
   * Returns: 0 on success, error code otherwise
   *
   * This function tries to migrate a range of virtual addresses, using
   * callbacks to allocate and copy memory from source to destination. First it
   * collects all the pages backing each virtual address in the range, saving
   * them inside the src array. Then it locks those pages and unmaps them. Once the pages
   * are locked and unmapped, it checks whether each page is pinned or not. Pages
   * that aren't pinned have the MIGRATE_PFN_MIGRATE flag set (by this function)
   * in the corresponding src array entry. It then restores any pages that are
   * pinned, by remapping and unlocking those pages.
   *
   * At this point it calls the alloc_and_copy() callback. For documentation on
   * what is expected from that callback, see struct migrate_vma_ops comments in
   * include/linux/migrate.h
   *
   * After the alloc_and_copy() callback, this function goes over each entry in
   * the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
   * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
   * then the function tries to migrate struct page information from the source
   * struct page to the destination struct page. If it fails to migrate the struct
   * page information, then it clears the MIGRATE_PFN_MIGRATE flag in the src
   * array.
   *
   * At this point all successfully migrated pages have an entry in the src
   * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
   * array entry with MIGRATE_PFN_VALID flag set.
   *
   * It then calls the finalize_and_map() callback. See comments for "struct
   * migrate_vma_ops", in include/linux/migrate.h for details about
   * finalize_and_map() behavior.
   *
   * After the finalize_and_map() callback, for successfully migrated pages, this
   * function updates the CPU page table to point to new pages, otherwise it
   * restores the CPU page table to point to the original source pages.
   *
   * The function returns 0 after the above steps, even if no pages were
   * migrated (it only returns an error if any of the arguments are invalid).
   *
   * Both src and dst array must be big enough for (end - start) >> PAGE_SHIFT
   * unsigned long entries.
   */
  int migrate_vma(const struct migrate_vma_ops *ops,
  		struct vm_area_struct *vma,
  		unsigned long start,
  		unsigned long end,
  		unsigned long *src,
  		unsigned long *dst,
  		void *private)
  {
  	struct migrate_vma migrate;
  
  	/* Sanity check the arguments */
  	start &= PAGE_MASK;
  	end &= PAGE_MASK;
  	if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
  			vma_is_dax(vma))
  		return -EINVAL;
  	if (start < vma->vm_start || start >= vma->vm_end)
  		return -EINVAL;
  	if (end <= vma->vm_start || end > vma->vm_end)
  		return -EINVAL;
  	if (!ops || !src || !dst || start >= end)
  		return -EINVAL;
  
  	memset(src, 0, sizeof(*src) * ((end - start) >> PAGE_SHIFT));
  	migrate.src = src;
  	migrate.dst = dst;
  	migrate.start = start;
  	migrate.npages = 0;
  	migrate.cpages = 0;
  	migrate.end = end;
  	migrate.vma = vma;
  
  	/* Collect, and try to unmap source pages */
  	migrate_vma_collect(&migrate);
  	if (!migrate.cpages)
  		return 0;
  
  	/* Lock and isolate page */
  	migrate_vma_prepare(&migrate);
  	if (!migrate.cpages)
  		return 0;
  
  	/* Unmap pages */
  	migrate_vma_unmap(&migrate);
  	if (!migrate.cpages)
  		return 0;
  
  	/*
  	 * At this point pages are locked and unmapped, and thus they have
  	 * stable content and can safely be copied to destination memory that
  	 * is allocated by the callback.
  	 *
  	 * Note that migration can fail in migrate_vma_pages() for each
  	 * individual page.
  	 */
  	ops->alloc_and_copy(vma, src, dst, start, end, private);
  
  	/* This does the real migration of struct page */
  	migrate_vma_pages(&migrate);
  
  	ops->finalize_and_map(vma, src, dst, start, end, private);
  
  	/* Unlock and remap pages */
  	migrate_vma_finalize(&migrate);
  
  	return 0;
  }
  EXPORT_SYMBOL(migrate_vma);
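  
  /*
   * Illustrative sketch only (not part of the upstream file): one way a
   * hypothetical device driver could provide the two callbacks expected by
   * migrate_vma().  The names example_alloc_and_copy(),
   * example_finalize_and_map() and example_migrate_vma_ops are invented for
   * this sketch, and plain system pages stand in for real device memory so
   * that it stays self-contained.  The callback prototypes are assumed to
   * follow struct migrate_vma_ops in include/linux/migrate.h.
   */
  static void __maybe_unused example_alloc_and_copy(struct vm_area_struct *vma,
  						  const unsigned long *src,
  						  unsigned long *dst,
  						  unsigned long start,
  						  unsigned long end,
  						  void *private)
  {
  	unsigned long addr, i;
  
  	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
  		struct page *spage = migrate_pfn_to_page(src[i]);
  		struct page *dpage;
  
  		dst[i] = 0;
  		/* Skip entries the core code could not collect or unmap. */
  		if (!(src[i] & MIGRATE_PFN_MIGRATE))
  			continue;
  
  		/* A real driver would allocate device memory here. */
  		dpage = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
  		if (!dpage)
  			continue;
  
  		/* The destination page must be locked before it is returned. */
  		lock_page(dpage);
  		if (spage)
  			copy_highpage(dpage, spage);
  		dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
  	}
  }
  
  static void __maybe_unused example_finalize_and_map(struct vm_area_struct *vma,
  						    const unsigned long *src,
  						    const unsigned long *dst,
  						    unsigned long start,
  						    unsigned long end,
  						    void *private)
  {
  	/* A real driver would update its own page tracking here. */
  }
  
  static const struct migrate_vma_ops example_migrate_vma_ops __maybe_unused = {
  	.alloc_and_copy		= example_alloc_and_copy,
  	.finalize_and_map	= example_finalize_and_map,
  };
  
  /*
   * With mmap_sem held for read and src/dst arrays sized for
   * (end - start) >> PAGE_SHIFT entries, the driver would then call:
   *
   *	migrate_vma(&example_migrate_vma_ops, vma, start, end, src, dst, NULL);
   */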
  #endif /* defined(MIGRATE_VMA_HELPER) */