mm/migrate.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
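
/*
 * Attempt to isolate a non-LRU movable page for migration: take a reference,
 * lock the page and ask the driver's isolate_page() callback. Returns 0 with
 * PG_isolated set on success, -EBUSY if the page cannot be isolated.
 */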
int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount, preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because page's owner
	 * assumes that nobody touches the PG_lock of a newly allocated page,
	 * so unconditionally grabbing the lock would ruin the owner's side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}
  
/* It should be called on page which is PG_movable */
void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated non-lru movable page so here we can use
		 * __PageMovable because LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_write_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);

		if (unlikely(is_zone_device_page(new))) {
			if (is_device_private_page(new)) {
				entry = make_device_private_entry(new, pte_write(pte));
				pte = swp_entry_to_pte(entry);
			} else if (is_device_public_page(new)) {
				pte = pte_mkdevmap(pte);
			}
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, vma, new, 0);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
		} else
#endif
		{
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		if (PageTransHuge(page) && PageMlocked(page))
			clear_page_mlock(page);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}
  
/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once page cache replacement of page migration started, page_count
	 * is zero; but we must not call put_and_wait_on_page_locked() without
	 * a ref. Use get_page_unless_zero(), and just fault again if it fails.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	put_and_wait_on_page_locked(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
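
/*
 * Convenience wrappers around __migration_entry_wait() that look up the
 * page table lock for normal and hugetlb mappings respectively.
 */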
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}
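
/*
 * Like migration_entry_wait(), but for a PMD-mapped THP migration entry:
 * take a reference on the page behind the entry and wait until migration
 * completes before the fault is retried.
 */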
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_lock(mm, pmd);
	if (!is_pmd_migration_entry(*pmd))
		goto unlock;
	page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
	if (!get_page_unless_zero(page))
		goto unlock;
	spin_unlock(ptl);
	put_and_wait_on_page_locked(page);
	return;
unlock:
	spin_unlock(ptl);
}
#endif
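
/*
 * The page reference count we expect to see when no one else holds a
 * transient reference: the caller's reference, an extra one for ZONE_DEVICE
 * (device private or public) pages, plus the page cache and buffer head
 * references when the page has a mapping.
 */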
static int expected_page_refs(struct address_space *mapping, struct page *page)
{
	int expected_count = 1;

	/*
	 * Device public or private pages have an extra refcount as they are
	 * ZONE_DEVICE pages.
	 */
	expected_count += is_device_private_page(page);
	expected_count += is_device_public_page(page);
	if (mapping)
		expected_count += hpage_nr_pages(page) + page_has_private(page);

	return expected_count;
}

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode,
		int extra_count)
{
	XA_STATE(xas, &mapping->i_pages, page_index(page));
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = expected_page_refs(mapping, page) + extra_count;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			__SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = page_zone(page);
	newzone = page_zone(newpage);

	xas_lock_irq(&xas);
	if (page_count(page) != expected_count || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
	if (PageSwapBacked(page)) {
		__SetPageSwapBacked(newpage);
		if (PageSwapCache(page)) {
			SetPageSwapCache(newpage);
			set_page_private(newpage, page_private(page));
		}
	} else {
		VM_BUG_ON_PAGE(PageSwapCache(page), page);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}

	xas_store(&xas, newpage);
	if (PageTransHuge(page)) {
		int i;

		for (i = 1; i < HPAGE_PMD_NR; i++) {
			xas_next(&xas);
			xas_store(&xas, newpage);
		}
	}

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));

	xas_unlock(&xas);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
		if (PageSwapBacked(page) && !PageSwapCache(page)) {
			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
		}
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);

/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	XA_STATE(xas, &mapping->i_pages, page_index(page));
	int expected_count;

	xas_lock_irq(&xas);
	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		xas_unlock_irq(&xas);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);
	xas_store(&xas, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	xas_unlock_irq(&xas);

	return MIGRATEPAGE_SUCCESS;
}
  
/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page.  We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}
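
/*
 * Copy a hugetlbfs or transparent huge page, one subpage at a time, using
 * __copy_gigantic_page() when the page is too large for plain pointer
 * arithmetic.
 */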
  
static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}
  
/*
 * Copy the page to its new location
 */
void migrate_page_states(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageWorkingset(page))
		SetPageWorkingset(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	/* Move dirty on pages not done by migrate_page_move_mapping() */
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);

	copy_page_owner(page, newpage);

	mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_states);

void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	migrate_page_states(newpage, page);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;

			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
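
/*
 * Common helper for buffer_migrate_page() and buffer_migrate_page_norefs():
 * migrate a page with buffer heads attached, optionally (check_refs) making
 * sure no one else holds a reference on the buffers first.
 */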
static int __buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode,
		bool check_refs)
{
	struct buffer_head *bh, *head;
	int rc;
	int expected_count;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = expected_page_refs(mapping, page);
	if (page_count(page) != expected_count)
		return -EAGAIN;

	head = page_buffers(page);
	if (!buffer_migrate_lock_buffers(head, mode))
		return -EAGAIN;

	if (check_refs) {
		bool busy;
		bool invalidated = false;

recheck_buffers:
		busy = false;
		spin_lock(&mapping->private_lock);
		bh = head;
		do {
			if (atomic_read(&bh->b_count)) {
				busy = true;
				break;
			}
			bh = bh->b_this_page;
		} while (bh != head);
		spin_unlock(&mapping->private_lock);
		if (busy) {
			if (invalidated) {
				rc = -EAGAIN;
				goto unlock_buffers;
			}
			invalidate_bh_lrus();
			invalidated = true;
			goto recheck_buffers;
		}
	}

	rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		goto unlock_buffers;

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
	bh = head;
	do {
		unlock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return rc;
}

/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist. For example attached buffer heads are accessed only under page lock.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_page);

/*
 * Same as above except that this variant is more careful and checks that there
 * are also no buffer head references. This function is the right one for
 * mappings where buffer heads are directly looked up and referenced (such as
 * block device mappings).
 */
int buffer_migrate_page_norefs(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	return __buffer_migrate_page(mapping, newpage, page, mode, true);
}
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}
  
/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		switch (mode) {
		case MIGRATE_SYNC:
		case MIGRATE_SYNC_NO_COPY:
			break;
		default:
			return -EBUSY;
		}
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * In case of non-lru page, it could be released after
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			__ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			__ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare so don't reset it here for keeping
		 * the type to work PageAnon, for example.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;

		if (unlikely(is_zone_device_page(newpage))) {
			if (is_device_public_page(newpage))
				flush_dcache_page(newpage);
		} else
			flush_dcache_page(newpage);
	}
out:
	return rc;
}
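
/*
 * Lock the page, replace its ptes with migration entries, move it to the
 * newly allocated page with move_to_new_page(), and restore working ptes
 * afterwards. Returns MIGRATEPAGE_SUCCESS or a negative error code for
 * unmap_and_move() to handle.
 */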
0dabec93d   Minchan Kim   mm: migration: cl...
962
  static int __unmap_and_move(struct page *page, struct page *newpage,
9c620e2bc   Hugh Dickins   mm: remove offlin...
963
  				int force, enum migrate_mode mode)
e24f0b8f7   Christoph Lameter   [PATCH] page migr...
964
  {
0dabec93d   Minchan Kim   mm: migration: cl...
965
  	int rc = -EAGAIN;
2ebba6b7e   Hugh Dickins   mm: unmapped page...
966
  	int page_was_mapped = 0;
3f6c82728   Mel Gorman   mm: migration: ta...
967
  	struct anon_vma *anon_vma = NULL;
bda807d44   Minchan Kim   mm: migrate: supp...
968
  	bool is_lru = !__PageMovable(page);
95a402c38   Christoph Lameter   [PATCH] page migr...
969

529ae9aaa   Nick Piggin   mm: rename page t...
970
  	if (!trylock_page(page)) {
a6bc32b89   Mel Gorman   mm: compaction: i...
971
  		if (!force || mode == MIGRATE_ASYNC)
0dabec93d   Minchan Kim   mm: migration: cl...
972
  			goto out;
3e7d34497   Mel Gorman   mm: vmscan: recla...
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
  
  		/*
  		 * It's not safe for direct compaction to call lock_page.
  		 * For example, during page readahead pages are added locked
  		 * to the LRU. Later, when the IO completes the pages are
  		 * marked uptodate and unlocked. However, the queueing
  		 * could be merging multiple pages for one bio (e.g.
  		 * mpage_readpages). If an allocation happens for the
  		 * second or third page, the process can end up locking
  		 * the same page twice and deadlocking. Rather than
  		 * trying to be clever about what pages can be locked,
  		 * avoid the use of lock_page for direct compaction
  		 * altogether.
  		 */
  		if (current->flags & PF_MEMALLOC)
0dabec93d   Minchan Kim   mm: migration: cl...
988
  			goto out;
3e7d34497   Mel Gorman   mm: vmscan: recla...
989

e24f0b8f7   Christoph Lameter   [PATCH] page migr...
990
991
992
993
  		lock_page(page);
  	}
  
  	if (PageWriteback(page)) {
11bc82d67   Andrea Arcangeli   mm: compaction: U...
994
  		/*
fed5b64a9   Jianguo Wu   mm/migrate: fix c...
995
  		 * Only in the case of a full synchronous migration is it
a6bc32b89   Mel Gorman   mm: compaction: i...
996
997
998
  		 * necessary to wait for PageWriteback. In the async case,
  		 * the retry loop is too short and in the sync-light case,
  		 * the overhead of stalling is too much
11bc82d67   Andrea Arcangeli   mm: compaction: U...
999
  		 */
2916ecc0f   Jérôme Glisse   mm/migrate: new m...
1000
1001
1002
1003
1004
  		switch (mode) {
  		case MIGRATE_SYNC:
  		case MIGRATE_SYNC_NO_COPY:
  			break;
  		default:
11bc82d67   Andrea Arcangeli   mm: compaction: U...
1005
  			rc = -EBUSY;
0a31bc97c   Johannes Weiner   mm: memcontrol: r...
1006
  			goto out_unlock;
11bc82d67   Andrea Arcangeli   mm: compaction: U...
1007
1008
  		}
  		if (!force)
0a31bc97c   Johannes Weiner   mm: memcontrol: r...
1009
  			goto out_unlock;
e24f0b8f7   Christoph Lameter   [PATCH] page migr...
1010
1011
  		wait_on_page_writeback(page);
  	}
03f15c86c   Hugh Dickins   mm: simplify page...
1012

e24f0b8f7   Christoph Lameter   [PATCH] page migr...
1013
  	/*
dc386d4d1   KAMEZAWA Hiroyuki   memory unplug: mi...
1014
1015
  	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
  	 * we cannot notice that anon_vma is freed while we migrates a page.
1ce82b69e   Hugh Dickins   mm: fix migration...
1016
  	 * This get_anon_vma() delays freeing anon_vma pointer until the end
dc386d4d1   KAMEZAWA Hiroyuki   memory unplug: mi...
1017
  	 * of migration. File cache pages are no problem because of page_lock()
989f89c57   KAMEZAWA Hiroyuki   fix rcu_read_lock...
1018
1019
  	 * File Caches may use write_page() or lock_page() in migration, then,
  	 * just care Anon page here.
03f15c86c   Hugh Dickins   mm: simplify page...
1020
1021
1022
1023
1024
1025
  	 *
  	 * Only page_get_anon_vma() understands the subtleties of
  	 * getting a hold on an anon_vma from outside one of its mms.
  	 * But if we cannot get anon_vma, then we won't need it anyway,
  	 * because that implies that the anon page is no longer mapped
  	 * (and cannot be remapped so long as we hold the page lock).
  	 */
  	if (PageAnon(page) && !PageKsm(page))
  		anon_vma = page_get_anon_vma(page);

  	/*
  	 * Block others from accessing the new page when we get around to
  	 * establishing additional references. We are usually the only one
  	 * holding a reference to newpage at this point. We used to have a BUG
  	 * here if trylock_page(newpage) fails, but would like to allow for
  	 * cases where there might be a race with the previous use of newpage.
  	 * This is much like races on refcount of oldpage: just don't BUG().
  	 */
  	if (unlikely(!trylock_page(newpage)))
  		goto out_unlock;
  	if (unlikely(!is_lru)) {
  		rc = move_to_new_page(newpage, page, mode);
  		goto out_unlock_both;
  	}
  	/*
  	 * Corner case handling:
  	 * 1. When a new swap-cache page is read in, it is added to the LRU
  	 * and treated as swapcache but it has no rmap yet.
  	 * Calling try_to_unmap() against a page->mapping==NULL page will
  	 * trigger a BUG.  So handle it here.
  	 * 2. An orphaned page (see truncate_complete_page) might have
  	 * fs-private metadata. The page can be picked up due to memory
  	 * offlining.  Everywhere else except page reclaim, the page is
  	 * invisible to the vm, so the page can not be migrated.  So try to
  	 * free the metadata, so the page can be freed.
  	 */
  	if (!page->mapping) {
  		VM_BUG_ON_PAGE(PageAnon(page), page);
  		if (page_has_private(page)) {
  			try_to_free_buffers(page);
  			goto out_unlock_both;
  		}
  	} else if (page_mapped(page)) {
  		/* Establish migration ptes */
  		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
  				page);
  		try_to_unmap(page,
  			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
  		page_was_mapped = 1;
  	}

  	if (!page_mapped(page))
  		rc = move_to_new_page(newpage, page, mode);

  	if (page_was_mapped)
  		remove_migration_ptes(page,
  			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

  out_unlock_both:
  	unlock_page(newpage);
  out_unlock:
  	/* Drop an anon_vma reference if we took one */
  	if (anon_vma)
  		put_anon_vma(anon_vma);
  	unlock_page(page);
  out:
  	/*
  	 * If migration is successful, decrease the refcount of the newpage,
  	 * which will not free the page because the new page owner increased
  	 * the refcount. Also, if it is an LRU page, add the page to the LRU
  	 * list here. Use the old state of the isolated source page to
  	 * determine if we migrated a LRU page. newpage was already unlocked
  	 * and possibly modified by its owner - don't rely on the page
  	 * state.
  	 */
  	if (rc == MIGRATEPAGE_SUCCESS) {
  		if (unlikely(!is_lru))
  			put_page(newpage);
  		else
  			putback_lru_page(newpage);
  	}
  	return rc;
  }

  /*
   * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move().  Work
   * around it.
   */
  #if defined(CONFIG_ARM) && \
  	defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
  #define ICE_noinline noinline
  #else
  #define ICE_noinline
  #endif

  /*
   * Obtain the lock on page, remove all ptes and migrate the page
   * to the newly allocated page in newpage.
   */
  static ICE_noinline int unmap_and_move(new_page_t get_new_page,
  				   free_page_t put_new_page,
  				   unsigned long private, struct page *page,
  				   int force, enum migrate_mode mode,
  				   enum migrate_reason reason)
  {
  	int rc = MIGRATEPAGE_SUCCESS;
  	struct page *newpage;

  	if (!thp_migration_supported() && PageTransHuge(page))
  		return -ENOMEM;
  	newpage = get_new_page(page, private);
  	if (!newpage)
  		return -ENOMEM;

  	if (page_count(page) == 1) {
  		/* page was freed from under us. So we are done. */
  		ClearPageActive(page);
  		ClearPageUnevictable(page);
  		if (unlikely(__PageMovable(page))) {
  			lock_page(page);
  			if (!PageMovable(page))
  				__ClearPageIsolated(page);
  			unlock_page(page);
  		}
  		if (put_new_page)
  			put_new_page(newpage, private);
  		else
  			put_page(newpage);
  		goto out;
  	}
  	rc = __unmap_and_move(page, newpage, force, mode);
  	if (rc == MIGRATEPAGE_SUCCESS)
  		set_page_owner_migrate_reason(newpage, reason);

  out:
  	if (rc != -EAGAIN) {
  		/*
  		 * A page that has been migrated has all references
  		 * removed and will be freed. A page that has not been
  		 * migrated will have kept its references and be
  		 * restored.
  		 */
  		list_del(&page->lru);

  		/*
  		 * Compaction can also migrate non-LRU pages which are
  		 * not accounted to NR_ISOLATED_*. They can be recognized
  		 * as __PageMovable
  		 */
  		if (likely(!__PageMovable(page)))
  			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
  					page_is_file_cache(page), -hpage_nr_pages(page));
  	}

  	/*
  	 * If migration is successful, release the reference grabbed during
  	 * isolation. Otherwise, restore the page to the right list unless
  	 * we want to retry.
  	 */
  	if (rc == MIGRATEPAGE_SUCCESS) {
  		put_page(page);
  		if (reason == MR_MEMORY_FAILURE) {
  			/*
  			 * Set PG_HWPoison on just freed page
  			 * intentionally. Although it's rather weird,
  			 * it's how HWPoison flag works at the moment.
  			 */
  			if (set_hwpoison_free_buddy_page(page))
  				num_poisoned_pages_inc();
  		}
  	} else {
  		if (rc != -EAGAIN) {
  			if (likely(!__PageMovable(page))) {
  				putback_lru_page(page);
  				goto put_new;
  			}

  			lock_page(page);
  			if (PageMovable(page))
  				putback_movable_page(page);
  			else
  				__ClearPageIsolated(page);
  			unlock_page(page);
  			put_page(page);
  		}
  put_new:
  		if (put_new_page)
  			put_new_page(newpage, private);
  		else
  			put_page(newpage);
  	}

  	return rc;
  }
  
  /*
   * Counterpart of unmap_and_move_page() for hugepage migration.
   *
   * This function doesn't wait for the completion of hugepage I/O
   * because there is no race between I/O and migration for hugepage.
   * Note that currently hugepage I/O occurs only in direct I/O
   * where no lock is held and PG_writeback is irrelevant,
   * and the writeback status of all subpages is counted in the reference
   * count of the head page (i.e. if all subpages of a 2MB hugepage are
   * under direct I/O, the reference of the head page is 512 and a bit more.)
   * This means that when we try to migrate a hugepage whose subpages are
   * doing direct I/O, some references remain after try_to_unmap() and
   * hugepage migration fails without data corruption.
   *
   * There is also no race when direct I/O is issued on the page under migration,
   * because then the pte is replaced with a migration swap entry and direct I/O
   * code will wait in the page fault for migration to complete.
   */
  static int unmap_and_move_huge_page(new_page_t get_new_page,
  				free_page_t put_new_page, unsigned long private,
  				struct page *hpage, int force,
  				enum migrate_mode mode, int reason)
  {
  	int rc = -EAGAIN;
  	int page_was_mapped = 0;
  	struct page *new_hpage;
  	struct anon_vma *anon_vma = NULL;
  	/*
  	 * Migratability of hugepages depends on architectures and their size.
  	 * This check is necessary because some callers of hugepage migration
  	 * like soft offline and memory hotremove don't walk through page
  	 * tables or check whether the hugepage is pmd-based or not before
  	 * kicking migration.
  	 */
  	if (!hugepage_migration_supported(page_hstate(hpage))) {
  		putback_active_hugepage(hpage);
  		return -ENOSYS;
  	}

  	new_hpage = get_new_page(hpage, private);
  	if (!new_hpage)
  		return -ENOMEM;
  	if (!trylock_page(hpage)) {
  		if (!force)
  			goto out;
  		switch (mode) {
  		case MIGRATE_SYNC:
  		case MIGRATE_SYNC_NO_COPY:
  			break;
  		default:
  			goto out;
  		}
  		lock_page(hpage);
  	}
  	/*
  	 * Check for pages which are in the process of being freed.  Without
  	 * page_mapping() set, hugetlbfs specific move page routine will not
  	 * be called and we could leak usage counts for subpools.
  	 */
  	if (page_private(hpage) && !page_mapping(hpage)) {
  		rc = -EBUSY;
  		goto out_unlock;
  	}
  	if (PageAnon(hpage))
  		anon_vma = page_get_anon_vma(hpage);

  	if (unlikely(!trylock_page(new_hpage)))
  		goto put_anon;
  	if (page_mapped(hpage)) {
  		try_to_unmap(hpage,
  			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
  		page_was_mapped = 1;
  	}

  	if (!page_mapped(hpage))
  		rc = move_to_new_page(new_hpage, hpage, mode);

  	if (page_was_mapped)
  		remove_migration_ptes(hpage,
  			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);

  	unlock_page(new_hpage);

  put_anon:
  	if (anon_vma)
  		put_anon_vma(anon_vma);

  	if (rc == MIGRATEPAGE_SUCCESS) {
  		move_hugetlb_state(hpage, new_hpage, reason);
  		put_new_page = NULL;
  	}

  out_unlock:
  	unlock_page(hpage);
  out:
  	if (rc != -EAGAIN)
  		putback_active_hugepage(hpage);

  	/*
  	 * If migration was not successful and there's a freeing callback, use
  	 * it.  Otherwise, put_page() will drop the reference grabbed during
  	 * isolation.
  	 */
  	if (put_new_page)
  		put_new_page(new_hpage, private);
  	else
  		putback_active_hugepage(new_hpage);

  	return rc;
  }
  
  /*
   * migrate_pages - migrate the pages specified in a list, to the free pages
   *		   supplied as the target for the page migration
   *
   * @from:		The list of pages to be migrated.
   * @get_new_page:	The function used to allocate free pages to be used
   *			as the target of the page migration.
   * @put_new_page:	The function used to free target pages if migration
   *			fails, or NULL if no special handling is necessary.
   * @private:		Private data to be passed on to get_new_page()
   * @mode:		The migration mode that specifies the constraints for
   *			page migration, if any.
   * @reason:		The reason for page migration.
   *
   * The function returns after 10 attempts or if no pages are movable any more
   * because the list has become empty or no retryable pages exist any more.
   * The caller should call putback_movable_pages() to return pages to the LRU
   * or free list only if ret != 0.
   *
   * Returns the number of pages that were not migrated, or an error code.
   */
  int migrate_pages(struct list_head *from, new_page_t get_new_page,
  		free_page_t put_new_page, unsigned long private,
  		enum migrate_mode mode, int reason)
  {
  	int retry = 1;
  	int nr_failed = 0;
  	int nr_succeeded = 0;
  	int pass = 0;
  	struct page *page;
  	struct page *page2;
  	int swapwrite = current->flags & PF_SWAPWRITE;
  	int rc;

  	if (!swapwrite)
  		current->flags |= PF_SWAPWRITE;
  	for(pass = 0; pass < 10 && retry; pass++) {
  		retry = 0;

  		list_for_each_entry_safe(page, page2, from, lru) {
  retry:
  			cond_resched();

  			if (PageHuge(page))
  				rc = unmap_and_move_huge_page(get_new_page,
  						put_new_page, private, page,
  						pass > 2, mode, reason);
  			else
  				rc = unmap_and_move(get_new_page, put_new_page,
  						private, page, pass > 2, mode,
  						reason);

  			switch(rc) {
  			case -ENOMEM:
  				/*
  				 * THP migration might be unsupported or the
  				 * allocation could've failed so we should
  				 * retry on the same page with the THP split
  				 * to base pages.
  				 *
  				 * Head page is retried immediately and tail
  				 * pages are added to the tail of the list so
  				 * we encounter them after the rest of the list
  				 * is processed.
  				 */
  				if (PageTransHuge(page) && !PageHuge(page)) {
  					lock_page(page);
  					rc = split_huge_page_to_list(page, from);
  					unlock_page(page);
  					if (!rc) {
  						list_safe_reset_next(page, page2, lru);
  						goto retry;
  					}
  				}
  				nr_failed++;
  				goto out;
  			case -EAGAIN:
  				retry++;
  				break;
  			case MIGRATEPAGE_SUCCESS:
  				nr_succeeded++;
  				break;
  			default:
  				/*
  				 * Permanent failure (-EBUSY, -ENOSYS, etc.):
  				 * unlike -EAGAIN case, the failed page is
  				 * removed from migration page list and not
  				 * retried in the next outer loop.
  				 */
  				nr_failed++;
  				break;
  			}
  		}
  	}
  	nr_failed += retry;
  	rc = nr_failed;
  out:
  	if (nr_succeeded)
  		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
  	if (nr_failed)
  		count_vm_events(PGMIGRATE_FAIL, nr_failed);
  	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
  	if (!swapwrite)
  		current->flags &= ~PF_SWAPWRITE;
  	return rc;
  }
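
  /*
   * Illustrative sketch (not part of the original file): a minimal caller of
   * migrate_pages().  The callback name "demo_new_page" and the target node
   * "nid" are hypothetical; real callers (compaction, memory offlining, the
   * move_pages() syscall, NUMA balancing, ...) supply their own allocation
   * policy and reason.
   *
   *	static struct page *demo_new_page(struct page *page, unsigned long nid)
   *	{
   *		return __alloc_pages_node((int)nid,
   *				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
   *	}
   *
   *	LIST_HEAD(pagelist);
   *	// ... isolate pages (e.g. with isolate_lru_page()) onto pagelist ...
   *	if (migrate_pages(&pagelist, demo_new_page, NULL, nid,
   *			  MIGRATE_SYNC, MR_SYSCALL))
   *		putback_movable_pages(&pagelist);
   */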

  #ifdef CONFIG_NUMA

  static int store_status(int __user *status, int start, int value, int nr)
  {
  	while (nr-- > 0) {
  		if (put_user(value, status + start))
  			return -EFAULT;
  		start++;
  	}

  	return 0;
  }

  static int do_move_pages_to_node(struct mm_struct *mm,
  		struct list_head *pagelist, int node)
  {
  	int err;

  	if (list_empty(pagelist))
  		return 0;

  	err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
  			MIGRATE_SYNC, MR_SYSCALL);
  	if (err)
  		putback_movable_pages(pagelist);
  	return err;
  }

  /*
   * Resolves the given address to a struct page, isolates it from the LRU and
   * puts it to the given pagelist.
   * Returns -errno if the page cannot be found/isolated or 0 when it has been
   * queued or the page doesn't need to be migrated because it is already on
   * the target node
   */
  static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
  		int node, struct list_head *pagelist, bool migrate_all)
  {
  	struct vm_area_struct *vma;
  	struct page *page;
  	unsigned int follflags;
  	int err;

  	down_read(&mm->mmap_sem);
  	err = -EFAULT;
  	vma = find_vma(mm, addr);
  	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
  		goto out;

  	/* FOLL_DUMP to ignore special (like zero) pages */
  	follflags = FOLL_GET | FOLL_DUMP;
  	page = follow_page(vma, addr, follflags);

  	err = PTR_ERR(page);
  	if (IS_ERR(page))
  		goto out;

  	err = -ENOENT;
  	if (!page)
  		goto out;

  	err = 0;
  	if (page_to_nid(page) == node)
  		goto out_putpage;

  	err = -EACCES;
  	if (page_mapcount(page) > 1 && !migrate_all)
  		goto out_putpage;

  	if (PageHuge(page)) {
  		if (PageHead(page)) {
  			isolate_huge_page(page, pagelist);
  			err = 0;
  		}
  	} else {
  		struct page *head;

  		head = compound_head(page);
  		err = isolate_lru_page(head);
  		if (err)
  			goto out_putpage;

  		err = 0;
  		list_add_tail(&head->lru, pagelist);
  		mod_node_page_state(page_pgdat(head),
  			NR_ISOLATED_ANON + page_is_file_cache(head),
  			hpage_nr_pages(head));
  	}
  out_putpage:
  	/*
  	 * Either remove the duplicate refcount from
  	 * isolate_lru_page() or drop the page ref if it was
  	 * not isolated.
  	 */
  	put_page(page);
  out:
  	up_read(&mm->mmap_sem);
  	return err;
  }
  
  /*
   * Migrate an array of page addresses onto an array of nodes and fill
   * the corresponding array of status.
   */
  static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
  			 unsigned long nr_pages,
  			 const void __user * __user *pages,
  			 const int __user *nodes,
  			 int __user *status, int flags)
  {
  	int current_node = NUMA_NO_NODE;
  	LIST_HEAD(pagelist);
  	int start, i;
  	int err = 0, err1;

  	migrate_prep();
  	for (i = start = 0; i < nr_pages; i++) {
  		const void __user *p;
  		unsigned long addr;
  		int node;

  		err = -EFAULT;
  		if (get_user(p, pages + i))
  			goto out_flush;
  		if (get_user(node, nodes + i))
  			goto out_flush;
  		addr = (unsigned long)p;

  		err = -ENODEV;
  		if (node < 0 || node >= MAX_NUMNODES)
  			goto out_flush;
  		if (!node_state(node, N_MEMORY))
  			goto out_flush;

  		err = -EACCES;
  		if (!node_isset(node, task_nodes))
  			goto out_flush;

  		if (current_node == NUMA_NO_NODE) {
  			current_node = node;
  			start = i;
  		} else if (node != current_node) {
  			err = do_move_pages_to_node(mm, &pagelist, current_node);
  			if (err)
  				goto out;
  			err = store_status(status, start, current_node, i - start);
  			if (err)
  				goto out;
  			start = i;
  			current_node = node;
  		}
  		/*
  		 * Errors in the page lookup or isolation are not fatal and we simply
  		 * report them via status
  		 */
  		err = add_page_for_migration(mm, addr, current_node,
  				&pagelist, flags & MPOL_MF_MOVE_ALL);
  		if (!err)
  			continue;

  		err = store_status(status, i, err, 1);
  		if (err)
  			goto out_flush;

  		err = do_move_pages_to_node(mm, &pagelist, current_node);
  		if (err)
  			goto out;
  		if (i > start) {
  			err = store_status(status, start, current_node, i - start);
  			if (err)
  				goto out;
  		}
  		current_node = NUMA_NO_NODE;
  	}
  out_flush:
  	if (list_empty(&pagelist))
  		return err;
  	/* Make sure we do not overwrite the existing error */
  	err1 = do_move_pages_to_node(mm, &pagelist, current_node);
  	if (!err1)
  		err1 = store_status(status, start, current_node, i - start);
  	if (!err)
  		err = err1;
  out:
  	return err;
  }
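
  /*
   * Illustrative example (not part of the original file): for a call that asks
   * for three pages to be moved to node 1, where the second address cannot be
   * resolved to a page and the migrations themselves succeed, the batching
   * above leaves the user status array as
   *
   *	status[0] == 1        (queued and migrated to, or already on, node 1)
   *	status[1] == -ENOENT  (lookup failed; reported, the loop continues)
   *	status[2] == 1
   *
   * i.e. per-page lookup/isolation errors are reported through @status, while
   * only hard errors (such as -EFAULT on the user arrays) abort the whole loop.
   */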
  
  /*
   * Determine the nodes of an array of pages and store them in an array of status.
   */
  static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
  				const void __user **pages, int *status)
  {
  	unsigned long i;

  	down_read(&mm->mmap_sem);
  	for (i = 0; i < nr_pages; i++) {
  		unsigned long addr = (unsigned long)(*pages);
  		struct vm_area_struct *vma;
  		struct page *page;
  		int err = -EFAULT;

  		vma = find_vma(mm, addr);
  		if (!vma || addr < vma->vm_start)
  			goto set_status;
  		/* FOLL_DUMP to ignore special (like zero) pages */
  		page = follow_page(vma, addr, FOLL_DUMP);

  		err = PTR_ERR(page);
  		if (IS_ERR(page))
  			goto set_status;
  		err = page ? page_to_nid(page) : -ENOENT;
  set_status:
  		*status = err;

  		pages++;
  		status++;
  	}

  	up_read(&mm->mmap_sem);
  }

  /*
   * Determine the nodes of a user array of pages and store them in
   * a user array of status.
   */
  static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
  			 const void __user * __user *pages,
  			 int __user *status)
  {
  #define DO_PAGES_STAT_CHUNK_NR 16
  	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
  	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

  	while (nr_pages) {
  		unsigned long chunk_nr;

  		chunk_nr = nr_pages;
  		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
  			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

  		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
  			break;

  		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
  		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
  			break;

  		pages += chunk_nr;
  		status += chunk_nr;
  		nr_pages -= chunk_nr;
  	}
  	return nr_pages ? -EFAULT : 0;
  }
  
  /*
   * Move a list of pages in the address space of the currently executing
   * process.
   */
  static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
  			     const void __user * __user *pages,
  			     const int __user *nodes,
  			     int __user *status, int flags)
  {
  	struct task_struct *task;
  	struct mm_struct *mm;
  	int err;
  	nodemask_t task_nodes;

  	/* Check flags */
  	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
  		return -EINVAL;

  	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
  		return -EPERM;

  	/* Find the mm_struct */
  	rcu_read_lock();
  	task = pid ? find_task_by_vpid(pid) : current;
  	if (!task) {
  		rcu_read_unlock();
  		return -ESRCH;
  	}
  	get_task_struct(task);

  	/*
  	 * Check if this process has the right to modify the specified
  	 * process. Use the regular "ptrace_may_access()" checks.
  	 */
  	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
  		rcu_read_unlock();
  		err = -EPERM;
  		goto out;
  	}
  	rcu_read_unlock();

  	err = security_task_movememory(task);
  	if (err)
  		goto out;

  	task_nodes = cpuset_mems_allowed(task);
  	mm = get_task_mm(task);
  	put_task_struct(task);
  	if (!mm)
  		return -EINVAL;

  	if (nodes)
  		err = do_pages_move(mm, task_nodes, nr_pages, pages,
  				    nodes, status, flags);
  	else
  		err = do_pages_stat(mm, nr_pages, pages, status);

  	mmput(mm);
  	return err;

  out:
  	put_task_struct(task);
  	return err;
  }

  SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
  		const void __user * __user *, pages,
  		const int __user *, nodes,
  		int __user *, status, int, flags)
  {
  	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
  }

  #ifdef CONFIG_COMPAT
  COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
  		       compat_uptr_t __user *, pages32,
  		       const int __user *, nodes,
  		       int __user *, status,
  		       int, flags)
  {
  	const void __user * __user *pages;
  	int i;

  	pages = compat_alloc_user_space(nr_pages * sizeof(void *));
  	for (i = 0; i < nr_pages; i++) {
  		compat_uptr_t p;

  		if (get_user(p, pages32 + i) ||
  			put_user(compat_ptr(p), pages + i))
  			return -EFAULT;
  	}
  	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
  }
  #endif /* CONFIG_COMPAT */
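
  /*
   * Illustrative userspace sketch (not part of the original file), using the
   * move_pages(2) wrapper declared in <numaif.h>; the buffer and target node
   * below are hypothetical:
   *
   *	void *pages[1] = { some_buffer };
   *	int nodes[1] = { 1 };		// desired target node
   *	int status[1];
   *
   *	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE))
   *		perror("move_pages");
   *	// status[0] is the node the page now lives on, or a negative errno
   *
   * Passing nodes == NULL takes the do_pages_stat() path above and only
   * reports the node each page currently resides on.
   */
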
  #ifdef CONFIG_NUMA_BALANCING
  /*
   * Returns true if this is a safe migration target node for misplaced NUMA
   * pages. Currently it only checks the watermarks which is crude.
   */
  static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
  				   unsigned long nr_migrate_pages)
  {
  	int z;

  	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
  		struct zone *zone = pgdat->node_zones + z;

  		if (!populated_zone(zone))
  			continue;

  		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
  		if (!zone_watermark_ok(zone, 0,
  				       high_wmark_pages(zone) +
  				       nr_migrate_pages,
  				       0, 0))
  			continue;

  		return true;
  	}
  	return false;
  }
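
  /*
   * Worked example (illustrative numbers, not from the original file): with a
   * zone whose high watermark is 128 pages, accepting a 2MB THP on x86-64
   * (nr_migrate_pages == 512) requires zone_watermark_ok() to pass against a
   * mark of 128 + 512 = 640 pages, i.e. roughly 640 free pages must remain in
   * that zone, so the migration itself cannot push the zone below its high
   * watermark and wake kswapd.
   */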
  
  static struct page *alloc_misplaced_dst_page(struct page *page,
  					   unsigned long data)
  {
  	int nid = (int) data;
  	struct page *newpage;

  	newpage = __alloc_pages_node(nid,
  					 (GFP_HIGHUSER_MOVABLE |
  					  __GFP_THISNODE | __GFP_NOMEMALLOC |
  					  __GFP_NORETRY | __GFP_NOWARN) &
  					 ~__GFP_RECLAIM, 0);

  	return newpage;
  }
  static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
  {
  	int page_lru;

  	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

  	/* Avoid migrating to a node that is nearly full */
  	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
  		return 0;

  	if (isolate_lru_page(page))
  		return 0;

  	/*
  	 * migrate_misplaced_transhuge_page() skips page migration's usual
  	 * check on page_count(), so we must do it here, now that the page
  	 * has been isolated: a GUP pin, or any other pin, prevents migration.
  	 * The expected page count is 3: 1 for page's mapcount and 1 for the
  	 * caller's pin and 1 for the reference taken by isolate_lru_page().
  	 */
  	if (PageTransHuge(page) && page_count(page) != 3) {
  		putback_lru_page(page);
  		return 0;
  	}
  	page_lru = page_is_file_cache(page);
  	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
  				hpage_nr_pages(page));
  	/*
  	 * Isolating the page has taken another reference, so the
  	 * caller's reference can be safely dropped without the page
  	 * disappearing underneath us during migration.
  	 */
  	put_page(page);
  	return 1;
  }

  bool pmd_trans_migrating(pmd_t pmd)
  {
  	struct page *page = pmd_page(pmd);
  	return PageLocked(page);
  }
  /*
   * Attempt to migrate a misplaced page to the specified destination
   * node. Caller is expected to have an elevated reference count on
   * the page that will be dropped by this function before returning.
   */
  int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
  			   int node)
  {
  	pg_data_t *pgdat = NODE_DATA(node);
  	int isolated;
  	int nr_remaining;
  	LIST_HEAD(migratepages);

  	/*
  	 * Don't migrate file pages that are mapped in multiple processes
  	 * with execute permissions as they are probably shared libraries.
  	 */
  	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
  	    (vma->vm_flags & VM_EXEC))
  		goto out;

  	/*
  	 * Also do not migrate dirty pages as not all filesystems can move
  	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
  	 */
  	if (page_is_file_cache(page) && PageDirty(page))
  		goto out;
  	isolated = numamigrate_isolate_page(pgdat, page);
  	if (!isolated)
  		goto out;

  	list_add(&page->lru, &migratepages);
  	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
  				     NULL, node, MIGRATE_ASYNC,
  				     MR_NUMA_MISPLACED);
  	if (nr_remaining) {
  		if (!list_empty(&migratepages)) {
  			list_del(&page->lru);
  			dec_node_page_state(page, NR_ISOLATED_ANON +
  					page_is_file_cache(page));
  			putback_lru_page(page);
  		}
  		isolated = 0;
  	} else
  		count_vm_numa_event(NUMA_PAGE_MIGRATE);
  	BUG_ON(!list_empty(&migratepages));
  	return isolated;

  out:
  	put_page(page);
  	return 0;
  }
  #endif /* CONFIG_NUMA_BALANCING */

  #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
  /*
   * Migrates a THP to a given target node. page must be locked and is unlocked
   * before returning.
   */
  int migrate_misplaced_transhuge_page(struct mm_struct *mm,
  				struct vm_area_struct *vma,
  				pmd_t *pmd, pmd_t entry,
  				unsigned long address,
  				struct page *page, int node)
  {
  	spinlock_t *ptl;
  	pg_data_t *pgdat = NODE_DATA(node);
  	int isolated = 0;
  	struct page *new_page = NULL;
  	int page_lru = page_is_file_cache(page);
  	unsigned long start = address & HPAGE_PMD_MASK;

  	new_page = alloc_pages_node(node,
  		(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
  		HPAGE_PMD_ORDER);
  	if (!new_page)
  		goto out_fail;
  	prep_transhuge_page(new_page);

  	isolated = numamigrate_isolate_page(pgdat, page);
  	if (!isolated) {
  		put_page(new_page);
  		goto out_fail;
  	}

  	/* Prepare a page as a migration target */
  	__SetPageLocked(new_page);
  	if (PageSwapBacked(page))
  		__SetPageSwapBacked(new_page);

  	/* anon mapping, we can simply copy page->mapping to the new page: */
  	new_page->mapping = page->mapping;
  	new_page->index = page->index;
  	/* flush the cache before copying using the kernel virtual address */
  	flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
  	migrate_page_copy(new_page, page);
  	WARN_ON(PageLRU(new_page));

  	/* Recheck the target PMD */
  	ptl = pmd_lock(mm, pmd);
  	if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
  		spin_unlock(ptl);

  		/* Reverse changes made by migrate_page_copy() */
  		if (TestClearPageActive(new_page))
  			SetPageActive(page);
  		if (TestClearPageUnevictable(new_page))
  			SetPageUnevictable(page);

  		unlock_page(new_page);
  		put_page(new_page);		/* Free it */
  		/* Retake the caller's reference and putback on LRU */
  		get_page(page);
  		putback_lru_page(page);
  		mod_node_page_state(page_pgdat(page),
  			 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);

  		goto out_unlock;
  	}
  	entry = mk_huge_pmd(new_page, vma->vm_page_prot);
  	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

  	/*
  	 * Overwrite the old entry under pagetable lock and establish
  	 * the new PTE. Any parallel GUP will either observe the old
  	 * page blocking on the page lock, block on the page table
  	 * lock or observe the new page. The SetPageUptodate on the
  	 * new page and page_add_new_anon_rmap guarantee the copy is
  	 * visible before the pagetable update.
  	 */
  	page_add_anon_rmap(new_page, vma, start, true);
  	/*
  	 * At this point the pmd is numa/protnone (i.e. non present) and the TLB
  	 * has already been flushed globally.  So no TLB can be currently
  	 * caching this non present pmd mapping.  There's no need to clear the
  	 * pmd before doing set_pmd_at(), nor to flush the TLB after
  	 * set_pmd_at().  Clearing the pmd here would introduce a race
  	 * condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
  	 * mmap_sem for reading.  If the pmd is set to NULL at any given time,
  	 * MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
  	 * pmd.
  	 */
  	set_pmd_at(mm, start, pmd, entry);
  	update_mmu_cache_pmd(vma, address, &entry);

  	page_ref_unfreeze(page, 2);
  	mlock_migrate_page(new_page, page);
  	page_remove_rmap(page, true);
  	set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);

  	spin_unlock(ptl);

  	/* Take an "isolate" reference and put new page on the LRU. */
  	get_page(new_page);
  	putback_lru_page(new_page);
  	unlock_page(new_page);
  	unlock_page(page);
  	put_page(page);			/* Drop the rmap reference */
  	put_page(page);			/* Drop the LRU isolation reference */

  	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
  	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
  	mod_node_page_state(page_pgdat(page),
  			NR_ISOLATED_ANON + page_lru,
  			-HPAGE_PMD_NR);
  	return isolated;
  out_fail:
  	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
  	ptl = pmd_lock(mm, pmd);
  	if (pmd_same(*pmd, entry)) {
  		entry = pmd_modify(entry, vma->vm_page_prot);
  		set_pmd_at(mm, start, pmd, entry);
  		update_mmu_cache_pmd(vma, address, &entry);
  	}
  	spin_unlock(ptl);

  out_unlock:
  	unlock_page(page);
  	put_page(page);
  	return 0;
  }
  #endif /* CONFIG_NUMA_BALANCING */

  #endif /* CONFIG_NUMA */
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2045

6b368cd4a   Jérôme Glisse   mm/hmm: avoid blo...
2046
  #if defined(CONFIG_MIGRATE_VMA_HELPER)
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060
2061
2062
2063
2064
  struct migrate_vma {
  	struct vm_area_struct	*vma;
  	unsigned long		*dst;
  	unsigned long		*src;
  	unsigned long		cpages;
  	unsigned long		npages;
  	unsigned long		start;
  	unsigned long		end;
  };
  
  static int migrate_vma_collect_hole(unsigned long start,
  				    unsigned long end,
  				    struct mm_walk *walk)
  {
  	struct migrate_vma *migrate = walk->private;
  	unsigned long addr;
  
  	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
e20d103b6   Mark Hairgrove   mm/migrate: fix i...
2065
  		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
8315ada7f   Jérôme Glisse   mm/migrate: allow...
2066
  		migrate->dst[migrate->npages] = 0;
e20d103b6   Mark Hairgrove   mm/migrate: fix i...
2067
  		migrate->npages++;
8315ada7f   Jérôme Glisse   mm/migrate: allow...
2068
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
  		migrate->cpages++;
  	}
  
  	return 0;
  }
  
  static int migrate_vma_collect_skip(unsigned long start,
  				    unsigned long end,
  				    struct mm_walk *walk)
  {
  	struct migrate_vma *migrate = walk->private;
  	unsigned long addr;
  
  	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
  		migrate->dst[migrate->npages] = 0;
  		migrate->src[migrate->npages++] = 0;
  	}
  
  	return 0;
  }
  
  static int migrate_vma_collect_pmd(pmd_t *pmdp,
  				   unsigned long start,
  				   unsigned long end,
  				   struct mm_walk *walk)
  {
  	struct migrate_vma *migrate = walk->private;
  	struct vm_area_struct *vma = walk->vma;
  	struct mm_struct *mm = vma->vm_mm;
8c3328f1f   Jérôme Glisse   mm/migrate: migra...
2097
  	unsigned long addr = start, unmapped = 0;
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
  	spinlock_t *ptl;
  	pte_t *ptep;
  
  again:
  	if (pmd_none(*pmdp))
  		return migrate_vma_collect_hole(start, end, walk);
  
  	if (pmd_trans_huge(*pmdp)) {
  		struct page *page;
  
  		ptl = pmd_lock(mm, pmdp);
  		if (unlikely(!pmd_trans_huge(*pmdp))) {
  			spin_unlock(ptl);
  			goto again;
  		}
  
  		page = pmd_page(*pmdp);
  		if (is_huge_zero_page(page)) {
  			spin_unlock(ptl);
  			split_huge_pmd(vma, pmdp, addr);
  			if (pmd_trans_unstable(pmdp))
8315ada7f   Jérôme Glisse   mm/migrate: allow...
2119
  				return migrate_vma_collect_skip(start, end,
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2120
2121
2122
2123
2124
2125
2126
  								walk);
  		} else {
  			int ret;
  
  			get_page(page);
  			spin_unlock(ptl);
  			if (unlikely(!trylock_page(page)))
8315ada7f   Jérôme Glisse   mm/migrate: allow...
2127
  				return migrate_vma_collect_skip(start, end,
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2128
2129
2130
2131
  								walk);
  			ret = split_huge_page(page);
  			unlock_page(page);
  			put_page(page);
8315ada7f   Jérôme Glisse   mm/migrate: allow...
2132
2133
2134
2135
  			if (ret)
  				return migrate_vma_collect_skip(start, end,
  								walk);
  			if (pmd_none(*pmdp))
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2136
2137
2138
2139
2140
2141
  				return migrate_vma_collect_hole(start, end,
  								walk);
  		}
  	}
  
  	if (unlikely(pmd_bad(*pmdp)))
8315ada7f   Jérôme Glisse   mm/migrate: allow...
2142
  		return migrate_vma_collect_skip(start, end, walk);
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2143
2144
  
  	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
8c3328f1f   Jérôme Glisse   mm/migrate: migra...
2145
  	arch_enter_lazy_mmu_mode();
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2146
2147
2148
  	for (; addr < end; addr += PAGE_SIZE, ptep++) {
  		unsigned long mpfn, pfn;
  		struct page *page;
8c3328f1f   Jérôme Glisse   mm/migrate: migra...
2149
  		swp_entry_t entry;
8763cb45a   Jérôme Glisse   mm/migrate: new m...
2150
2151
2152
2153
  		pte_t pte;
  
  		pte = *ptep;
  		pfn = pte_pfn(pte);
  		if (pte_none(pte)) {
  			mpfn = MIGRATE_PFN_MIGRATE;
  			migrate->cpages++;
  			pfn = 0;
  			goto next;
  		}
  		if (!pte_present(pte)) {
  			mpfn = pfn = 0;
  
			/*
			 * Only care about the special page table entry for
			 * unaddressable device pages. Other special swap
			 * entries are not migratable, and regular swapped-out
			 * pages are ignored.
			 */
  			entry = pte_to_swp_entry(pte);
  			if (!is_device_private_entry(entry))
  				goto next;
  
  			page = device_private_entry_to_page(entry);
			mpfn = migrate_pfn(page_to_pfn(page)) |
  				MIGRATE_PFN_DEVICE | MIGRATE_PFN_MIGRATE;
  			if (is_write_device_private_entry(entry))
  				mpfn |= MIGRATE_PFN_WRITE;
  		} else {
  			if (is_zero_pfn(pfn)) {
  				mpfn = MIGRATE_PFN_MIGRATE;
  				migrate->cpages++;
  				pfn = 0;
  				goto next;
  			}
  			page = _vm_normal_page(migrate->vma, addr, pte, true);
  			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
  			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
  		}
  		/* FIXME support THP */
  		if (!page || !page->mapping || PageTransCompound(page)) {
  			mpfn = pfn = 0;
  			goto next;
  		}
  		pfn = page_to_pfn(page);
  
  		/*
  		 * By getting a reference on the page we pin it and that blocks
  		 * any kind of migration. Side effect is that it "freezes" the
  		 * pte.
  		 *
		 * We drop this reference after isolating the page from the lru
		 * for non-device pages (device pages are not on the lru and
		 * thus the reference is kept for them).
  		 */
  		get_page(page);
  		migrate->cpages++;

  		/*
  		 * Optimize for the common case where page is only mapped once
  		 * in one process. If we can lock the page, then we can safely
  		 * set up a special migration page table entry now.
  		 */
  		if (trylock_page(page)) {
  			pte_t swp_pte;
  
  			mpfn |= MIGRATE_PFN_LOCKED;
  			ptep_get_and_clear(mm, addr, ptep);
  
  			/* Setup special migration page table entry */
  			entry = make_migration_entry(page, mpfn &
  						     MIGRATE_PFN_WRITE);
  			swp_pte = swp_entry_to_pte(entry);
  			if (pte_soft_dirty(pte))
  				swp_pte = pte_swp_mksoft_dirty(swp_pte);
  			set_pte_at(mm, addr, ptep, swp_pte);
  
  			/*
  			 * This is like regular unmap: we remove the rmap and
  			 * drop page refcount. Page won't be freed, as we took
  			 * a reference just above.
  			 */
  			page_remove_rmap(page, false);
  			put_page(page);
  
  			if (pte_present(pte))
  				unmapped++;
  		}
  next:
  		migrate->dst[migrate->npages] = 0;
  		migrate->src[migrate->npages++] = mpfn;
  	}
  	arch_leave_lazy_mmu_mode();
  	pte_unmap_unlock(ptep - 1, ptl);
  	/* Only flush the TLB if we actually modified any entries */
  	if (unmapped)
  		flush_tlb_range(walk->vma, start, end);
  	return 0;
  }
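
/*
 * Editor's note: the sketch below is illustrative only and is not part of
 * mm/migrate.c. It shows how the src[] entries filled in above pack a page
 * frame number together with MIGRATE_PFN_* flag bits (defined in
 * include/linux/migrate.h), and how migrate_pfn_to_page() decodes such an
 * entry. example_dump_src_entry() is a hypothetical helper name.
 */
#if 0
static void example_dump_src_entry(unsigned long mpfn)
{
	/* NULL unless the entry carries MIGRATE_PFN_VALID */
	struct page *page = migrate_pfn_to_page(mpfn);

	pr_info("page=%p migrate=%d write=%d device=%d locked=%d\n",
		page,
		!!(mpfn & MIGRATE_PFN_MIGRATE),
		!!(mpfn & MIGRATE_PFN_WRITE),
		!!(mpfn & MIGRATE_PFN_DEVICE),
		!!(mpfn & MIGRATE_PFN_LOCKED));
}
#endif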
  
  /*
   * migrate_vma_collect() - collect pages over a range of virtual addresses
   * @migrate: migrate struct containing all migration information
   *
   * This will walk the CPU page table. For each virtual address backed by a
   * valid page, it updates the src array and takes a reference on the page, in
   * order to pin the page until we lock it and unmap it.
   */
  static void migrate_vma_collect(struct migrate_vma *migrate)
  {
  	struct mmu_notifier_range range;
  	struct mm_walk mm_walk;
  
  	mm_walk.pmd_entry = migrate_vma_collect_pmd;
  	mm_walk.pte_entry = NULL;
  	mm_walk.pte_hole = migrate_vma_collect_hole;
  	mm_walk.hugetlb_entry = NULL;
  	mm_walk.test_walk = NULL;
  	mm_walk.vma = migrate->vma;
  	mm_walk.mm = migrate->vma->vm_mm;
  	mm_walk.private = migrate;
  	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
  				migrate->start,
  				migrate->end);
  	mmu_notifier_invalidate_range_start(&range);
  	walk_page_range(migrate->start, migrate->end, &mm_walk);
  	mmu_notifier_invalidate_range_end(&range);
  
  	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
  }
  
  /*
   * migrate_vma_check_page() - check if page is pinned or not
   * @page: struct page to check
   *
   * Pinned pages cannot be migrated. This is the same test as in
   * migrate_page_move_mapping(), except that here we allow migration of a
   * ZONE_DEVICE page.
   */
  static bool migrate_vma_check_page(struct page *page)
  {
  	/*
  	 * One extra ref because caller holds an extra reference, either from
  	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
  	 * a device page.
  	 */
  	int extra = 1;
  
  	/*
  	 * FIXME support THP (transparent huge page), it is bit more complex to
  	 * check them than regular pages, because they can be mapped with a pmd
  	 * or with a pte (split pte mapping).
  	 */
  	if (PageCompound(page))
  		return false;
	/* Pages from ZONE_DEVICE have one extra reference */
  	if (is_zone_device_page(page)) {
		/*
		 * Private pages can never be pinned as they have no valid pte
		 * and GUP will fail for those. Yet if there is a pending
		 * migration, a thread might try to wait on the pte migration
		 * entry and will bump the page reference count. Sadly there is
		 * no way to differentiate a regular pin from a migration wait.
		 * Hence, to avoid two racing threads trying to migrate back to
		 * the CPU and entering an infinite loop (one stopping migration
		 * because the other is waiting on the pte migration entry), we
		 * always return true here.
		 *
		 * FIXME: the proper solution is to rework migration_entry_wait()
		 * so it does not need to take a reference on the page.
		 */
  		if (is_device_private_page(page))
  			return true;
		/*
		 * Only allow device public pages to be migrated and account for
		 * the extra reference count implied by ZONE_DEVICE pages.
		 */
  		if (!is_device_public_page(page))
  			return false;
  		extra++;
  	}
	/* For file-backed pages */
  	if (page_mapping(page))
  		extra += 1 + page_has_private(page);
  	if ((page_count(page) - extra) > page_mapcount(page))
  		return false;
  
  	return true;
  }
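
/*
 * Editor's note: a worked example of the pin test above (illustrative only,
 * not part of mm/migrate.c). For an anonymous page still mapped by a single
 * pte and not held anywhere else, page_mapcount() == 1 and page_count() == 2:
 * one reference from the pte mapping plus the one reference the caller holds,
 * which is what extra == 1 accounts for. Then page_count() - extra ==
 * page_mapcount() and the page is treated as unpinned. Any additional
 * reference, e.g. from a concurrent get_user_pages(), pushes page_count()
 * above that bound and the page is skipped.
 */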
  
  /*
   * migrate_vma_prepare() - lock pages and isolate them from the lru
   * @migrate: migrate struct containing all migration information
   *
   * This locks pages that have been collected by migrate_vma_collect(). Once each
   * page is locked it is isolated from the lru (for non-device pages). Finally,
   * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
   * migrated by concurrent kernel threads.
   */
  static void migrate_vma_prepare(struct migrate_vma *migrate)
  {
  	const unsigned long npages = migrate->npages;
  	const unsigned long start = migrate->start;
  	unsigned long addr, i, restore = 0;
  	bool allow_drain = true;
  
  	lru_add_drain();
  
  	for (i = 0; (i < npages) && migrate->cpages; i++) {
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  		bool remap = true;
  
  		if (!page)
  			continue;
  		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
			/*
			 * Because we are migrating several pages there can be
			 * a deadlock between two concurrent migrations where
			 * each is waiting on the other's page lock.
			 *
			 * Make migrate_vma() a best-effort thing and back off
			 * for any page we cannot lock right away.
			 */
  			if (!trylock_page(page)) {
  				migrate->src[i] = 0;
  				migrate->cpages--;
  				put_page(page);
  				continue;
  			}
  			remap = false;
  			migrate->src[i] |= MIGRATE_PFN_LOCKED;
  		}

  		/* ZONE_DEVICE pages are not on LRU */
  		if (!is_zone_device_page(page)) {
  			if (!PageLRU(page) && allow_drain) {
  				/* Drain CPU's pagevec */
  				lru_add_drain_all();
  				allow_drain = false;
  			}

  			if (isolate_lru_page(page)) {
  				if (remap) {
  					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  					migrate->cpages--;
  					restore++;
  				} else {
  					migrate->src[i] = 0;
  					unlock_page(page);
  					migrate->cpages--;
  					put_page(page);
  				}
  				continue;
  			}
  
  			/* Drop the reference we took in collect */
  			put_page(page);
  		}
  
  		if (!migrate_vma_check_page(page)) {
  			if (remap) {
  				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  				migrate->cpages--;
  				restore++;

  				if (!is_zone_device_page(page)) {
  					get_page(page);
  					putback_lru_page(page);
  				}
  			} else {
  				migrate->src[i] = 0;
  				unlock_page(page);
  				migrate->cpages--;
  				if (!is_zone_device_page(page))
  					putback_lru_page(page);
  				else
  					put_page(page);
  			}
  		}
  	}
  
  	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  
  		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
  			continue;
  
  		remove_migration_pte(page, migrate->vma, addr, page);
  
  		migrate->src[i] = 0;
  		unlock_page(page);
  		put_page(page);
  		restore--;
  	}
  }
  
  /*
   * migrate_vma_unmap() - replace page mapping with special migration pte entry
   * @migrate: migrate struct containing all migration information
   *
   * Replace page mapping (CPU page table pte) with a special migration pte entry
   * and check again if it has been pinned. Pinned pages are restored because we
   * cannot migrate them.
   *
   * This is the last step before we call the device driver callback to allocate
   * destination memory and copy contents of original page over to new page.
   */
  static void migrate_vma_unmap(struct migrate_vma *migrate)
  {
  	int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
  	const unsigned long npages = migrate->npages;
  	const unsigned long start = migrate->start;
  	unsigned long addr, i, restore = 0;
  
  	for (i = 0; i < npages; i++) {
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  
  		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
  			continue;
  		if (page_mapped(page)) {
  			try_to_unmap(page, flags);
  			if (page_mapped(page))
  				goto restore;
  		}
  
  		if (migrate_vma_check_page(page))
  			continue;
  
  restore:
  		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  		migrate->cpages--;
  		restore++;
  	}
  
  	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  
  		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
  			continue;
  
  		remove_migration_ptes(page, page, false);
  
  		migrate->src[i] = 0;
  		unlock_page(page);
  		restore--;
  		if (is_zone_device_page(page))
  			put_page(page);
  		else
  			putback_lru_page(page);
  	}
  }
  static void migrate_vma_insert_page(struct migrate_vma *migrate,
  				    unsigned long addr,
  				    struct page *page,
  				    unsigned long *src,
  				    unsigned long *dst)
  {
  	struct vm_area_struct *vma = migrate->vma;
  	struct mm_struct *mm = vma->vm_mm;
  	struct mem_cgroup *memcg;
  	bool flush = false;
  	spinlock_t *ptl;
  	pte_t entry;
  	pgd_t *pgdp;
  	p4d_t *p4dp;
  	pud_t *pudp;
  	pmd_t *pmdp;
  	pte_t *ptep;
  
  	/* Only allow populating anonymous memory */
  	if (!vma_is_anonymous(vma))
  		goto abort;
  
  	pgdp = pgd_offset(mm, addr);
  	p4dp = p4d_alloc(mm, pgdp, addr);
  	if (!p4dp)
  		goto abort;
  	pudp = pud_alloc(mm, p4dp, addr);
  	if (!pudp)
  		goto abort;
  	pmdp = pmd_alloc(mm, pudp, addr);
  	if (!pmdp)
  		goto abort;
  
  	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
  		goto abort;
  
  	/*
  	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
  	 * pte_offset_map() on pmds where a huge pmd might be created
  	 * from a different thread.
  	 *
  	 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
  	 * parallel threads are excluded by other means.
  	 *
  	 * Here we only have down_read(mmap_sem).
  	 */
  	if (pte_alloc(mm, pmdp))
  		goto abort;
  
  	/* See the comment in pte_alloc_one_map() */
  	if (unlikely(pmd_trans_unstable(pmdp)))
  		goto abort;
  
  	if (unlikely(anon_vma_prepare(vma)))
  		goto abort;
  	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
  		goto abort;
  
  	/*
  	 * The memory barrier inside __SetPageUptodate makes sure that
  	 * preceding stores to the page contents become visible before
  	 * the set_pte_at() write.
  	 */
  	__SetPageUptodate(page);
  	if (is_zone_device_page(page)) {
  		if (is_device_private_page(page)) {
  			swp_entry_t swp_entry;
  
  			swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
  			entry = swp_entry_to_pte(swp_entry);
  		} else if (is_device_public_page(page)) {
  			entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
  			if (vma->vm_flags & VM_WRITE)
  				entry = pte_mkwrite(pte_mkdirty(entry));
  			entry = pte_mkdevmap(entry);
  		}
  	} else {
  		entry = mk_pte(page, vma->vm_page_prot);
  		if (vma->vm_flags & VM_WRITE)
  			entry = pte_mkwrite(pte_mkdirty(entry));
  	}
  
  	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
  
  	if (pte_present(*ptep)) {
  		unsigned long pfn = pte_pfn(*ptep);
  
  		if (!is_zero_pfn(pfn)) {
  			pte_unmap_unlock(ptep, ptl);
  			mem_cgroup_cancel_charge(page, memcg, false);
  			goto abort;
  		}
  		flush = true;
  	} else if (!pte_none(*ptep)) {
  		pte_unmap_unlock(ptep, ptl);
  		mem_cgroup_cancel_charge(page, memcg, false);
  		goto abort;
  	}
  
  	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
  	 * just back off.
  	 */
  	if (userfaultfd_missing(vma)) {
  		pte_unmap_unlock(ptep, ptl);
  		mem_cgroup_cancel_charge(page, memcg, false);
  		goto abort;
  	}
  
  	inc_mm_counter(mm, MM_ANONPAGES);
  	page_add_new_anon_rmap(page, vma, addr, false);
  	mem_cgroup_commit_charge(page, memcg, false, false);
  	if (!is_zone_device_page(page))
  		lru_cache_add_active_or_unevictable(page, vma);
  	get_page(page);
  
  	if (flush) {
  		flush_cache_page(vma, addr, pte_pfn(*ptep));
  		ptep_clear_flush_notify(vma, addr, ptep);
  		set_pte_at_notify(mm, addr, ptep, entry);
  		update_mmu_cache(vma, addr, ptep);
  	} else {
  		/* No need to invalidate - it was non-present before */
  		set_pte_at(mm, addr, ptep, entry);
  		update_mmu_cache(vma, addr, ptep);
  	}
  
  	pte_unmap_unlock(ptep, ptl);
  	*src = MIGRATE_PFN_MIGRATE;
  	return;
  
  abort:
  	*src &= ~MIGRATE_PFN_MIGRATE;
  }
  /*
   * migrate_vma_pages() - migrate meta-data from src page to dst page
   * @migrate: migrate struct containing all migration information
   *
   * This migrates struct page meta-data from source struct page to destination
   * struct page. This effectively finishes the migration from source page to the
   * destination page.
   */
  static void migrate_vma_pages(struct migrate_vma *migrate)
  {
  	const unsigned long npages = migrate->npages;
  	const unsigned long start = migrate->start;
  	struct mmu_notifier_range range;
  	unsigned long addr, i;
  	bool notified = false;
  
  	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
  		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  		struct address_space *mapping;
  		int r;
  		if (!newpage) {
  			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  			continue;
  		}
  
  		if (!page) {
  			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) {
  				continue;
  			}
  			if (!notified) {
  				notified = true;
  
  				mmu_notifier_range_init(&range,
  							MMU_NOTIFY_CLEAR, 0,
  							NULL,
  							migrate->vma->vm_mm,
  							addr, migrate->end);
  				mmu_notifier_invalidate_range_start(&range);
  			}
  			migrate_vma_insert_page(migrate, addr, newpage,
  						&migrate->src[i],
  						&migrate->dst[i]);
  			continue;
  		}
  
  		mapping = page_mapping(page);
  		if (is_zone_device_page(newpage)) {
  			if (is_device_private_page(newpage)) {
  				/*
				 * For now only support private anonymous pages
				 * when migrating to un-addressable device memory.
  				 */
  				if (mapping) {
  					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  					continue;
  				}
  			} else if (!is_device_public_page(newpage)) {
  				/*
  				 * Other types of ZONE_DEVICE page are not
  				 * supported.
  				 */
  				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  				continue;
  			}
  		}
  		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
  		if (r != MIGRATEPAGE_SUCCESS)
  			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
  	}

  	/*
  	 * No need to double call mmu_notifier->invalidate_range() callback as
  	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
  	 * did already call it.
  	 */
  	if (notified)
  		mmu_notifier_invalidate_range_only_end(&range);
  }
  
  /*
   * migrate_vma_finalize() - restore CPU page table entry
   * @migrate: migrate struct containing all migration information
   *
   * This replaces the special migration pte entry with either a mapping to the
   * new page if migration was successful for that page, or to the original page
   * otherwise.
   *
 * This also unlocks the pages and puts them back on the lru (or drops the
 * extra refcount, for device pages).
   */
  static void migrate_vma_finalize(struct migrate_vma *migrate)
  {
  	const unsigned long npages = migrate->npages;
  	unsigned long i;
  
  	for (i = 0; i < npages; i++) {
  		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
  		struct page *page = migrate_pfn_to_page(migrate->src[i]);
  		if (!page) {
  			if (newpage) {
  				unlock_page(newpage);
  				put_page(newpage);
  			}
  			continue;
  		}
  		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
  			if (newpage) {
  				unlock_page(newpage);
  				put_page(newpage);
  			}
  			newpage = page;
  		}
  
  		remove_migration_ptes(page, newpage, false);
  		unlock_page(page);
  		migrate->cpages--;
  		if (is_zone_device_page(page))
  			put_page(page);
  		else
  			putback_lru_page(page);
  
  		if (newpage != page) {
  			unlock_page(newpage);
  			if (is_zone_device_page(newpage))
  				put_page(newpage);
  			else
  				putback_lru_page(newpage);
  		}
  	}
  }
  
  /*
   * migrate_vma() - migrate a range of memory inside vma
   *
   * @ops: migration callback for allocating destination memory and copying
   * @vma: virtual memory area containing the range to be migrated
   * @start: start address of the range to migrate (inclusive)
   * @end: end address of the range to migrate (exclusive)
 * @src: array of unsigned long holding source migrate pfns (MIGRATE_PFN_* encoded)
 * @dst: array of unsigned long holding destination migrate pfns
   * @private: pointer passed back to each of the callback
   * Returns: 0 on success, error code otherwise
   *
 * This function tries to migrate a range of virtual addresses, using callbacks
 * to allocate and copy memory from source to destination. First it
   * collects all the pages backing each virtual address in the range, saving this
   * inside the src array. Then it locks those pages and unmaps them. Once the pages
   * are locked and unmapped, it checks whether each page is pinned or not. Pages
   * that aren't pinned have the MIGRATE_PFN_MIGRATE flag set (by this function)
   * in the corresponding src array entry. It then restores any pages that are
   * pinned, by remapping and unlocking those pages.
   *
   * At this point it calls the alloc_and_copy() callback. For documentation on
   * what is expected from that callback, see struct migrate_vma_ops comments in
   * include/linux/migrate.h
   *
   * After the alloc_and_copy() callback, this function goes over each entry in
   * the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
   * set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
   * then the function tries to migrate struct page information from the source
   * struct page to the destination struct page. If it fails to migrate the struct
   * page information, then it clears the MIGRATE_PFN_MIGRATE flag in the src
   * array.
   *
   * At this point all successfully migrated pages have an entry in the src
   * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
   * array entry with MIGRATE_PFN_VALID flag set.
   *
   * It then calls the finalize_and_map() callback. See comments for "struct
   * migrate_vma_ops", in include/linux/migrate.h for details about
   * finalize_and_map() behavior.
   *
   * After the finalize_and_map() callback, for successfully migrated pages, this
   * function updates the CPU page table to point to new pages, otherwise it
   * restores the CPU page table to point to the original source pages.
   *
   * Function returns 0 after the above steps, even if no pages were migrated
   * (The function only returns an error if any of the arguments are invalid.)
   *
   * Both src and dst array must be big enough for (end - start) >> PAGE_SHIFT
   * unsigned long entries.
   */
  int migrate_vma(const struct migrate_vma_ops *ops,
  		struct vm_area_struct *vma,
  		unsigned long start,
  		unsigned long end,
  		unsigned long *src,
  		unsigned long *dst,
  		void *private)
  {
  	struct migrate_vma migrate;
  
  	/* Sanity check the arguments */
  	start &= PAGE_MASK;
  	end &= PAGE_MASK;
  	if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL) ||
  			vma_is_dax(vma))
  		return -EINVAL;
  	if (start < vma->vm_start || start >= vma->vm_end)
  		return -EINVAL;
  	if (end <= vma->vm_start || end > vma->vm_end)
  		return -EINVAL;
  	if (!ops || !src || !dst || start >= end)
  		return -EINVAL;
  
  	memset(src, 0, sizeof(*src) * ((end - start) >> PAGE_SHIFT));
  	migrate.src = src;
  	migrate.dst = dst;
  	migrate.start = start;
  	migrate.npages = 0;
  	migrate.cpages = 0;
  	migrate.end = end;
  	migrate.vma = vma;
  
  	/* Collect, and try to unmap source pages */
  	migrate_vma_collect(&migrate);
  	if (!migrate.cpages)
  		return 0;
  
  	/* Lock and isolate page */
  	migrate_vma_prepare(&migrate);
  	if (!migrate.cpages)
  		return 0;
  
  	/* Unmap pages */
  	migrate_vma_unmap(&migrate);
  	if (!migrate.cpages)
  		return 0;
  
  	/*
  	 * At this point pages are locked and unmapped, and thus they have
  	 * stable content and can safely be copied to destination memory that
  	 * is allocated by the callback.
  	 *
	 * Note that migration can still fail for each individual page inside
	 * migrate_vma_pages().
  	 */
  	ops->alloc_and_copy(vma, src, dst, start, end, private);
  
  	/* This does the real migration of struct page */
  	migrate_vma_pages(&migrate);
  
  	ops->finalize_and_map(vma, src, dst, start, end, private);
  
  	/* Unlock and remap pages */
  	migrate_vma_finalize(&migrate);
  
  	return 0;
  }
  EXPORT_SYMBOL(migrate_vma);
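
/*
 * Editor's note: an illustrative, deliberately simplified sketch of how a
 * device driver might drive migrate_vma(); it is not part of mm/migrate.c.
 * The helpers my_alloc_device_page() and my_copy_to_device() are hypothetical
 * driver functions, and the callback signatures follow struct migrate_vma_ops
 * as documented in include/linux/migrate.h (check that header for the
 * authoritative definitions). The caller is assumed to hold mmap_sem for read.
 */
#if 0
static void my_alloc_and_copy(struct vm_area_struct *vma,
			      const unsigned long *src, unsigned long *dst,
			      unsigned long start, unsigned long end,
			      void *private)
{
	unsigned long addr, i;

	for (i = 0, addr = start; addr < end; addr += PAGE_SIZE, i++) {
		struct page *spage = migrate_pfn_to_page(src[i]);
		struct page *dpage;

		/* Skip entries the core decided not to migrate. */
		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = my_alloc_device_page(private);	/* hypothetical */
		if (!dpage)
			continue;			/* dst[i] stays 0 */

		if (spage)
			my_copy_to_device(dpage, spage);	/* hypothetical */

		/* Destination pages are handed over locked. */
		lock_page(dpage);
		dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	}
}

static void my_finalize_and_map(struct vm_area_struct *vma,
				const unsigned long *src,
				const unsigned long *dst,
				unsigned long start, unsigned long end,
				void *private)
{
	/* Check src[i] & MIGRATE_PFN_MIGRATE to see which pages made it. */
}

static const struct migrate_vma_ops my_migrate_ops = {
	.alloc_and_copy		= my_alloc_and_copy,
	.finalize_and_map	= my_finalize_and_map,
};

static int my_migrate_range(struct vm_area_struct *vma,
			    unsigned long start, unsigned long end)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long *src, *dst;
	int ret;

	src = kcalloc(2 * npages, sizeof(*src), GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dst = src + npages;

	ret = migrate_vma(&my_migrate_ops, vma, start, end, src, dst, NULL);

	kfree(src);
	return ret;
}
#endif
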
  #endif /* defined(MIGRATE_VMA_HELPER) */