Commit ee92d4d6d28561c5019da10ed62c5bb8bdcd3c1e

Authored by David Rientjes
Committed by Jiri Slaby
1 parent e644c10bf8

mm, migration: add destination page freeing callback

commit 68711a746345c44ae00c64d8dbac6a9ce13ac54a upstream.

Memory migration uses a callback defined by the caller to determine how to
allocate destination pages.  When migration fails for a source page,
however, it frees the destination page back to the system.

This patch adds a memory migration callback defined by the caller to
determine how to free destination pages.  If a caller, such as memory
compaction, builds its own freelist for migration targets, this can reuse
already freed memory instead of scanning additional memory.

If the caller provides a function to handle freeing of destination pages,
it is called when page migration fails.  If the caller passes NULL then
freeing back to the system will be handled as usual.  This patch
introduces no functional change.

Signed-off-by: David Rientjes <rientjes@google.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>

Showing 7 changed files with 53 additions and 27 deletions (side-by-side diff)

include/linux/migrate.h
... ... @@ -5,7 +5,9 @@
5 5 #include <linux/mempolicy.h>
6 6 #include <linux/migrate_mode.h>
7 7  
8   -typedef struct page *new_page_t(struct page *, unsigned long private, int **);
  8 +typedef struct page *new_page_t(struct page *page, unsigned long private,
  9 + int **reason);
  10 +typedef void free_page_t(struct page *page, unsigned long private);
9 11  
10 12 /*
11 13 * Return values from addresss_space_operations.migratepage():
... ... @@ -39,7 +41,7 @@
39 41 extern void putback_movable_pages(struct list_head *l);
40 42 extern int migrate_page(struct address_space *,
41 43 struct page *, struct page *, enum migrate_mode);
42   -extern int migrate_pages(struct list_head *l, new_page_t x,
  44 +extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
43 45 unsigned long private, enum migrate_mode mode, int reason);
44 46  
45 47 extern int fail_migrate_page(struct address_space *,
... ... @@ -61,8 +63,9 @@
61 63  
62 64 static inline void putback_lru_pages(struct list_head *l) {}
63 65 static inline void putback_movable_pages(struct list_head *l) {}
64   -static inline int migrate_pages(struct list_head *l, new_page_t x,
65   - unsigned long private, enum migrate_mode mode, int reason)
  66 +static inline int migrate_pages(struct list_head *l, new_page_t new,
  67 + free_page_t free, unsigned long private, enum migrate_mode mode,
  68 + int reason)
66 69 { return -ENOSYS; }
67 70  
68 71 static inline int migrate_prep(void) { return -ENOSYS; }
mm/compaction.c
... ... @@ -1014,7 +1014,7 @@
1014 1014 }
1015 1015  
1016 1016 nr_migrate = cc->nr_migratepages;
1017   - err = migrate_pages(&cc->migratepages, compaction_alloc,
  1017 + err = migrate_pages(&cc->migratepages, compaction_alloc, NULL,
1018 1018 (unsigned long)cc,
1019 1019 cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
1020 1020 MR_COMPACTION);
mm/memory-failure.c
... ... @@ -1554,7 +1554,7 @@
1554 1554  
1555 1555 /* Keep page count to indicate a given hugepage is isolated. */
1556 1556 list_move(&hpage->lru, &pagelist);
1557   - ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
  1557 + ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1558 1558 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1559 1559 if (ret) {
1560 1560 pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
... ... @@ -1635,7 +1635,7 @@
1635 1635 inc_zone_page_state(page, NR_ISOLATED_ANON +
1636 1636 page_is_file_cache(page));
1637 1637 list_add(&page->lru, &pagelist);
1638   - ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
  1638 + ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1639 1639 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1640 1640 if (ret) {
1641 1641 putback_lru_pages(&pagelist);
mm/memory_hotplug.c
... ... @@ -1321,7 +1321,7 @@
1321 1321 * alloc_migrate_target should be improooooved!!
1322 1322 * migrate_pages returns # of failed pages.
1323 1323 */
1324   - ret = migrate_pages(&source, alloc_migrate_target, 0,
  1324 + ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
1325 1325 MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
1326 1326 if (ret)
1327 1327 putback_movable_pages(&source);
mm/mempolicy.c
... ... @@ -1060,7 +1060,7 @@
1060 1060 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1061 1061  
1062 1062 if (!list_empty(&pagelist)) {
1063   - err = migrate_pages(&pagelist, new_node_page, dest,
  1063 + err = migrate_pages(&pagelist, new_node_page, NULL, dest,
1064 1064 MIGRATE_SYNC, MR_SYSCALL);
1065 1065 if (err)
1066 1066 putback_movable_pages(&pagelist);
... ... @@ -1306,7 +1306,7 @@
1306 1306  
1307 1307 if (!list_empty(&pagelist)) {
1308 1308 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1309   - nr_failed = migrate_pages(&pagelist, new_page,
  1309 + nr_failed = migrate_pages(&pagelist, new_page, NULL,
1310 1310 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1311 1311 if (nr_failed)
1312 1312 putback_movable_pages(&pagelist);
mm/migrate.c
... ... @@ -867,8 +867,9 @@
867 867 * Obtain the lock on page, remove all ptes and migrate the page
868 868 * to the newly allocated page in newpage.
869 869 */
870   -static int unmap_and_move(new_page_t get_new_page, unsigned long private,
871   - struct page *page, int force, enum migrate_mode mode)
  870 +static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
  871 + unsigned long private, struct page *page, int force,
  872 + enum migrate_mode mode)
872 873 {
873 874 int rc = 0;
874 875 int *result = NULL;
875 876  
876 877  
... ... @@ -912,11 +913,17 @@
912 913 page_is_file_cache(page));
913 914 putback_lru_page(page);
914 915 }
  916 +
915 917 /*
916   - * Move the new page to the LRU. If migration was not successful
917   - * then this will free the page.
  918 + * If migration was not successful and there's a freeing callback, use
  919 + * it. Otherwise, putback_lru_page() will drop the reference grabbed
  920 + * during isolation.
918 921 */
919   - putback_lru_page(newpage);
  922 + if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
  923 + put_new_page(newpage, private);
  924 + else
  925 + putback_lru_page(newpage);
  926 +
920 927 if (result) {
921 928 if (rc)
922 929 *result = rc;
... ... @@ -945,8 +952,9 @@
945 952 * will wait in the page fault for migration to complete.
946 953 */
947 954 static int unmap_and_move_huge_page(new_page_t get_new_page,
948   - unsigned long private, struct page *hpage,
949   - int force, enum migrate_mode mode)
  955 + free_page_t put_new_page, unsigned long private,
  956 + struct page *hpage, int force,
  957 + enum migrate_mode mode)
950 958 {
951 959 int rc = 0;
952 960 int *result = NULL;
953 961  
954 962  
... ... @@ -982,20 +990,30 @@
982 990 if (!page_mapped(hpage))
983 991 rc = move_to_new_page(new_hpage, hpage, 1, mode);
984 992  
985   - if (rc)
  993 + if (rc != MIGRATEPAGE_SUCCESS)
986 994 remove_migration_ptes(hpage, hpage);
987 995  
988 996 if (anon_vma)
989 997 put_anon_vma(anon_vma);
990 998  
991   - if (!rc)
  999 + if (rc == MIGRATEPAGE_SUCCESS)
992 1000 hugetlb_cgroup_migrate(hpage, new_hpage);
993 1001  
994 1002 unlock_page(hpage);
995 1003 out:
996 1004 if (rc != -EAGAIN)
997 1005 putback_active_hugepage(hpage);
998   - put_page(new_hpage);
  1006 +
  1007 + /*
  1008 + * If migration was not successful and there's a freeing callback, use
  1009 + * it. Otherwise, put_page() will drop the reference grabbed during
  1010 + * isolation.
  1011 + */
  1012 + if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
  1013 + put_new_page(new_hpage, private);
  1014 + else
  1015 + put_page(new_hpage);
  1016 +
999 1017 if (result) {
1000 1018 if (rc)
1001 1019 *result = rc;
... ... @@ -1012,6 +1030,8 @@
1012 1030 * @from: The list of pages to be migrated.
1013 1031 * @get_new_page: The function used to allocate free pages to be used
1014 1032 * as the target of the page migration.
  1033 + * @put_new_page: The function used to free target pages if migration
  1034 + * fails, or NULL if no special handling is necessary.
1015 1035 * @private: Private data to be passed on to get_new_page()
1016 1036 * @mode: The migration mode that specifies the constraints for
1017 1037 * page migration, if any.
... ... @@ -1025,7 +1045,8 @@
1025 1045 * Returns the number of pages that were not migrated, or an error code.
1026 1046 */
1027 1047 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1028   - unsigned long private, enum migrate_mode mode, int reason)
  1048 + free_page_t put_new_page, unsigned long private,
  1049 + enum migrate_mode mode, int reason)
1029 1050 {
1030 1051 int retry = 1;
1031 1052 int nr_failed = 0;
1032 1053  
... ... @@ -1047,10 +1068,11 @@
1047 1068  
1048 1069 if (PageHuge(page))
1049 1070 rc = unmap_and_move_huge_page(get_new_page,
1050   - private, page, pass > 2, mode);
  1071 + put_new_page, private, page,
  1072 + pass > 2, mode);
1051 1073 else
1052   - rc = unmap_and_move(get_new_page, private,
1053   - page, pass > 2, mode);
  1074 + rc = unmap_and_move(get_new_page, put_new_page,
  1075 + private, page, pass > 2, mode);
1054 1076  
1055 1077 switch(rc) {
1056 1078 case -ENOMEM:
... ... @@ -1194,7 +1216,7 @@
1194 1216  
1195 1217 err = 0;
1196 1218 if (!list_empty(&pagelist)) {
1197   - err = migrate_pages(&pagelist, new_page_node,
  1219 + err = migrate_pages(&pagelist, new_page_node, NULL,
1198 1220 (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
1199 1221 if (err)
1200 1222 putback_movable_pages(&pagelist);
... ... @@ -1643,7 +1665,8 @@
1643 1665  
1644 1666 list_add(&page->lru, &migratepages);
1645 1667 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
1646   - node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
  1668 + NULL, node, MIGRATE_ASYNC,
  1669 + MR_NUMA_MISPLACED);
1647 1670 if (nr_remaining) {
1648 1671 putback_lru_pages(&migratepages);
1649 1672 isolated = 0;
mm/page_alloc.c
... ... @@ -6204,7 +6204,7 @@
6204 6204 cc->nr_migratepages -= nr_reclaimed;
6205 6205  
6206 6206 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
6207   - 0, MIGRATE_SYNC, MR_CMA);
  6207 + NULL, 0, MIGRATE_SYNC, MR_CMA);
6208 6208 }
6209 6209 if (ret < 0) {
6210 6210 putback_movable_pages(&cc->migratepages);