Commit 83467efbdb7948146581a56cbd683a22a0684bbb
Committed by
Linus Torvalds
1 parent
c8721bbbdd
Exists in
master
and in
20 other branches
mm: migrate: check movability of hugepage in unmap_and_move_huge_page()
Currently hugepage migration works well only for pmd-based hugepages (mainly due to lack of testing), so we had better not enable migration of other levels of hugepages until we are ready for it. Some users of hugepage migration (mbind, move_pages, and migrate_pages) do page table walk and check pud/pmd_huge() there, so they are safe. But the other users (soft offline and memory hotremove) don't do this, so without this patch they can try to migrate unexpected types of hugepages. To prevent this, we introduce hugepage_migration_support() as an architecture dependent check of whether hugepages are implemented on a pmd basis or not. And on some architectures multiple sizes of hugepages are available, so hugepage_migration_support() also checks hugepage size. Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Andi Kleen <ak@linux.intel.com> Cc: Hillf Danton <dhillf@gmail.com> Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Hugh Dickins <hughd@google.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Michal Hocko <mhocko@suse.cz> Cc: Rik van Riel <riel@redhat.com> Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 13 changed files with 85 additions and 0 deletions Side-by-side Diff
- arch/arm/mm/hugetlbpage.c
- arch/arm64/mm/hugetlbpage.c
- arch/ia64/mm/hugetlbpage.c
- arch/metag/mm/hugetlbpage.c
- arch/mips/mm/hugetlbpage.c
- arch/powerpc/mm/hugetlbpage.c
- arch/s390/mm/hugetlbpage.c
- arch/sh/mm/hugetlbpage.c
- arch/sparc/mm/hugetlbpage.c
- arch/tile/mm/hugetlbpage.c
- arch/x86/mm/hugetlbpage.c
- include/linux/hugetlb.h
- mm/migrate.c
arch/arm/mm/hugetlbpage.c
arch/arm64/mm/hugetlbpage.c
arch/ia64/mm/hugetlbpage.c
arch/metag/mm/hugetlbpage.c
arch/mips/mm/hugetlbpage.c
arch/powerpc/mm/hugetlbpage.c
... | ... | @@ -86,6 +86,11 @@ |
86 | 86 | */ |
87 | 87 | return ((pgd_val(pgd) & 0x3) != 0x0); |
88 | 88 | } |
89 | + | |
90 | +int pmd_huge_support(void) | |
91 | +{ | |
92 | + return 1; | |
93 | +} | |
89 | 94 | #else |
90 | 95 | int pmd_huge(pmd_t pmd) |
91 | 96 | { |
... | ... | @@ -98,6 +103,11 @@ |
98 | 103 | } |
99 | 104 | |
100 | 105 | int pgd_huge(pgd_t pgd) |
106 | +{ | |
107 | + return 0; | |
108 | +} | |
109 | + | |
110 | +int pmd_huge_support(void) | |
101 | 111 | { |
102 | 112 | return 0; |
103 | 113 | } |
arch/s390/mm/hugetlbpage.c
arch/sh/mm/hugetlbpage.c
arch/sparc/mm/hugetlbpage.c
arch/tile/mm/hugetlbpage.c
arch/x86/mm/hugetlbpage.c
... | ... | @@ -59,6 +59,10 @@ |
59 | 59 | return NULL; |
60 | 60 | } |
61 | 61 | |
62 | +int pmd_huge_support(void) | |
63 | +{ | |
64 | + return 0; | |
65 | +} | |
62 | 66 | #else |
63 | 67 | |
64 | 68 | struct page * |
... | ... | @@ -77,6 +81,10 @@ |
77 | 81 | return !!(pud_val(pud) & _PAGE_PSE); |
78 | 82 | } |
79 | 83 | |
84 | +int pmd_huge_support(void) | |
85 | +{ | |
86 | + return 1; | |
87 | +} | |
80 | 88 | #endif |
81 | 89 | |
82 | 90 | /* x86_64 also uses this file */ |
include/linux/hugetlb.h
... | ... | @@ -381,6 +381,16 @@ |
381 | 381 | |
382 | 382 | extern void dissolve_free_huge_pages(unsigned long start_pfn, |
383 | 383 | unsigned long end_pfn); |
384 | +int pmd_huge_support(void); | |
385 | +/* | |
386 | + * Currently hugepage migration is enabled only for pmd-based hugepage. | |
387 | + * This function will be updated when hugepage migration is more widely | |
388 | + * supported. | |
389 | + */ | |
390 | +static inline int hugepage_migration_support(struct hstate *h) | |
391 | +{ | |
392 | + return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT); | |
393 | +} | |
384 | 394 | |
385 | 395 | #else /* CONFIG_HUGETLB_PAGE */ |
386 | 396 | struct hstate {}; |
... | ... | @@ -409,6 +419,8 @@ |
409 | 419 | return page->index; |
410 | 420 | } |
411 | 421 | #define dissolve_free_huge_pages(s, e) do {} while (0) |
422 | +#define pmd_huge_support() 0 | |
423 | +#define hugepage_migration_support(h) 0 | |
412 | 424 | #endif /* CONFIG_HUGETLB_PAGE */ |
413 | 425 | |
414 | 426 | #endif /* _LINUX_HUGETLB_H */ |
mm/migrate.c
... | ... | @@ -949,6 +949,16 @@ |
949 | 949 | struct page *new_hpage = get_new_page(hpage, private, &result); |
950 | 950 | struct anon_vma *anon_vma = NULL; |
951 | 951 | |
952 | + /* | |
953 | + * Movability of hugepages depends on architectures and hugepage size. | |
954 | + * This check is necessary because some callers of hugepage migration | |
955 | + * like soft offline and memory hotremove don't walk through page | |
956 | + * tables or check whether the hugepage is pmd-based or not before | |
957 | + * kicking migration. | |
958 | + */ | |
959 | + if (!hugepage_migration_support(page_hstate(hpage))) | |
960 | + return -ENOSYS; | |
961 | + | |
952 | 962 | if (!new_hpage) |
953 | 963 | return -ENOMEM; |
954 | 964 |