Commit cc5d462f7777c06c5cf0b55d736be325cda747b3

Authored by Andi Kleen
Committed by Linus Torvalds
1 parent 78afd5612d

mm: use __GFP_OTHER_NODE for transparent huge pages

Pass __GFP_OTHER_NODE for transparent hugepage NUMA allocations done by the
khugepaged daemon.  This way the low-level accounting of local versus
remote pages works correctly.

Contains improvements from Andrea Arcangeli

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
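
[Editor's note: the "low-level accounting" referred to above is the NUMA
hit/miss bookkeeping done by zone_statistics(), which the parent commit
78afd5612d taught to take a gfp flags argument.  A rough sketch of that logic
(paraphrased from memory of the parent commit, not part of this diff):

	void zone_statistics(struct zone *preferred_zone, struct zone *z,
			     gfp_t flags)
	{
		if (z->zone_pgdat == preferred_zone->zone_pgdat) {
			__inc_zone_state(z, NUMA_HIT);
		} else {
			__inc_zone_state(z, NUMA_MISS);
			__inc_zone_state(preferred_zone, NUMA_FOREIGN);
		}

		/*
		 * With __GFP_OTHER_NODE the allocation is made on behalf of
		 * another node, so "local" is judged against the preferred
		 * (target) node rather than the node the allocating CPU
		 * happens to be running on.
		 */
		if (z->node == ((flags & __GFP_OTHER_NODE) ?
				preferred_zone->node : numa_node_id()))
			__inc_zone_state(z, NUMA_LOCAL);
		else
			__inc_zone_state(z, NUMA_OTHER);
	}

Without the flag, every hugepage khugepaged allocates would be counted as
local or remote relative to whichever CPU khugepaged happens to run on,
skewing the numa_local/numa_other counters.]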

Showing 1 changed file with 11 additions and 9 deletions

@@ -643,23 +643,24 @@
 	return ret;
 }
 
-static inline gfp_t alloc_hugepage_gfpmask(int defrag)
+static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 {
-	return GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT);
+	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 }
 
 static inline struct page *alloc_hugepage_vma(int defrag,
 					      struct vm_area_struct *vma,
-					      unsigned long haddr, int nd)
+					      unsigned long haddr, int nd,
+					      gfp_t extra_gfp)
 {
-	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
+	return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
 			       HPAGE_PMD_ORDER, vma, haddr, nd);
 }
 
 #ifndef CONFIG_NUMA
 static inline struct page *alloc_hugepage(int defrag)
 {
-	return alloc_pages(alloc_hugepage_gfpmask(defrag),
+	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
 			   HPAGE_PMD_ORDER);
 }
 #endif
@@ -678,7 +679,7 @@
 	if (unlikely(khugepaged_enter(vma)))
 		return VM_FAULT_OOM;
 	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-				  vma, haddr, numa_node_id());
+				  vma, haddr, numa_node_id(), 0);
 	if (unlikely(!page))
 		goto out;
 	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -799,7 +800,8 @@
 	}
 
 	for (i = 0; i < HPAGE_PMD_NR; i++) {
-		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
+		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
+					       __GFP_OTHER_NODE,
 					       vma, address, page_to_nid(page));
 		if (unlikely(!pages[i] ||
 			     mem_cgroup_newpage_charge(pages[i], mm,
@@ -902,7 +904,7 @@
 	if (transparent_hugepage_enabled(vma) &&
 	    !transparent_hugepage_debug_cow())
 		new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					      vma, haddr, numa_node_id());
+					      vma, haddr, numa_node_id(), 0);
 	else
 		new_page = NULL;
 
@@ -1779,7 +1781,7 @@
 	 * scalability.
 	 */
 	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
-				      node);
+				      node, __GFP_OTHER_NODE);
 	if (unlikely(!new_page)) {
 		up_read(&mm->mmap_sem);
 		*hpage = ERR_PTR(-ENOMEM);
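
[Editor's note: in this diff, __GFP_OTHER_NODE is only passed by the call
sites that target a node chosen independently of the current CPU: the
khugepaged collapse path, which allocates on the node of the pages being
collapsed, and the copy-on-write fallback, which places the small copies on
the node of the existing huge page.  The ordinary fault and COW huge-page
allocations target numa_node_id() and pass 0, since accounting locality
against the current node is already correct there.]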