Commit 685f345708096ed21078aa44a6f4a6e6d1d1b580
Committed by: Linus Torvalds
Parent: e8c5c82498 (1 parent)
Exists in: master and in 4 other branches
hugetlb: use free_pool_huge_page() to return unused surplus pages
Use the [modified] free_pool_huge_page() function to return unused surplus pages. This will help keep huge pages balanced across nodes between freeing of unused surplus pages and freeing of persistent huge pages [from set_max_huge_pages] by using the same node id "cursor". It also eliminates some code duplication.

Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Adam Litke <agl@us.ibm.com>
Cc: Andy Whitcroft <apw@canonical.com>
Cc: Eric Whitney <eric.whitney@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 1 changed file with 24 additions and 33 deletions (side-by-side diff).
mm/hugetlb.c
... | ... | @@ -687,7 +687,7 @@ |
687 | 687 | * balanced over allowed nodes. |
688 | 688 | * Called with hugetlb_lock locked. |
689 | 689 | */ |
690 | -static int free_pool_huge_page(struct hstate *h) | |
690 | +static int free_pool_huge_page(struct hstate *h, bool acct_surplus) | |
691 | 691 | { |
692 | 692 | int start_nid; |
693 | 693 | int next_nid; |
694 | 694 | |
... | ... | @@ -697,13 +697,22 @@ |
697 | 697 | next_nid = start_nid; |
698 | 698 | |
699 | 699 | do { |
700 | - if (!list_empty(&h->hugepage_freelists[next_nid])) { | |
700 | + /* | |
701 | + * If we're returning unused surplus pages, only examine | |
702 | + * nodes with surplus pages. | |
703 | + */ | |
704 | + if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) && | |
705 | + !list_empty(&h->hugepage_freelists[next_nid])) { | |
701 | 706 | struct page *page = |
702 | 707 | list_entry(h->hugepage_freelists[next_nid].next, |
703 | 708 | struct page, lru); |
704 | 709 | list_del(&page->lru); |
705 | 710 | h->free_huge_pages--; |
706 | 711 | h->free_huge_pages_node[next_nid]--; |
712 | + if (acct_surplus) { | |
713 | + h->surplus_huge_pages--; | |
714 | + h->surplus_huge_pages_node[next_nid]--; | |
715 | + } | |
707 | 716 | update_and_free_page(h, page); |
708 | 717 | ret = 1; |
709 | 718 | } |
710 | 719 | |
711 | 720 | |
... | ... | @@ -884,22 +893,13 @@ |
884 | 893 | * When releasing a hugetlb pool reservation, any surplus pages that were |
885 | 894 | * allocated to satisfy the reservation must be explicitly freed if they were |
886 | 895 | * never used. |
896 | + * Called with hugetlb_lock held. | |
887 | 897 | */ |
888 | 898 | static void return_unused_surplus_pages(struct hstate *h, |
889 | 899 | unsigned long unused_resv_pages) |
890 | 900 | { |
891 | - static int nid = -1; | |
892 | - struct page *page; | |
893 | 901 | unsigned long nr_pages; |
894 | 902 | |
895 | - /* | |
896 | - * We want to release as many surplus pages as possible, spread | |
897 | - * evenly across all nodes. Iterate across all nodes until we | |
898 | - * can no longer free unreserved surplus pages. This occurs when | |
899 | - * the nodes with surplus pages have no free pages. | |
900 | - */ | |
901 | - unsigned long remaining_iterations = nr_online_nodes; | |
902 | - | |
903 | 903 | /* Uncommit the reservation */ |
904 | 904 | h->resv_huge_pages -= unused_resv_pages; |
905 | 905 | |
... | ... | @@ -909,26 +909,17 @@ |
909 | 909 | |
910 | 910 | nr_pages = min(unused_resv_pages, h->surplus_huge_pages); |
911 | 911 | |
912 | - while (remaining_iterations-- && nr_pages) { | |
913 | - nid = next_node(nid, node_online_map); | |
914 | - if (nid == MAX_NUMNODES) | |
915 | - nid = first_node(node_online_map); | |
916 | - | |
917 | - if (!h->surplus_huge_pages_node[nid]) | |
918 | - continue; | |
919 | - | |
920 | - if (!list_empty(&h->hugepage_freelists[nid])) { | |
921 | - page = list_entry(h->hugepage_freelists[nid].next, | |
922 | - struct page, lru); | |
923 | - list_del(&page->lru); | |
924 | - update_and_free_page(h, page); | |
925 | - h->free_huge_pages--; | |
926 | - h->free_huge_pages_node[nid]--; | |
927 | - h->surplus_huge_pages--; | |
928 | - h->surplus_huge_pages_node[nid]--; | |
929 | - nr_pages--; | |
930 | - remaining_iterations = nr_online_nodes; | |
931 | - } | |
912 | + /* | |
913 | + * We want to release as many surplus pages as possible, spread | |
914 | + * evenly across all nodes. Iterate across all nodes until we | |
915 | + * can no longer free unreserved surplus pages. This occurs when | |
916 | + * the nodes with surplus pages have no free pages. | |
917 | + * free_pool_huge_page() will balance the frees across the |
918 | + * on-line nodes for us and will handle the hstate accounting. | |
919 | + */ | |
920 | + while (nr_pages--) { | |
921 | + if (!free_pool_huge_page(h, 1)) | |
922 | + break; | |
932 | 923 | } |
933 | 924 | } |
934 | 925 | |
... | ... | @@ -1268,7 +1259,7 @@ |
1268 | 1259 | min_count = max(count, min_count); |
1269 | 1260 | try_to_free_low(h, min_count); |
1270 | 1261 | while (min_count < persistent_huge_pages(h)) { |
1271 | - if (!free_pool_huge_page(h)) | |
1262 | + if (!free_pool_huge_page(h, 0)) | |
1272 | 1263 | break; |
1273 | 1264 | } |
1274 | 1265 | while (count < persistent_huge_pages(h)) { |