Commit 5ced66c901f1cf0b684feb15c2cd8b126e263d07
Committed by: Linus Torvalds
1 parent: a343787016
Exists in: master and in 7 other branches
hugetlb: abstract numa round robin selection
Need this as a separate function for a future patch. No behaviour change.

Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 1 changed file with 22 additions and 15 deletions
mm/hugetlb.c
@@ -565,6 +565,27 @@
 	return page;
 }
 
+/*
+ * Use a helper variable to find the next node and then
+ * copy it back to hugetlb_next_nid afterwards:
+ * otherwise there's a window in which a racer might
+ * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+ * But we don't need to use a spin_lock here: it really
+ * doesn't matter if occasionally a racer chooses the
+ * same nid as we do. Move nid forward in the mask even
+ * if we just successfully allocated a hugepage so that
+ * the next caller gets hugepages on the next node.
+ */
+static int hstate_next_node(struct hstate *h)
+{
+	int next_nid;
+	next_nid = next_node(h->hugetlb_next_nid, node_online_map);
+	if (next_nid == MAX_NUMNODES)
+		next_nid = first_node(node_online_map);
+	h->hugetlb_next_nid = next_nid;
+	return next_nid;
+}
+
 static int alloc_fresh_huge_page(struct hstate *h)
 {
 	struct page *page;
@@ -578,21 +599,7 @@
 		page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
 		if (page)
 			ret = 1;
-		/*
-		 * Use a helper variable to find the next node and then
-		 * copy it back to hugetlb_next_nid afterwards:
-		 * otherwise there's a window in which a racer might
-		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
-		 * But we don't need to use a spin_lock here: it really
-		 * doesn't matter if occasionally a racer chooses the
-		 * same nid as we do. Move nid forward in the mask even
-		 * if we just successfully allocated a hugepage so that
-		 * the next caller gets hugepages on the next node.
-		 */
-		next_nid = next_node(h->hugetlb_next_nid, node_online_map);
-		if (next_nid == MAX_NUMNODES)
-			next_nid = first_node(node_online_map);
-		h->hugetlb_next_nid = next_nid;
+		next_nid = hstate_next_node(h);
 	} while (!page && h->hugetlb_next_nid != start_nid);
 
 	if (ret)
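The extracted helper keeps the lockless round-robin idiom intact: compute the successor in a local variable, wrap it, and only then publish it to the shared field, so a concurrent caller can never read an out-of-range value such as MAX_NUMNODES. Below is a minimal userspace sketch of that same pattern; the NR_NODES constant, the pick_next_node() name, and the test harness are illustrative stand-ins, not kernel code.

/*
 * Illustrative userspace analogue of hstate_next_node():
 * advance a shared round-robin cursor via a local variable
 * so that other threads never observe an out-of-range index.
 */
#include <stdio.h>

#define NR_NODES 4			/* stand-in for the online node count */

static int next_nid;			/* shared cursor, like h->hugetlb_next_nid */

static int pick_next_node(void)
{
	/* Compute the successor in a local first... */
	int next = next_nid + 1;

	/*
	 * ...and wrap around before publishing, so no concurrent
	 * reader ever sees an index >= NR_NODES (the race window
	 * the kernel comment describes for MAX_NUMNODES).
	 */
	if (next == NR_NODES)
		next = 0;
	next_nid = next;
	return next;
}

int main(void)
{
	/* Successive callers walk the nodes in order: 1 2 3 0 1 2 */
	for (int i = 0; i < 6; i++)
		printf("allocating on node %d\n", pick_next_node());
	return 0;
}

As the original comment notes, the unlocked store is deliberate: two racing callers occasionally picking the same node is harmless, so the cost of a spin_lock is not justified; only publishing an invalid index would be a bug, and the local-variable dance rules that out.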