Commit 98094945785464c657d598291d714d11694c8cd9

Authored by Naoya Horiguchi
Committed by Linus Torvalds
1 parent 86cdb465cf

mm/mempolicy: rename check_*range to queue_pages_*range

The function check_range() (and its family) is not well named, because it
does not only check something; it also moves pages from list to list to
queue them for page migration.  So queue_pages_*range is a more suitable name.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
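
The rename's point, in miniature: a "check"-style walker would only test
each page, while these walkers also move matching pages onto a pagelist
for later migration.  Below is a toy userspace sketch of that behavioral
difference; every name in it is hypothetical and it is not kernel code.

	#include <stdio.h>

	/* Toy stand-in for struct page, with pagelist linkage. */
	struct toy_page {
		int node;               /* node the page currently lives on */
		struct toy_page *next;  /* singly linked pagelist */
	};

	/*
	 * Not just a check: pages that are NOT on @allowed_node are
	 * moved onto @pagelist so the caller can migrate them later,
	 * mirroring what queue_pages_pte_range() does via
	 * migrate_page_add() in the diff below.
	 */
	static int toy_queue_pages_range(struct toy_page *pages, int npages,
					 int allowed_node,
					 struct toy_page **pagelist)
	{
		int queued = 0;

		for (int i = 0; i < npages; i++) {
			if (pages[i].node == allowed_node)
				continue;               /* placed correctly */
			pages[i].next = *pagelist;      /* queue it */
			*pagelist = &pages[i];
			queued++;
		}
		return queued;
	}

	int main(void)
	{
		struct toy_page pages[4] = { {0}, {1}, {0}, {2} };
		struct toy_page *pagelist = NULL;

		int n = toy_queue_pages_range(pages, 4, 0, &pagelist);
		printf("queued %d page(s) for migration\n", n);
		return 0;
	}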

Showing 1 changed file with 23 additions and 18 deletions

... ... @@ -476,8 +476,11 @@
476 476 static void migrate_page_add(struct page *page, struct list_head *pagelist,
477 477 unsigned long flags);
478 478  
479   -/* Scan through pages checking if pages follow certain conditions. */
480   -static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
  479 +/*
  480 + * Scan through pages checking if pages follow certain conditions,
  481 + * and move them to the pagelist if they do.
  482 + */
  483 +static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
481 484 unsigned long addr, unsigned long end,
482 485 const nodemask_t *nodes, unsigned long flags,
483 486 void *private)
... ... @@ -515,8 +518,8 @@
515 518 return addr != end;
516 519 }
517 520  
518   -static void check_hugetlb_pmd_range(struct vm_area_struct *vma, pmd_t *pmd,
519   - const nodemask_t *nodes, unsigned long flags,
  521 +static void queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma,
  522 + pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,
520 523 void *private)
521 524 {
522 525 #ifdef CONFIG_HUGETLB_PAGE
... ... @@ -539,7 +542,7 @@
539 542 #endif
540 543 }
541 544  
542   -static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
  545 +static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,
543 546 unsigned long addr, unsigned long end,
544 547 const nodemask_t *nodes, unsigned long flags,
545 548 void *private)
546 549  
547 550  
... ... @@ -553,21 +556,21 @@
553 556 if (!pmd_present(*pmd))
554 557 continue;
555 558 if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {
556   - check_hugetlb_pmd_range(vma, pmd, nodes,
  559 + queue_pages_hugetlb_pmd_range(vma, pmd, nodes,
557 560 flags, private);
558 561 continue;
559 562 }
560 563 split_huge_page_pmd(vma, addr, pmd);
561 564 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
562 565 continue;
563   - if (check_pte_range(vma, pmd, addr, next, nodes,
  566 + if (queue_pages_pte_range(vma, pmd, addr, next, nodes,
564 567 flags, private))
565 568 return -EIO;
566 569 } while (pmd++, addr = next, addr != end);
567 570 return 0;
568 571 }
569 572  
570   -static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
  573 +static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
571 574 unsigned long addr, unsigned long end,
572 575 const nodemask_t *nodes, unsigned long flags,
573 576 void *private)
574 577  
... ... @@ -582,14 +585,14 @@
582 585 continue;
583 586 if (pud_none_or_clear_bad(pud))
584 587 continue;
585   - if (check_pmd_range(vma, pud, addr, next, nodes,
  588 + if (queue_pages_pmd_range(vma, pud, addr, next, nodes,
586 589 flags, private))
587 590 return -EIO;
588 591 } while (pud++, addr = next, addr != end);
589 592 return 0;
590 593 }
591 594  
592   -static inline int check_pgd_range(struct vm_area_struct *vma,
  595 +static inline int queue_pages_pgd_range(struct vm_area_struct *vma,
593 596 unsigned long addr, unsigned long end,
594 597 const nodemask_t *nodes, unsigned long flags,
595 598 void *private)
... ... @@ -602,7 +605,7 @@
602 605 next = pgd_addr_end(addr, end);
603 606 if (pgd_none_or_clear_bad(pgd))
604 607 continue;
605   - if (check_pud_range(vma, pgd, addr, next, nodes,
  608 + if (queue_pages_pud_range(vma, pgd, addr, next, nodes,
606 609 flags, private))
607 610 return -EIO;
608 611 } while (pgd++, addr = next, addr != end);
609 612  
... ... @@ -640,12 +643,14 @@
640 643 #endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
641 644  
642 645 /*
643   - * Check if all pages in a range are on a set of nodes.
644   - * If pagelist != NULL then isolate pages from the LRU and
645   - * put them on the pagelist.
  646 + * Walk through page tables and collect pages to be migrated.
  647 + *
  648 + * If pages found in a given range are on a set of nodes (determined by
  649 + * @nodes and @flags), they are isolated and queued to the pagelist,
  650 + * which is passed via @private.
646 651 */
647 652 static struct vm_area_struct *
648   -check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
  653 +queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
649 654 const nodemask_t *nodes, unsigned long flags, void *private)
650 655 {
651 656 int err;
... ... @@ -680,7 +685,7 @@
680 685 ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
681 686 vma_migratable(vma))) {
682 687  
683   - err = check_pgd_range(vma, start, endvma, nodes,
  688 + err = queue_pages_pgd_range(vma, start, endvma, nodes,
684 689 flags, private);
685 690 if (err) {
686 691 first = ERR_PTR(err);
... ... @@ -1050,7 +1055,7 @@
1050 1055 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1051 1056 */
1052 1057 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1053   - check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
  1058 + queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1054 1059 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1055 1060  
1056 1061 if (!list_empty(&pagelist)) {
... ... @@ -1288,7 +1293,7 @@
1288 1293 if (err)
1289 1294 goto mpol_out;
1290 1295  
1291   - vma = check_range(mm, start, end, nmask,
  1296 + vma = queue_pages_range(mm, start, end, nmask,
1292 1297 flags | MPOL_MF_INVERT, &pagelist);
1293 1298  
1294 1299 err = PTR_ERR(vma); /* maybe ... */
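
Both call sites in this diff follow the same shape: populate a pagelist
with queue_pages_range(), then hand it to the migration core.  A condensed
sketch of the do_mbind() side is below; the migrate_pages() arguments
(new_vma_page, MIGRATE_SYNC, MR_MEMPOLICY_MBIND) come from the surrounding
mempolicy.c of this era and are not shown in the hunks above, so treat
them as an assumption.

	LIST_HEAD(pagelist);

	/* Queue pages in [start, end) that violate the policy ... */
	vma = queue_pages_range(mm, start, end, nmask,
				flags | MPOL_MF_INVERT, &pagelist);

	err = PTR_ERR(vma);
	if (!IS_ERR(vma)) {
		err = mbind_range(mm, start, end, new);

		/* ... and migrate whatever got queued. */
		if (!list_empty(&pagelist))
			nr_failed = migrate_pages(&pagelist, new_vma_page,
						  (unsigned long)vma,
						  MIGRATE_SYNC,
						  MR_MEMPOLICY_MBIND);
	}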