Commit d498471133ff1f9586a06820beaeebc575fe2814
Committed by
Linus Torvalds
1 parent
ee27497df3
Exists in
master
and in
20 other branches
[PATCH] SwapMig: Extend parameters for migrate_pages()
Extend the parameters of migrate_pages() to allow the caller control over the fate of successfully migrated or impossible to migrate pages. Swap migration and direct migration will have the same interface after this patch so that patches can be independently applied to the policy layer and the core migration code.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 3 changed files with 32 additions and 15 deletions Side-by-side Diff
include/linux/swap.h
... | ... | @@ -178,7 +178,8 @@ |
178 | 178 | #ifdef CONFIG_MIGRATION |
179 | 179 | extern int isolate_lru_page(struct page *p); |
180 | 180 | extern int putback_lru_pages(struct list_head *l); |
181 | -extern int migrate_pages(struct list_head *l, struct list_head *t); | |
181 | +extern int migrate_pages(struct list_head *l, struct list_head *t, | |
182 | + struct list_head *moved, struct list_head *failed); | |
182 | 183 | #endif |
183 | 184 | |
184 | 185 | #ifdef CONFIG_MMU |
mm/mempolicy.c
... | ... | @@ -429,6 +429,19 @@ |
429 | 429 | return mpol_check_policy(mode, nodes); |
430 | 430 | } |
431 | 431 | |
432 | +static int swap_pages(struct list_head *pagelist) | |
433 | +{ | |
434 | + LIST_HEAD(moved); | |
435 | + LIST_HEAD(failed); | |
436 | + int n; | |
437 | + | |
438 | + n = migrate_pages(pagelist, NULL, &moved, &failed); | |
439 | + putback_lru_pages(&failed); | |
440 | + putback_lru_pages(&moved); | |
441 | + | |
442 | + return n; | |
443 | +} | |
444 | + | |
432 | 445 | long do_mbind(unsigned long start, unsigned long len, |
433 | 446 | unsigned long mode, nodemask_t *nmask, unsigned long flags) |
434 | 447 | { |
435 | 448 | |
... | ... | @@ -481,10 +494,13 @@ |
481 | 494 | (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) ? &pagelist : NULL); |
482 | 495 | err = PTR_ERR(vma); |
483 | 496 | if (!IS_ERR(vma)) { |
497 | + int nr_failed = 0; | |
498 | + | |
484 | 499 | err = mbind_range(vma, start, end, new); |
485 | 500 | if (!list_empty(&pagelist)) |
486 | - migrate_pages(&pagelist, NULL); | |
487 | - if (!err && !list_empty(&pagelist) && (flags & MPOL_MF_STRICT)) | |
501 | + nr_failed = swap_pages(&pagelist); | |
502 | + | |
503 | + if (!err && nr_failed && (flags & MPOL_MF_STRICT)) | |
488 | 504 | err = -EIO; |
489 | 505 | } |
490 | 506 | if (!list_empty(&pagelist)) |
491 | 507 | |
492 | 508 | |
... | ... | @@ -635,11 +651,12 @@ |
635 | 651 | down_read(&mm->mmap_sem); |
636 | 652 | check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nodes, |
637 | 653 | flags | MPOL_MF_DISCONTIG_OK, &pagelist); |
654 | + | |
638 | 655 | if (!list_empty(&pagelist)) { |
639 | - migrate_pages(&pagelist, NULL); | |
640 | - if (!list_empty(&pagelist)) | |
641 | - count = putback_lru_pages(&pagelist); | |
656 | + count = swap_pages(&pagelist); | |
657 | + putback_lru_pages(&pagelist); | |
642 | 658 | } |
659 | + | |
643 | 660 | up_read(&mm->mmap_sem); |
644 | 661 | return count; |
645 | 662 | } |
mm/vmscan.c
... | ... | @@ -670,10 +670,10 @@ |
670 | 670 | * list. The direct migration patchset |
671 | 671 | * extends this function to avoid the use of swap. |
672 | 672 | */ |
673 | -int migrate_pages(struct list_head *l, struct list_head *t) | |
673 | +int migrate_pages(struct list_head *from, struct list_head *to, | |
674 | + struct list_head *moved, struct list_head *failed) | |
674 | 675 | { |
675 | 676 | int retry; |
676 | - LIST_HEAD(failed); | |
677 | 677 | int nr_failed = 0; |
678 | 678 | int pass = 0; |
679 | 679 | struct page *page; |
680 | 680 | |
... | ... | @@ -686,12 +686,12 @@ |
686 | 686 | redo: |
687 | 687 | retry = 0; |
688 | 688 | |
689 | - list_for_each_entry_safe(page, page2, l, lru) { | |
689 | + list_for_each_entry_safe(page, page2, from, lru) { | |
690 | 690 | cond_resched(); |
691 | 691 | |
692 | 692 | if (page_count(page) == 1) { |
693 | 693 | /* page was freed from under us. So we are done. */ |
694 | - move_to_lru(page); | |
694 | + list_move(&page->lru, moved); | |
695 | 695 | continue; |
696 | 696 | } |
697 | 697 | /* |
... | ... | @@ -722,7 +722,7 @@ |
722 | 722 | if (PageAnon(page) && !PageSwapCache(page)) { |
723 | 723 | if (!add_to_swap(page, GFP_KERNEL)) { |
724 | 724 | unlock_page(page); |
725 | - list_move(&page->lru, &failed); | |
725 | + list_move(&page->lru, failed); | |
726 | 726 | nr_failed++; |
727 | 727 | continue; |
728 | 728 | } |
729 | 729 | |
... | ... | @@ -732,8 +732,10 @@ |
732 | 732 | * Page is properly locked and writeback is complete. |
733 | 733 | * Try to migrate the page. |
734 | 734 | */ |
735 | - if (!swap_page(page)) | |
735 | + if (!swap_page(page)) { | |
736 | + list_move(&page->lru, moved); | |
736 | 737 | continue; |
738 | + } | |
737 | 739 | retry_later: |
738 | 740 | retry++; |
739 | 741 | } |
... | ... | @@ -742,9 +744,6 @@ |
742 | 744 | |
743 | 745 | if (!swapwrite) |
744 | 746 | current->flags &= ~PF_SWAPWRITE; |
745 | - | |
746 | - if (!list_empty(&failed)) | |
747 | - list_splice(&failed, l); | |
748 | 747 | |
749 | 748 | return nr_failed + retry; |
750 | 749 | } |