Commit e24f0b8f76cc3dd96f36f5b6a9f020f6c3fce198
Committed by
Linus Torvalds
1 parent
8f9de51a4a
Exists in
master
and in
7 other branches
[PATCH] page migration: simplify migrate_pages()
Currently migrate_pages() is a mess with lots of gotos. Extract two functions from migrate_pages() and get rid of the gotos. Plus we can just unconditionally set the locked bit on the new page since we are the only one holding a reference. Locking is to stop others from accessing the page once we establish references to the new page. Remove the list_del from move_to_lru in order to have finer control over list processing. [akpm@osdl.org: add debug check] Signed-off-by: Christoph Lameter <clameter@sgi.com> Cc: Hugh Dickins <hugh@veritas.com> Cc: Jes Sorensen <jes@trained-monkey.org> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Lee Schermerhorn <lee.schermerhorn@hp.com> Cc: Andi Kleen <ak@muc.de> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 1 changed file with 115 additions and 103 deletions Side-by-side Diff
mm/migrate.c
... | ... | @@ -84,7 +84,6 @@ |
84 | 84 | |
85 | 85 | static inline void move_to_lru(struct page *page) |
86 | 86 | { |
87 | - list_del(&page->lru); | |
88 | 87 | if (PageActive(page)) { |
89 | 88 | /* |
90 | 89 | * lru_cache_add_active checks that |
... | ... | @@ -110,6 +109,7 @@ |
110 | 109 | int count = 0; |
111 | 110 | |
112 | 111 | list_for_each_entry_safe(page, page2, l, lru) { |
112 | + list_del(&page->lru); | |
113 | 113 | move_to_lru(page); |
114 | 114 | count++; |
115 | 115 | } |
116 | 116 | |
... | ... | @@ -534,11 +534,108 @@ |
534 | 534 | } |
535 | 535 | |
536 | 536 | /* |
537 | + * Move a page to a newly allocated page | |
538 | + * The page is locked and all ptes have been successfully removed. | |
539 | + * | |
540 | + * The new page will have replaced the old page if this function | |
541 | + * is successful. | |
542 | + */ | |
543 | +static int move_to_new_page(struct page *newpage, struct page *page) | |
544 | +{ | |
545 | + struct address_space *mapping; | |
546 | + int rc; | |
547 | + | |
548 | + /* | |
549 | + * Block others from accessing the page when we get around to | |
550 | + * establishing additional references. We are the only one | |
551 | + * holding a reference to the new page at this point. | |
552 | + */ | |
553 | + if (TestSetPageLocked(newpage)) | |
554 | + BUG(); | |
555 | + | |
556 | + /* Prepare mapping for the new page.*/ | |
557 | + newpage->index = page->index; | |
558 | + newpage->mapping = page->mapping; | |
559 | + | |
560 | + mapping = page_mapping(page); | |
561 | + if (!mapping) | |
562 | + rc = migrate_page(mapping, newpage, page); | |
563 | + else if (mapping->a_ops->migratepage) | |
564 | + /* | |
565 | + * Most pages have a mapping and most filesystems | |
566 | + * should provide a migration function. Anonymous | |
567 | + * pages are part of swap space which also has its | |
568 | + * own migration function. This is the most common | |
569 | + * path for page migration. | |
570 | + */ | |
571 | + rc = mapping->a_ops->migratepage(mapping, | |
572 | + newpage, page); | |
573 | + else | |
574 | + rc = fallback_migrate_page(mapping, newpage, page); | |
575 | + | |
576 | + if (!rc) | |
577 | + remove_migration_ptes(page, newpage); | |
578 | + else | |
579 | + newpage->mapping = NULL; | |
580 | + | |
581 | + unlock_page(newpage); | |
582 | + | |
583 | + return rc; | |
584 | +} | |
585 | + | |
586 | +/* | |
587 | + * Obtain the lock on page, remove all ptes and migrate the page | |
588 | + * to the newly allocated page in newpage. | |
589 | + */ | |
590 | +static int unmap_and_move(struct page *newpage, struct page *page, int force) | |
591 | +{ | |
592 | + int rc = 0; | |
593 | + | |
594 | + if (page_count(page) == 1) | |
595 | + /* page was freed from under us. So we are done. */ | |
596 | + goto ret; | |
597 | + | |
598 | + rc = -EAGAIN; | |
599 | + if (TestSetPageLocked(page)) { | |
600 | + if (!force) | |
601 | + goto ret; | |
602 | + lock_page(page); | |
603 | + } | |
604 | + | |
605 | + if (PageWriteback(page)) { | |
606 | + if (!force) | |
607 | + goto unlock; | |
608 | + wait_on_page_writeback(page); | |
609 | + } | |
610 | + | |
611 | + /* | |
612 | + * Establish migration ptes or remove ptes | |
613 | + */ | |
614 | + if (try_to_unmap(page, 1) != SWAP_FAIL) { | |
615 | + if (!page_mapped(page)) | |
616 | + rc = move_to_new_page(newpage, page); | |
617 | + } else | |
618 | + /* A vma has VM_LOCKED set -> permanent failure */ | |
619 | + rc = -EPERM; | |
620 | + | |
621 | + if (rc) | |
622 | + remove_migration_ptes(page, page); | |
623 | +unlock: | |
624 | + unlock_page(page); | |
625 | +ret: | |
626 | + if (rc != -EAGAIN) { | |
627 | + list_del(&newpage->lru); | |
628 | + move_to_lru(newpage); | |
629 | + } | |
630 | + return rc; | |
631 | +} | |
632 | + | |
633 | +/* | |
537 | 634 | * migrate_pages |
538 | 635 | * |
539 | 636 | * Two lists are passed to this function. The first list |
540 | 637 | * contains the pages isolated from the LRU to be migrated. |
541 | - * The second list contains new pages that the pages isolated | |
638 | + * The second list contains new pages that the isolated pages | |
542 | 639 | * can be moved to. |
543 | 640 | * |
544 | 641 | * The function returns after 10 attempts or if no pages |
... | ... | @@ -550,7 +647,7 @@ |
550 | 647 | int migrate_pages(struct list_head *from, struct list_head *to, |
551 | 648 | struct list_head *moved, struct list_head *failed) |
552 | 649 | { |
553 | - int retry; | |
650 | + int retry = 1; | |
554 | 651 | int nr_failed = 0; |
555 | 652 | int pass = 0; |
556 | 653 | struct page *page; |
557 | 654 | |
558 | 655 | |
559 | 656 | |
560 | 657 | |
561 | 658 | |
562 | 659 | |
563 | 660 | |
564 | 661 | |
565 | 662 | |
... | ... | @@ -561,118 +658,33 @@ |
561 | 658 | if (!swapwrite) |
562 | 659 | current->flags |= PF_SWAPWRITE; |
563 | 660 | |
564 | -redo: | |
565 | - retry = 0; | |
661 | + for(pass = 0; pass < 10 && retry; pass++) { | |
662 | + retry = 0; | |
566 | 663 | |
567 | - list_for_each_entry_safe(page, page2, from, lru) { | |
568 | - struct page *newpage = NULL; | |
569 | - struct address_space *mapping; | |
664 | + list_for_each_entry_safe(page, page2, from, lru) { | |
570 | 665 | |
571 | - cond_resched(); | |
666 | + if (list_empty(to)) | |
667 | + break; | |
572 | 668 | |
573 | - rc = 0; | |
574 | - if (page_count(page) == 1) | |
575 | - /* page was freed from under us. So we are done. */ | |
576 | - goto next; | |
669 | + cond_resched(); | |
577 | 670 | |
578 | - if (to && list_empty(to)) | |
579 | - break; | |
671 | + rc = unmap_and_move(lru_to_page(to), page, pass > 2); | |
580 | 672 | |
581 | - /* | |
582 | - * Skip locked pages during the first two passes to give the | |
583 | - * functions holding the lock time to release the page. Later we | |
584 | - * use lock_page() to have a higher chance of acquiring the | |
585 | - * lock. | |
586 | - */ | |
587 | - rc = -EAGAIN; | |
588 | - if (pass > 2) | |
589 | - lock_page(page); | |
590 | - else | |
591 | - if (TestSetPageLocked(page)) | |
592 | - goto next; | |
593 | - | |
594 | - /* | |
595 | - * Only wait on writeback if we have already done a pass where | |
596 | - * we we may have triggered writeouts for lots of pages. | |
597 | - */ | |
598 | - if (pass > 0) | |
599 | - wait_on_page_writeback(page); | |
600 | - else | |
601 | - if (PageWriteback(page)) | |
602 | - goto unlock_page; | |
603 | - | |
604 | - /* | |
605 | - * Establish migration ptes or remove ptes | |
606 | - */ | |
607 | - rc = -EPERM; | |
608 | - if (try_to_unmap(page, 1) == SWAP_FAIL) | |
609 | - /* A vma has VM_LOCKED set -> permanent failure */ | |
610 | - goto unlock_page; | |
611 | - | |
612 | - rc = -EAGAIN; | |
613 | - if (page_mapped(page)) | |
614 | - goto unlock_page; | |
615 | - | |
616 | - newpage = lru_to_page(to); | |
617 | - lock_page(newpage); | |
618 | - /* Prepare mapping for the new page.*/ | |
619 | - newpage->index = page->index; | |
620 | - newpage->mapping = page->mapping; | |
621 | - | |
622 | - /* | |
623 | - * Pages are properly locked and writeback is complete. | |
624 | - * Try to migrate the page. | |
625 | - */ | |
626 | - mapping = page_mapping(page); | |
627 | - if (!mapping) | |
628 | - rc = migrate_page(mapping, newpage, page); | |
629 | - | |
630 | - else if (mapping->a_ops->migratepage) | |
631 | - /* | |
632 | - * Most pages have a mapping and most filesystems | |
633 | - * should provide a migration function. Anonymous | |
634 | - * pages are part of swap space which also has its | |
635 | - * own migration function. This is the most common | |
636 | - * path for page migration. | |
637 | - */ | |
638 | - rc = mapping->a_ops->migratepage(mapping, | |
639 | - newpage, page); | |
640 | - else | |
641 | - rc = fallback_migrate_page(mapping, newpage, page); | |
642 | - | |
643 | - if (!rc) | |
644 | - remove_migration_ptes(page, newpage); | |
645 | - | |
646 | - unlock_page(newpage); | |
647 | - | |
648 | -unlock_page: | |
649 | - if (rc) | |
650 | - remove_migration_ptes(page, page); | |
651 | - | |
652 | - unlock_page(page); | |
653 | - | |
654 | -next: | |
655 | - if (rc) { | |
656 | - if (newpage) | |
657 | - newpage->mapping = NULL; | |
658 | - | |
659 | - if (rc == -EAGAIN) | |
673 | + switch(rc) { | |
674 | + case -EAGAIN: | |
660 | 675 | retry++; |
661 | - else { | |
676 | + break; | |
677 | + case 0: | |
678 | + list_move(&page->lru, moved); | |
679 | + break; | |
680 | + default: | |
662 | 681 | /* Permanent failure */ |
663 | 682 | list_move(&page->lru, failed); |
664 | 683 | nr_failed++; |
684 | + break; | |
665 | 685 | } |
666 | - } else { | |
667 | - if (newpage) { | |
668 | - /* Successful migration. Return page to LRU */ | |
669 | - move_to_lru(newpage); | |
670 | - } | |
671 | - list_move(&page->lru, moved); | |
672 | 686 | } |
673 | 687 | } |
674 | - if (retry && pass++ < 10) | |
675 | - goto redo; | |
676 | 688 | |
677 | 689 | if (!swapwrite) |
678 | 690 | current->flags &= ~PF_SWAPWRITE; |