Commit e180cf806a93ea1abbce47b245d25204ff557ce9

Authored by Kirill A. Shutemov
Committed by Linus Torvalds
1 parent ef2a2cbdda

thp, mm: avoid PageUnevictable on active/inactive lru lists

active/inactive lru lists can contain unevictable pages (i.e.  ramfs pages
that have been placed on the LRU lists when first allocated), but these
pages must not have PageUnevictable set - otherwise shrink_[in]active_list
hits a BUG():

kernel BUG at /home/space/kas/git/public/linux-next/mm/vmscan.c:1122!

1090 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1091                 struct lruvec *lruvec, struct list_head *dst,
1092                 unsigned long *nr_scanned, struct scan_control *sc,
1093                 isolate_mode_t mode, enum lru_list lru)
1094 {
...
1108                 switch (__isolate_lru_page(page, mode)) {
1109                 case 0:
...
1116                 case -EBUSY:
...
1121                 default:
1122                         BUG();
1123                 }
1124         }
...
1130 }

__isolate_lru_page() returns EINVAL for PageUnevictable(page).

For lru_add_page_tail(), it means we should not set PageUnevictable()
for tail pages unless we're sure that it will go to LRU_UNEVICTABLE.
Let's just copy PG_active and PG_unevictable from head page in
__split_huge_page_refcount(), it will simplify lru_add_page_tail().

This will fix one more bug in lru_add_page_tail(): if
page_evictable(page_tail) is false and PageLRU(page) is true, page_tail
will go to the same lru as page, but nobody cares to sync page_tail
active/inactive state with page.  So we can end up with inactive page on
active lru.  The patch will fix it as well since we copy PG_active from
head page.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 2 changed files with 5 additions and 19 deletions Side-by-side Diff

... ... @@ -1620,7 +1620,9 @@
1620 1620 ((1L << PG_referenced) |
1621 1621 (1L << PG_swapbacked) |
1622 1622 (1L << PG_mlocked) |
1623   - (1L << PG_uptodate)));
  1623 + (1L << PG_uptodate) |
  1624 + (1L << PG_active) |
  1625 + (1L << PG_unevictable)));
1624 1626 page_tail->flags |= (1L << PG_dirty);
1625 1627  
1626 1628 /* clear PageTail before overwriting first_page */
... ... @@ -770,8 +770,6 @@
770 770 void lru_add_page_tail(struct page *page, struct page *page_tail,
771 771 struct lruvec *lruvec, struct list_head *list)
772 772 {
773   - int uninitialized_var(active);
774   - enum lru_list lru;
775 773 const int file = 0;
776 774  
777 775 VM_BUG_ON(!PageHead(page));
... ... @@ -783,20 +781,6 @@
783 781 if (!list)
784 782 SetPageLRU(page_tail);
785 783  
786   - if (page_evictable(page_tail)) {
787   - if (PageActive(page)) {
788   - SetPageActive(page_tail);
789   - active = 1;
790   - lru = LRU_ACTIVE_ANON;
791   - } else {
792   - active = 0;
793   - lru = LRU_INACTIVE_ANON;
794   - }
795   - } else {
796   - SetPageUnevictable(page_tail);
797   - lru = LRU_UNEVICTABLE;
798   - }
799   -
800 784 if (likely(PageLRU(page)))
801 785 list_add_tail(&page_tail->lru, &page->lru);
802 786 else if (list) {
803 787  
... ... @@ -812,13 +796,13 @@
812 796 * Use the standard add function to put page_tail on the list,
813 797 * but then correct its position so they all end up in order.
814 798 */
815   - add_page_to_lru_list(page_tail, lruvec, lru);
  799 + add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
816 800 list_head = page_tail->lru.prev;
817 801 list_move_tail(&page_tail->lru, list_head);
818 802 }
819 803  
820 804 if (!PageUnevictable(page))
821   - update_page_reclaim_stat(lruvec, file, active);
  805 + update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
822 806 }
823 807 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
824 808