Commit 39b5f29ac1f988c1615fbc9c69f6651ab0d0c3c7

Authored by Hugh Dickins
Committed by Linus Torvalds
1 parent ec4d9f626d

mm: remove vma arg from page_evictable

page_evictable(page, vma) is an irritant: almost all its callers pass
NULL for vma.  Remove the vma arg and use mlocked_vma_newpage(vma, page)
explicitly in the couple of places it's needed.  But in those places we
don't even need page_evictable() itself!  They're dealing with a freshly
allocated anonymous page, which has no "mapping" and cannot be mlocked yet.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 7 changed files with 18 additions and 32 deletions Side-by-side Diff

Documentation/vm/unevictable-lru.txt
... ... @@ -197,12 +197,8 @@
197 197 freeing them.
198 198  
199 199 page_evictable() also checks for mlocked pages by testing an additional page
200   -flag, PG_mlocked (as wrapped by PageMlocked()). If the page is NOT mlocked,
201   -and a non-NULL VMA is supplied, page_evictable() will check whether the VMA is
202   -VM_LOCKED via is_mlocked_vma(). is_mlocked_vma() will SetPageMlocked() and
203   -update the appropriate statistics if the vma is VM_LOCKED. This method allows
204   -efficient "culling" of pages in the fault path that are being faulted in to
205   -VM_LOCKED VMAs.
  200 +flag, PG_mlocked (as wrapped by PageMlocked()), which is set when a page is
  201 +faulted into a VM_LOCKED vma, or found in a vma being VM_LOCKED.
206 202  
207 203  
208 204 VMSCAN'S HANDLING OF UNEVICTABLE PAGES
... ... @@ -651,7 +647,7 @@
651 647 -------------------------------
652 648  
653 649 shrink_active_list() culls any obviously unevictable pages - i.e.
654   -!page_evictable(page, NULL) - diverting these to the unevictable list.
  650 +!page_evictable(page) - diverting these to the unevictable list.
655 651 However, shrink_active_list() only sees unevictable pages that made it onto the
656 652 active/inactive lru lists. Note that these pages do not have PageUnevictable
657 653 set - otherwise they would be on the unevictable list and shrink_active_list
include/linux/swap.h
... ... @@ -281,7 +281,7 @@
281 281 }
282 282 #endif
283 283  
284   -extern int page_evictable(struct page *page, struct vm_area_struct *vma);
  284 +extern int page_evictable(struct page *page);
285 285 extern void check_move_unevictable_pages(struct page **, int nr_pages);
286 286  
287 287 extern unsigned long scan_unevictable_pages;
... ... @@ -168,9 +168,8 @@
168 168 }
169 169  
170 170 /*
171   - * Called only in fault path via page_evictable() for a new page
172   - * to determine if it's being mapped into a LOCKED vma.
173   - * If so, mark page as mlocked.
  171 + * Called only in fault path, to determine if a new page is being
  172 + * mapped into a LOCKED vma. If it is, mark page as mlocked.
174 173 */
175 174 static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
176 175 struct page *page)
... ... @@ -1586,7 +1586,7 @@
1586 1586 SetPageSwapBacked(new_page);
1587 1587 __set_page_locked(new_page);
1588 1588  
1589   - if (page_evictable(new_page, vma))
  1589 + if (!mlocked_vma_newpage(vma, new_page))
1590 1590 lru_cache_add_lru(new_page, LRU_ACTIVE_ANON);
1591 1591 else
1592 1592 add_page_to_unevictable_list(new_page);
... ... @@ -1080,7 +1080,7 @@
1080 1080 else
1081 1081 __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1082 1082 __page_set_anon_rmap(page, vma, address, 1);
1083   - if (page_evictable(page, vma))
  1083 + if (!mlocked_vma_newpage(vma, page))
1084 1084 lru_cache_add_lru(page, LRU_ACTIVE_ANON);
1085 1085 else
1086 1086 add_page_to_unevictable_list(page);
... ... @@ -751,7 +751,7 @@
751 751  
752 752 SetPageLRU(page_tail);
753 753  
754   - if (page_evictable(page_tail, NULL)) {
  754 + if (page_evictable(page_tail)) {
755 755 if (PageActive(page)) {
756 756 SetPageActive(page_tail);
757 757 active = 1;
... ... @@ -553,7 +553,7 @@
553 553 redo:
554 554 ClearPageUnevictable(page);
555 555  
556   - if (page_evictable(page, NULL)) {
  556 + if (page_evictable(page)) {
557 557 /*
558 558 * For evictable pages, we can use the cache.
559 559 * In event of a race, worst case is we end up with an
... ... @@ -587,7 +587,7 @@
587 587 * page is on unevictable list, it never be freed. To avoid that,
588 588 * check after we added it to the list, again.
589 589 */
590   - if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
  590 + if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
591 591 if (!isolate_lru_page(page)) {
592 592 put_page(page);
593 593 goto redo;
... ... @@ -709,7 +709,7 @@
709 709  
710 710 sc->nr_scanned++;
711 711  
712   - if (unlikely(!page_evictable(page, NULL)))
  712 + if (unlikely(!page_evictable(page)))
713 713 goto cull_mlocked;
714 714  
715 715 if (!sc->may_unmap && page_mapped(page))
... ... @@ -1217,7 +1217,7 @@
1217 1217  
1218 1218 VM_BUG_ON(PageLRU(page));
1219 1219 list_del(&page->lru);
1220   - if (unlikely(!page_evictable(page, NULL))) {
  1220 + if (unlikely(!page_evictable(page))) {
1221 1221 spin_unlock_irq(&zone->lru_lock);
1222 1222 putback_lru_page(page);
1223 1223 spin_lock_irq(&zone->lru_lock);
... ... @@ -1470,7 +1470,7 @@
1470 1470 page = lru_to_page(&l_hold);
1471 1471 list_del(&page->lru);
1472 1472  
1473   - if (unlikely(!page_evictable(page, NULL))) {
  1473 + if (unlikely(!page_evictable(page))) {
1474 1474 putback_lru_page(page);
1475 1475 continue;
1476 1476 }
1477 1477  
1478 1478  
1479 1479  
... ... @@ -3414,27 +3414,18 @@
3414 3414 /*
3415 3415 * page_evictable - test whether a page is evictable
3416 3416 * @page: the page to test
3417   - * @vma: the VMA in which the page is or will be mapped, may be NULL
3418 3417 *
3419 3418 * Test whether page is evictable--i.e., should be placed on active/inactive
3420   - * lists vs unevictable list. The vma argument is !NULL when called from the
3421   - * fault path to determine how to instantiate a new page.
  3419 + * lists vs unevictable list.
3422 3420 *
3423 3421 * Reasons page might not be evictable:
3424 3422 * (1) page's mapping marked unevictable
3425 3423 * (2) page is part of an mlocked VMA
3426 3424 *
3427 3425 */
3428   -int page_evictable(struct page *page, struct vm_area_struct *vma)
  3426 +int page_evictable(struct page *page)
3429 3427 {
3430   -
3431   - if (mapping_unevictable(page_mapping(page)))
3432   - return 0;
3433   -
3434   - if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
3435   - return 0;
3436   -
3437   - return 1;
  3428 + return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
3438 3429 }
3439 3430  
3440 3431 #ifdef CONFIG_SHMEM
... ... @@ -3472,7 +3463,7 @@
3472 3463 if (!PageLRU(page) || !PageUnevictable(page))
3473 3464 continue;
3474 3465  
3475   - if (page_evictable(page, NULL)) {
  3466 + if (page_evictable(page)) {
3476 3467 enum lru_list lru = page_lru_base_type(page);
3477 3468  
3478 3469 VM_BUG_ON(PageActive(page));