Commit f84f6e2b0868f198f97a32ba503d6f9f319a249a
Committed by: Linus Torvalds
Parent: 966dbde2c2
Exists in: master and 6 other branches
mm: vmscan: do not writeback filesystem pages in kswapd except in high priority
It is preferable that no dirty pages are dispatched for cleaning from the page reclaim path. At normal priorities, this patch prevents kswapd from writing pages.

However, page reclaim does have a requirement that pages be freed in a particular zone. If it is failing to make sufficient progress (reclaiming < SWAP_CLUSTER_MAX at any priority), the priority is raised to scan more pages. A priority of DEF_PRIORITY - 3 is considered to be the point where kswapd is getting into trouble reclaiming pages. If this priority is reached, kswapd will dispatch pages for writing.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Alex Elder <aelder@sgi.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
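The threshold arithmetic is easy to get backwards because reclaim priority counts down from DEF_PRIORITY (12) toward 0 as pressure rises. The following is a minimal standalone C sketch, not kernel code, of the gate this patch adds; DEF_PRIORITY is taken from the kernel, while may_writeback_file_page() is a hypothetical stand-in for the patched check in shrink_page_list().

#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY 12	/* kernel default; priority counts down from here */

/*
 * Sketch of the writeback gate: a dirty file page may be written back
 * from reclaim only by kswapd, and only once the scan priority has
 * fallen below DEF_PRIORITY - 2, i.e. to DEF_PRIORITY - 3 or lower.
 * This is the inverse of the patch's skip condition
 * (!current_is_kswapd() || priority >= DEF_PRIORITY - 2).
 */
static bool may_writeback_file_page(bool is_kswapd, int priority)
{
	if (!is_kswapd)
		return false;			/* direct reclaim never writes back */
	return priority < DEF_PRIORITY - 2;	/* kswapd under significant pressure */
}

int main(void)
{
	for (int priority = DEF_PRIORITY; priority >= 0; priority--)
		printf("priority %2d: kswapd %s write back file pages\n",
		       priority,
		       may_writeback_file_page(true, priority) ? "may" : "may not");
	return 0;
}

With DEF_PRIORITY at 12, this permits writeback only from priority 9 downward, matching the DEF_PRIORITY - 3 trouble point described in the commit message.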
Showing 1 changed file with 8 additions and 5 deletions
mm/vmscan.c
@@ -750,7 +750,8 @@
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct zone *zone,
-				      struct scan_control *sc)
+				      struct scan_control *sc,
+				      int priority)
 {
 	LIST_HEAD(ret_pages);
 	LIST_HEAD(free_pages);
@@ -856,9 +857,11 @@
 
 		/*
 		 * Only kswapd can writeback filesystem pages to
-		 * avoid risk of stack overflow
+		 * avoid risk of stack overflow but do not writeback
+		 * unless under significant pressure.
 		 */
-		if (page_is_file_cache(page) && !current_is_kswapd()) {
+		if (page_is_file_cache(page) &&
+				(!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
 			inc_zone_page_state(page, NR_VMSCAN_WRITE_SKIP);
 			goto keep_locked;
 		}
@@ -1509,12 +1512,12 @@
 
 	spin_unlock_irq(&zone->lru_lock);
 
-	nr_reclaimed = shrink_page_list(&page_list, zone, sc);
+	nr_reclaimed = shrink_page_list(&page_list, zone, sc, priority);
 
 	/* Check if we should syncronously wait for writeback */
 	if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
 		set_reclaim_mode(priority, sc, true);
-		nr_reclaimed += shrink_page_list(&page_list, zone, sc);
+		nr_reclaimed += shrink_page_list(&page_list, zone, sc, priority);
 	}
 
 	local_irq_disable();