Commit 6e6938b6d3130305a5960c86b1a9b21e58cf6144

Authored by Wu Fengguang
1 parent 59c5f46fbe

writeback: introduce .tagged_writepages for the WB_SYNC_NONE sync stage

sync(2) is performed in two stages: the WB_SYNC_NONE sync and the
WB_SYNC_ALL sync. Identify the first stage with .tagged_writepages and
do livelock prevention for it, too.
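
For orientation, here is a minimal user-space sketch of the two-stage flow. The names below (struct wbc_model, writeback_pass()) are illustrative only, not the kernel's actual sync(2) code path:

#include <stdio.h>

enum writeback_sync_modes { WB_SYNC_NONE, WB_SYNC_ALL };

struct wbc_model {
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;	/* flag introduced by this patch */
};

static void writeback_pass(const struct wbc_model *wbc)
{
	/* With this patch, both sync stages use the tag-and-write scheme. */
	int tag_and_write = (wbc->sync_mode == WB_SYNC_ALL ||
			     wbc->tagged_writepages);

	printf("%s stage: %s\n",
	       wbc->sync_mode == WB_SYNC_ALL ? "WB_SYNC_ALL" : "WB_SYNC_NONE",
	       tag_and_write ? "tag dirty pages, then write only tagged pages"
			     : "write whatever is dirty (can livelock)");
}

int main(void)
{
	/* Stage 1: opportunistic flush, no waiting, now tagged as well. */
	struct wbc_model stage1 = { .sync_mode = WB_SYNC_NONE,
				    .tagged_writepages = 1 };
	/* Stage 2: write and wait for everything that was tagged. */
	struct wbc_model stage2 = { .sync_mode = WB_SYNC_ALL };

	writeback_pass(&stage1);
	writeback_pass(&stage2);
	return 0;
}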

Jan's commit f446daaea9 ("mm: implement writeback livelock avoidance
using page tagging") is a partial fix in that it only fixed the
WB_SYNC_ALL phase livelock.
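
For reference, a toy user-space model of the tag-and-write idea; the page array and helper names below are illustrative, not mm/page-writeback.c code. Pages dirty at the start of a pass get tagged, the pass visits only tagged pages, and pages dirtied afterwards are left for a later pass, so the walk cannot be extended indefinitely:

#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 8

struct page_model {
	bool dirty;	/* analogue of PAGECACHE_TAG_DIRTY */
	bool towrite;	/* analogue of PAGECACHE_TAG_TOWRITE */
};

/* Snapshot step: tag everything that is dirty right now. */
static void tag_pages_for_writeback_model(struct page_model *pages, int nr)
{
	for (int i = 0; i < nr; i++)
		if (pages[i].dirty)
			pages[i].towrite = true;
}

/* Write step: only touch tagged pages; pages dirtied later are skipped. */
static void write_tagged_pages(struct page_model *pages, int nr)
{
	for (int i = 0; i < nr; i++) {
		if (!pages[i].towrite)
			continue;
		pages[i].dirty = pages[i].towrite = false;
		printf("wrote page %d\n", i);
	}
}

int main(void)
{
	struct page_model pages[NR_PAGES] = { [0].dirty = true, [3].dirty = true };

	tag_pages_for_writeback_model(pages, NR_PAGES);
	pages[5].dirty = true;	/* dirtied after the snapshot: not written now */
	write_tagged_pages(pages, NR_PAGES);
	return 0;
}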

Although ext4 has been tested to no longer livelock with commit f446daaea9,
that may be due to some "redirty_tail() after pages_skipped" effect, which
is by no means a guarantee for _all_ the file systems.

Note that writeback_inodes_sb() is not called only by sync(); all of its
callers are treated the same because the other callers also need livelock
prevention.

Impact: it changes the order in which pages/inodes are synced to disk.
In the WB_SYNC_NONE stage, writeback now will not proceed to the next
inode until it has finished with the current one.
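
The ordering change follows from the write_chunk selection shown in the
wb_writeback() hunk below; here is a small stand-alone sketch of that
choice (the MAX_WRITEBACK_PAGES value and pick_write_chunk() helper are
illustrative, not the kernel's code):

#include <limits.h>
#include <stdio.h>

#define MAX_WRITEBACK_PAGES 1024	/* illustrative per-inode batch size */

enum writeback_sync_modes { WB_SYNC_NONE, WB_SYNC_ALL };

static long pick_write_chunk(enum writeback_sync_modes sync_mode,
			     unsigned int tagged_writepages)
{
	long write_chunk = MAX_WRITEBACK_PAGES;	/* default: bounded batch per inode */

	if (sync_mode == WB_SYNC_ALL || tagged_writepages)
		write_chunk = LONG_MAX;	/* finish the current inode before moving on */

	return write_chunk;
}

int main(void)
{
	printf("background writeback: %ld pages per inode pass\n",
	       pick_write_chunk(WB_SYNC_NONE, 0));
	printf("sync stage 1 (tagged): %ld pages per inode pass\n",
	       pick_write_chunk(WB_SYNC_NONE, 1));
	return 0;
}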

Acked-by: Jan Kara <jack@suse.cz>
CC: Dave Chinner <david@fromorbit.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>

Showing 4 changed files with 14 additions and 12 deletions

@@ -2741,7 +2741,7 @@
 	index = wbc->range_start >> PAGE_CACHE_SHIFT;
 	end = wbc->range_end >> PAGE_CACHE_SHIFT;
 
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag = PAGECACHE_TAG_TOWRITE;
 	else
 		tag = PAGECACHE_TAG_DIRTY;
@@ -2973,7 +2973,7 @@
 	}
 
 retry:
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag_pages_for_writeback(mapping, index, end);
 
 	while (!ret && wbc->nr_to_write > 0) {
@@ -36,6 +36,7 @@
 	long nr_pages;
 	struct super_block *sb;
 	enum writeback_sync_modes sync_mode;
+	unsigned int tagged_writepages:1;
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
@@ -650,6 +651,7 @@
 {
 	struct writeback_control wbc = {
 		.sync_mode = work->sync_mode,
+		.tagged_writepages = work->tagged_writepages,
 		.older_than_this = NULL,
 		.for_kupdate = work->for_kupdate,
 		.for_background = work->for_background,
@@ -657,7 +659,7 @@
 	};
 	unsigned long oldest_jif;
 	long wrote = 0;
-	long write_chunk;
+	long write_chunk = MAX_WRITEBACK_PAGES;
 	struct inode *inode;
 
 	if (wbc.for_kupdate) {
@@ -683,9 +685,7 @@
 	 * (quickly) tag currently dirty pages
	 * (maybe slowly) sync all tagged pages
	 */
-	if (wbc.sync_mode == WB_SYNC_NONE)
-		write_chunk = MAX_WRITEBACK_PAGES;
-	else
+	if (wbc.sync_mode == WB_SYNC_ALL || wbc.tagged_writepages)
 		write_chunk = LONG_MAX;
 
 	wbc.wb_start = jiffies; /* livelock avoidance */
@@ -1188,10 +1188,11 @@
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
-		.sb = sb,
-		.sync_mode = WB_SYNC_NONE,
-		.done = &done,
-		.nr_pages = nr,
+		.sb = sb,
+		.sync_mode = WB_SYNC_NONE,
+		.tagged_writepages = 1,
+		.done = &done,
+		.nr_pages = nr,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
include/linux/writeback.h
@@ -47,6 +47,7 @@
 	unsigned encountered_congestion:1; /* An output: a queue is full */
 	unsigned for_kupdate:1; /* A kupdate writeback */
 	unsigned for_background:1; /* A background writeback */
+	unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */
 	unsigned for_reclaim:1; /* Invoked from the page allocator */
 	unsigned range_cyclic:1; /* range_start is cyclic */
 	unsigned more_io:1; /* more io to be dispatched */
@@ -892,12 +892,12 @@
 		range_whole = 1;
 		cycled = 1; /* ignore range_cyclic tests */
 	}
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag = PAGECACHE_TAG_TOWRITE;
 	else
 		tag = PAGECACHE_TAG_DIRTY;
 retry:
-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
 		tag_pages_for_writeback(mapping, index, end);
 	done_index = index;
 	while (!done && (index <= end)) {