Commit 1b430beee5e388605dfb092b214ef0320f752cf6

Authored by Wu Fengguang
Committed by Linus Torvalds
1 parent d19d5476f4

writeback: remove nonblocking/encountered_congestion references

This removes more dead code that was somehow missed by commit 0d99519efef
(writeback: remove unused nonblocking and congestion checks).  There is
no behavior change except for the removal of two entries from one of the
ext4 tracing interfaces.

The nonblocking checks in ->writepages are no longer used because the
flusher now prefers to block on get_request_wait() rather than to skip
inodes on IO congestion.  The latter would lead to more seeky IO.

The nonblocking checks in ->writepage are no longer used because it's
redundant with the WB_SYNC_NONE check.

We no longer set ->nonblocking in VM page out and page migration, because
a) it's effectively redundant with WB_SYNC_NONE in the current code
b) its old semantic of "don't get stuck on request queues" is a misbehavior:
   it would skip some dirty inodes on congestion and page out others, which
   is unfair in terms of LRU age.

Inspired by Christoph Hellwig. Thanks!

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: David Howells <dhowells@redhat.com>
Cc: Sage Weil <sage@newdream.net>
Cc: Steve French <sfrench@samba.org>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 12 changed files with 11 additions and 52 deletions Side-by-side Diff

... ... @@ -438,7 +438,6 @@
438 438 */
439 439 int afs_writepage(struct page *page, struct writeback_control *wbc)
440 440 {
441   - struct backing_dev_info *bdi = page->mapping->backing_dev_info;
442 441 struct afs_writeback *wb;
443 442 int ret;
444 443  
... ... @@ -455,8 +454,6 @@
455 454 }
456 455  
457 456 wbc->nr_to_write -= ret;
458   - if (wbc->nonblocking && bdi_write_congested(bdi))
459   - wbc->encountered_congestion = 1;
460 457  
461 458 _leave(" = 0");
462 459 return 0;
... ... @@ -469,7 +466,6 @@
469 466 struct writeback_control *wbc,
470 467 pgoff_t index, pgoff_t end, pgoff_t *_next)
471 468 {
472   - struct backing_dev_info *bdi = mapping->backing_dev_info;
473 469 struct afs_writeback *wb;
474 470 struct page *page;
475 471 int ret, n;
... ... @@ -529,11 +525,6 @@
529 525  
530 526 wbc->nr_to_write -= ret;
531 527  
532   - if (wbc->nonblocking && bdi_write_congested(bdi)) {
533   - wbc->encountered_congestion = 1;
534   - break;
535   - }
536   -
537 528 cond_resched();
538 529 } while (index < end && wbc->nr_to_write > 0);
539 530  
540 531  
541 532  
... ... @@ -548,24 +539,16 @@
548 539 int afs_writepages(struct address_space *mapping,
549 540 struct writeback_control *wbc)
550 541 {
551   - struct backing_dev_info *bdi = mapping->backing_dev_info;
552 542 pgoff_t start, end, next;
553 543 int ret;
554 544  
555 545 _enter("");
556 546  
557   - if (wbc->nonblocking && bdi_write_congested(bdi)) {
558   - wbc->encountered_congestion = 1;
559   - _leave(" = 0 [congest]");
560   - return 0;
561   - }
562   -
563 547 if (wbc->range_cyclic) {
564 548 start = mapping->writeback_index;
565 549 end = -1;
566 550 ret = afs_writepages_region(mapping, wbc, start, end, &next);
567   - if (start > 0 && wbc->nr_to_write > 0 && ret == 0 &&
568   - !(wbc->nonblocking && wbc->encountered_congestion))
  551 + if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
569 552 ret = afs_writepages_region(mapping, wbc, 0, start,
570 553 &next);
571 554 mapping->writeback_index = next;
... ... @@ -1706,7 +1706,7 @@
1706 1706 * and kswapd activity, but those code paths have their own
1707 1707 * higher-level throttling.
1708 1708 */
1709   - if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
  1709 + if (wbc->sync_mode != WB_SYNC_NONE) {
1710 1710 lock_buffer(bh);
1711 1711 } else if (!trylock_buffer(bh)) {
1712 1712 redirty_page_for_writepage(wbc, page);
... ... @@ -591,7 +591,6 @@
591 591 struct writeback_control *wbc)
592 592 {
593 593 struct inode *inode = mapping->host;
594   - struct backing_dev_info *bdi = mapping->backing_dev_info;
595 594 struct ceph_inode_info *ci = ceph_inode(inode);
596 595 struct ceph_fs_client *fsc;
597 596 pgoff_t index, start, end;
... ... @@ -633,13 +632,6 @@
633 632  
634 633 pagevec_init(&pvec, 0);
635 634  
636   - /* ?? */
637   - if (wbc->nonblocking && bdi_write_congested(bdi)) {
638   - dout(" writepages congested\n");
639   - wbc->encountered_congestion = 1;
640   - goto out_final;
641   - }
642   -
643 635 /* where to start/end? */
644 636 if (wbc->range_cyclic) {
645 637 start = mapping->writeback_index; /* Start from prev offset */
... ... @@ -885,7 +877,6 @@
885 877 rc = 0; /* vfs expects us to return 0 */
886 878 ceph_put_snap_context(snapc);
887 879 dout("writepages done, rc = %d\n", rc);
888   -out_final:
889 880 return rc;
890 881 }
891 882  
... ... @@ -1303,7 +1303,6 @@
1303 1303 static int cifs_writepages(struct address_space *mapping,
1304 1304 struct writeback_control *wbc)
1305 1305 {
1306   - struct backing_dev_info *bdi = mapping->backing_dev_info;
1307 1306 unsigned int bytes_to_write;
1308 1307 unsigned int bytes_written;
1309 1308 struct cifs_sb_info *cifs_sb;
... ... @@ -1325,15 +1324,6 @@
1325 1324 int rc = 0;
1326 1325 int scanned = 0;
1327 1326 int xid, long_op;
1328   -
1329   - /*
1330   - * BB: Is this meaningful for a non-block-device file system?
1331   - * If it is, we should test it again after we do I/O
1332   - */
1333   - if (wbc->nonblocking && bdi_write_congested(bdi)) {
1334   - wbc->encountered_congestion = 1;
1335   - return 0;
1336   - }
1337 1327  
1338 1328 cifs_sb = CIFS_SB(mapping->host->i_sb);
1339 1329  
... ... @@ -55,7 +55,7 @@
55 55 * activity, but those code paths have their own higher-level
56 56 * throttling.
57 57 */
58   - if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
  58 + if (wbc->sync_mode != WB_SYNC_NONE) {
59 59 lock_buffer(bh);
60 60 } else if (!trylock_buffer(bh)) {
61 61 redirty_page_for_writepage(wbc, page);
... ... @@ -290,9 +290,7 @@
290 290 nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
291 291  
292 292 nfs_pageio_cond_complete(pgio, page->index);
293   - ret = nfs_page_async_flush(pgio, page,
294   - wbc->sync_mode == WB_SYNC_NONE ||
295   - wbc->nonblocking != 0);
  293 + ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
296 294 if (ret == -EAGAIN) {
297 295 redirty_page_for_writepage(wbc, page);
298 296 ret = 0;
... ... @@ -2438,7 +2438,7 @@
2438 2438 /* from this point on, we know the buffer is mapped to a
2439 2439 * real block and not a direct item
2440 2440 */
2441   - if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
  2441 + if (wbc->sync_mode != WB_SYNC_NONE) {
2442 2442 lock_buffer(bh);
2443 2443 } else {
2444 2444 if (!trylock_buffer(bh)) {
fs/xfs/linux-2.6/xfs_aops.c
... ... @@ -1139,8 +1139,7 @@
1139 1139 type = IO_DELAY;
1140 1140 flags = BMAPI_ALLOCATE;
1141 1141  
1142   - if (wbc->sync_mode == WB_SYNC_NONE &&
1143   - wbc->nonblocking)
  1142 + if (wbc->sync_mode == WB_SYNC_NONE)
1144 1143 flags |= BMAPI_TRYLOCK;
1145 1144 }
1146 1145  
include/trace/events/ext4.h
... ... @@ -242,18 +242,20 @@
242 242 __entry->pages_skipped = wbc->pages_skipped;
243 243 __entry->range_start = wbc->range_start;
244 244 __entry->range_end = wbc->range_end;
245   - __entry->nonblocking = wbc->nonblocking;
246 245 __entry->for_kupdate = wbc->for_kupdate;
247 246 __entry->for_reclaim = wbc->for_reclaim;
248 247 __entry->range_cyclic = wbc->range_cyclic;
249 248 __entry->writeback_index = inode->i_mapping->writeback_index;
250 249 ),
251 250  
252   - TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d writeback_index %lu",
  251 + TP_printk("dev %s ino %lu nr_to_write %ld pages_skipped %ld "
  252 + "range_start %llu range_end %llu "
  253 + "for_kupdate %d for_reclaim %d "
  254 + "range_cyclic %d writeback_index %lu",
253 255 jbd2_dev_to_name(__entry->dev),
254 256 (unsigned long) __entry->ino, __entry->nr_to_write,
255 257 __entry->pages_skipped, __entry->range_start,
256   - __entry->range_end, __entry->nonblocking,
  258 + __entry->range_end,
257 259 __entry->for_kupdate, __entry->for_reclaim,
258 260 __entry->range_cyclic,
259 261 (unsigned long) __entry->writeback_index)
include/trace/events/writeback.h
... ... @@ -96,8 +96,6 @@
96 96 __field(long, nr_to_write)
97 97 __field(long, pages_skipped)
98 98 __field(int, sync_mode)
99   - __field(int, nonblocking)
100   - __field(int, encountered_congestion)
101 99 __field(int, for_kupdate)
102 100 __field(int, for_background)
103 101 __field(int, for_reclaim)
... ... @@ -497,7 +497,6 @@
497 497 .nr_to_write = 1,
498 498 .range_start = 0,
499 499 .range_end = LLONG_MAX,
500   - .nonblocking = 1,
501 500 .for_reclaim = 1
502 501 };
503 502 int rc;
... ... @@ -376,7 +376,6 @@
376 376 .nr_to_write = SWAP_CLUSTER_MAX,
377 377 .range_start = 0,
378 378 .range_end = LLONG_MAX,
379   - .nonblocking = 1,
380 379 .for_reclaim = 1,
381 380 };
382 381