Commit 721a9602e6607417c6bc15b18e97a2f35266c690
1 parent: cf15900e12
Exists in master and in 7 other branches
block: kill off REQ_UNPLUG
With the plugging now being explicitly controlled by the submitter, callers need not pass down unplugging hints to the block layer. If they want to unplug, it's because they manually plugged on their own - in which case, they should just unplug at will.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
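For context, the replacement model is explicit on-stack plugging: the submitter opens a plug, queues its bios, and flushes the batch itself. A minimal sketch, assuming the blk_start_plug()/blk_finish_plug() interface this series introduces (the nr_bios/bios variables are hypothetical):

	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);			/* start batching on the on-stack plug */
	for (i = 0; i < nr_bios; i++)
		submit_bio(WRITE, bios[i]);	/* queued under the plug, not dispatched yet */
	blk_finish_plug(&plug);			/* unplug: hand the whole batch to the driver */

Because the plug is also flushed implicitly when the task schedules, a submitter that blocks before unplugging does not strand its own I/O, so the per-bio REQ_UNPLUG hint has no remaining purpose.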
Showing 24 changed files with 43 additions and 75 deletions
- block/blk-core.c
- drivers/block/drbd/drbd_actlog.c
- drivers/block/drbd/drbd_int.h
- drivers/block/drbd/drbd_main.c
- drivers/block/drbd/drbd_receiver.c
- drivers/md/bitmap.c
- drivers/md/dm-io.c
- drivers/md/dm-kcopyd.c
- drivers/md/md.c
- fs/btrfs/extent_io.c
- fs/buffer.c
- fs/direct-io.c
- fs/ext4/page-io.c
- fs/gfs2/log.c
- fs/gfs2/lops.c
- fs/gfs2/meta_io.c
- fs/jbd/commit.c
- fs/jbd2/commit.c
- fs/nilfs2/segbuf.c
- fs/xfs/linux-2.6/xfs_aops.c
- include/linux/blk_types.h
- include/linux/fs.h
- kernel/power/block_io.c
- mm/page_io.c
block/blk-core.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_int.h
@@ -377,7 +377,7 @@
 #define DP_HARDBARRIER	      1 /* depricated */
 #define DP_RW_SYNC	      2 /* equals REQ_SYNC */
 #define DP_MAY_SET_IN_SYNC    4
-#define DP_UNPLUG	      8 /* equals REQ_UNPLUG */
+#define DP_UNPLUG	      8 /* not used anymore   */
 #define DP_FUA		     16 /* equals REQ_FUA */
 #define DP_FLUSH	     32 /* equals REQ_FLUSH */
 #define DP_DISCARD	     64 /* equals REQ_DISCARD */
drivers/block/drbd/drbd_main.c
@@ -2477,12 +2477,11 @@
 {
 	if (mdev->agreed_pro_version >= 95)
 		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-			(bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) |
 			(bi_rw & REQ_FUA ? DP_FUA : 0) |
 			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
 			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
 	else
-		return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0;
+		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
 }

 /* Used to send write requests
drivers/block/drbd/drbd_receiver.c
@@ -1100,8 +1100,6 @@
 		/* > e->sector, unless this is the first bio */
 		bio->bi_sector = sector;
 		bio->bi_bdev = mdev->ldev->backing_bdev;
-		/* we special case some flags in the multi-bio case, see below
-		 * (REQ_UNPLUG) */
 		bio->bi_rw = rw;
 		bio->bi_private = e;
 		bio->bi_end_io = drbd_endio_sec;
@@ -1130,10 +1128,6 @@
 		bios = bios->bi_next;
 		bio->bi_next = NULL;

-		/* strip off REQ_UNPLUG unless it is the last bio */
-		if (bios)
-			bio->bi_rw &= ~REQ_UNPLUG;
-
 		drbd_generic_make_request(mdev, fault_type, bio);
 	} while (bios);
 	return 0;

@@ -1621,12 +1615,11 @@
 {
 	if (mdev->agreed_pro_version >= 95)
 		return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
-			(dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
 			(dpf & DP_FUA ? REQ_FUA : 0) |
 			(dpf & DP_FLUSH ? REQ_FUA : 0) |
 			(dpf & DP_DISCARD ? REQ_DISCARD : 0);
 	else
-		return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0;
+		return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
 }

 /* mirrored write */
drivers/md/bitmap.c
drivers/md/dm-io.c
drivers/md/dm-kcopyd.c
@@ -356,11 +356,8 @@

 	if (job->rw == READ)
 		r = dm_io(&io_req, 1, &job->source, NULL);
-	else {
-		if (job->num_dests > 1)
-			io_req.bi_rw |= REQ_UNPLUG;
+	else
 		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
-	}

 	return r;
 }
drivers/md/md.c
@@ -777,8 +777,7 @@
 	bio->bi_end_io = super_written;

 	atomic_inc(&mddev->pending_writes);
-	submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA,
-		   bio);
+	submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
 }

 void md_super_wait(mddev_t *mddev)
@@ -806,7 +805,7 @@
 	struct completion event;
 	int ret;

-	rw |= REQ_SYNC | REQ_UNPLUG;
+	rw |= REQ_SYNC;

 	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
 		rdev->meta_bdev : rdev->bdev;
fs/btrfs/extent_io.c
fs/buffer.c
@@ -767,7 +767,7 @@
 			 * still in flight on potentially older
 			 * contents.
 			 */
-			write_dirty_buffer(bh, WRITE_SYNC_PLUG);
+			write_dirty_buffer(bh, WRITE_SYNC);

 			/*
 			 * Kick off IO for the previous mapping. Note
@@ -1602,14 +1602,8 @@
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the out before the higher-level caller actually
- * waits for the writes to be completed.  The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
 			get_block_t *get_block, struct writeback_control *wbc,
@@ -1622,7 +1616,7 @@
 	const unsigned blocksize = 1 << inode->i_blkbits;
 	int nr_underway = 0;
 	int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
-			WRITE_SYNC_PLUG : WRITE);
+			WRITE_SYNC : WRITE);

 	BUG_ON(!PageLocked(page));

fs/direct-io.c
fs/ext4/page-io.c
@@ -310,8 +310,7 @@
 	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);

 	io->io_bio = bio;
-	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?
-		     WRITE_SYNC_PLUG : WRITE);
+	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
 	io->io_next_block = bh->b_blocknr;
 	return 0;
 }
fs/gfs2/log.c
@@ -121,7 +121,7 @@
 	lock_buffer(bh);
 	if (test_clear_buffer_dirty(bh)) {
 		bh->b_end_io = end_buffer_write_sync;
-		submit_bh(WRITE_SYNC_PLUG, bh);
+		submit_bh(WRITE_SYNC, bh);
 	} else {
 		unlock_buffer(bh);
 		brelse(bh);
@@ -647,7 +647,7 @@
 	lock_buffer(bh);
 	if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
 		bh->b_end_io = end_buffer_write_sync;
-		submit_bh(WRITE_SYNC_PLUG, bh);
+		submit_bh(WRITE_SYNC, bh);
 	} else {
 		unlock_buffer(bh);
 		brelse(bh);
fs/gfs2/lops.c
@@ -200,7 +200,7 @@
 			}

 			gfs2_log_unlock(sdp);
-			submit_bh(WRITE_SYNC_PLUG, bh);
+			submit_bh(WRITE_SYNC, bh);
 			gfs2_log_lock(sdp);

 			n = 0;
@@ -210,7 +210,7 @@
 		gfs2_log_unlock(sdp);
 		lock_buffer(bd2->bd_bh);
 		bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
-		submit_bh(WRITE_SYNC_PLUG, bh);
+		submit_bh(WRITE_SYNC, bh);
 		gfs2_log_lock(sdp);
 		if (++n >= num)
 			break;
@@ -352,7 +352,7 @@
 		sdp->sd_log_num_revoke--;

 		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
-			submit_bh(WRITE_SYNC_PLUG, bh);
+			submit_bh(WRITE_SYNC, bh);

 			bh = gfs2_log_get_buf(sdp);
 			mh = (struct gfs2_meta_header *)bh->b_data;
@@ -369,7 +369,7 @@
 	}
 	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

-	submit_bh(WRITE_SYNC_PLUG, bh);
+	submit_bh(WRITE_SYNC, bh);
 }

 static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
@@ -571,7 +571,7 @@
 	ptr = bh_log_ptr(bh);

 	get_bh(bh);
-	submit_bh(WRITE_SYNC_PLUG, bh);
+	submit_bh(WRITE_SYNC, bh);
 	gfs2_log_lock(sdp);
 	while(!list_empty(list)) {
 		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
@@ -597,7 +597,7 @@
 		} else {
 			bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
 		}
-		submit_bh(WRITE_SYNC_PLUG, bh1);
+		submit_bh(WRITE_SYNC, bh1);
 		gfs2_log_lock(sdp);
 		ptr += 2;
 	}
fs/gfs2/meta_io.c
@@ -37,7 +37,7 @@
 	struct buffer_head *bh, *head;
 	int nr_underway = 0;
 	int write_op = REQ_META |
-		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);
+		(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);

 	BUG_ON(!PageLocked(page));
 	BUG_ON(!page_has_buffers(page));
fs/jbd/commit.c
@@ -333,7 +333,7 @@
 	 * instead we rely on sync_buffer() doing the unplug for us.
 	 */
 	if (commit_transaction->t_synchronous_commit)
-		write_op = WRITE_SYNC_PLUG;
+		write_op = WRITE_SYNC;
 	spin_lock(&commit_transaction->t_handle_lock);
 	while (commit_transaction->t_updates) {
 		DEFINE_WAIT(wait);
fs/jbd2/commit.c
@@ -137,9 +137,9 @@
 	if (journal->j_flags & JBD2_BARRIER &&
 	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
 				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
-		ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh);
+		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
 	else
-		ret = submit_bh(WRITE_SYNC_PLUG, bh);
+		ret = submit_bh(WRITE_SYNC, bh);

 	*cbh = bh;
 	return ret;
@@ -369,7 +369,7 @@
 	 * instead we rely on sync_buffer() doing the unplug for us.
 	 */
 	if (commit_transaction->t_synchronous_commit)
-		write_op = WRITE_SYNC_PLUG;
+		write_op = WRITE_SYNC;
 	trace_jbd2_commit_locking(journal, commit_transaction);
 	stats.run.rs_wait = commit_transaction->t_max_wait;
 	stats.run.rs_locked = jiffies;
fs/nilfs2/segbuf.c
fs/xfs/linux-2.6/xfs_aops.c
@@ -413,8 +413,7 @@
 	if (xfs_ioend_new_eof(ioend))
 		xfs_mark_inode_dirty(XFS_I(ioend->io_inode));

-	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
-		   WRITE_SYNC_PLUG : WRITE, bio);
+	submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
 }

 STATIC struct bio *
include/linux/blk_types.h
@@ -128,7 +128,6 @@
 	__REQ_NOIDLE,		/* don't anticipate more IO after this one */

 	/* bio only flags */
-	__REQ_UNPLUG,		/* unplug the immediately after submission */
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
 	__REQ_THROTTLED,	/* This bio has already been subjected to
 				 * throttling rules. Don't do it again. */
@@ -172,7 +171,6 @@
 	 REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK

-#define REQ_UNPLUG		(1 << __REQ_UNPLUG)
 #define REQ_RAHEAD		(1 << __REQ_RAHEAD)
 #define REQ_THROTTLED		(1 << __REQ_THROTTLED)

include/linux/fs.h
@@ -135,16 +135,10 @@
 *			block layer could (in theory) choose to ignore this
 *			request if it runs into resource problems.
 * WRITE		A normal async write. Device will be plugged.
-* WRITE_SYNC_PLUG	Synchronous write. Identical to WRITE, but passes down
+* WRITE_SYNC		Synchronous write. Identical to WRITE, but passes down
 *			the hint that someone will be waiting on this IO
-*			shortly. The device must still be unplugged explicitly,
-*			WRITE_SYNC_PLUG does not do this as we could be
-*			submitting more writes before we actually wait on any
-*			of them.
-* WRITE_SYNC		Like WRITE_SYNC_PLUG, but also unplugs the device
-*			immediately after submission. The write equivalent
-*			of READ_SYNC.
-* WRITE_ODIRECT_PLUG	Special case write for O_DIRECT only.
+*			shortly. The write equivalent of READ_SYNC.
+* WRITE_ODIRECT	Special case write for O_DIRECT only.
 * WRITE_FLUSH		Like WRITE_SYNC but with preceding cache flush.
 * WRITE_FUA		Like WRITE_SYNC but data is guaranteed to be on
 *			non-volatile media on completion.


@@ -160,18 +154,14 @@
 #define WRITE			RW_MASK
 #define READA			RWA_MASK

-#define READ_SYNC		(READ | REQ_SYNC | REQ_UNPLUG)
+#define READ_SYNC		(READ | REQ_SYNC)
 #define READ_META		(READ | REQ_META)
-#define WRITE_SYNC_PLUG		(WRITE | REQ_SYNC | REQ_NOIDLE)
-#define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
-#define WRITE_ODIRECT_PLUG	(WRITE | REQ_SYNC)
+#define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE)
+#define WRITE_ODIRECT		(WRITE | REQ_SYNC)
 #define WRITE_META		(WRITE | REQ_META)
-#define WRITE_FLUSH		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
-				 REQ_FLUSH)
-#define WRITE_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
-				 REQ_FUA)
-#define WRITE_FLUSH_FUA	(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
-				 REQ_FLUSH | REQ_FUA)
+#define WRITE_FLUSH		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
+#define WRITE_FUA		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
+#define WRITE_FLUSH_FUA	(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)

 #define SEL_IN		1
 #define SEL_OUT		2
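Illustrative usage of the macros defined above (not part of the patch): with the _PLUG variants gone, callers pick one macro per sync mode, and none of the sync write variants carries an unplug hint any longer.

	/* e.g. in a writepage-style path; write_op now only encodes sync-ness */
	int write_op = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;

	submit_bh(write_op, bh);	/* WRITE_SYNC == WRITE | REQ_SYNC | REQ_NOIDLE */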
kernel/power/block_io.c
@@ -28,7 +28,7 @@
 static int submit(int rw, struct block_device *bdev, sector_t sector,
 		struct page *page, struct bio **bio_chain)
 {
-	const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG;
+	const int bio_rw = rw | REQ_SYNC;
 	struct bio *bio;

 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);