Commit dd3932eddf428571762596e17b65f5dc92ca361b

Authored by Christoph Hellwig
Committed by Jens Axboe
1 parent 8786fb70cc

block: remove BLKDEV_IFL_WAIT

All the blkdev_issue_* helpers can only sanely be used by synchronous
callers.  To issue cache flushes or barriers asynchronously the caller needs
to set up a bio itself, with a completion callback to move the asynchronous
state machine ahead.  So drop the BLKDEV_IFL_WAIT flag that is always
specified when calling blkdev_issue_*, and also remove the now unused flags
argument to blkdev_issue_flush and blkdev_issue_zeroout.  For
blkdev_issue_discard we need to keep a flags argument for the secure discard
flag, which gains a more descriptive name (BLKDEV_DISCARD_SECURE) and loses
the bitops vs flag confusion.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
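
[Editor's note] For reference, a minimal sketch of how call sites look after
this patch, based on the new prototypes in include/linux/blkdev.h below. The
helper name example_flush_and_discard is hypothetical, not part of the patch.

	/* Illustrative only: caller-side usage after removing BLKDEV_IFL_WAIT. */
	#include <linux/blkdev.h>

	static int example_flush_and_discard(struct block_device *bdev,
					     sector_t sector, sector_t nr_sects,
					     int secure)
	{
		unsigned long flags = 0;
		int ret;

		/*
		 * blkdev_issue_flush() now always waits for completion, so the
		 * old BLKDEV_IFL_WAIT flags argument is gone entirely.
		 */
		ret = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
		if (ret)
			return ret;

		/*
		 * blkdev_issue_discard() keeps a flags argument, but only for
		 * the secure discard bit, now a plain bit mask rather than a
		 * bit number used with test_bit().
		 */
		if (secure)
			flags |= BLKDEV_DISCARD_SECURE;

		return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
					    flags);
	}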

Showing 19 changed files with 47 additions and 71 deletions

... ... @@ -205,7 +205,6 @@
205 205 * @bdev: blockdev to issue flush for
206 206 * @gfp_mask: memory allocation flags (for bio_alloc)
207 207 * @error_sector: error sector
208   - * @flags: BLKDEV_IFL_* flags to control behaviour
209 208 *
210 209 * Description:
211 210 * Issue a flush for the block device in question. Caller can supply
... ... @@ -214,7 +213,7 @@
214 213 * request was pushed in some internal queue for later handling.
215 214 */
216 215 int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
217   - sector_t *error_sector, unsigned long flags)
  216 + sector_t *error_sector)
218 217 {
219 218 DECLARE_COMPLETION_ONSTACK(wait);
220 219 struct request_queue *q;
221 220  
... ... @@ -240,21 +239,19 @@
240 239 bio = bio_alloc(gfp_mask, 0);
241 240 bio->bi_end_io = bio_end_flush;
242 241 bio->bi_bdev = bdev;
243   - if (test_bit(BLKDEV_WAIT, &flags))
244   - bio->bi_private = &wait;
  242 + bio->bi_private = &wait;
245 243  
246 244 bio_get(bio);
247 245 submit_bio(WRITE_FLUSH, bio);
248   - if (test_bit(BLKDEV_WAIT, &flags)) {
249   - wait_for_completion(&wait);
250   - /*
251   - * The driver must store the error location in ->bi_sector, if
252   - * it supports it. For non-stacked drivers, this should be
253   - * copied from blk_rq_pos(rq).
254   - */
255   - if (error_sector)
256   - *error_sector = bio->bi_sector;
257   - }
  246 + wait_for_completion(&wait);
  247 +
  248 + /*
  249 + * The driver must store the error location in ->bi_sector, if
  250 + * it supports it. For non-stacked drivers, this should be
  251 + * copied from blk_rq_pos(rq).
  252 + */
  253 + if (error_sector)
  254 + *error_sector = bio->bi_sector;
258 255  
259 256 if (!bio_flagged(bio, BIO_UPTODATE))
260 257 ret = -EIO;
... ... @@ -61,7 +61,7 @@
61 61 max_discard_sectors &= ~(disc_sects - 1);
62 62 }
63 63  
64   - if (flags & BLKDEV_IFL_SECURE) {
  64 + if (flags & BLKDEV_DISCARD_SECURE) {
65 65 if (!blk_queue_secdiscard(q))
66 66 return -EOPNOTSUPP;
67 67 type |= REQ_SECURE;
... ... @@ -77,8 +77,7 @@
77 77 bio->bi_sector = sector;
78 78 bio->bi_end_io = blkdev_discard_end_io;
79 79 bio->bi_bdev = bdev;
80   - if (flags & BLKDEV_IFL_WAIT)
81   - bio->bi_private = &wait;
  80 + bio->bi_private = &wait;
82 81  
83 82 if (nr_sects > max_discard_sectors) {
84 83 bio->bi_size = max_discard_sectors << 9;
... ... @@ -92,8 +91,7 @@
92 91 bio_get(bio);
93 92 submit_bio(type, bio);
94 93  
95   - if (flags & BLKDEV_IFL_WAIT)
96   - wait_for_completion(&wait);
  94 + wait_for_completion(&wait);
97 95  
98 96 if (bio_flagged(bio, BIO_EOPNOTSUPP))
99 97 ret = -EOPNOTSUPP;
... ... @@ -139,7 +137,6 @@
139 137 * @sector: start sector
140 138 * @nr_sects: number of sectors to write
141 139 * @gfp_mask: memory allocation flags (for bio_alloc)
142   - * @flags: BLKDEV_IFL_* flags to control behaviour
143 140 *
144 141 * Description:
145 142 * Generate and issue number of bios with zerofiled pages.
... ... @@ -148,7 +145,7 @@
148 145 */
149 146  
150 147 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
151   - sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
  148 + sector_t nr_sects, gfp_t gfp_mask)
152 149 {
153 150 int ret;
154 151 struct bio *bio;
... ... @@ -174,8 +171,7 @@
174 171 bio->bi_sector = sector;
175 172 bio->bi_bdev = bdev;
176 173 bio->bi_end_io = bio_batch_end_io;
177   - if (flags & BLKDEV_IFL_WAIT)
178   - bio->bi_private = &bb;
  174 + bio->bi_private = &bb;
179 175  
180 176 while (nr_sects != 0) {
181 177 sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
... ... @@ -193,10 +189,9 @@
193 189 submit_bio(WRITE, bio);
194 190 }
195 191  
196   - if (flags & BLKDEV_IFL_WAIT)
197   - /* Wait for bios in-flight */
198   - while ( issued != atomic_read(&bb.done))
199   - wait_for_completion(&wait);
  192 + /* Wait for bios in-flight */
  193 + while (issued != atomic_read(&bb.done))
  194 + wait_for_completion(&wait);
200 195  
201 196 if (!test_bit(BIO_UPTODATE, &bb.flags))
202 197 /* One of bios in the batch was completed with error.*/
... ... @@ -116,7 +116,7 @@
116 116 static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
117 117 uint64_t len, int secure)
118 118 {
119   - unsigned long flags = BLKDEV_IFL_WAIT;
  119 + unsigned long flags = 0;
120 120  
121 121 if (start & 511)
122 122 return -EINVAL;
... ... @@ -128,7 +128,7 @@
128 128 if (start + len > (bdev->bd_inode->i_size >> 9))
129 129 return -EINVAL;
130 130 if (secure)
131   - flags |= BLKDEV_IFL_SECURE;
  131 + flags |= BLKDEV_DISCARD_SECURE;
132 132 return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
133 133 }
134 134  
drivers/block/drbd/drbd_int.h
... ... @@ -2321,8 +2321,7 @@
2321 2321 if (test_bit(MD_NO_BARRIER, &mdev->flags))
2322 2322 return;
2323 2323  
2324   - r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL,
2325   - BLKDEV_IFL_WAIT);
  2324 + r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
2326 2325 if (r) {
2327 2326 set_bit(MD_NO_BARRIER, &mdev->flags);
2328 2327 dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
drivers/block/drbd/drbd_receiver.c
... ... @@ -975,7 +975,7 @@
975 975  
976 976 if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
977 977 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
978   - NULL, BLKDEV_IFL_WAIT);
  978 + NULL);
979 979 if (rv) {
980 980 dev_err(DEV, "local disk flush failed with status %d\n", rv);
981 981 /* would rather check on EOPNOTSUPP, but that is not reliable.
... ... @@ -370,7 +370,7 @@
370 370 */
371 371 mutex_unlock(&bd_inode->i_mutex);
372 372  
373   - error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL, BLKDEV_IFL_WAIT);
  373 + error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
374 374 if (error == -EOPNOTSUPP)
375 375 error = 0;
376 376  
fs/btrfs/extent-tree.c
... ... @@ -1695,8 +1695,7 @@
1695 1695 static void btrfs_issue_discard(struct block_device *bdev,
1696 1696 u64 start, u64 len)
1697 1697 {
1698   - blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
1699   - BLKDEV_IFL_WAIT);
  1698 + blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
1700 1699 }
1701 1700  
1702 1701 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
... ... @@ -90,8 +90,7 @@
90 90 * storage
91 91 */
92 92 if (needs_barrier)
93   - blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
94   - BLKDEV_IFL_WAIT);
  93 + blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
95 94 return ret;
96 95 }
... ... @@ -128,11 +128,10 @@
128 128 (journal->j_fs_dev != journal->j_dev) &&
129 129 (journal->j_flags & JBD2_BARRIER))
130 130 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
131   - NULL, BLKDEV_IFL_WAIT);
  131 + NULL);
132 132 ret = jbd2_log_wait_commit(journal, commit_tid);
133 133 } else if (journal->j_flags & JBD2_BARRIER)
134   - blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
135   - BLKDEV_IFL_WAIT);
  134 + blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
136 135 return ret;
137 136 }
... ... @@ -2566,8 +2566,7 @@
2566 2566 discard_block = block + ext4_group_first_block_no(sb, block_group);
2567 2567 trace_ext4_discard_blocks(sb,
2568 2568 (unsigned long long) discard_block, count);
2569   - ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS,
2570   - BLKDEV_IFL_WAIT);
  2569 + ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
2571 2570 if (ret == EOPNOTSUPP) {
2572 2571 ext4_warning(sb, "discard not supported, disabling");
2573 2572 clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
... ... @@ -578,8 +578,7 @@
578 578 sb_issue_discard(sb,
579 579 fat_clus_to_blknr(sbi, first_cl),
580 580 nr_clus * sbi->sec_per_clus,
581   - GFP_NOFS,
582   - BLKDEV_IFL_WAIT);
  581 + GFP_NOFS, 0);
583 582  
584 583 first_cl = cluster;
585 584 }
... ... @@ -854,7 +854,7 @@
854 854 if ((start + nr_sects) != blk) {
855 855 rv = blkdev_issue_discard(bdev, start,
856 856 nr_sects, GFP_NOFS,
857   - BLKDEV_IFL_WAIT);
  857 + 0);
858 858 if (rv)
859 859 goto fail;
860 860 nr_sects = 0;
... ... @@ -868,8 +868,7 @@
868 868 }
869 869 }
870 870 if (nr_sects) {
871   - rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS,
872   - BLKDEV_IFL_WAIT);
  871 + rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
873 872 if (rv)
874 873 goto fail;
875 874 }
fs/jbd2/checkpoint.c
... ... @@ -532,8 +532,7 @@
532 532 */
533 533 if ((journal->j_fs_dev != journal->j_dev) &&
534 534 (journal->j_flags & JBD2_BARRIER))
535   - blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
536   - BLKDEV_IFL_WAIT);
  535 + blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
537 536 if (!(journal->j_flags & JBD2_ABORT))
538 537 jbd2_journal_update_superblock(journal, 1);
539 538 return 0;
... ... @@ -684,8 +684,7 @@
684 684 if (commit_transaction->t_flushed_data_blocks &&
685 685 (journal->j_fs_dev != journal->j_dev) &&
686 686 (journal->j_flags & JBD2_BARRIER))
687   - blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
688   - BLKDEV_IFL_WAIT);
  687 + blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
689 688  
690 689 /* Done it all: now write the commit record asynchronously. */
691 690 if (JBD2_HAS_INCOMPAT_FEATURE(journal,
... ... @@ -810,8 +809,7 @@
810 809 if (JBD2_HAS_INCOMPAT_FEATURE(journal,
811 810 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
812 811 journal->j_flags & JBD2_BARRIER) {
813   - blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL,
814   - BLKDEV_IFL_WAIT);
  812 + blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL);
815 813 }
816 814  
817 815 if (err)
fs/nilfs2/the_nilfs.c
... ... @@ -774,7 +774,7 @@
774 774 ret = blkdev_issue_discard(nilfs->ns_bdev,
775 775 start * sects_per_block,
776 776 nblocks * sects_per_block,
777   - GFP_NOFS, BLKDEV_IFL_WAIT);
  777 + GFP_NOFS, 0);
778 778 if (ret < 0)
779 779 return ret;
780 780 nblocks = 0;
... ... @@ -784,7 +784,7 @@
784 784 ret = blkdev_issue_discard(nilfs->ns_bdev,
785 785 start * sects_per_block,
786 786 nblocks * sects_per_block,
787   - GFP_NOFS, BLKDEV_IFL_WAIT);
  787 + GFP_NOFS, 0);
788 788 return ret;
789 789 }
790 790  
... ... @@ -152,8 +152,7 @@
152 152 barrier_done = reiserfs_commit_for_inode(inode);
153 153 reiserfs_write_unlock(inode->i_sb);
154 154 if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
155   - blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
156   - BLKDEV_IFL_WAIT);
  155 + blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
157 156 if (barrier_done < 0)
158 157 return barrier_done;
159 158 return (err < 0) ? -EIO : 0;
fs/xfs/linux-2.6/xfs_super.c
... ... @@ -693,8 +693,7 @@
693 693 xfs_blkdev_issue_flush(
694 694 xfs_buftarg_t *buftarg)
695 695 {
696   - blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL,
697   - BLKDEV_IFL_WAIT);
  696 + blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
698 697 }
699 698  
700 699 STATIC void
include/linux/blkdev.h
... ... @@ -867,18 +867,14 @@
867 867 return NULL;
868 868 return bqt->tag_index[tag];
869 869 }
870   -enum{
871   - BLKDEV_WAIT, /* wait for completion */
872   - BLKDEV_SECURE, /* secure discard */
873   -};
874   -#define BLKDEV_IFL_WAIT (1 << BLKDEV_WAIT)
875   -#define BLKDEV_IFL_SECURE (1 << BLKDEV_SECURE)
876   -extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
877   - unsigned long);
  870 +
  871 +#define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */
  872 +
  873 +extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
878 874 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
879 875 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
880 876 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
881   - sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
  877 + sector_t nr_sects, gfp_t gfp_mask);
882 878 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
883 879 sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
884 880 {
... ... @@ -141,7 +141,7 @@
141 141 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
142 142 if (nr_blocks) {
143 143 err = blkdev_issue_discard(si->bdev, start_block,
144   - nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
  144 + nr_blocks, GFP_KERNEL, 0);
145 145 if (err)
146 146 return err;
147 147 cond_resched();
... ... @@ -152,7 +152,7 @@
152 152 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
153 153  
154 154 err = blkdev_issue_discard(si->bdev, start_block,
155   - nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT);
  155 + nr_blocks, GFP_KERNEL, 0);
156 156 if (err)
157 157 break;
158 158  
... ... @@ -191,7 +191,7 @@
191 191 start_block <<= PAGE_SHIFT - 9;
192 192 nr_blocks <<= PAGE_SHIFT - 9;
193 193 if (blkdev_issue_discard(si->bdev, start_block,
194   - nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT))
  194 + nr_blocks, GFP_NOIO, 0))
195 195 break;
196 196 }
197 197