Commit aa8b57aa3d1c06ca53312294ee6dfc767ee3ddb3

Authored by Kent Overstreet
1 parent f73a1c7d11

block: Use bio_sectors() more consistently

Bunch of places in the code weren't using it where they could be -
this'll reduce the size of the patch that puts bi_sector/bi_size/bi_idx
into a struct bvec_iter.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: "Ed L. Cashin" <ecashin@coraid.com>
CC: Nick Piggin <npiggin@kernel.dk>
CC: Jiri Kosina <jkosina@suse.cz>
CC: Jim Paris <jim@jtan.com>
CC: Geoff Levand <geoff@infradead.org>
CC: Alasdair Kergon <agk@redhat.com>
CC: dm-devel@redhat.com
CC: Neil Brown <neilb@suse.de>
CC: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Ed Cashin <ecashin@coraid.com>
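
For context, bio_sectors() at this point in the series is just a thin wrapper around bi_size, which is why consolidating on the helper shrinks the later patch that moves these fields into struct bvec_iter. The snippet below is a minimal user-space sketch, not kernel code: struct bio is reduced to the one field involved, and the bio_sectors() macro mirrors the helper as defined in include/linux/bio.h in this era of the tree, assuming 512-byte sectors. It also checks the KiB conversion used in the raid0/raid10 printk hunks, where "bi_size >> 10" becomes "bio_sectors(bio) / 2".

    /*
     * Stand-alone sketch (not kernel code) of the conversion this patch
     * applies.  The struct is a stand-in for the relevant field of the
     * kernel's struct bio; the macro mirrors include/linux/bio.h here,
     * where bi_size counts bytes and a sector is 512 bytes.
     */
    #include <assert.h>
    #include <stdio.h>

    struct bio {
            unsigned int bi_size;   /* residual I/O size, in bytes */
    };

    #define bio_sectors(bio)        ((bio)->bi_size >> 9)

    int main(void)
    {
            struct bio b = { .bi_size = 64 * 1024 };        /* 64 KiB request */

            /* Open-coded form replaced throughout this patch: */
            assert((b.bi_size >> 9) == bio_sectors(&b));

            /*
             * The raid0/raid10 printk hunks convert "bi_size >> 10" (KiB)
             * to "bio_sectors(bio) / 2": halving 512-byte sectors gives KiB.
             */
            assert((b.bi_size >> 10) == bio_sectors(&b) / 2);

            printf("%u bytes = %u sectors = %u KiB\n",
                   b.bi_size, bio_sectors(&b), bio_sectors(&b) / 2);
            return 0;
    }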

Showing 8 changed files with 35 additions and 38 deletions

drivers/block/pktcdvd.c
... ... @@ -2433,7 +2433,7 @@
2433 2433 cloned_bio->bi_bdev = pd->bdev;
2434 2434 cloned_bio->bi_private = psd;
2435 2435 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2436   - pd->stats.secs_r += bio->bi_size >> 9;
  2436 + pd->stats.secs_r += bio_sectors(bio);
2437 2437 pkt_queue_bio(pd, cloned_bio);
2438 2438 return;
2439 2439 }
drivers/md/dm-raid1.c
... ... @@ -458,7 +458,7 @@
458 458 {
459 459 io->bdev = m->dev->bdev;
460 460 io->sector = map_sector(m, bio);
461   - io->count = bio->bi_size >> 9;
  461 + io->count = bio_sectors(bio);
462 462 }
463 463  
464 464 static void hold_bio(struct mirror_set *ms, struct bio *bio)
drivers/md/raid0.c
... ... @@ -502,11 +502,11 @@
502 502 {
503 503 if (likely(is_power_of_2(chunk_sects))) {
504 504 return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
505   - + (bio->bi_size >> 9));
  505 + + bio_sectors(bio));
506 506 } else{
507 507 sector_t sector = bio->bi_sector;
508 508 return chunk_sects >= (sector_div(sector, chunk_sects)
509   - + (bio->bi_size >> 9));
  509 + + bio_sectors(bio));
510 510 }
511 511 }
512 512  
... ... @@ -567,7 +567,7 @@
567 567 printk("md/raid0:%s: make_request bug: can't convert block across chunks"
568 568 " or bigger than %dk %llu %d\n",
569 569 mdname(mddev), chunk_sects / 2,
570   - (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
  570 + (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
571 571  
572 572 bio_io_error(bio);
573 573 return;
drivers/md/raid1.c
... ... @@ -267,7 +267,7 @@
267 267 (bio_data_dir(bio) == WRITE) ? "write" : "read",
268 268 (unsigned long long) bio->bi_sector,
269 269 (unsigned long long) bio->bi_sector +
270   - (bio->bi_size >> 9) - 1);
  270 + bio_sectors(bio) - 1);
271 271  
272 272 call_bio_endio(r1_bio);
273 273 }
... ... @@ -458,7 +458,7 @@
458 458 " %llu-%llu\n",
459 459 (unsigned long long) mbio->bi_sector,
460 460 (unsigned long long) mbio->bi_sector +
461   - (mbio->bi_size >> 9) - 1);
  461 + bio_sectors(mbio) - 1);
462 462 call_bio_endio(r1_bio);
463 463 }
464 464 }
... ... @@ -1049,7 +1049,7 @@
1049 1049 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1050 1050  
1051 1051 r1_bio->master_bio = bio;
1052   - r1_bio->sectors = bio->bi_size >> 9;
  1052 + r1_bio->sectors = bio_sectors(bio);
1053 1053 r1_bio->state = 0;
1054 1054 r1_bio->mddev = mddev;
1055 1055 r1_bio->sector = bio->bi_sector;
... ... @@ -1127,7 +1127,7 @@
1127 1127 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1128 1128  
1129 1129 r1_bio->master_bio = bio;
1130   - r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
  1130 + r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1131 1131 r1_bio->state = 0;
1132 1132 r1_bio->mddev = mddev;
1133 1133 r1_bio->sector = bio->bi_sector + sectors_handled;
1134 1134  
... ... @@ -1329,14 +1329,14 @@
1329 1329 /* Mustn't call r1_bio_write_done before this next test,
1330 1330 * as it could result in the bio being freed.
1331 1331 */
1332   - if (sectors_handled < (bio->bi_size >> 9)) {
  1332 + if (sectors_handled < bio_sectors(bio)) {
1333 1333 r1_bio_write_done(r1_bio);
1334 1334 /* We need another r1_bio. It has already been counted
1335 1335 * in bio->bi_phys_segments
1336 1336 */
1337 1337 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
1338 1338 r1_bio->master_bio = bio;
1339   - r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
  1339 + r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1340 1340 r1_bio->state = 0;
1341 1341 r1_bio->mddev = mddev;
1342 1342 r1_bio->sector = bio->bi_sector + sectors_handled;
... ... @@ -1947,7 +1947,7 @@
1947 1947 wbio->bi_rw = WRITE;
1948 1948 wbio->bi_end_io = end_sync_write;
1949 1949 atomic_inc(&r1_bio->remaining);
1950   - md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
  1950 + md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
1951 1951  
1952 1952 generic_make_request(wbio);
1953 1953 }
... ... @@ -2284,8 +2284,7 @@
2284 2284 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
2285 2285  
2286 2286 r1_bio->master_bio = mbio;
2287   - r1_bio->sectors = (mbio->bi_size >> 9)
2288   - - sectors_handled;
  2287 + r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
2289 2288 r1_bio->state = 0;
2290 2289 set_bit(R1BIO_ReadError, &r1_bio->state);
2291 2290 r1_bio->mddev = mddev;
drivers/md/raid10.c
... ... @@ -1169,7 +1169,7 @@
1169 1169 /* If this request crosses a chunk boundary, we need to
1170 1170 * split it. This will only happen for 1 PAGE (or less) requests.
1171 1171 */
1172   - if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
  1172 + if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
1173 1173 > chunk_sects
1174 1174 && (conf->geo.near_copies < conf->geo.raid_disks
1175 1175 || conf->prev.near_copies < conf->prev.raid_disks))) {
... ... @@ -1209,7 +1209,7 @@
1209 1209 bad_map:
1210 1210 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
1211 1211 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
1212   - (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
  1212 + (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
1213 1213  
1214 1214 bio_io_error(bio);
1215 1215 return;
... ... @@ -1224,7 +1224,7 @@
1224 1224 */
1225 1225 wait_barrier(conf);
1226 1226  
1227   - sectors = bio->bi_size >> 9;
  1227 + sectors = bio_sectors(bio);
1228 1228 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1229 1229 bio->bi_sector < conf->reshape_progress &&
1230 1230 bio->bi_sector + sectors > conf->reshape_progress) {
... ... @@ -1326,8 +1326,7 @@
1326 1326 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1327 1327  
1328 1328 r10_bio->master_bio = bio;
1329   - r10_bio->sectors = ((bio->bi_size >> 9)
1330   - - sectors_handled);
  1329 + r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1331 1330 r10_bio->state = 0;
1332 1331 r10_bio->mddev = mddev;
1333 1332 r10_bio->sector = bio->bi_sector + sectors_handled;
... ... @@ -1569,7 +1568,7 @@
1569 1568 * after checking if we need to go around again.
1570 1569 */
1571 1570  
1572   - if (sectors_handled < (bio->bi_size >> 9)) {
  1571 + if (sectors_handled < bio_sectors(bio)) {
1573 1572 one_write_done(r10_bio);
1574 1573 /* We need another r10_bio. It has already been counted
1575 1574 * in bio->bi_phys_segments.
... ... @@ -1577,7 +1576,7 @@
1577 1576 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1578 1577  
1579 1578 r10_bio->master_bio = bio;
1580   - r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
  1579 + r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1581 1580  
1582 1581 r10_bio->mddev = mddev;
1583 1582 r10_bio->sector = bio->bi_sector + sectors_handled;
... ... @@ -2103,7 +2102,7 @@
2103 2102 d = r10_bio->devs[i].devnum;
2104 2103 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2105 2104 atomic_inc(&r10_bio->remaining);
2106   - md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
  2105 + md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2107 2106  
2108 2107 tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
2109 2108 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
... ... @@ -2128,7 +2127,7 @@
2128 2127 d = r10_bio->devs[i].devnum;
2129 2128 atomic_inc(&r10_bio->remaining);
2130 2129 md_sync_acct(conf->mirrors[d].replacement->bdev,
2131   - tbio->bi_size >> 9);
  2130 + bio_sectors(tbio));
2132 2131 generic_make_request(tbio);
2133 2132 }
2134 2133  
2135 2134  
... ... @@ -2254,13 +2253,13 @@
2254 2253 wbio2 = r10_bio->devs[1].repl_bio;
2255 2254 if (wbio->bi_end_io) {
2256 2255 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2257   - md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
  2256 + md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2258 2257 generic_make_request(wbio);
2259 2258 }
2260 2259 if (wbio2 && wbio2->bi_end_io) {
2261 2260 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2262 2261 md_sync_acct(conf->mirrors[d].replacement->bdev,
2263   - wbio2->bi_size >> 9);
  2262 + bio_sectors(wbio2));
2264 2263 generic_make_request(wbio2);
2265 2264 }
2266 2265 }
... ... @@ -2690,8 +2689,7 @@
2690 2689 r10_bio = mempool_alloc(conf->r10bio_pool,
2691 2690 GFP_NOIO);
2692 2691 r10_bio->master_bio = mbio;
2693   - r10_bio->sectors = (mbio->bi_size >> 9)
2694   - - sectors_handled;
  2692 + r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
2695 2693 r10_bio->state = 0;
2696 2694 set_bit(R10BIO_ReadError,
2697 2695 &r10_bio->state);
drivers/md/raid5.c
... ... @@ -90,7 +90,7 @@
90 90 */
91 91 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
92 92 {
93   - int sectors = bio->bi_size >> 9;
  93 + int sectors = bio_sectors(bio);
94 94 if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
95 95 return bio->bi_next;
96 96 else
... ... @@ -3804,7 +3804,7 @@
3804 3804 {
3805 3805 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3806 3806 unsigned int chunk_sectors = mddev->chunk_sectors;
3807   - unsigned int bio_sectors = bio->bi_size >> 9;
  3807 + unsigned int bio_sectors = bio_sectors(bio);
3808 3808  
3809 3809 if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3810 3810 chunk_sectors = mddev->new_chunk_sectors;
... ... @@ -3894,7 +3894,7 @@
3894 3894 {
3895 3895 struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3896 3896  
3897   - if ((bi->bi_size>>9) > queue_max_sectors(q))
  3897 + if (bio_sectors(bi) > queue_max_sectors(q))
3898 3898 return 0;
3899 3899 blk_recount_segments(q, bi);
3900 3900 if (bi->bi_phys_segments > queue_max_segments(q))
... ... @@ -3964,7 +3964,7 @@
3964 3964 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3965 3965  
3966 3966 if (!bio_fits_rdev(align_bi) ||
3967   - is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
  3967 + is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
3968 3968 &first_bad, &bad_sectors)) {
3969 3969 /* too big in some way, or has a known bad block */
3970 3970 bio_put(align_bi);
... ... @@ -5166,7 +5166,7 @@
5166 5166 }
5167 5167  
5168 5168 prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
5169   - if ((bio->bi_size >> 9) > max_sectors)
  5169 + if (bio_sectors(bio) > max_sectors)
5170 5170 return 0;
5171 5171  
5172 5172 if (!q->merge_bvec_fn)
include/trace/events/block.h
... ... @@ -244,7 +244,7 @@
244 244 __entry->dev = bio->bi_bdev ?
245 245 bio->bi_bdev->bd_dev : 0;
246 246 __entry->sector = bio->bi_sector;
247   - __entry->nr_sector = bio->bi_size >> 9;
  247 + __entry->nr_sector = bio_sectors(bio);
248 248 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
249 249 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
250 250 ),
... ... @@ -281,7 +281,7 @@
281 281 __entry->dev = bio->bi_bdev ?
282 282 bio->bi_bdev->bd_dev : 0;
283 283 __entry->sector = bio->bi_sector;
284   - __entry->nr_sector = bio->bi_size >> 9;
  284 + __entry->nr_sector = bio_sectors(bio);
285 285 __entry->error = error;
286 286 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
287 287 ),
... ... @@ -309,7 +309,7 @@
309 309 TP_fast_assign(
310 310 __entry->dev = bio->bi_bdev->bd_dev;
311 311 __entry->sector = bio->bi_sector;
312   - __entry->nr_sector = bio->bi_size >> 9;
  312 + __entry->nr_sector = bio_sectors(bio);
313 313 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
314 314 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
315 315 ),
... ... @@ -376,7 +376,7 @@
376 376 TP_fast_assign(
377 377 __entry->dev = bio->bi_bdev->bd_dev;
378 378 __entry->sector = bio->bi_sector;
379   - __entry->nr_sector = bio->bi_size >> 9;
  379 + __entry->nr_sector = bio_sectors(bio);
380 380 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
381 381 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
382 382 ),
... ... @@ -404,7 +404,7 @@
404 404 TP_fast_assign(
405 405 __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
406 406 __entry->sector = bio ? bio->bi_sector : 0;
407   - __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
  407 + __entry->nr_sector = bio ? bio_sectors(bio) : 0;
408 408 blk_fill_rwbs(__entry->rwbs,
409 409 bio ? bio->bi_rw : 0, __entry->nr_sector);
410 410 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
... ... @@ -580,7 +580,7 @@
580 580 TP_fast_assign(
581 581 __entry->dev = bio->bi_bdev->bd_dev;
582 582 __entry->sector = bio->bi_sector;
583   - __entry->nr_sector = bio->bi_size >> 9;
  583 + __entry->nr_sector = bio_sectors(bio);
584 584 __entry->old_dev = dev;
585 585 __entry->old_sector = from;
586 586 blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);