Commit 7988613b0e5b2638caf6cd493cc78e9595eba19c

Authored by Kent Overstreet
1 parent a4ad39b1d1

block: Convert bio_for_each_segment() to bvec_iter

More prep work for immutable biovecs - with immutable bvecs drivers
won't be able to use the biovec directly; they'll need to use helpers
that take into account bio->bi_iter.bi_bvec_done.

This updates callers for the new usage without changing the
implementation yet.
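
For reference, a minimal sketch of the usage change (not part of the
commit; do_something() is a stand-in for whatever a driver does with
each segment):

        /* old usage: integer index, segments seen as pointers into
         * bio->bi_io_vec */
        struct bio_vec *bv;
        int i;

        bio_for_each_segment(bv, bio, i)
                do_something(bv->bv_page, bv->bv_offset, bv->bv_len);

        /* new usage: a struct bvec_iter; the macro hands back a
         * struct bio_vec by value, so members are accessed with '.'
         * and a pointer is taken explicitly (&bv) where a helper
         * still wants one */
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter)
                do_something(bv.bv_page, bv.bv_offset, bv.bv_len);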

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Paul Clements <Paul.Clements@steeleye.com>
Cc: Jim Paris <jim@jtan.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: support@lsi.com
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Stephen Hemminger <shemminger@vyatta.com>
Cc: Quoc-Son Anh <quoc-sonx.anh@intel.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: "Darrick J. Wong" <darrick.wong@oracle.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jan Kara <jack@suse.cz>
Cc: linux-m68k@lists.linux-m68k.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: drbd-user@lists.linbit.com
Cc: nbd-general@lists.sourceforge.net
Cc: cbe-oss-dev@lists.ozlabs.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: linux-raid@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: DL-MPTFusionLinux@lsi.com
Cc: linux-scsi@vger.kernel.org
Cc: devel@driverdev.osuosl.org
Cc: linux-fsdevel@vger.kernel.org
Cc: cluster-devel@redhat.com
Cc: linux-mm@kvack.org
Acked-by: Geoff Levand <geoff@infradead.org>

Showing 39 changed files with 401 additions and 397 deletions

arch/m68k/emu/nfblock.c
... ... @@ -62,17 +62,18 @@
62 62 static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
63 63 {
64 64 struct nfhd_device *dev = queue->queuedata;
65   - struct bio_vec *bvec;
66   - int i, dir, len, shift;
  65 + struct bio_vec bvec;
  66 + struct bvec_iter iter;
  67 + int dir, len, shift;
67 68 sector_t sec = bio->bi_iter.bi_sector;
68 69  
69 70 dir = bio_data_dir(bio);
70 71 shift = dev->bshift;
71   - bio_for_each_segment(bvec, bio, i) {
72   - len = bvec->bv_len;
  72 + bio_for_each_segment(bvec, bio, iter) {
  73 + len = bvec.bv_len;
73 74 len >>= 9;
74 75 nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
75   - bvec_to_phys(bvec));
  76 + bvec_to_phys(&bvec));
76 77 sec += len;
77 78 }
78 79 bio_endio(bio, 0);
arch/powerpc/sysdev/axonram.c
... ... @@ -109,28 +109,28 @@
109 109 struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
110 110 unsigned long phys_mem, phys_end;
111 111 void *user_mem;
112   - struct bio_vec *vec;
  112 + struct bio_vec vec;
113 113 unsigned int transfered;
114   - unsigned short idx;
  114 + struct bvec_iter iter;
115 115  
116 116 phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
117 117 AXON_RAM_SECTOR_SHIFT);
118 118 phys_end = bank->io_addr + bank->size;
119 119 transfered = 0;
120   - bio_for_each_segment(vec, bio, idx) {
121   - if (unlikely(phys_mem + vec->bv_len > phys_end)) {
  120 + bio_for_each_segment(vec, bio, iter) {
  121 + if (unlikely(phys_mem + vec.bv_len > phys_end)) {
122 122 bio_io_error(bio);
123 123 return;
124 124 }
125 125  
126   - user_mem = page_address(vec->bv_page) + vec->bv_offset;
  126 + user_mem = page_address(vec.bv_page) + vec.bv_offset;
127 127 if (bio_data_dir(bio) == READ)
128   - memcpy(user_mem, (void *) phys_mem, vec->bv_len);
  128 + memcpy(user_mem, (void *) phys_mem, vec.bv_len);
129 129 else
130   - memcpy((void *) phys_mem, user_mem, vec->bv_len);
  130 + memcpy((void *) phys_mem, user_mem, vec.bv_len);
131 131  
132   - phys_mem += vec->bv_len;
133   - transfered += vec->bv_len;
  132 + phys_mem += vec.bv_len;
  133 + transfered += vec.bv_len;
134 134 }
135 135 bio_endio(bio, 0);
136 136 }
... ... @@ -2746,10 +2746,10 @@
2746 2746 void rq_flush_dcache_pages(struct request *rq)
2747 2747 {
2748 2748 struct req_iterator iter;
2749   - struct bio_vec *bvec;
  2749 + struct bio_vec bvec;
2750 2750  
2751 2751 rq_for_each_segment(bvec, rq, iter)
2752   - flush_dcache_page(bvec->bv_page);
  2752 + flush_dcache_page(bvec.bv_page);
2753 2753 }
2754 2754 EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2755 2755 #endif
... ... @@ -12,10 +12,11 @@
12 12 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
13 13 struct bio *bio)
14 14 {
15   - struct bio_vec *bv, *bvprv = NULL;
16   - int cluster, i, high, highprv = 1;
  15 + struct bio_vec bv, bvprv = { NULL };
  16 + int cluster, high, highprv = 1;
17 17 unsigned int seg_size, nr_phys_segs;
18 18 struct bio *fbio, *bbio;
  19 + struct bvec_iter iter;
19 20  
20 21 if (!bio)
21 22 return 0;
22 23  
23 24  
24 25  
25 26  
... ... @@ -25,25 +26,23 @@
25 26 seg_size = 0;
26 27 nr_phys_segs = 0;
27 28 for_each_bio(bio) {
28   - bio_for_each_segment(bv, bio, i) {
  29 + bio_for_each_segment(bv, bio, iter) {
29 30 /*
30 31 * the trick here is making sure that a high page is
31 32 * never considered part of another segment, since that
32 33 * might change with the bounce page.
33 34 */
34   - high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
35   - if (high || highprv)
36   - goto new_segment;
37   - if (cluster) {
38   - if (seg_size + bv->bv_len
  35 + high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
  36 + if (!high && !highprv && cluster) {
  37 + if (seg_size + bv.bv_len
39 38 > queue_max_segment_size(q))
40 39 goto new_segment;
41   - if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
  40 + if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
42 41 goto new_segment;
43   - if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
  42 + if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
44 43 goto new_segment;
45 44  
46   - seg_size += bv->bv_len;
  45 + seg_size += bv.bv_len;
47 46 bvprv = bv;
48 47 continue;
49 48 }
... ... @@ -54,7 +53,7 @@
54 53  
55 54 nr_phys_segs++;
56 55 bvprv = bv;
57   - seg_size = bv->bv_len;
  56 + seg_size = bv.bv_len;
58 57 highprv = high;
59 58 }
60 59 bbio = bio;
61 60  
62 61  
63 62  
64 63  
... ... @@ -110,21 +109,21 @@
110 109 return 0;
111 110 }
112 111  
113   -static void
  112 +static inline void
114 113 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
115   - struct scatterlist *sglist, struct bio_vec **bvprv,
  114 + struct scatterlist *sglist, struct bio_vec *bvprv,
116 115 struct scatterlist **sg, int *nsegs, int *cluster)
117 116 {
118 117  
119 118 int nbytes = bvec->bv_len;
120 119  
121   - if (*bvprv && *cluster) {
  120 + if (*sg && *cluster) {
122 121 if ((*sg)->length + nbytes > queue_max_segment_size(q))
123 122 goto new_segment;
124 123  
125   - if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
  124 + if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
126 125 goto new_segment;
127   - if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
  126 + if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
128 127 goto new_segment;
129 128  
130 129 (*sg)->length += nbytes;
... ... @@ -150,7 +149,7 @@
150 149 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
151 150 (*nsegs)++;
152 151 }
153   - *bvprv = bvec;
  152 + *bvprv = *bvec;
154 153 }
155 154  
156 155 /*
... ... @@ -160,7 +159,7 @@
160 159 int blk_rq_map_sg(struct request_queue *q, struct request *rq,
161 160 struct scatterlist *sglist)
162 161 {
163   - struct bio_vec *bvec, *bvprv;
  162 + struct bio_vec bvec, bvprv;
164 163 struct req_iterator iter;
165 164 struct scatterlist *sg;
166 165 int nsegs, cluster;
167 166  
... ... @@ -171,10 +170,9 @@
171 170 /*
172 171 * for each bio in rq
173 172 */
174   - bvprv = NULL;
175 173 sg = NULL;
176 174 rq_for_each_segment(bvec, rq, iter) {
177   - __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
  175 + __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
178 176 &nsegs, &cluster);
179 177 } /* segments in rq */
180 178  
181 179  
182 180  
183 181  
... ... @@ -223,18 +221,17 @@
223 221 int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
224 222 struct scatterlist *sglist)
225 223 {
226   - struct bio_vec *bvec, *bvprv;
  224 + struct bio_vec bvec, bvprv;
227 225 struct scatterlist *sg;
228 226 int nsegs, cluster;
229   - unsigned long i;
  227 + struct bvec_iter iter;
230 228  
231 229 nsegs = 0;
232 230 cluster = blk_queue_cluster(q);
233 231  
234   - bvprv = NULL;
235 232 sg = NULL;
236   - bio_for_each_segment(bvec, bio, i) {
237   - __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
  233 + bio_for_each_segment(bvec, bio, iter) {
  234 + __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
238 235 &nsegs, &cluster);
239 236 } /* segments in bio */
240 237  
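
Note on the blk-merge.c hunks above: with by-value iteration there is
no stable bio_vec pointer left to test, so __blk_segment_map_sg() now
uses a non-NULL scatterlist cursor ("*sg && *cluster") to mean "a
previous segment exists", and the callers stop initializing bvprv. A
sketch of the resulting caller pattern (mirroring the diff, not new
API):

        struct bio_vec bvec, bvprv;     /* by value: no NULL sentinel */
        struct bvec_iter iter;
        struct scatterlist *sg = NULL;  /* NULL sg == no previous segment */

        bio_for_each_segment(bvec, bio, iter)
                __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);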
drivers/block/aoe/aoecmd.c
... ... @@ -897,15 +897,15 @@
897 897 static void
898 898 bio_pageinc(struct bio *bio)
899 899 {
900   - struct bio_vec *bv;
  900 + struct bio_vec bv;
901 901 struct page *page;
902   - int i;
  902 + struct bvec_iter iter;
903 903  
904   - bio_for_each_segment(bv, bio, i) {
  904 + bio_for_each_segment(bv, bio, iter) {
905 905 /* Non-zero page count for non-head members of
906 906 * compound pages is no longer allowed by the kernel.
907 907 */
908   - page = compound_trans_head(bv->bv_page);
  908 + page = compound_trans_head(bv.bv_page);
909 909 atomic_inc(&page->_count);
910 910 }
911 911 }
912 912  
913 913  
... ... @@ -913,12 +913,12 @@
913 913 static void
914 914 bio_pagedec(struct bio *bio)
915 915 {
916   - struct bio_vec *bv;
917 916 struct page *page;
918   - int i;
  917 + struct bio_vec bv;
  918 + struct bvec_iter iter;
919 919  
920   - bio_for_each_segment(bv, bio, i) {
921   - page = compound_trans_head(bv->bv_page);
  920 + bio_for_each_segment(bv, bio, iter) {
  921 + page = compound_trans_head(bv.bv_page);
922 922 atomic_dec(&page->_count);
923 923 }
924 924 }
... ... @@ -328,9 +328,9 @@
328 328 struct block_device *bdev = bio->bi_bdev;
329 329 struct brd_device *brd = bdev->bd_disk->private_data;
330 330 int rw;
331   - struct bio_vec *bvec;
  331 + struct bio_vec bvec;
332 332 sector_t sector;
333   - int i;
  333 + struct bvec_iter iter;
334 334 int err = -EIO;
335 335  
336 336 sector = bio->bi_iter.bi_sector;
... ... @@ -347,10 +347,10 @@
347 347 if (rw == READA)
348 348 rw = READ;
349 349  
350   - bio_for_each_segment(bvec, bio, i) {
351   - unsigned int len = bvec->bv_len;
352   - err = brd_do_bvec(brd, bvec->bv_page, len,
353   - bvec->bv_offset, rw, sector);
  350 + bio_for_each_segment(bvec, bio, iter) {
  351 + unsigned int len = bvec.bv_len;
  352 + err = brd_do_bvec(brd, bvec.bv_page, len,
  353 + bvec.bv_offset, rw, sector);
354 354 if (err)
355 355 break;
356 356 sector += len >> SECTOR_SHIFT;
drivers/block/drbd/drbd_main.c
... ... @@ -1537,15 +1537,17 @@
1537 1537  
1538 1538 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1539 1539 {
1540   - struct bio_vec *bvec;
1541   - int i;
  1540 + struct bio_vec bvec;
  1541 + struct bvec_iter iter;
  1542 +
1542 1543 /* hint all but last page with MSG_MORE */
1543   - bio_for_each_segment(bvec, bio, i) {
  1544 + bio_for_each_segment(bvec, bio, iter) {
1544 1545 int err;
1545 1546  
1546   - err = _drbd_no_send_page(mdev, bvec->bv_page,
1547   - bvec->bv_offset, bvec->bv_len,
1548   - i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
  1547 + err = _drbd_no_send_page(mdev, bvec.bv_page,
  1548 + bvec.bv_offset, bvec.bv_len,
  1549 + bio_iter_last(bio, iter)
  1550 + ? 0 : MSG_MORE);
1549 1551 if (err)
1550 1552 return err;
1551 1553 }
1552 1554  
1553 1555  
... ... @@ -1554,15 +1556,16 @@
1554 1556  
1555 1557 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1556 1558 {
1557   - struct bio_vec *bvec;
1558   - int i;
  1559 + struct bio_vec bvec;
  1560 + struct bvec_iter iter;
  1561 +
1559 1562 /* hint all but last page with MSG_MORE */
1560   - bio_for_each_segment(bvec, bio, i) {
  1563 + bio_for_each_segment(bvec, bio, iter) {
1561 1564 int err;
1562 1565  
1563   - err = _drbd_send_page(mdev, bvec->bv_page,
1564   - bvec->bv_offset, bvec->bv_len,
1565   - i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
  1566 + err = _drbd_send_page(mdev, bvec.bv_page,
  1567 + bvec.bv_offset, bvec.bv_len,
  1568 + bio_iter_last(bio, iter) ? 0 : MSG_MORE);
1566 1569 if (err)
1567 1570 return err;
1568 1571 }
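
The drbd hunks above also swap the open-coded "i == bio->bi_vcnt - 1"
last-segment test for bio_iter_last(), since there is no index left to
compare once iteration is iter-based. A sketch of the pattern
(send_page() stands in for _drbd_send_page()/_drbd_no_send_page()):

        bio_for_each_segment(bvec, bio, iter) {
                /* hint all but the last page with MSG_MORE */
                int flags = bio_iter_last(bio, iter) ? 0 : MSG_MORE;

                err = send_page(mdev, bvec.bv_page, bvec.bv_offset,
                                bvec.bv_len, flags);
                if (err)
                        return err;
        }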
drivers/block/drbd/drbd_receiver.c
... ... @@ -1595,9 +1595,10 @@
1595 1595 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1596 1596 sector_t sector, int data_size)
1597 1597 {
1598   - struct bio_vec *bvec;
  1598 + struct bio_vec bvec;
  1599 + struct bvec_iter iter;
1599 1600 struct bio *bio;
1600   - int dgs, err, i, expect;
  1601 + int dgs, err, expect;
1601 1602 void *dig_in = mdev->tconn->int_dig_in;
1602 1603 void *dig_vv = mdev->tconn->int_dig_vv;
1603 1604  
1604 1605  
... ... @@ -1617,11 +1618,11 @@
1617 1618 bio = req->master_bio;
1618 1619 D_ASSERT(sector == bio->bi_iter.bi_sector);
1619 1620  
1620   - bio_for_each_segment(bvec, bio, i) {
1621   - void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
1622   - expect = min_t(int, data_size, bvec->bv_len);
  1621 + bio_for_each_segment(bvec, bio, iter) {
  1622 + void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
  1623 + expect = min_t(int, data_size, bvec.bv_len);
1623 1624 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
1624   - kunmap(bvec->bv_page);
  1625 + kunmap(bvec.bv_page);
1625 1626 if (err)
1626 1627 return err;
1627 1628 data_size -= expect;
drivers/block/drbd/drbd_worker.c
... ... @@ -313,8 +313,8 @@
313 313 {
314 314 struct hash_desc desc;
315 315 struct scatterlist sg;
316   - struct bio_vec *bvec;
317   - int i;
  316 + struct bio_vec bvec;
  317 + struct bvec_iter iter;
318 318  
319 319 desc.tfm = tfm;
320 320 desc.flags = 0;
... ... @@ -322,8 +322,8 @@
322 322 sg_init_table(&sg, 1);
323 323 crypto_hash_init(&desc);
324 324  
325   - bio_for_each_segment(bvec, bio, i) {
326   - sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
  325 + bio_for_each_segment(bvec, bio, iter) {
  326 + sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
327 327 crypto_hash_update(&desc, &sg, sg.length);
328 328 }
329 329 crypto_hash_final(&desc, digest);
drivers/block/floppy.c
... ... @@ -2351,7 +2351,7 @@
2351 2351 /* Compute maximal contiguous buffer size. */
2352 2352 static int buffer_chain_size(void)
2353 2353 {
2354   - struct bio_vec *bv;
  2354 + struct bio_vec bv;
2355 2355 int size;
2356 2356 struct req_iterator iter;
2357 2357 char *base;
2358 2358  
... ... @@ -2360,10 +2360,10 @@
2360 2360 size = 0;
2361 2361  
2362 2362 rq_for_each_segment(bv, current_req, iter) {
2363   - if (page_address(bv->bv_page) + bv->bv_offset != base + size)
  2363 + if (page_address(bv.bv_page) + bv.bv_offset != base + size)
2364 2364 break;
2365 2365  
2366   - size += bv->bv_len;
  2366 + size += bv.bv_len;
2367 2367 }
2368 2368  
2369 2369 return size >> 9;
... ... @@ -2389,7 +2389,7 @@
2389 2389 static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2390 2390 {
2391 2391 int remaining; /* number of transferred 512-byte sectors */
2392   - struct bio_vec *bv;
  2392 + struct bio_vec bv;
2393 2393 char *buffer;
2394 2394 char *dma_buffer;
2395 2395 int size;
2396 2396  
... ... @@ -2427,10 +2427,10 @@
2427 2427 if (!remaining)
2428 2428 break;
2429 2429  
2430   - size = bv->bv_len;
  2430 + size = bv.bv_len;
2431 2431 SUPBOUND(size, remaining);
2432 2432  
2433   - buffer = page_address(bv->bv_page) + bv->bv_offset;
  2433 + buffer = page_address(bv.bv_page) + bv.bv_offset;
2434 2434 if (dma_buffer + size >
2435 2435 floppy_track_buffer + (max_buffer_sectors << 10) ||
2436 2436 dma_buffer < floppy_track_buffer) {
drivers/block/loop.c
... ... @@ -288,9 +288,10 @@
288 288 {
289 289 int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
290 290 struct page *page);
291   - struct bio_vec *bvec;
  291 + struct bio_vec bvec;
  292 + struct bvec_iter iter;
292 293 struct page *page = NULL;
293   - int i, ret = 0;
  294 + int ret = 0;
294 295  
295 296 if (lo->transfer != transfer_none) {
296 297 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
297 298  
... ... @@ -302,11 +303,11 @@
302 303 do_lo_send = do_lo_send_direct_write;
303 304 }
304 305  
305   - bio_for_each_segment(bvec, bio, i) {
306   - ret = do_lo_send(lo, bvec, pos, page);
  306 + bio_for_each_segment(bvec, bio, iter) {
  307 + ret = do_lo_send(lo, &bvec, pos, page);
307 308 if (ret < 0)
308 309 break;
309   - pos += bvec->bv_len;
  310 + pos += bvec.bv_len;
310 311 }
311 312 if (page) {
312 313 kunmap(page);
313 314  
314 315  
315 316  
316 317  
... ... @@ -392,20 +393,20 @@
392 393 static int
393 394 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
394 395 {
395   - struct bio_vec *bvec;
  396 + struct bio_vec bvec;
  397 + struct bvec_iter iter;
396 398 ssize_t s;
397   - int i;
398 399  
399   - bio_for_each_segment(bvec, bio, i) {
400   - s = do_lo_receive(lo, bvec, bsize, pos);
  400 + bio_for_each_segment(bvec, bio, iter) {
  401 + s = do_lo_receive(lo, &bvec, bsize, pos);
401 402 if (s < 0)
402 403 return s;
403 404  
404   - if (s != bvec->bv_len) {
  405 + if (s != bvec.bv_len) {
405 406 zero_fill_bio(bio);
406 407 break;
407 408 }
408   - pos += bvec->bv_len;
  409 + pos += bvec.bv_len;
409 410 }
410 411 return 0;
411 412 }
drivers/block/mtip32xx/mtip32xx.c
... ... @@ -3962,8 +3962,9 @@
3962 3962 {
3963 3963 struct driver_data *dd = queue->queuedata;
3964 3964 struct scatterlist *sg;
3965   - struct bio_vec *bvec;
3966   - int i, nents = 0;
  3965 + struct bio_vec bvec;
  3966 + struct bvec_iter iter;
  3967 + int nents = 0;
3967 3968 int tag = 0, unaligned = 0;
3968 3969  
3969 3970 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
3970 3971  
... ... @@ -4026,11 +4027,11 @@
4026 4027 }
4027 4028  
4028 4029 /* Create the scatter list for this bio. */
4029   - bio_for_each_segment(bvec, bio, i) {
  4030 + bio_for_each_segment(bvec, bio, iter) {
4030 4031 sg_set_page(&sg[nents],
4031   - bvec->bv_page,
4032   - bvec->bv_len,
4033   - bvec->bv_offset);
  4032 + bvec.bv_page,
  4033 + bvec.bv_len,
  4034 + bvec.bv_offset);
4034 4035 nents++;
4035 4036 }
4036 4037  
... ... @@ -271,7 +271,7 @@
271 271  
272 272 if (nbd_cmd(req) == NBD_CMD_WRITE) {
273 273 struct req_iterator iter;
274   - struct bio_vec *bvec;
  274 + struct bio_vec bvec;
275 275 /*
276 276 * we are really probing at internals to determine
277 277 * whether to set MSG_MORE or not...
... ... @@ -281,8 +281,8 @@
281 281 if (!rq_iter_last(req, iter))
282 282 flags = MSG_MORE;
283 283 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
284   - nbd->disk->disk_name, req, bvec->bv_len);
285   - result = sock_send_bvec(nbd, bvec, flags);
  284 + nbd->disk->disk_name, req, bvec.bv_len);
  285 + result = sock_send_bvec(nbd, &bvec, flags);
286 286 if (result <= 0) {
287 287 dev_err(disk_to_dev(nbd->disk),
288 288 "Send data failed (result %d)\n",
289 289  
... ... @@ -378,10 +378,10 @@
378 378 nbd->disk->disk_name, req);
379 379 if (nbd_cmd(req) == NBD_CMD_READ) {
380 380 struct req_iterator iter;
381   - struct bio_vec *bvec;
  381 + struct bio_vec bvec;
382 382  
383 383 rq_for_each_segment(bvec, req, iter) {
384   - result = sock_recv_bvec(nbd, bvec);
  384 + result = sock_recv_bvec(nbd, &bvec);
385 385 if (result <= 0) {
386 386 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
387 387 result);
... ... @@ -389,7 +389,7 @@
389 389 return req;
390 390 }
391 391 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
392   - nbd->disk->disk_name, req, bvec->bv_len);
  392 + nbd->disk->disk_name, req, bvec.bv_len);
393 393 }
394 394 }
395 395 return req;
drivers/block/nvme-core.c
... ... @@ -550,9 +550,11 @@
550 550 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
551 551 struct bio *bio, enum dma_data_direction dma_dir, int psegs)
552 552 {
553   - struct bio_vec *bvec, *bvprv = NULL;
  553 + struct bio_vec bvec, bvprv;
  554 + struct bvec_iter iter;
554 555 struct scatterlist *sg = NULL;
555   - int i, length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
  556 + int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
  557 + int first = 1;
556 558  
557 559 if (nvmeq->dev->stripe_size)
558 560 split_len = nvmeq->dev->stripe_size -
559 561  
560 562  
561 563  
562 564  
... ... @@ -560,25 +562,28 @@
560 562 (nvmeq->dev->stripe_size - 1));
561 563  
562 564 sg_init_table(iod->sg, psegs);
563   - bio_for_each_segment(bvec, bio, i) {
564   - if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
565   - sg->length += bvec->bv_len;
  565 + bio_for_each_segment(bvec, bio, iter) {
  566 + if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
  567 + sg->length += bvec.bv_len;
566 568 } else {
567   - if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
568   - return nvme_split_and_submit(bio, nvmeq, i,
569   - length, 0);
  569 + if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
  570 + return nvme_split_and_submit(bio, nvmeq,
  571 + iter.bi_idx,
  572 + length, 0);
570 573  
571 574 sg = sg ? sg + 1 : iod->sg;
572   - sg_set_page(sg, bvec->bv_page, bvec->bv_len,
573   - bvec->bv_offset);
  575 + sg_set_page(sg, bvec.bv_page,
  576 + bvec.bv_len, bvec.bv_offset);
574 577 nsegs++;
575 578 }
576 579  
577   - if (split_len - length < bvec->bv_len)
578   - return nvme_split_and_submit(bio, nvmeq, i, split_len,
579   - split_len - length);
580   - length += bvec->bv_len;
  580 + if (split_len - length < bvec.bv_len)
  581 + return nvme_split_and_submit(bio, nvmeq, iter.bi_idx,
  582 + split_len,
  583 + split_len - length);
  584 + length += bvec.bv_len;
581 585 bvprv = bvec;
  586 + first = 0;
582 587 }
583 588 iod->nents = nsegs;
584 589 sg_mark_end(sg);
drivers/block/ps3disk.c
... ... @@ -94,7 +94,7 @@
94 94 {
95 95 unsigned int offset = 0;
96 96 struct req_iterator iter;
97   - struct bio_vec *bvec;
  97 + struct bio_vec bvec;
98 98 unsigned int i = 0;
99 99 size_t size;
100 100 void *buf;
101 101  
... ... @@ -106,14 +106,14 @@
106 106 __func__, __LINE__, i, bio_segments(iter.bio),
107 107 bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector);
108 108  
109   - size = bvec->bv_len;
110   - buf = bvec_kmap_irq(bvec, &flags);
  109 + size = bvec.bv_len;
  110 + buf = bvec_kmap_irq(&bvec, &flags);
111 111 if (gather)
112 112 memcpy(dev->bounce_buf+offset, buf, size);
113 113 else
114 114 memcpy(buf, dev->bounce_buf+offset, size);
115 115 offset += size;
116   - flush_kernel_dcache_page(bvec->bv_page);
  116 + flush_kernel_dcache_page(bvec.bv_page);
117 117 bvec_kunmap_irq(buf, &flags);
118 118 i++;
119 119 }
... ... @@ -130,7 +130,7 @@
130 130  
131 131 #ifdef DEBUG
132 132 unsigned int n = 0;
133   - struct bio_vec *bv;
  133 + struct bio_vec bv;
134 134 struct req_iterator iter;
135 135  
136 136 rq_for_each_segment(bv, req, iter)
drivers/block/ps3vram.c
... ... @@ -555,14 +555,14 @@
555 555 const char *op = write ? "write" : "read";
556 556 loff_t offset = bio->bi_iter.bi_sector << 9;
557 557 int error = 0;
558   - struct bio_vec *bvec;
559   - unsigned int i;
  558 + struct bio_vec bvec;
  559 + struct bvec_iter iter;
560 560 struct bio *next;
561 561  
562   - bio_for_each_segment(bvec, bio, i) {
  562 + bio_for_each_segment(bvec, bio, iter) {
563 563 /* PS3 is ppc64, so we don't handle highmem */
564   - char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
565   - size_t len = bvec->bv_len, retlen;
  564 + char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
  565 + size_t len = bvec.bv_len, retlen;
566 566  
567 567 dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op,
568 568 len, offset);
... ... @@ -1109,23 +1109,23 @@
1109 1109 */
1110 1110 static void zero_bio_chain(struct bio *chain, int start_ofs)
1111 1111 {
1112   - struct bio_vec *bv;
  1112 + struct bio_vec bv;
  1113 + struct bvec_iter iter;
1113 1114 unsigned long flags;
1114 1115 void *buf;
1115   - int i;
1116 1116 int pos = 0;
1117 1117  
1118 1118 while (chain) {
1119   - bio_for_each_segment(bv, chain, i) {
1120   - if (pos + bv->bv_len > start_ofs) {
  1119 + bio_for_each_segment(bv, chain, iter) {
  1120 + if (pos + bv.bv_len > start_ofs) {
1121 1121 int remainder = max(start_ofs - pos, 0);
1122   - buf = bvec_kmap_irq(bv, &flags);
  1122 + buf = bvec_kmap_irq(&bv, &flags);
1123 1123 memset(buf + remainder, 0,
1124   - bv->bv_len - remainder);
1125   - flush_dcache_page(bv->bv_page);
  1124 + bv.bv_len - remainder);
  1125 + flush_dcache_page(bv.bv_page);
1126 1126 bvec_kunmap_irq(buf, &flags);
1127 1127 }
1128   - pos += bv->bv_len;
  1128 + pos += bv.bv_len;
1129 1129 }
1130 1130  
1131 1131 chain = chain->bi_next;
1132 1132  
1133 1133  
... ... @@ -1173,11 +1173,11 @@
1173 1173 unsigned int len,
1174 1174 gfp_t gfpmask)
1175 1175 {
1176   - struct bio_vec *bv;
  1176 + struct bio_vec bv;
  1177 + struct bvec_iter iter;
  1178 + struct bvec_iter end_iter;
1177 1179 unsigned int resid;
1178   - unsigned short idx;
1179 1180 unsigned int voff;
1180   - unsigned short end_idx;
1181 1181 unsigned short vcnt;
1182 1182 struct bio *bio;
1183 1183  
1184 1184  
1185 1185  
1186 1186  
1187 1187  
... ... @@ -1196,22 +1196,22 @@
1196 1196 /* Find first affected segment... */
1197 1197  
1198 1198 resid = offset;
1199   - bio_for_each_segment(bv, bio_src, idx) {
1200   - if (resid < bv->bv_len)
  1199 + bio_for_each_segment(bv, bio_src, iter) {
  1200 + if (resid < bv.bv_len)
1201 1201 break;
1202   - resid -= bv->bv_len;
  1202 + resid -= bv.bv_len;
1203 1203 }
1204 1204 voff = resid;
1205 1205  
1206 1206 /* ...and the last affected segment */
1207 1207  
1208 1208 resid += len;
1209   - __bio_for_each_segment(bv, bio_src, end_idx, idx) {
1210   - if (resid <= bv->bv_len)
  1209 + __bio_for_each_segment(bv, bio_src, end_iter, iter) {
  1210 + if (resid <= bv.bv_len)
1211 1211 break;
1212   - resid -= bv->bv_len;
  1212 + resid -= bv.bv_len;
1213 1213 }
1214   - vcnt = end_idx - idx + 1;
  1214 + vcnt = end_iter.bi_idx - iter.bi_idx + 1;
1215 1215  
1216 1216 /* Build the clone */
1217 1217  
... ... @@ -1229,7 +1229,7 @@
1229 1229 * Copy over our part of the bio_vec, then update the first
1230 1230 * and last (or only) entries.
1231 1231 */
1232   - memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
  1232 + memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[iter.bi_idx],
1233 1233 vcnt * sizeof (struct bio_vec));
1234 1234 bio->bi_io_vec[0].bv_offset += voff;
1235 1235 if (vcnt > 1) {
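
In the rbd clone-range code above, two iterators now delimit the
affected range: iter stops on the first affected segment and end_iter
on the last, so the segment count is inclusive of both endpoints, e.g.

        vcnt = end_iter.bi_idx - iter.bi_idx + 1;   /* idx 2..4 -> 3 bvecs */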
drivers/block/rsxx/dma.c
... ... @@ -684,7 +684,8 @@
684 684 void *cb_data)
685 685 {
686 686 struct list_head dma_list[RSXX_MAX_TARGETS];
687   - struct bio_vec *bvec;
  687 + struct bio_vec bvec;
  688 + struct bvec_iter iter;
688 689 unsigned long long addr8;
689 690 unsigned int laddr;
690 691 unsigned int bv_len;
... ... @@ -722,9 +723,9 @@
722 723 bv_len -= RSXX_HW_BLK_SIZE;
723 724 }
724 725 } else {
725   - bio_for_each_segment(bvec, bio, i) {
726   - bv_len = bvec->bv_len;
727   - bv_off = bvec->bv_offset;
  726 + bio_for_each_segment(bvec, bio, iter) {
  727 + bv_len = bvec.bv_len;
  728 + bv_off = bvec.bv_offset;
728 729  
729 730 while (bv_len > 0) {
730 731 tgt = rsxx_get_dma_tgt(card, addr8);
... ... @@ -736,7 +737,7 @@
736 737 st = rsxx_queue_dma(card, &dma_list[tgt],
737 738 bio_data_dir(bio),
738 739 dma_off, dma_len,
739   - laddr, bvec->bv_page,
  740 + laddr, bvec.bv_page,
740 741 bv_off, cb, cb_data);
741 742 if (st)
742 743 goto bvec_err;
drivers/md/bcache/btree.c
... ... @@ -362,7 +362,7 @@
362 362 struct bio_vec *bv;
363 363 int n;
364 364  
365   - __bio_for_each_segment(bv, b->bio, n, 0)
  365 + bio_for_each_segment_all(bv, b->bio, n)
366 366 __free_page(bv->bv_page);
367 367  
368 368 __btree_node_write_done(cl);
... ... @@ -421,7 +421,7 @@
421 421 struct bio_vec *bv;
422 422 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
423 423  
424   - bio_for_each_segment(bv, b->bio, j)
  424 + bio_for_each_segment_all(bv, b->bio, j)
425 425 memcpy(page_address(bv->bv_page),
426 426 base + j * PAGE_SIZE, PAGE_SIZE);
427 427  
drivers/md/bcache/debug.c
... ... @@ -173,7 +173,8 @@
173 173 {
174 174 char name[BDEVNAME_SIZE];
175 175 struct bio *check;
176   - struct bio_vec *bv;
  176 + struct bio_vec bv, *bv2;
  177 + struct bvec_iter iter;
177 178 int i;
178 179  
179 180 check = bio_clone(bio, GFP_NOIO);
180 181  
... ... @@ -185,13 +186,13 @@
185 186  
186 187 submit_bio_wait(READ_SYNC, check);
187 188  
188   - bio_for_each_segment(bv, bio, i) {
189   - void *p1 = kmap_atomic(bv->bv_page);
190   - void *p2 = page_address(check->bi_io_vec[i].bv_page);
  189 + bio_for_each_segment(bv, bio, iter) {
  190 + void *p1 = kmap_atomic(bv.bv_page);
  191 + void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
191 192  
192   - cache_set_err_on(memcmp(p1 + bv->bv_offset,
193   - p2 + bv->bv_offset,
194   - bv->bv_len),
  193 + cache_set_err_on(memcmp(p1 + bv.bv_offset,
  194 + p2 + bv.bv_offset,
  195 + bv.bv_len),
195 196 dc->disk.c,
196 197 "verify failed at dev %s sector %llu",
197 198 bdevname(dc->bdev, name),
... ... @@ -200,8 +201,8 @@
200 201 kunmap_atomic(p1);
201 202 }
202 203  
203   - bio_for_each_segment_all(bv, check, i)
204   - __free_page(bv->bv_page);
  204 + bio_for_each_segment_all(bv2, check, i)
  205 + __free_page(bv2->bv_page);
205 206 out_put:
206 207 bio_put(check);
207 208 }
drivers/md/bcache/io.c
... ... @@ -22,12 +22,12 @@
22 22 static void bch_generic_make_request_hack(struct bio *bio)
23 23 {
24 24 if (bio->bi_iter.bi_idx) {
25   - int i;
26   - struct bio_vec *bv;
  25 + struct bio_vec bv;
  26 + struct bvec_iter iter;
27 27 struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
28 28  
29   - bio_for_each_segment(bv, bio, i)
30   - clone->bi_io_vec[clone->bi_vcnt++] = *bv;
  29 + bio_for_each_segment(bv, bio, iter)
  30 + clone->bi_io_vec[clone->bi_vcnt++] = bv;
31 31  
32 32 clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
33 33 clone->bi_bdev = bio->bi_bdev;
... ... @@ -73,8 +73,9 @@
73 73 struct bio *bch_bio_split(struct bio *bio, int sectors,
74 74 gfp_t gfp, struct bio_set *bs)
75 75 {
76   - unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9;
77   - struct bio_vec *bv;
  76 + unsigned vcnt = 0, nbytes = sectors << 9;
  77 + struct bio_vec bv;
  78 + struct bvec_iter iter;
78 79 struct bio *ret = NULL;
79 80  
80 81 BUG_ON(sectors <= 0);
81 82  
82 83  
83 84  
84 85  
85 86  
86 87  
87 88  
88 89  
89 90  
90 91  
... ... @@ -86,50 +87,36 @@
86 87 ret = bio_alloc_bioset(gfp, 1, bs);
87 88 if (!ret)
88 89 return NULL;
89   - idx = 0;
90 90 goto out;
91 91 }
92 92  
93   - bio_for_each_segment(bv, bio, idx) {
94   - vcnt = idx - bio->bi_iter.bi_idx;
  93 + bio_for_each_segment(bv, bio, iter) {
  94 + vcnt++;
95 95  
96   - if (!nbytes) {
97   - ret = bio_alloc_bioset(gfp, vcnt, bs);
98   - if (!ret)
99   - return NULL;
  96 + if (nbytes <= bv.bv_len)
  97 + break;
100 98  
101   - memcpy(ret->bi_io_vec, __bio_iovec(bio),
102   - sizeof(struct bio_vec) * vcnt);
  99 + nbytes -= bv.bv_len;
  100 + }
103 101  
104   - break;
105   - } else if (nbytes < bv->bv_len) {
106   - ret = bio_alloc_bioset(gfp, ++vcnt, bs);
107   - if (!ret)
108   - return NULL;
  102 + ret = bio_alloc_bioset(gfp, vcnt, bs);
  103 + if (!ret)
  104 + return NULL;
109 105  
110   - memcpy(ret->bi_io_vec, __bio_iovec(bio),
111   - sizeof(struct bio_vec) * vcnt);
  106 + bio_for_each_segment(bv, bio, iter) {
  107 + ret->bi_io_vec[ret->bi_vcnt++] = bv;
112 108  
113   - ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
114   - bv->bv_offset += nbytes;
115   - bv->bv_len -= nbytes;
  109 + if (ret->bi_vcnt == vcnt)
116 110 break;
117   - }
118   -
119   - nbytes -= bv->bv_len;
120 111 }
  112 +
  113 + ret->bi_io_vec[ret->bi_vcnt - 1].bv_len = nbytes;
121 114 out:
122 115 ret->bi_bdev = bio->bi_bdev;
123 116 ret->bi_iter.bi_sector = bio->bi_iter.bi_sector;
124 117 ret->bi_iter.bi_size = sectors << 9;
125 118 ret->bi_rw = bio->bi_rw;
126   - ret->bi_vcnt = vcnt;
127   - ret->bi_max_vecs = vcnt;
128 119  
129   - bio->bi_iter.bi_sector += sectors;
130   - bio->bi_iter.bi_size -= sectors << 9;
131   - bio->bi_iter.bi_idx = idx;
132   -
133 120 if (bio_integrity(bio)) {
134 121 if (bio_integrity_clone(ret, bio, gfp)) {
135 122 bio_put(ret);
136 123  
... ... @@ -137,9 +124,10 @@
137 124 }
138 125  
139 126 bio_integrity_trim(ret, 0, bio_sectors(ret));
140   - bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
141 127 }
142 128  
  129 + bio_advance(bio, ret->bi_iter.bi_size);
  130 +
143 131 return ret;
144 132 }
145 133  
146 134  
... ... @@ -155,12 +143,13 @@
155 143  
156 144 if (bio_segments(bio) > max_segments ||
157 145 q->merge_bvec_fn) {
158   - struct bio_vec *bv;
159   - int i, seg = 0;
  146 + struct bio_vec bv;
  147 + struct bvec_iter iter;
  148 + unsigned seg = 0;
160 149  
161 150 ret = 0;
162 151  
163   - bio_for_each_segment(bv, bio, i) {
  152 + bio_for_each_segment(bv, bio, iter) {
164 153 struct bvec_merge_data bvm = {
165 154 .bi_bdev = bio->bi_bdev,
166 155 .bi_sector = bio->bi_iter.bi_sector,
167 156  
... ... @@ -172,11 +161,11 @@
172 161 break;
173 162  
174 163 if (q->merge_bvec_fn &&
175   - q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
  164 + q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
176 165 break;
177 166  
178 167 seg++;
179   - ret += bv->bv_len >> 9;
  168 + ret += bv.bv_len >> 9;
180 169 }
181 170 }
182 171  
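
The bch_bio_split() rewrite above stops adjusting bi_sector, bi_size
and bi_idx by hand on the parent bio and calls bio_advance() instead.
Roughly, the helper does the equivalent of the following (simplified
sketch; the real thing also steps bi_idx/bi_bvec_done across partially
consumed segments and advances any integrity payload):

        static void bio_advance_sketch(struct bio *bio, unsigned int nbytes)
        {
                bio->bi_iter.bi_sector += nbytes >> 9;
                bio->bi_iter.bi_size -= nbytes;
                /* ...per-segment bookkeeping omitted here */
        }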
drivers/md/bcache/request.c
... ... @@ -198,14 +198,14 @@
198 198  
199 199 static void bio_csum(struct bio *bio, struct bkey *k)
200 200 {
201   - struct bio_vec *bv;
  201 + struct bio_vec bv;
  202 + struct bvec_iter iter;
202 203 uint64_t csum = 0;
203   - int i;
204 204  
205   - bio_for_each_segment(bv, bio, i) {
206   - void *d = kmap(bv->bv_page) + bv->bv_offset;
207   - csum = bch_crc64_update(csum, d, bv->bv_len);
208   - kunmap(bv->bv_page);
  205 + bio_for_each_segment(bv, bio, iter) {
  206 + void *d = kmap(bv.bv_page) + bv.bv_offset;
  207 + csum = bch_crc64_update(csum, d, bv.bv_len);
  208 + kunmap(bv.bv_page);
209 209 }
210 210  
211 211 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
212 212  
213 213  
... ... @@ -1182,17 +1182,17 @@
1182 1182 static int flash_dev_cache_miss(struct btree *b, struct search *s,
1183 1183 struct bio *bio, unsigned sectors)
1184 1184 {
1185   - struct bio_vec *bv;
1186   - int i;
  1185 + struct bio_vec bv;
  1186 + struct bvec_iter iter;
1187 1187  
1188 1188 /* Zero fill bio */
1189 1189  
1190   - bio_for_each_segment(bv, bio, i) {
1191   - unsigned j = min(bv->bv_len >> 9, sectors);
  1190 + bio_for_each_segment(bv, bio, iter) {
  1191 + unsigned j = min(bv.bv_len >> 9, sectors);
1192 1192  
1193   - void *p = kmap(bv->bv_page);
1194   - memset(p + bv->bv_offset, 0, j << 9);
1195   - kunmap(bv->bv_page);
  1193 + void *p = kmap(bv.bv_page);
  1194 + memset(p + bv.bv_offset, 0, j << 9);
  1195 + kunmap(bv.bv_page);
1196 1196  
1197 1197 sectors -= j;
1198 1198 }
... ... @@ -937,9 +937,9 @@
937 937 async_copy_data(int frombio, struct bio *bio, struct page *page,
938 938 sector_t sector, struct dma_async_tx_descriptor *tx)
939 939 {
940   - struct bio_vec *bvl;
  940 + struct bio_vec bvl;
  941 + struct bvec_iter iter;
941 942 struct page *bio_page;
942   - int i;
943 943 int page_offset;
944 944 struct async_submit_ctl submit;
945 945 enum async_tx_flags flags = 0;
... ... @@ -953,8 +953,8 @@
953 953 flags |= ASYNC_TX_FENCE;
954 954 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
955 955  
956   - bio_for_each_segment(bvl, bio, i) {
957   - int len = bvl->bv_len;
  956 + bio_for_each_segment(bvl, bio, iter) {
  957 + int len = bvl.bv_len;
958 958 int clen;
959 959 int b_offset = 0;
960 960  
... ... @@ -970,8 +970,8 @@
970 970 clen = len;
971 971  
972 972 if (clen > 0) {
973   - b_offset += bvl->bv_offset;
974   - bio_page = bvl->bv_page;
  973 + b_offset += bvl.bv_offset;
  974 + bio_page = bvl.bv_page;
975 975 if (frombio)
976 976 tx = async_memcpy(page, bio_page, page_offset,
977 977 b_offset, clen, &submit);
drivers/s390/block/dasd_diag.c
... ... @@ -504,7 +504,7 @@
504 504 struct dasd_diag_req *dreq;
505 505 struct dasd_diag_bio *dbio;
506 506 struct req_iterator iter;
507   - struct bio_vec *bv;
  507 + struct bio_vec bv;
508 508 char *dst;
509 509 unsigned int count, datasize;
510 510 sector_t recid, first_rec, last_rec;
511 511  
... ... @@ -525,10 +525,10 @@
525 525 /* Check struct bio and count the number of blocks for the request. */
526 526 count = 0;
527 527 rq_for_each_segment(bv, req, iter) {
528   - if (bv->bv_len & (blksize - 1))
  528 + if (bv.bv_len & (blksize - 1))
529 529 /* Fba can only do full blocks. */
530 530 return ERR_PTR(-EINVAL);
531   - count += bv->bv_len >> (block->s2b_shift + 9);
  531 + count += bv.bv_len >> (block->s2b_shift + 9);
532 532 }
533 533 /* Paranoia. */
534 534 if (count != last_rec - first_rec + 1)
... ... @@ -545,8 +545,8 @@
545 545 dbio = dreq->bio;
546 546 recid = first_rec;
547 547 rq_for_each_segment(bv, req, iter) {
548   - dst = page_address(bv->bv_page) + bv->bv_offset;
549   - for (off = 0; off < bv->bv_len; off += blksize) {
  548 + dst = page_address(bv.bv_page) + bv.bv_offset;
  549 + for (off = 0; off < bv.bv_len; off += blksize) {
550 550 memset(dbio, 0, sizeof (struct dasd_diag_bio));
551 551 dbio->type = rw_cmd;
552 552 dbio->block_number = recid + 1;
drivers/s390/block/dasd_eckd.c
... ... @@ -2551,7 +2551,7 @@
2551 2551 struct dasd_ccw_req *cqr;
2552 2552 struct ccw1 *ccw;
2553 2553 struct req_iterator iter;
2554   - struct bio_vec *bv;
  2554 + struct bio_vec bv;
2555 2555 char *dst;
2556 2556 unsigned int off;
2557 2557 int count, cidaw, cplength, datasize;
2558 2558  
2559 2559  
... ... @@ -2573,13 +2573,13 @@
2573 2573 count = 0;
2574 2574 cidaw = 0;
2575 2575 rq_for_each_segment(bv, req, iter) {
2576   - if (bv->bv_len & (blksize - 1))
  2576 + if (bv.bv_len & (blksize - 1))
2577 2577 /* Eckd can only do full blocks. */
2578 2578 return ERR_PTR(-EINVAL);
2579   - count += bv->bv_len >> (block->s2b_shift + 9);
  2579 + count += bv.bv_len >> (block->s2b_shift + 9);
2580 2580 #if defined(CONFIG_64BIT)
2581   - if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
2582   - cidaw += bv->bv_len >> (block->s2b_shift + 9);
  2581 + if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
  2582 + cidaw += bv.bv_len >> (block->s2b_shift + 9);
2583 2583 #endif
2584 2584 }
2585 2585 /* Paranoia. */
2586 2586  
2587 2587  
2588 2588  
... ... @@ -2650,16 +2650,16 @@
2650 2650 last_rec - recid + 1, cmd, basedev, blksize);
2651 2651 }
2652 2652 rq_for_each_segment(bv, req, iter) {
2653   - dst = page_address(bv->bv_page) + bv->bv_offset;
  2653 + dst = page_address(bv.bv_page) + bv.bv_offset;
2654 2654 if (dasd_page_cache) {
2655 2655 char *copy = kmem_cache_alloc(dasd_page_cache,
2656 2656 GFP_DMA | __GFP_NOWARN);
2657 2657 if (copy && rq_data_dir(req) == WRITE)
2658   - memcpy(copy + bv->bv_offset, dst, bv->bv_len);
  2658 + memcpy(copy + bv.bv_offset, dst, bv.bv_len);
2659 2659 if (copy)
2660   - dst = copy + bv->bv_offset;
  2660 + dst = copy + bv.bv_offset;
2661 2661 }
2662   - for (off = 0; off < bv->bv_len; off += blksize) {
  2662 + for (off = 0; off < bv.bv_len; off += blksize) {
2663 2663 sector_t trkid = recid;
2664 2664 unsigned int recoffs = sector_div(trkid, blk_per_trk);
2665 2665 rcmd = cmd;
... ... @@ -2735,7 +2735,7 @@
2735 2735 struct dasd_ccw_req *cqr;
2736 2736 struct ccw1 *ccw;
2737 2737 struct req_iterator iter;
2738   - struct bio_vec *bv;
  2738 + struct bio_vec bv;
2739 2739 char *dst, *idaw_dst;
2740 2740 unsigned int cidaw, cplength, datasize;
2741 2741 unsigned int tlf;
... ... @@ -2813,8 +2813,8 @@
2813 2813 idaw_dst = NULL;
2814 2814 idaw_len = 0;
2815 2815 rq_for_each_segment(bv, req, iter) {
2816   - dst = page_address(bv->bv_page) + bv->bv_offset;
2817   - seg_len = bv->bv_len;
  2816 + dst = page_address(bv.bv_page) + bv.bv_offset;
  2817 + seg_len = bv.bv_len;
2818 2818 while (seg_len) {
2819 2819 if (new_track) {
2820 2820 trkid = recid;
... ... @@ -3039,7 +3039,7 @@
3039 3039 {
3040 3040 struct dasd_ccw_req *cqr;
3041 3041 struct req_iterator iter;
3042   - struct bio_vec *bv;
  3042 + struct bio_vec bv;
3043 3043 char *dst;
3044 3044 unsigned int trkcount, ctidaw;
3045 3045 unsigned char cmd;
... ... @@ -3125,8 +3125,8 @@
3125 3125 new_track = 1;
3126 3126 recid = first_rec;
3127 3127 rq_for_each_segment(bv, req, iter) {
3128   - dst = page_address(bv->bv_page) + bv->bv_offset;
3129   - seg_len = bv->bv_len;
  3128 + dst = page_address(bv.bv_page) + bv.bv_offset;
  3129 + seg_len = bv.bv_len;
3130 3130 while (seg_len) {
3131 3131 if (new_track) {
3132 3132 trkid = recid;
3133 3133  
... ... @@ -3158,9 +3158,9 @@
3158 3158 }
3159 3159 } else {
3160 3160 rq_for_each_segment(bv, req, iter) {
3161   - dst = page_address(bv->bv_page) + bv->bv_offset;
  3161 + dst = page_address(bv.bv_page) + bv.bv_offset;
3162 3162 last_tidaw = itcw_add_tidaw(itcw, 0x00,
3163   - dst, bv->bv_len);
  3163 + dst, bv.bv_len);
3164 3164 if (IS_ERR(last_tidaw)) {
3165 3165 ret = -EINVAL;
3166 3166 goto out_error;
... ... @@ -3276,7 +3276,7 @@
3276 3276 struct dasd_ccw_req *cqr;
3277 3277 struct ccw1 *ccw;
3278 3278 struct req_iterator iter;
3279   - struct bio_vec *bv;
  3279 + struct bio_vec bv;
3280 3280 char *dst;
3281 3281 unsigned char cmd;
3282 3282 unsigned int trkcount;
... ... @@ -3376,8 +3376,8 @@
3376 3376 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
3377 3377 }
3378 3378 rq_for_each_segment(bv, req, iter) {
3379   - dst = page_address(bv->bv_page) + bv->bv_offset;
3380   - seg_len = bv->bv_len;
  3379 + dst = page_address(bv.bv_page) + bv.bv_offset;
  3380 + seg_len = bv.bv_len;
3381 3381 if (cmd == DASD_ECKD_CCW_READ_TRACK)
3382 3382 memset(dst, 0, seg_len);
3383 3383 if (!len_to_track_end) {
... ... @@ -3422,7 +3422,7 @@
3422 3422 struct dasd_eckd_private *private;
3423 3423 struct ccw1 *ccw;
3424 3424 struct req_iterator iter;
3425   - struct bio_vec *bv;
  3425 + struct bio_vec bv;
3426 3426 char *dst, *cda;
3427 3427 unsigned int blksize, blk_per_trk, off;
3428 3428 sector_t recid;
... ... @@ -3440,8 +3440,8 @@
3440 3440 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
3441 3441 ccw++;
3442 3442 rq_for_each_segment(bv, req, iter) {
3443   - dst = page_address(bv->bv_page) + bv->bv_offset;
3444   - for (off = 0; off < bv->bv_len; off += blksize) {
  3443 + dst = page_address(bv.bv_page) + bv.bv_offset;
  3444 + for (off = 0; off < bv.bv_len; off += blksize) {
3445 3445 /* Skip locate record. */
3446 3446 if (private->uses_cdl && recid <= 2*blk_per_trk)
3447 3447 ccw++;
... ... @@ -3452,7 +3452,7 @@
3452 3452 cda = (char *)((addr_t) ccw->cda);
3453 3453 if (dst != cda) {
3454 3454 if (rq_data_dir(req) == READ)
3455   - memcpy(dst, cda, bv->bv_len);
  3455 + memcpy(dst, cda, bv.bv_len);
3456 3456 kmem_cache_free(dasd_page_cache,
3457 3457 (void *)((addr_t)cda & PAGE_MASK));
3458 3458 }
drivers/s390/block/dasd_fba.c
... ... @@ -260,7 +260,7 @@
260 260 struct dasd_ccw_req *cqr;
261 261 struct ccw1 *ccw;
262 262 struct req_iterator iter;
263   - struct bio_vec *bv;
  263 + struct bio_vec bv;
264 264 char *dst;
265 265 int count, cidaw, cplength, datasize;
266 266 sector_t recid, first_rec, last_rec;
267 267  
268 268  
... ... @@ -283,13 +283,13 @@
283 283 count = 0;
284 284 cidaw = 0;
285 285 rq_for_each_segment(bv, req, iter) {
286   - if (bv->bv_len & (blksize - 1))
  286 + if (bv.bv_len & (blksize - 1))
287 287 /* Fba can only do full blocks. */
288 288 return ERR_PTR(-EINVAL);
289   - count += bv->bv_len >> (block->s2b_shift + 9);
  289 + count += bv.bv_len >> (block->s2b_shift + 9);
290 290 #if defined(CONFIG_64BIT)
291   - if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
292   - cidaw += bv->bv_len / blksize;
  291 + if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
  292 + cidaw += bv.bv_len / blksize;
293 293 #endif
294 294 }
295 295 /* Paranoia. */
296 296  
297 297  
298 298  
... ... @@ -326,16 +326,16 @@
326 326 }
327 327 recid = first_rec;
328 328 rq_for_each_segment(bv, req, iter) {
329   - dst = page_address(bv->bv_page) + bv->bv_offset;
  329 + dst = page_address(bv.bv_page) + bv.bv_offset;
330 330 if (dasd_page_cache) {
331 331 char *copy = kmem_cache_alloc(dasd_page_cache,
332 332 GFP_DMA | __GFP_NOWARN);
333 333 if (copy && rq_data_dir(req) == WRITE)
334   - memcpy(copy + bv->bv_offset, dst, bv->bv_len);
  334 + memcpy(copy + bv.bv_offset, dst, bv.bv_len);
335 335 if (copy)
336   - dst = copy + bv->bv_offset;
  336 + dst = copy + bv.bv_offset;
337 337 }
338   - for (off = 0; off < bv->bv_len; off += blksize) {
  338 + for (off = 0; off < bv.bv_len; off += blksize) {
339 339 /* Locate record for stupid devices. */
340 340 if (private->rdc_data.mode.bits.data_chain == 0) {
341 341 ccw[-1].flags |= CCW_FLAG_CC;
... ... @@ -384,7 +384,7 @@
384 384 struct dasd_fba_private *private;
385 385 struct ccw1 *ccw;
386 386 struct req_iterator iter;
387   - struct bio_vec *bv;
  387 + struct bio_vec bv;
388 388 char *dst, *cda;
389 389 unsigned int blksize, off;
390 390 int status;
... ... @@ -399,8 +399,8 @@
399 399 if (private->rdc_data.mode.bits.data_chain != 0)
400 400 ccw++;
401 401 rq_for_each_segment(bv, req, iter) {
402   - dst = page_address(bv->bv_page) + bv->bv_offset;
403   - for (off = 0; off < bv->bv_len; off += blksize) {
  402 + dst = page_address(bv.bv_page) + bv.bv_offset;
  403 + for (off = 0; off < bv.bv_len; off += blksize) {
404 404 /* Skip locate record. */
405 405 if (private->rdc_data.mode.bits.data_chain == 0)
406 406 ccw++;
... ... @@ -411,7 +411,7 @@
411 411 cda = (char *)((addr_t) ccw->cda);
412 412 if (dst != cda) {
413 413 if (rq_data_dir(req) == READ)
414   - memcpy(dst, cda, bv->bv_len);
  414 + memcpy(dst, cda, bv.bv_len);
415 415 kmem_cache_free(dasd_page_cache,
416 416 (void *)((addr_t)cda & PAGE_MASK));
417 417 }
drivers/s390/block/dcssblk.c
... ... @@ -808,12 +808,12 @@
808 808 dcssblk_make_request(struct request_queue *q, struct bio *bio)
809 809 {
810 810 struct dcssblk_dev_info *dev_info;
811   - struct bio_vec *bvec;
  811 + struct bio_vec bvec;
  812 + struct bvec_iter iter;
812 813 unsigned long index;
813 814 unsigned long page_addr;
814 815 unsigned long source_addr;
815 816 unsigned long bytes_done;
816   - int i;
817 817  
818 818 bytes_done = 0;
819 819 dev_info = bio->bi_bdev->bd_disk->private_data;
820 820  
821 821  
822 822  
823 823  
824 824  
... ... @@ -844,21 +844,21 @@
844 844 }
845 845  
846 846 index = (bio->bi_iter.bi_sector >> 3);
847   - bio_for_each_segment(bvec, bio, i) {
  847 + bio_for_each_segment(bvec, bio, iter) {
848 848 page_addr = (unsigned long)
849   - page_address(bvec->bv_page) + bvec->bv_offset;
  849 + page_address(bvec.bv_page) + bvec.bv_offset;
850 850 source_addr = dev_info->start + (index<<12) + bytes_done;
851   - if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
  851 + if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
852 852 // More paranoia.
853 853 goto fail;
854 854 if (bio_data_dir(bio) == READ) {
855 855 memcpy((void*)page_addr, (void*)source_addr,
856   - bvec->bv_len);
  856 + bvec.bv_len);
857 857 } else {
858 858 memcpy((void*)source_addr, (void*)page_addr,
859   - bvec->bv_len);
  859 + bvec.bv_len);
860 860 }
861   - bytes_done += bvec->bv_len;
  861 + bytes_done += bvec.bv_len;
862 862 }
863 863 bio_endio(bio, 0);
864 864 return;
drivers/s390/block/scm_blk.c
... ... @@ -130,7 +130,7 @@
130 130 struct aidaw *aidaw = scmrq->aidaw;
131 131 struct msb *msb = &scmrq->aob->msb[0];
132 132 struct req_iterator iter;
133   - struct bio_vec *bv;
  133 + struct bio_vec bv;
134 134  
135 135 msb->bs = MSB_BS_4K;
136 136 scmrq->aob->request.msb_count = 1;
... ... @@ -142,9 +142,9 @@
142 142 msb->data_addr = (u64) aidaw;
143 143  
144 144 rq_for_each_segment(bv, scmrq->request, iter) {
145   - WARN_ON(bv->bv_offset);
146   - msb->blk_count += bv->bv_len >> 12;
147   - aidaw->data_addr = (u64) page_address(bv->bv_page);
  145 + WARN_ON(bv.bv_offset);
  146 + msb->blk_count += bv.bv_len >> 12;
  147 + aidaw->data_addr = (u64) page_address(bv.bv_page);
148 148 aidaw++;
149 149 }
150 150 }
drivers/s390/block/scm_blk_cluster.c
... ... @@ -122,7 +122,7 @@
122 122 struct aidaw *aidaw = scmrq->aidaw;
123 123 struct msb *msb = &scmrq->aob->msb[0];
124 124 struct req_iterator iter;
125   - struct bio_vec *bv;
  125 + struct bio_vec bv;
126 126 int i = 0;
127 127 u64 addr;
128 128  
... ... @@ -163,7 +163,7 @@
163 163 i++;
164 164 }
165 165 rq_for_each_segment(bv, req, iter) {
166   - aidaw->data_addr = (u64) page_address(bv->bv_page);
  166 + aidaw->data_addr = (u64) page_address(bv.bv_page);
167 167 aidaw++;
168 168 i++;
169 169 }
drivers/s390/block/xpram.c
... ... @@ -184,11 +184,11 @@
184 184 static void xpram_make_request(struct request_queue *q, struct bio *bio)
185 185 {
186 186 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
187   - struct bio_vec *bvec;
  187 + struct bio_vec bvec;
  188 + struct bvec_iter iter;
188 189 unsigned int index;
189 190 unsigned long page_addr;
190 191 unsigned long bytes;
191   - int i;
192 192  
193 193 if ((bio->bi_iter.bi_sector & 7) != 0 ||
194 194 (bio->bi_iter.bi_size & 4095) != 0)
195 195  
... ... @@ -200,10 +200,10 @@
200 200 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
201 201 goto fail;
202 202 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
203   - bio_for_each_segment(bvec, bio, i) {
  203 + bio_for_each_segment(bvec, bio, iter) {
204 204 page_addr = (unsigned long)
205   - kmap(bvec->bv_page) + bvec->bv_offset;
206   - bytes = bvec->bv_len;
  205 + kmap(bvec.bv_page) + bvec.bv_offset;
  206 + bytes = bvec.bv_len;
207 207 if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
208 208 /* More paranoia. */
209 209 goto fail;
drivers/scsi/mpt2sas/mpt2sas_transport.c
... ... @@ -1901,7 +1901,7 @@
1901 1901 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
1902 1902 Mpi2SmpPassthroughRequest_t *mpi_request;
1903 1903 Mpi2SmpPassthroughReply_t *mpi_reply;
1904   - int rc, i;
  1904 + int rc;
1905 1905 u16 smid;
1906 1906 u32 ioc_state;
1907 1907 unsigned long timeleft;
... ... @@ -1916,7 +1916,8 @@
1916 1916 void *pci_addr_out = NULL;
1917 1917 u16 wait_state_count;
1918 1918 struct request *rsp = req->next_rq;
1919   - struct bio_vec *bvec = NULL;
  1919 + struct bio_vec bvec;
  1920 + struct bvec_iter iter;
1920 1921  
1921 1922 if (!rsp) {
1922 1923 printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
1923 1924  
... ... @@ -1955,11 +1956,11 @@
1955 1956 goto out;
1956 1957 }
1957 1958  
1958   - bio_for_each_segment(bvec, req->bio, i) {
  1959 + bio_for_each_segment(bvec, req->bio, iter) {
1959 1960 memcpy(pci_addr_out + offset,
1960   - page_address(bvec->bv_page) + bvec->bv_offset,
1961   - bvec->bv_len);
1962   - offset += bvec->bv_len;
  1961 + page_address(bvec.bv_page) + bvec.bv_offset,
  1962 + bvec.bv_len);
  1963 + offset += bvec.bv_len;
1963 1964 }
1964 1965 } else {
1965 1966 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1966 1967  
1967 1968  
... ... @@ -2106,19 +2107,19 @@
2106 2107 u32 offset = 0;
2107 2108 u32 bytes_to_copy =
2108 2109 le16_to_cpu(mpi_reply->ResponseDataLength);
2109   - bio_for_each_segment(bvec, rsp->bio, i) {
2110   - if (bytes_to_copy <= bvec->bv_len) {
2111   - memcpy(page_address(bvec->bv_page) +
2112   - bvec->bv_offset, pci_addr_in +
  2110 + bio_for_each_segment(bvec, rsp->bio, iter) {
  2111 + if (bytes_to_copy <= bvec.bv_len) {
  2112 + memcpy(page_address(bvec.bv_page) +
  2113 + bvec.bv_offset, pci_addr_in +
2113 2114 offset, bytes_to_copy);
2114 2115 break;
2115 2116 } else {
2116   - memcpy(page_address(bvec->bv_page) +
2117   - bvec->bv_offset, pci_addr_in +
2118   - offset, bvec->bv_len);
2119   - bytes_to_copy -= bvec->bv_len;
  2117 + memcpy(page_address(bvec.bv_page) +
  2118 + bvec.bv_offset, pci_addr_in +
  2119 + offset, bvec.bv_len);
  2120 + bytes_to_copy -= bvec.bv_len;
2120 2121 }
2121   - offset += bvec->bv_len;
  2122 + offset += bvec.bv_len;
2122 2123 }
2123 2124 }
2124 2125 } else {
drivers/scsi/mpt3sas/mpt3sas_transport.c
... ... @@ -1884,7 +1884,7 @@
1884 1884 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1885 1885 Mpi2SmpPassthroughRequest_t *mpi_request;
1886 1886 Mpi2SmpPassthroughReply_t *mpi_reply;
1887   - int rc, i;
  1887 + int rc;
1888 1888 u16 smid;
1889 1889 u32 ioc_state;
1890 1890 unsigned long timeleft;
... ... @@ -1898,7 +1898,8 @@
1898 1898 void *pci_addr_out = NULL;
1899 1899 u16 wait_state_count;
1900 1900 struct request *rsp = req->next_rq;
1901   - struct bio_vec *bvec = NULL;
  1901 + struct bio_vec bvec;
  1902 + struct bvec_iter iter;
1902 1903  
1903 1904 if (!rsp) {
1904 1905 pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
1905 1906  
... ... @@ -1938,11 +1939,11 @@
1938 1939 goto out;
1939 1940 }
1940 1941  
1941   - bio_for_each_segment(bvec, req->bio, i) {
  1942 + bio_for_each_segment(bvec, req->bio, iter) {
1942 1943 memcpy(pci_addr_out + offset,
1943   - page_address(bvec->bv_page) + bvec->bv_offset,
1944   - bvec->bv_len);
1945   - offset += bvec->bv_len;
  1944 + page_address(bvec.bv_page) + bvec.bv_offset,
  1945 + bvec.bv_len);
  1946 + offset += bvec.bv_len;
1946 1947 }
1947 1948 } else {
1948 1949 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
1949 1950  
1950 1951  
... ... @@ -2067,19 +2068,19 @@
2067 2068 u32 offset = 0;
2068 2069 u32 bytes_to_copy =
2069 2070 le16_to_cpu(mpi_reply->ResponseDataLength);
2070   - bio_for_each_segment(bvec, rsp->bio, i) {
2071   - if (bytes_to_copy <= bvec->bv_len) {
2072   - memcpy(page_address(bvec->bv_page) +
2073   - bvec->bv_offset, pci_addr_in +
  2071 + bio_for_each_segment(bvec, rsp->bio, iter) {
  2072 + if (bytes_to_copy <= bvec.bv_len) {
  2073 + memcpy(page_address(bvec.bv_page) +
  2074 + bvec.bv_offset, pci_addr_in +
2074 2075 offset, bytes_to_copy);
2075 2076 break;
2076 2077 } else {
2077   - memcpy(page_address(bvec->bv_page) +
2078   - bvec->bv_offset, pci_addr_in +
2079   - offset, bvec->bv_len);
2080   - bytes_to_copy -= bvec->bv_len;
  2078 + memcpy(page_address(bvec.bv_page) +
  2079 + bvec.bv_offset, pci_addr_in +
  2080 + offset, bvec.bv_len);
  2081 + bytes_to_copy -= bvec.bv_len;
2081 2082 }
2082   - offset += bvec->bv_len;
  2083 + offset += bvec.bv_len;
2083 2084 }
2084 2085 }
2085 2086 } else {
drivers/staging/lustre/lustre/llite/lloop.c
... ... @@ -194,10 +194,10 @@
194 194 struct cl_object *obj = ll_i2info(inode)->lli_clob;
195 195 pgoff_t offset;
196 196 int ret;
197   - int i;
198 197 int rw;
199 198 obd_count page_count = 0;
200   - struct bio_vec *bvec;
  199 + struct bio_vec bvec;
  200 + struct bvec_iter iter;
201 201 struct bio *bio;
202 202 ssize_t bytes;
203 203  
204 204  
205 205  
... ... @@ -221,14 +221,14 @@
221 221 LASSERT(rw == bio->bi_rw);
222 222  
223 223 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
224   - bio_for_each_segment(bvec, bio, i) {
225   - BUG_ON(bvec->bv_offset != 0);
226   - BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
  224 + bio_for_each_segment(bvec, bio, iter) {
  225 + BUG_ON(bvec.bv_offset != 0);
  226 + BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
227 227  
228   - pages[page_count] = bvec->bv_page;
  228 + pages[page_count] = bvec.bv_page;
229 229 offsets[page_count] = offset;
230 230 page_count++;
231   - offset += bvec->bv_len;
  231 + offset += bvec.bv_len;
232 232 }
233 233 LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
234 234 }
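lloop only gathers page pointers and byte offsets for a later cl_io submission; the BUG_ON()s guarantee each segment is exactly one whole page. Note the starting position now comes from bio->bi_iter.bi_sector (shifted by 9 to convert sectors to bytes) rather than the old bi_sector field. Condensed, the walk is (pages[] and offsets[] as in the surrounding driver code):

        loff_t pos = (loff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
        struct bio_vec bvec;
        struct bvec_iter iter;

        bio_for_each_segment(bvec, bio, iter) {
                pages[page_count] = bvec.bv_page;
                offsets[page_count] = pos;
                page_count++;
                pos += bvec.bv_len;
        }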
drivers/staging/zram/zram_drv.c
... ... @@ -672,9 +672,10 @@
672 672  
673 673 static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
674 674 {
675   - int i, offset;
  675 + int offset;
676 676 u32 index;
677   - struct bio_vec *bvec;
  677 + struct bio_vec bvec;
  678 + struct bvec_iter iter;
678 679  
679 680 switch (rw) {
680 681 case READ:
681 682  
682 683  
683 684  
684 685  
685 686  
686 687  
... ... @@ -689,33 +690,33 @@
689 690 offset = (bio->bi_iter.bi_sector &
690 691 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
691 692  
692   - bio_for_each_segment(bvec, bio, i) {
  693 + bio_for_each_segment(bvec, bio, iter) {
693 694 int max_transfer_size = PAGE_SIZE - offset;
694 695  
695   - if (bvec->bv_len > max_transfer_size) {
  696 + if (bvec.bv_len > max_transfer_size) {
696 697 /*
697 698 * zram_bvec_rw() can only make operation on a single
698 699 * zram page. Split the bio vector.
699 700 */
700 701 struct bio_vec bv;
701 702  
702   - bv.bv_page = bvec->bv_page;
  703 + bv.bv_page = bvec.bv_page;
703 704 bv.bv_len = max_transfer_size;
704   - bv.bv_offset = bvec->bv_offset;
  705 + bv.bv_offset = bvec.bv_offset;
705 706  
706 707 if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
707 708 goto out;
708 709  
709   - bv.bv_len = bvec->bv_len - max_transfer_size;
  710 + bv.bv_len = bvec.bv_len - max_transfer_size;
710 711 bv.bv_offset += max_transfer_size;
711 712 if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
712 713 goto out;
713 714 } else
714   - if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
  715 + if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
715 716 < 0)
716 717 goto out;
717 718  
718   - update_position(&index, &offset, bvec);
  719 + update_position(&index, &offset, &bvec);
719 720 }
720 721  
721 722 set_bit(BIO_UPTODATE, &bio->bi_flags);
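zram_bvec_rw() operates on a single zram page, so a segment that straddles the page boundary is split into a local struct bio_vec; with the by-value iterator the unsplit case now passes &bvec, the address of the stack copy. The split, condensed (error handling elided, max_transfer_size = PAGE_SIZE - offset as above):

        struct bio_vec bv = bvec;       /* copy of the iterator's copy */

        bv.bv_len = max_transfer_size;                  /* head, up to page end */
        zram_bvec_rw(zram, &bv, index, offset, bio, rw);

        bv.bv_offset += max_transfer_size;              /* tail of the segment */
        bv.bv_len = bvec.bv_len - max_transfer_size;
        zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw);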
fs/bio-integrity.c
... ... @@ -299,25 +299,26 @@
299 299 {
300 300 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
301 301 struct blk_integrity_exchg bix;
302   - struct bio_vec *bv;
  302 + struct bio_vec bv;
  303 + struct bvec_iter iter;
303 304 sector_t sector = bio->bi_iter.bi_sector;
304   - unsigned int i, sectors, total;
  305 + unsigned int sectors, total;
305 306 void *prot_buf = bio->bi_integrity->bip_buf;
306 307  
307 308 total = 0;
308 309 bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
309 310 bix.sector_size = bi->sector_size;
310 311  
311   - bio_for_each_segment(bv, bio, i) {
312   - void *kaddr = kmap_atomic(bv->bv_page);
313   - bix.data_buf = kaddr + bv->bv_offset;
314   - bix.data_size = bv->bv_len;
  312 + bio_for_each_segment(bv, bio, iter) {
  313 + void *kaddr = kmap_atomic(bv.bv_page);
  314 + bix.data_buf = kaddr + bv.bv_offset;
  315 + bix.data_size = bv.bv_len;
315 316 bix.prot_buf = prot_buf;
316 317 bix.sector = sector;
317 318  
318 319 bi->generate_fn(&bix);
319 320  
320   - sectors = bv->bv_len / bi->sector_size;
  321 + sectors = bv.bv_len / bi->sector_size;
321 322 sector += sectors;
322 323 prot_buf += sectors * bi->tuple_size;
323 324 total += sectors * bi->tuple_size;
324 325  
325 326  
... ... @@ -441,19 +442,20 @@
441 442 {
442 443 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
443 444 struct blk_integrity_exchg bix;
444   - struct bio_vec *bv;
  445 + struct bio_vec bv;
  446 + struct bvec_iter iter;
445 447 sector_t sector = bio->bi_integrity->bip_sector;
446   - unsigned int i, sectors, total, ret;
  448 + unsigned int sectors, total, ret;
447 449 void *prot_buf = bio->bi_integrity->bip_buf;
448 450  
449 451 ret = total = 0;
450 452 bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
451 453 bix.sector_size = bi->sector_size;
452 454  
453   - bio_for_each_segment(bv, bio, i) {
454   - void *kaddr = kmap_atomic(bv->bv_page);
455   - bix.data_buf = kaddr + bv->bv_offset;
456   - bix.data_size = bv->bv_len;
  455 + bio_for_each_segment(bv, bio, iter) {
  456 + void *kaddr = kmap_atomic(bv.bv_page);
  457 + bix.data_buf = kaddr + bv.bv_offset;
  458 + bix.data_size = bv.bv_len;
457 459 bix.prot_buf = prot_buf;
458 460 bix.sector = sector;
459 461  
... ... @@ -464,7 +466,7 @@
464 466 return ret;
465 467 }
466 468  
467   - sectors = bv->bv_len / bi->sector_size;
  469 + sectors = bv.bv_len / bi->sector_size;
468 470 sector += sectors;
469 471 prot_buf += sectors * bi->tuple_size;
470 472 total += sectors * bi->tuple_size;
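Both the generate and verify paths map each data segment with kmap_atomic() and hand it to the integrity profile alongside a sliding window into the protection buffer; the matching kunmap_atomic() calls sit just outside the context shown. The per-segment skeleton, with process() standing in for bi->generate_fn()/verify_fn():

        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter) {
                void *kaddr = kmap_atomic(bv.bv_page);
                unsigned int sectors = bv.bv_len / bi->sector_size;

                process(kaddr + bv.bv_offset, bv.bv_len);
                kunmap_atomic(kaddr);

                sector += sectors;
                prot_buf += sectors * bi->tuple_size;
        }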
fs/bio.c
... ... @@ -473,13 +473,13 @@
473 473 void zero_fill_bio(struct bio *bio)
474 474 {
475 475 unsigned long flags;
476   - struct bio_vec *bv;
477   - int i;
  476 + struct bio_vec bv;
  477 + struct bvec_iter iter;
478 478  
479   - bio_for_each_segment(bv, bio, i) {
480   - char *data = bvec_kmap_irq(bv, &flags);
481   - memset(data, 0, bv->bv_len);
482   - flush_dcache_page(bv->bv_page);
  479 + bio_for_each_segment(bv, bio, iter) {
  480 + char *data = bvec_kmap_irq(&bv, &flags);
  481 + memset(data, 0, bv.bv_len);
  482 + flush_dcache_page(bv.bv_page);
483 483 bvec_kunmap_irq(data, &flags);
484 484 }
485 485 }
486 486  
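bvec_kmap_irq() still takes a bio_vec pointer, so zero_fill_bio() passes &bv, the address of the iterator's stack copy. The same walk generalizes to any per-segment fill; for instance, a hypothetical poison-fill variant (the 0xa5 pattern is illustrative only):

        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned long flags;

        bio_for_each_segment(bv, bio, iter) {
                char *data = bvec_kmap_irq(&bv, &flags);  /* note: &bv */

                memset(data, 0xa5, bv.bv_len);
                flush_dcache_page(bv.bv_page);
                bvec_kunmap_irq(data, &flags);
        }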
... ... @@ -1687,11 +1687,11 @@
1687 1687 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1688 1688 void bio_flush_dcache_pages(struct bio *bi)
1689 1689 {
1690   - int i;
1691   - struct bio_vec *bvec;
  1690 + struct bio_vec bvec;
  1691 + struct bvec_iter iter;
1692 1692  
1693   - bio_for_each_segment(bvec, bi, i)
1694   - flush_dcache_page(bvec->bv_page);
  1693 + bio_for_each_segment(bvec, bi, iter)
  1694 + flush_dcache_page(bvec.bv_page);
1695 1695 }
1696 1696 EXPORT_SYMBOL(bio_flush_dcache_pages);
1697 1697 #endif
... ... @@ -1840,7 +1840,7 @@
1840 1840 bio->bi_iter.bi_idx = 0;
1841 1841 }
1842 1842 /* Make sure vcnt and last bv are not too big */
1843   - bio_for_each_segment(bvec, bio, i) {
  1843 + bio_for_each_segment_all(bvec, bio, i) {
1844 1844 if (sofar + bvec->bv_len > size)
1845 1845 bvec->bv_len = size - sofar;
1846 1846 if (bvec->bv_len == 0) {
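This last fs/bio.c hunk is the one site where the old macro was being used to mutate the biovec: bio_trim() shrinks bv_len in the bio's own bi_io_vec. The by-value bio_for_each_segment() cannot express that, so the code moves to bio_for_each_segment_all(), which still yields a pointer into the biovec and is reserved for code that owns the whole bio. Schematically (budget is a hypothetical name):

        struct bio_vec *bvec;   /* points into bio->bi_io_vec */
        int i;

        bio_for_each_segment_all(bvec, bio, i)
                if (bvec->bv_len > budget)
                        bvec->bv_len = budget;  /* mutation allowed here */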
include/linux/bio.h
... ... @@ -63,10 +63,13 @@
63 63 */
64 64 #define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)]))
65 65 #define __bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_iter.bi_idx)
66   -#define bio_iovec(bio) (*__bio_iovec(bio))
67 66  
  67 +#define bio_iter_iovec(bio, iter) ((bio)->bi_io_vec[(iter).bi_idx])
  68 +
68 69 #define bio_page(bio) (bio_iovec((bio)).bv_page)
69 70 #define bio_offset(bio) (bio_iovec((bio)).bv_offset)
  71 +#define bio_iovec(bio) (*__bio_iovec(bio))
  72 +
70 73 #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_iter.bi_idx)
71 74 #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
72 75 #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
... ... @@ -134,15 +137,6 @@
134 137 #define bio_io_error(bio) bio_endio((bio), -EIO)
135 138  
136 139 /*
137   - * drivers should not use the __ version unless they _really_ know what
138   - * they're doing
139   - */
140   -#define __bio_for_each_segment(bvl, bio, i, start_idx) \
141   - for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \
142   - i < (bio)->bi_vcnt; \
143   - bvl++, i++)
144   -
145   -/*
146 140 * drivers should _never_ use the all version - the bio may have been split
147 141 * before it got to the driver and the driver won't own all of it
148 142 */
... ... @@ -151,10 +145,16 @@
151 145 bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \
152 146 i++)
153 147  
154   -#define bio_for_each_segment(bvl, bio, i) \
155   - for (i = (bio)->bi_iter.bi_idx; \
156   - bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \
157   - i++)
  148 +#define __bio_for_each_segment(bvl, bio, iter, start) \
  149 + for (iter = (start); \
  150 + bvl = bio_iter_iovec((bio), (iter)), \
  151 + (iter).bi_idx < (bio)->bi_vcnt; \
  152 + (iter).bi_idx++)
  153 +
  154 +#define bio_for_each_segment(bvl, bio, iter) \
  155 + __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
  156 +
  157 +#define bio_iter_last(bio, iter) ((iter).bi_idx == (bio)->bi_vcnt - 1)
158 158  
159 159 /*
160 160 * get a reference to a bio, so it won't disappear. the intended use is
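The replacement macros make the starting state explicit: bio_for_each_segment() seeds its bvec_iter from bio->bi_iter, so a bio that has been partially advanced is iterated only over what remains, while __bio_for_each_segment() accepts any starting iterator. A caller-side sketch:

        struct bio_vec bv;
        struct bvec_iter iter;

        /* visits only the segments from bio->bi_iter onward */
        bio_for_each_segment(bv, bio, iter)
                pr_debug("seg: page %p off %u len %u last %d\n",
                         bv.bv_page, bv.bv_offset, bv.bv_len,
                         bio_iter_last(bio, iter));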
include/linux/blkdev.h
... ... @@ -735,7 +735,7 @@
735 735 };
736 736  
737 737 struct req_iterator {
738   - int i;
  738 + struct bvec_iter iter;
739 739 struct bio *bio;
740 740 };
741 741  
742 742  
... ... @@ -748,10 +748,11 @@
748 748  
749 749 #define rq_for_each_segment(bvl, _rq, _iter) \
750 750 __rq_for_each_bio(_iter.bio, _rq) \
751   - bio_for_each_segment(bvl, _iter.bio, _iter.i)
  751 + bio_for_each_segment(bvl, _iter.bio, _iter.iter)
752 752  
753 753 #define rq_iter_last(rq, _iter) \
754   - (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
  754 + (_iter.bio->bi_next == NULL && \
  755 + bio_iter_last(_iter.bio, _iter.iter))
755 756  
756 757 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
757 758 # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
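struct req_iterator now carries a bvec_iter rather than a bare index, and rq_iter_last() delegates to the new bio_iter_last(). Driver-side usage is unchanged apart from the by-value bio_vec; for example, summing a request's payload (a sketch, not from the commit):

        struct req_iterator iter;
        struct bio_vec bvec;
        unsigned int bytes = 0;

        rq_for_each_segment(bvec, rq, iter) {
                bytes += bvec.bv_len;
                if (rq_iter_last(rq, iter))
                        pr_debug("last segment of the request\n");
        }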
mm/bounce.c
... ... @@ -98,27 +98,24 @@
98 98 static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
99 99 {
100 100 unsigned char *vfrom;
101   - struct bio_vec *tovec, *fromvec;
102   - int i;
  101 + struct bio_vec tovec, *fromvec = from->bi_io_vec;
  102 + struct bvec_iter iter;
103 103  
104   - bio_for_each_segment(tovec, to, i) {
105   - fromvec = from->bi_io_vec + i;
  104 + bio_for_each_segment(tovec, to, iter) {
  105 + if (tovec.bv_page != fromvec->bv_page) {
  106 + /*
  107 + * fromvec->bv_offset and fromvec->bv_len might have
  108 + * been modified by the block layer, so use the original
  109 + * copy, bounce_copy_vec already uses tovec->bv_len
  110 + */
  111 + vfrom = page_address(fromvec->bv_page) +
  112 + tovec.bv_offset;
106 113  
107   - /*
108   - * not bounced
109   - */
110   - if (tovec->bv_page == fromvec->bv_page)
111   - continue;
  114 + bounce_copy_vec(&tovec, vfrom);
  115 + flush_dcache_page(tovec.bv_page);
  116 + }
112 117  
113   - /*
114   - * fromvec->bv_offset and fromvec->bv_len might have been
115   - * modified by the block layer, so use the original copy,
116   - * bounce_copy_vec already uses tovec->bv_len
117   - */
118   - vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
119   -
120   - bounce_copy_vec(tovec, vfrom);
121   - flush_dcache_page(tovec->bv_page);
  118 + fromvec++;
122 119 }
123 120 }
124 121  
125 122  
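copy_to_high_bio_irq() walks two biovecs in lockstep: the bounce bio through the iterator and the original through a raw pointer it advances by hand, since the loop index that used to compute from->bi_io_vec + i no longer exists. The direct indexing stays valid only because the bounce bio was built as a segment-for-segment copy of the original, so the two walks remain aligned. The skeleton:

        struct bio_vec tovec, *fromvec = from->bi_io_vec;
        struct bvec_iter iter;

        bio_for_each_segment(tovec, to, iter) {
                if (tovec.bv_page != fromvec->bv_page) {
                        /* bounced: copy the data back to the original page */
                }
                fromvec++;      /* manual lockstep with the iterator */
        }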
... ... @@ -201,13 +198,14 @@
201 198 {
202 199 struct bio *bio;
203 200 int rw = bio_data_dir(*bio_orig);
204   - struct bio_vec *to, *from;
  201 + struct bio_vec *to, from;
  202 + struct bvec_iter iter;
205 203 unsigned i;
206 204  
207 205 if (force)
208 206 goto bounce;
209   - bio_for_each_segment(from, *bio_orig, i)
210   - if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
  207 + bio_for_each_segment(from, *bio_orig, iter)
  208 + if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
211 209 goto bounce;
212 210  
213 211 return;
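__blk_queue_bounce() needs only a read-only scan to decide whether any segment sits above the queue's bounce limit. With the by-value iterator that test could be factored into a predicate, e.g. (hypothetical helper, not in the commit):

        static bool bio_needs_bounce(struct request_queue *q, struct bio *bio)
        {
                struct bio_vec bv;
                struct bvec_iter iter;

                bio_for_each_segment(bv, bio, iter)
                        if (page_to_pfn(bv.bv_page) > queue_bounce_pfn(q))
                                return true;

                return false;
        }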