Commit 458b76ed2f9517becb74dcc8eedd70d3068ea6e4
1 parent d57a5f7c66
Exists in master and in 16 other branches
block: Kill bio_segments()/bi_vcnt usage
When we start sharing biovecs, keeping bi_vcnt accurate for splits is
going to be error prone - and unnecessary, if we refactor some code. So
bio_segments() has to go - but most of the existing users just needed to
know if the bio had multiple segments, which is easier - add a
bio_multiple_segments() for them.

(Two of the current uses of bio_segments() are going to go away in a
couple patches, but the current implementation of bio_segments() is
unsafe as soon as we start doing driver conversions for immutable
biovecs - so implement a dumb version for bisectability, it'll go away
in a couple patches)

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Neil Brown <neilb@suse.de>
Cc: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
Cc: Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
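To make the conversion pattern concrete, here is a rough sketch (the caller below is hypothetical; only the bio_multiple_segments() definition is quoted from this patch). Drivers that only need to know whether a bio spans more than one segment stop counting segments and use the new iterator-based test instead:

    /* Helper added by this patch in include/linux/bio.h: a bio has more than
     * one segment left iff the bytes remaining in its iterator exceed the
     * length of the current bvec. */
    #define bio_multiple_segments(bio) \
            ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)

    /* Hypothetical driver check before this patch: */
    if (bio_segments(req->bio) > 1)         /* old macro: bi_vcnt - bi_idx */
            return -EINVAL;

    /* The same check after this patch: */
    if (bio_multiple_segments(req->bio))    /* no dependence on bi_vcnt */
            return -EINVAL;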
Showing 10 changed files with 94 additions and 87 deletions
drivers/block/ps3disk.c
@@ -101,10 +101,9 @@
 
         rq_for_each_segment(bvec, req, iter) {
                 unsigned long flags;
-                dev_dbg(&dev->sbd.core,
-                        "%s:%u: bio %u: %u segs %u sectors from %lu\n",
-                        __func__, __LINE__, i, bio_segments(iter.bio),
-                        bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector);
+                dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+                        __func__, __LINE__, i, bio_sectors(iter.bio),
+                        iter.bio->bi_iter.bi_sector);
 
                 size = bvec.bv_len;
                 buf = bvec_kmap_irq(&bvec, &flags);
drivers/md/bcache/io.c
@@ -24,7 +24,8 @@
         if (bio->bi_iter.bi_idx) {
                 struct bio_vec bv;
                 struct bvec_iter iter;
-                struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
+                unsigned segs = bio_segments(bio);
+                struct bio *clone = bio_alloc(GFP_NOIO, segs);
 
                 bio_for_each_segment(bv, bio, iter)
                         clone->bi_io_vec[clone->bi_vcnt++] = bv;
@@ -32,7 +33,7 @@
                 clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
                 clone->bi_bdev = bio->bi_bdev;
                 clone->bi_rw = bio->bi_rw;
-                clone->bi_vcnt = bio_segments(bio);
+                clone->bi_vcnt = segs;
                 clone->bi_iter.bi_size = bio->bi_iter.bi_size;
 
                 clone->bi_private = bio;
@@ -133,40 +134,32 @@
 
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
-        unsigned ret = bio_sectors(bio);
         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-        unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
-                                      queue_max_segments(q));
+        struct bio_vec bv;
+        struct bvec_iter iter;
+        unsigned ret = 0, seg = 0;
 
         if (bio->bi_rw & REQ_DISCARD)
-                return min(ret, q->limits.max_discard_sectors);
+                return min(bio_sectors(bio), q->limits.max_discard_sectors);
 
-        if (bio_segments(bio) > max_segments ||
-            q->merge_bvec_fn) {
-                struct bio_vec bv;
-                struct bvec_iter iter;
-                unsigned seg = 0;
+        bio_for_each_segment(bv, bio, iter) {
+                struct bvec_merge_data bvm = {
+                        .bi_bdev = bio->bi_bdev,
+                        .bi_sector = bio->bi_iter.bi_sector,
+                        .bi_size = ret << 9,
+                        .bi_rw = bio->bi_rw,
+                };
 
-                ret = 0;
+                if (seg == min_t(unsigned, BIO_MAX_PAGES,
+                                 queue_max_segments(q)))
+                        break;
 
-                bio_for_each_segment(bv, bio, iter) {
-                        struct bvec_merge_data bvm = {
-                                .bi_bdev = bio->bi_bdev,
-                                .bi_sector = bio->bi_iter.bi_sector,
-                                .bi_size = ret << 9,
-                                .bi_rw = bio->bi_rw,
-                        };
+                if (q->merge_bvec_fn &&
+                    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+                        break;
 
-                        if (seg == max_segments)
-                                break;
-
-                        if (q->merge_bvec_fn &&
-                            q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
-                                break;
-
-                        seg++;
-                        ret += bv.bv_len >> 9;
-                }
+                seg++;
+                ret += bv.bv_len >> 9;
         }
 
         ret = min(ret, queue_max_sectors(q));
drivers/md/raid0.c
@@ -528,7 +528,7 @@
                 sector_t sector = bio->bi_iter.bi_sector;
                 struct bio_pair *bp;
                 /* Sanity check -- queue functions should prevent this happening */
-                if (bio_segments(bio) > 1)
+                if (bio_multiple_segments(bio))
                         goto bad_map;
                 /* This is a one page bio that upper layers
                  * refuse to split for us, so we need to split it.
drivers/md/raid10.c
@@ -1188,7 +1188,7 @@
                      || conf->prev.near_copies < conf->prev.raid_disks))) {
                         struct bio_pair *bp;
                         /* Sanity check -- queue functions should prevent this happening */
-                        if (bio_segments(bio) > 1)
+                        if (bio_multiple_segments(bio))
                                 goto bad_map;
                         /* This is a one page bio that upper layers
                          * refuse to split for us, so we need to split it.
drivers/message/fusion/mptsas.c
@@ -2235,10 +2235,10 @@
         }
 
         /* do we need to support multiple segments? */
-        if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-                printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
-                       ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req),
-                       bio_segments(rsp->bio), blk_rq_bytes(rsp));
+        if (bio_multiple_segments(req->bio) ||
+            bio_multiple_segments(rsp->bio)) {
+                printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
+                       ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
                 return -EINVAL;
         }
 
drivers/scsi/libsas/sas_expander.c
@@ -2163,10 +2163,10 @@
         }
 
         /* do we need to support multiple segments? */
-        if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) {
-                printk("%s: multiple segments req %u %u, rsp %u %u\n",
-                       __func__, bio_segments(req->bio), blk_rq_bytes(req),
-                       bio_segments(rsp->bio), blk_rq_bytes(rsp));
+        if (bio_multiple_segments(req->bio) ||
+            bio_multiple_segments(rsp->bio)) {
+                printk("%s: multiple segments req %u, rsp %u\n",
+                       __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
                 return -EINVAL;
         }
 
drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1943,7 +1943,7 @@
         ioc->transport_cmds.status = MPT2_CMD_PENDING;
 
         /* Check if the request is split across multiple segments */
-        if (bio_segments(req->bio) > 1) {
+        if (bio_multiple_segments(req->bio)) {
                 u32 offset = 0;
 
                 /* Allocate memory and copy the request */
@@ -1975,7 +1975,7 @@
 
         /* Check if the response needs to be populated across
          * multiple segments */
-        if (bio_segments(rsp->bio) > 1) {
+        if (bio_multiple_segments(rsp->bio)) {
                 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
                     &pci_dma_in);
                 if (!pci_addr_in) {
@@ -2042,7 +2042,7 @@
         sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
             MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
         sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-        if (bio_segments(req->bio) > 1) {
+        if (bio_multiple_segments(req->bio)) {
                 ioc->base_add_sg_single(psge, sgl_flags |
                     (blk_rq_bytes(req) - 4), pci_dma_out);
         } else {
@@ -2058,7 +2058,7 @@
             MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
             MPI2_SGE_FLAGS_END_OF_LIST);
         sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
-        if (bio_segments(rsp->bio) > 1) {
+        if (bio_multiple_segments(rsp->bio)) {
                 ioc->base_add_sg_single(psge, sgl_flags |
                     (blk_rq_bytes(rsp) + 4), pci_dma_in);
         } else {
@@ -2103,7 +2103,7 @@
             le16_to_cpu(mpi_reply->ResponseDataLength);
         /* check if the resp needs to be copied from the allocated
          * pci mem */
-        if (bio_segments(rsp->bio) > 1) {
+        if (bio_multiple_segments(rsp->bio)) {
                 u32 offset = 0;
                 u32 bytes_to_copy =
                     le16_to_cpu(mpi_reply->ResponseDataLength);
drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1926,7 +1926,7 @@
         ioc->transport_cmds.status = MPT3_CMD_PENDING;
 
         /* Check if the request is split across multiple segments */
-        if (req->bio->bi_vcnt > 1) {
+        if (bio_multiple_segments(req->bio)) {
                 u32 offset = 0;
 
                 /* Allocate memory and copy the request */
@@ -1958,7 +1958,7 @@
 
         /* Check if the response needs to be populated across
          * multiple segments */
-        if (rsp->bio->bi_vcnt > 1) {
+        if (bio_multiple_segments(rsp->bio)) {
                 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
                     &pci_dma_in);
                 if (!pci_addr_in) {
@@ -2019,7 +2019,7 @@
         mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
         psge = &mpi_request->SGL;
 
-        if (req->bio->bi_vcnt > 1)
+        if (bio_multiple_segments(req->bio))
                 ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
                     pci_dma_in, (blk_rq_bytes(rsp) + 4));
         else
@@ -2064,7 +2064,7 @@
 
         /* check if the resp needs to be copied from the allocated
          * pci mem */
-        if (rsp->bio->bi_vcnt > 1) {
+        if (bio_multiple_segments(rsp->bio)) {
                 u32 offset = 0;
                 u32 bytes_to_copy =
                     le16_to_cpu(mpi_reply->ResponseDataLength);
fs/bio.c
@@ -1733,7 +1733,7 @@
         trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
                           bi->bi_iter.bi_sector + first_sectors);
 
-        BUG_ON(bio_segments(bi) > 1);
+        BUG_ON(bio_multiple_segments(bi));
         atomic_set(&bp->cnt, 3);
         bp->error = 0;
         bp->bio1 = *bi;
include/linux/bio.h
@@ -97,13 +97,46 @@
 #define bio_offset(bio)         bio_iter_offset((bio), (bio)->bi_iter)
 #define bio_iovec(bio)          bio_iter_iovec((bio), (bio)->bi_iter)
 
-#define bio_segments(bio)       ((bio)->bi_vcnt - (bio)->bi_iter.bi_idx)
+#define bio_multiple_segments(bio) \
+        ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
 #define bio_sectors(bio)        ((bio)->bi_iter.bi_size >> 9)
 #define bio_end_sector(bio)     ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
 
+/*
+ * Check whether this bio carries any data or not. A NULL bio is allowed.
+ */
+static inline bool bio_has_data(struct bio *bio)
+{
+        if (bio &&
+            bio->bi_iter.bi_size &&
+            !(bio->bi_rw & REQ_DISCARD))
+                return true;
+
+        return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+        if (!bio_has_data(bio))
+                return false;
+
+        if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+                return false;
+
+        return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+        if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+                return false;
+
+        return true;
+}
+
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
-        if (bio->bi_vcnt)
+        if (bio_has_data(bio))
                 return bio_iovec(bio).bv_len;
         else /* dataless requests such as discard */
                 return bio->bi_iter.bi_size;
@@ -111,7 +144,7 @@
 
 static inline void *bio_data(struct bio *bio)
 {
-        if (bio->bi_vcnt)
+        if (bio_has_data(bio))
                 return page_address(bio_page(bio)) + bio_offset(bio);
 
         return NULL;
@@ -221,6 +254,18 @@
 
 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
 
+static inline unsigned bio_segments(struct bio *bio)
+{
+        unsigned segs = 0;
+        struct bio_vec bv;
+        struct bvec_iter iter;
+
+        bio_for_each_segment(bv, bio, iter)
+                segs++;
+
+        return segs;
+}
+
 /*
  * get a reference to a bio, so it won't disappear. the intended use is
  * something like:
@@ -433,36 +478,6 @@
 #define bio_kmap_irq(bio, flags) \
         __bio_kmap_irq((bio), (bio)->bi_iter.bi_idx, (flags))
 #define bio_kunmap_irq(buf,flags)       __bio_kunmap_irq(buf, flags)
-
-/*
- * Check whether this bio carries any data or not. A NULL bio is allowed.
- */
-static inline bool bio_has_data(struct bio *bio)
-{
-        if (bio && bio->bi_vcnt)
-                return true;
-
-        return false;
-}
-
-static inline bool bio_is_rw(struct bio *bio)
-{
-        if (!bio_has_data(bio))
-                return false;
-
-        if (bio->bi_rw & REQ_WRITE_SAME)
-                return false;
-
-        return true;
-}
-
-static inline bool bio_mergeable(struct bio *bio)
-{
-        if (bio->bi_rw & REQ_NOMERGE_FLAGS)
-                return false;
-
-        return true;
-}
 
 /*
  * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
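A note on the include/linux/bio.h changes above: bio_has_data(), bio_cur_bytes() and bio_data() now key off bi_iter.bi_size rather than bi_vcnt, so they keep working once biovecs are shared and bi_vcnt no longer describes an individual bio's view of the data. The following standalone userspace mock (simplified stand-in types, not the kernel structures) illustrates the semantics of the new bio_multiple_segments() test as an iterator advances:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified mock of a bio with an iterator; NOT the kernel's struct bio. */
    struct mock_bvec { unsigned bv_len; };

    struct mock_bio {
            struct mock_bvec vecs[4];
            unsigned vcnt;       /* populated bvecs (analogue of bi_vcnt) */
            unsigned iter_idx;   /* current segment (analogue of bi_iter.bi_idx) */
            unsigned iter_size;  /* bytes remaining (analogue of bi_iter.bi_size) */
    };

    /* Mirrors the new macro: more than one segment remains iff more bytes are
     * left than the current segment holds. */
    static bool mock_bio_multiple_segments(const struct mock_bio *bio)
    {
            return bio->iter_size != bio->vecs[bio->iter_idx].bv_len;
    }

    int main(void)
    {
            /* Two 4 KiB segments, iterator at the start: 8 KiB remaining. */
            struct mock_bio bio = {
                    .vecs = { { 4096 }, { 4096 } },
                    .vcnt = 2, .iter_idx = 0, .iter_size = 8192,
            };

            printf("fresh bio:    multiple segments? %d\n",
                   mock_bio_multiple_segments(&bio));        /* prints 1 */

            /* Advance past the first segment: one 4 KiB segment left.  vcnt
             * still says 2, while the iterator correctly reports a single
             * remaining segment - the sort of mismatch the commit message is
             * guarding against once biovecs are shared. */
            bio.iter_idx = 1;
            bio.iter_size = 4096;
            printf("advanced bio: multiple segments? %d\n",
                   mock_bio_multiple_segments(&bio));        /* prints 0 */

            return 0;
    }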