Commit dd1cab95f356f1395278633565f198463cf6bd24
1 parent
b823825e8e
Exists in
master
and in
7 other branches
[PATCH] Cleanup blk_rq_map_* interfaces
Change the blk_rq_map_user() and blk_rq_map_kern() interface to require a previously allocated request to be passed in. This is both more efficient for multiple iterations of mapping data to the same request, and it is also a much nicer API. Signed-off-by: Jens Axboe <axboe@suse.de>
Showing 4 changed files with 53 additions and 58 deletions Side-by-side Diff
drivers/block/ll_rw_blk.c
... | ... | @@ -2107,21 +2107,19 @@ |
2107 | 2107 | * original bio must be passed back in to blk_rq_unmap_user() for proper |
2108 | 2108 | * unmapping. |
2109 | 2109 | */ |
2110 | -struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf, | |
2111 | - unsigned int len) | |
2110 | +int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, | |
2111 | + unsigned int len) | |
2112 | 2112 | { |
2113 | 2113 | unsigned long uaddr; |
2114 | - struct request *rq; | |
2115 | 2114 | struct bio *bio; |
2115 | + int reading; | |
2116 | 2116 | |
2117 | 2117 | if (len > (q->max_sectors << 9)) |
2118 | - return ERR_PTR(-EINVAL); | |
2119 | - if ((!len && ubuf) || (len && !ubuf)) | |
2120 | - return ERR_PTR(-EINVAL); | |
2118 | + return -EINVAL; | |
2119 | + if (!len || !ubuf) | |
2120 | + return -EINVAL; | |
2121 | 2121 | |
2122 | - rq = blk_get_request(q, rw, __GFP_WAIT); | |
2123 | - if (!rq) | |
2124 | - return ERR_PTR(-ENOMEM); | |
2122 | + reading = rq_data_dir(rq) == READ; | |
2125 | 2123 | |
2126 | 2124 | /* |
2127 | 2125 | * if alignment requirement is satisfied, map in user pages for |
2128 | 2126 | |
... | ... | @@ -2129,9 +2127,9 @@ |
2129 | 2127 | */ |
2130 | 2128 | uaddr = (unsigned long) ubuf; |
2131 | 2129 | if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) |
2132 | - bio = bio_map_user(q, NULL, uaddr, len, rw == READ); | |
2130 | + bio = bio_map_user(q, NULL, uaddr, len, reading); | |
2133 | 2131 | else |
2134 | - bio = bio_copy_user(q, uaddr, len, rw == READ); | |
2132 | + bio = bio_copy_user(q, uaddr, len, reading); | |
2135 | 2133 | |
2136 | 2134 | if (!IS_ERR(bio)) { |
2137 | 2135 | rq->bio = rq->biotail = bio; |
2138 | 2136 | |
... | ... | @@ -2139,14 +2137,13 @@ |
2139 | 2137 | |
2140 | 2138 | rq->buffer = rq->data = NULL; |
2141 | 2139 | rq->data_len = len; |
2142 | - return rq; | |
2140 | + return 0; | |
2143 | 2141 | } |
2144 | 2142 | |
2145 | 2143 | /* |
2146 | 2144 | * bio is the err-ptr |
2147 | 2145 | */ |
2148 | - blk_put_request(rq); | |
2149 | - return (struct request *) bio; | |
2146 | + return PTR_ERR(bio); | |
2150 | 2147 | } |
2151 | 2148 | |
2152 | 2149 | EXPORT_SYMBOL(blk_rq_map_user); |
... | ... | @@ -2160,7 +2157,7 @@ |
2160 | 2157 | * Description: |
2161 | 2158 | * Unmap a request previously mapped by blk_rq_map_user(). |
2162 | 2159 | */ |
2163 | -int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen) | |
2160 | +int blk_rq_unmap_user(struct bio *bio, unsigned int ulen) | |
2164 | 2161 | { |
2165 | 2162 | int ret = 0; |
2166 | 2163 | |
... | ... | @@ -2171,8 +2168,7 @@ |
2171 | 2168 | ret = bio_uncopy_user(bio); |
2172 | 2169 | } |
2173 | 2170 | |
2174 | - blk_put_request(rq); | |
2175 | - return ret; | |
2171 | + return 0; | |
2176 | 2172 | } |
2177 | 2173 | |
2178 | 2174 | EXPORT_SYMBOL(blk_rq_unmap_user); |
2179 | 2175 | |
2180 | 2176 | |
2181 | 2177 | |
2182 | 2178 | |
2183 | 2179 | |
2184 | 2180 | |
2185 | 2181 | |
... | ... | @@ -2184,39 +2180,29 @@ |
2184 | 2180 | * @kbuf: the kernel buffer |
2185 | 2181 | * @len: length of user data |
2186 | 2182 | */ |
2187 | -struct request *blk_rq_map_kern(request_queue_t *q, int rw, void *kbuf, | |
2188 | - unsigned int len, unsigned int gfp_mask) | |
2183 | +int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, | |
2184 | + unsigned int len, unsigned int gfp_mask) | |
2189 | 2185 | { |
2190 | - struct request *rq; | |
2191 | 2186 | struct bio *bio; |
2192 | 2187 | |
2193 | 2188 | if (len > (q->max_sectors << 9)) |
2194 | - return ERR_PTR(-EINVAL); | |
2195 | - if ((!len && kbuf) || (len && !kbuf)) | |
2196 | - return ERR_PTR(-EINVAL); | |
2189 | + return -EINVAL; | |
2190 | + if (!len || !kbuf) | |
2191 | + return -EINVAL; | |
2197 | 2192 | |
2198 | - rq = blk_get_request(q, rw, gfp_mask); | |
2199 | - if (!rq) | |
2200 | - return ERR_PTR(-ENOMEM); | |
2201 | - | |
2202 | 2193 | bio = bio_map_kern(q, kbuf, len, gfp_mask); |
2203 | - if (!IS_ERR(bio)) { | |
2204 | - if (rw) | |
2205 | - bio->bi_rw |= (1 << BIO_RW); | |
2194 | + if (IS_ERR(bio)) | |
2195 | + return PTR_ERR(bio); | |
2206 | 2196 | |
2207 | - rq->bio = rq->biotail = bio; | |
2208 | - blk_rq_bio_prep(q, rq, bio); | |
2197 | + if (rq_data_dir(rq) == WRITE) | |
2198 | + bio->bi_rw |= (1 << BIO_RW); | |
2209 | 2199 | |
2210 | - rq->buffer = rq->data = NULL; | |
2211 | - rq->data_len = len; | |
2212 | - return rq; | |
2213 | - } | |
2200 | + rq->bio = rq->biotail = bio; | |
2201 | + blk_rq_bio_prep(q, rq, bio); | |
2214 | 2202 | |
2215 | - /* | |
2216 | - * bio is the err-ptr | |
2217 | - */ | |
2218 | - blk_put_request(rq); | |
2219 | - return (struct request *) bio; | |
2203 | + rq->buffer = rq->data = NULL; | |
2204 | + rq->data_len = len; | |
2205 | + return 0; | |
2220 | 2206 | } |
2221 | 2207 | |
2222 | 2208 | EXPORT_SYMBOL(blk_rq_map_kern); |
drivers/block/scsi_ioctl.c
... | ... | @@ -216,7 +216,7 @@ |
216 | 216 | struct gendisk *bd_disk, struct sg_io_hdr *hdr) |
217 | 217 | { |
218 | 218 | unsigned long start_time; |
219 | - int reading, writing; | |
219 | + int reading, writing, ret; | |
220 | 220 | struct request *rq; |
221 | 221 | struct bio *bio; |
222 | 222 | char sense[SCSI_SENSE_BUFFERSIZE]; |
223 | 223 | |
224 | 224 | |
... | ... | @@ -255,14 +255,17 @@ |
255 | 255 | reading = 1; |
256 | 256 | break; |
257 | 257 | } |
258 | + } | |
258 | 259 | |
259 | - rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp, | |
260 | - hdr->dxfer_len); | |
260 | + rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL); | |
261 | + if (!rq) | |
262 | + return -ENOMEM; | |
261 | 263 | |
262 | - if (IS_ERR(rq)) | |
263 | - return PTR_ERR(rq); | |
264 | - } else | |
265 | - rq = blk_get_request(q, READ, __GFP_WAIT); | |
264 | + if (reading || writing) { | |
265 | + ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len); | |
266 | + if (ret) | |
267 | + goto out; | |
268 | + } | |
266 | 269 | |
267 | 270 | /* |
268 | 271 | * fill in request structure |
269 | 272 | |
... | ... | @@ -321,11 +324,13 @@ |
321 | 324 | } |
322 | 325 | |
323 | - if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len)) | 
324 | - return -EFAULT; | 
326 | + if (blk_rq_unmap_user(bio, hdr->dxfer_len)) | 
327 | + ret = -EFAULT; | 
325 | 328 | |
326 | 329 | /* may not have succeeded, but output values written to control |
327 | 330 | * structure (struct sg_io_hdr). */ |
328 | - return 0; | |
331 | +out: | |
332 | + blk_put_request(rq); | |
333 | + return ret; | |
329 | 334 | } |
330 | 335 | |
331 | 336 | #define OMAX_SB_LEN 16 /* For backward compatibility */ |
drivers/cdrom/cdrom.c
... | ... | @@ -2097,6 +2097,10 @@ |
2097 | 2097 | if (!q) |
2098 | 2098 | return -ENXIO; |
2099 | 2099 | |
2100 | + rq = blk_get_request(q, READ, GFP_KERNEL); | |
2101 | + if (!rq) | |
2102 | + return -ENOMEM; | |
2103 | + | |
2100 | 2104 | cdi->last_sense = 0; |
2101 | 2105 | |
2102 | 2106 | while (nframes) { |
... | ... | @@ -2108,9 +2112,9 @@ |
2108 | 2112 | |
2109 | 2113 | len = nr * CD_FRAMESIZE_RAW; |
2110 | 2114 | |
2111 | - rq = blk_rq_map_user(q, READ, ubuf, len); | |
2112 | - if (IS_ERR(rq)) | |
2113 | - return PTR_ERR(rq); | |
2115 | + ret = blk_rq_map_user(q, rq, ubuf, len); | |
2116 | + if (ret) | |
2117 | + break; | |
2114 | 2118 | |
2115 | 2119 | memset(rq->cmd, 0, sizeof(rq->cmd)); |
2116 | 2120 | rq->cmd[0] = GPCMD_READ_CD; |
... | ... | @@ -2138,7 +2142,7 @@ |
2138 | 2142 | cdi->last_sense = s->sense_key; |
2139 | 2143 | } |
2140 | 2144 | |
2141 | - if (blk_rq_unmap_user(rq, bio, len)) | |
2145 | + if (blk_rq_unmap_user(bio, len)) | |
2142 | 2146 | ret = -EFAULT; |
2143 | 2147 | |
2144 | 2148 | if (ret) |
... | ... | @@ -2149,6 +2153,7 @@ |
2149 | 2153 | ubuf += len; |
2150 | 2154 | } |
2151 | 2155 | |
2156 | + blk_put_request(rq); | |
2152 | 2157 | return ret; |
2153 | 2158 | } |
2154 | 2159 |
include/linux/blkdev.h
... | ... | @@ -558,10 +558,9 @@ |
558 | 558 | extern void __blk_stop_queue(request_queue_t *q); |
559 | 559 | extern void blk_run_queue(request_queue_t *); |
560 | 560 | extern void blk_queue_activity_fn(request_queue_t *, activity_fn *, void *); |
561 | -extern struct request *blk_rq_map_user(request_queue_t *, int, void __user *, unsigned int); | |
562 | -extern int blk_rq_unmap_user(struct request *, struct bio *, unsigned int); | |
563 | -extern struct request *blk_rq_map_kern(request_queue_t *, int, void *, | |
564 | - unsigned int, unsigned int); | |
561 | +extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int); | |
562 | +extern int blk_rq_unmap_user(struct bio *, unsigned int); | |
563 | +extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, unsigned int); | |
565 | 564 | extern int blk_execute_rq(request_queue_t *, struct gendisk *, struct request *); |
566 | 565 | |
567 | 566 | static inline request_queue_t *bdev_get_queue(struct block_device *bdev) |