block/blk-map.c

/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
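
/*
 * Illustrative only, not part of blk-map.c: a minimal sketch of how a
 * caller might map a user buffer into a REQ_TYPE_BLOCK_PC request and tear
 * it down again, roughly along the lines of the SG_IO path in
 * block/scsi_ioctl.c. submit_user_io(), its parameters and the omitted CDB
 * setup are assumptions made for the example, not APIs defined in this
 * file:
 *
 *	static int submit_user_io(struct request_queue *q,
 *				  struct gendisk *disk,
 *				  void __user *ubuf, unsigned long len)
 *	{
 *		struct request *rq;
 *		struct bio *bio;
 *		int ret;
 *
 *		rq = blk_get_request(q, READ, GFP_KERNEL);
 *		if (!rq)
 *			return -ENOMEM;
 *		rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *		ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *		if (ret) {
 *			blk_put_request(rq);
 *			return ret;
 *		}
 *		bio = rq->bio;		(saved for blk_rq_unmap_user below)
 *
 *		blk_execute_rq(q, disk, rq, 0);
 *
 *		ret = blk_rq_unmap_user(bio);
 *		blk_put_request(rq);
 *		return ret;
 *	}
 */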

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
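
/*
 * Illustrative only, not part of blk-map.c: a minimal sketch of mapping a
 * vector of user buffers with blk_rq_map_user_iov(), as the SG_IO and bsg
 * paths do. The iovec is assumed to have already been copied in from user
 * space and validated; uiov, nr_segs and total_len are example names:
 *
 *	struct sg_iovec *uiov;		(nr_segs entries, copied in earlier)
 *	unsigned int total_len;		(sum of all uiov[i].iov_len)
 *	int ret;
 *
 *	ret = blk_rq_map_user_iov(q, rq, NULL, uiov, nr_segs, total_len,
 *				  GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *
 * Note that total_len must match the byte count the iovec actually
 * describes, or the mapping above is rejected with -EINVAL. Unmapping works
 * exactly as for blk_rq_map_user(): remember rq->bio before the request
 * runs and pass that pointer to blk_rq_unmap_user() afterwards.
 */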

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
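
/*
 * Illustrative only: the pointer handed to blk_rq_unmap_user() must be the
 * rq->bio sampled right after mapping, since I/O completion may have
 * changed rq->bio by the time the request has run. A minimal sketch, with
 * run_request() standing in for however the request is actually executed
 * (for example blk_execute_rq()):
 *
 *	struct bio *bio = rq->bio;	(sample before the request runs)
 *	int err;
 *
 *	run_request(q, rq);
 *
 *	err = blk_rq_unmap_user(bio);	(not rq->bio at this point)
 *	blk_put_request(rq);
 *	if (err)
 *		return err;
 */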

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
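
/*
 * Illustrative only, not part of blk-map.c: a minimal sketch of issuing a
 * request backed by a kernel buffer, roughly the shape of what the SCSI
 * midlayer does in scsi_execute(). The CDB setup is elided and buffer,
 * bufflen and the error handling policy are example assumptions:
 *
 *	struct request *rq;
 *	int err;
 *
 *	rq = blk_get_request(q, READ, GFP_NOIO);
 *	if (!rq)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	if (bufflen) {
 *		err = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_NOIO);
 *		if (err) {
 *			blk_put_request(rq);
 *			return err;
 *		}
 *	}
 *
 *	blk_execute_rq(q, NULL, rq, 0);
 *	err = rq->errors ? -EIO : 0;
 *	blk_put_request(rq);
 *	return err;
 *
 * No blk_rq_unmap_user() call is needed here: for a kernel buffer the data
 * is either mapped in place or copied through a bounce bio that is torn
 * down on completion.
 */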