block/blk-map.c
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, ubuf, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);
	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}
	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
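
/*
 * Illustrative usage (not part of this file): a minimal sketch of how a
 * REQ_TYPE_BLOCK_PC submitter, such as an sg-style ioctl path, might pair
 * blk_rq_map_user() with blk_execute_rq() and blk_rq_unmap_user().  The
 * function name and the cdb/disk parameters are hypothetical.
 */
#if 0	/* example only */
static int example_pc_read(struct request_queue *q, struct gendisk *disk,
			   unsigned char *cdb, unsigned int cdb_len,
			   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	memcpy(rq->cmd, cdb, cdb_len);
	rq->cmd_len = cdb_len;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out;

	/* save rq->bio now: I/O completion may change rq->bio */
	bio = rq->bio;
	blk_execute_rq(q, disk, rq, 0);

	/* hand the original bio back, as the kernel-doc above requires */
	ret = blk_rq_unmap_user(bio);
out:
	blk_put_request(rq);
	return ret;
}
#endif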

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;
	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}
	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}
	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
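
/*
 * Illustrative usage (not part of this file): mapping a scattered user
 * buffer described by an sg_iovec array, roughly as the SG_IO ioctl path
 * does.  The helper name and its parameters are hypothetical.
 */
#if 0	/* example only */
static int example_map_iov(struct request_queue *q, struct request *rq,
			   const struct sg_iovec __user *uiov, int iov_count,
			   unsigned int total_len)
{
	struct sg_iovec iov[UIO_FASTIOV];

	if (iov_count <= 0 || iov_count > UIO_FASTIOV)
		return -EINVAL;
	if (copy_from_user(iov, uiov, iov_count * sizeof(*iov)))
		return -EFAULT;

	/*
	 * total_len must equal the sum of the iovec lengths; otherwise the
	 * bio->bi_size != len check above rejects the mapping with -EINVAL.
	 */
	return blk_rq_map_user_iov(q, rq, NULL, iov, iov_count, total_len,
				   GFP_KERNEL);
}
#endif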

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;
	do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;
	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
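
/*
 * Illustrative usage (not part of this file): a driver issuing a command
 * with kernel buffers.  As the description above notes, blk_rq_map_kern()
 * may be called repeatedly to append further buffers.  All names here are
 * hypothetical.
 */
#if 0	/* example only */
static int example_kern_io(struct request_queue *q, struct gendisk *disk,
			   void *hdr, unsigned int hdr_len,
			   void *data, unsigned int data_len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_kern(q, rq, hdr, hdr_len, GFP_KERNEL);
	if (!ret)	/* second call appends another buffer */
		ret = blk_rq_map_kern(q, rq, data, data_len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	ret = blk_execute_rq(q, disk, rq, 0);
	blk_put_request(rq);
	return ret;
}
#endif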