block/blk-map.c

/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
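
/*
 * Illustrative sketch, not part of the original file: a caller that
 * attaches a pre-built bio to a passthrough request.  It assumes a
 * kernel of this era, where blk_get_request() and blk_rq_set_block_pc()
 * still exist; example_attach_bio() itself is a hypothetical name.
 */
static int example_attach_bio(struct request_queue *q, struct bio *bio)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, bio_data_dir(bio), GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	/* Fails with -EINVAL if the driver's merge constraints reject it. */
	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		blk_put_request(rq);
	return ret;
}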

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
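
/*
 * Illustrative sketch, not part of the original file: importing a
 * user-space iovec and mapping it onto a request, roughly as an ioctl
 * handler might.  example_map_iov() and its parameters are hypothetical;
 * import_iovec() is assumed to behave as in kernels of this era.
 */
static int example_map_iov(struct request_queue *q, struct request *rq,
			   const struct iovec __user *uvec,
			   unsigned int nr_segs, gfp_t gfp_mask)
{
	struct iovec *iov = NULL;
	struct iov_iter i;
	int ret;

	/* Copies in and validates the iovec array; may allocate iov. */
	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &i, gfp_mask);
	kfree(iov);	/* NULL if no allocation was needed */
	return ret;
}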

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);
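
/*
 * Illustrative sketch, not part of the original file: the common
 * single-buffer case with no pre-allocated pages (map_data == NULL),
 * as SG_IO-style paths use it.  example_map_buf() is hypothetical.
 */
static int example_map_buf(struct request_queue *q, struct request *rq,
			   void __user *ubuf, unsigned long len)
{
	/* Zero-copy if alignment allows, otherwise a bounce copy. */
	return blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
}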

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
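
/*
 * Illustrative sketch, not part of the original file: the full
 * map/execute/unmap pairing.  rq->bio is saved before submission
 * because completion may change it; example_rw_user() and its
 * parameters are hypothetical, and blk_execute_rq() is assumed to
 * have its signature from kernels of this era.
 */
static int example_rw_user(struct request_queue *q, struct request *rq,
			   void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret, ret2;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;
	bio = rq->bio;	/* the original bio, kept for unmapping */

	ret = blk_execute_rq(q, NULL, rq, 0);	/* submit and wait */

	ret2 = blk_rq_unmap_user(bio);
	return ret ? ret : ret2;
}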

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
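
/*
 * Illustrative sketch, not part of the original file: mapping a
 * kmalloc'ed buffer for a passthrough command.  example_map_kbuf()
 * is hypothetical.  Heap memory is used deliberately: a stack buffer
 * would make object_is_on_stack() force the copy path above.
 */
static int example_map_kbuf(struct request_queue *q, struct request *rq,
			    unsigned int len)
{
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		return ret;
	}

	ret = blk_execute_rq(q, NULL, rq, 0);
	kfree(buf);	/* safe: the request has completed */
	return ret;
}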