block/blk-map.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio **bio)
{
	struct bio *orig_bio = *bio;
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	blk_queue_bounce(rq->q, bio);

	bio_for_each_bvec(bv, *bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, *bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, *bio, nr_segs)) {
			if (orig_bio != *bio) {
				bio_put(*bio);
				*bio = orig_bio;
			}
			return -EINVAL;
		}

		rq->biotail->bi_next = *bio;
		rq->biotail = *bio;
		rq->__data_len += (*bio)->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
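
/*
 * Example (illustrative sketch, not part of this file): a passthrough
 * driver that has already built a bio can attach it directly.  The
 * double pointer matters, since blk_queue_bounce() may swap in a
 * bounce bio and the caller must keep using *bio afterwards.  "rq"
 * and the my_build_bio() helper are hypothetical.
 *
 *	struct bio *bio = my_build_bio();
 *	int ret = blk_rq_append_bio(rq, &bio);
 *
 *	if (ret)
 *		bio_put(bio);
 *
 * On failure *bio is restored to the original bio, which the caller
 * still owns and must put.
 */
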
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	ret = blk_rq_append_bio(rq, &bio);
	if (ret) {
		__blk_rq_unmap_user(orig_bio);
		return ret;
	}
	bio_get(bio);

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
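
/*
 * Example (illustrative sketch, not part of this file): mapping a
 * user-space iovec, e.g. from an ioctl argument, into a passthrough
 * request.  "uvec", "nr_segs", "q" and "rq" are assumed to come from
 * the caller.  The kfree(iov) is safe because import_iovec() sets
 * *iov to NULL when the on-stack fast array was used, and the iovec
 * array is no longer needed once the pages are mapped or copied.
 *
 *	struct iovec fast_iov[UIO_FASTIOV], *iov = fast_iov;
 *	struct iov_iter iter;
 *	ssize_t err;
 *
 *	err = import_iovec(rq_data_dir(rq), uvec, nr_segs,
 *			   ARRAY_SIZE(fast_iov), &iov, &iter);
 *	if (err < 0)
 *		return err;
 *	err = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
 *	kfree(iov);
 */
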
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
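
/*
 * Example (illustrative sketch, not part of this file): the full
 * map/execute/unmap cycle for a single user buffer.  The original
 * rq->bio is saved before execution, as required above, because
 * completion may advance rq->bio.  "q", "disk", "ubuf" and "len" are
 * assumed; the blk_get_request()/blk_execute_rq() signatures match
 * this kernel generation and may differ in others.
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int ret;
 *
 *	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (!ret) {
 *		bio = rq->bio;
 *		blk_execute_rq(q, disk, rq, 0);
 *		ret = blk_rq_unmap_user(bio);
 *	}
 *	blk_put_request(rq);
 *	return ret;
 */
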
#ifdef CONFIG_AHCI_IMX
extern void *sg_io_buffer_hack;
#else
#define sg_io_buffer_hack NULL
#endif

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of the kernel buffer
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio, *orig_bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

#ifdef CONFIG_AHCI_IMX
	if (kbuf == sg_io_buffer_hack)
		do_copy = 0;
	else
#endif
		do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);

	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	orig_bio = bio;
	ret = blk_rq_append_bio(rq, &bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(orig_bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
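
/*
 * Example (illustrative sketch, not part of this file): issuing a
 * driver-private read into a heap buffer.  The buffer must not live
 * on the stack, otherwise object_is_on_stack() forces the copy path.
 * "q" and BUF_SZ are assumptions; error handling is kept minimal.
 *
 *	void *buf = kmalloc(BUF_SZ, GFP_KERNEL);
 *	struct request *rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
 *
 *	if (buf && !IS_ERR(rq) &&
 *	    !blk_rq_map_kern(q, rq, buf, BUF_SZ, GFP_KERNEL))
 *		blk_execute_rq(q, NULL, rq, 0);
 *	if (!IS_ERR(rq))
 *		blk_put_request(rq);
 *	kfree(buf);
 */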