Commit 68154e90c9d1492d570671ae181d9a8f8530da55
Committed by
Jens Axboe
1 parent
657e93be35
Exists in
master
and in
4 other branches
block: add dma alignment and padding support to blk_rq_map_kern
This patch adds bio_copy_kern, similar to bio_copy_user. blk_rq_map_kern uses bio_copy_kern instead of bio_map_kern if necessary. bio_copy_kern uses temporary pages, and the bi_end_io callback frees these pages. bio_copy_kern saves the original kernel buffer at bio->bi_private, so it doesn't need something like struct bio_map_data to store information about the caller. Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> Cc: Tejun Heo <htejun@gmail.com> Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Showing 3 changed files with 112 additions and 1 deletion Side-by-side Diff
block/blk-map.c
... | ... | @@ -255,10 +255,18 @@ |
255 | 255 | * @kbuf: the kernel buffer |
256 | 256 | * @len: length of user data |
257 | 257 | * @gfp_mask: memory allocation flags |
258 | + * | |
259 | + * Description: | |
260 | + * Data will be mapped directly if possible. Otherwise a bounce | |
261 | + * buffer is used. | |
258 | 262 | */ |
259 | 263 | int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, |
260 | 264 | unsigned int len, gfp_t gfp_mask) |
261 | 265 | { |
266 | + unsigned long kaddr; | |
267 | + unsigned int alignment; | |
268 | + int reading = rq_data_dir(rq) == READ; | |
269 | + int do_copy = 0; | |
262 | 270 | struct bio *bio; |
263 | 271 | |
264 | 272 | if (len > (q->max_hw_sectors << 9)) |
265 | 273 | |
... | ... | @@ -266,12 +274,23 @@ |
266 | 274 | if (!len || !kbuf) |
267 | 275 | return -EINVAL; |
268 | 276 | |
269 | - bio = bio_map_kern(q, kbuf, len, gfp_mask); | |
277 | + kaddr = (unsigned long)kbuf; | |
278 | + alignment = queue_dma_alignment(q) | q->dma_pad_mask; | |
279 | + do_copy = ((kaddr & alignment) || (len & alignment)); | |
280 | + | |
281 | + if (do_copy) | |
282 | + bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); | |
283 | + else | |
284 | + bio = bio_map_kern(q, kbuf, len, gfp_mask); | |
285 | + | |
270 | 286 | if (IS_ERR(bio)) |
271 | 287 | return PTR_ERR(bio); |
272 | 288 | |
273 | 289 | if (rq_data_dir(rq) == WRITE) |
274 | 290 | bio->bi_rw |= (1 << BIO_RW); |
291 | + | |
292 | + if (do_copy) | |
293 | + rq->cmd_flags |= REQ_COPY_USER; | |
275 | 294 | |
276 | 295 | blk_rq_bio_prep(q, rq, bio); |
277 | 296 | blk_queue_bounce(q, &rq->bio); |
fs/bio.c
... | ... | @@ -937,6 +937,95 @@ |
937 | 937 | return ERR_PTR(-EINVAL); |
938 | 938 | } |
939 | 939 | |
940 | +static void bio_copy_kern_endio(struct bio *bio, int err) | |
941 | +{ | |
942 | + struct bio_vec *bvec; | |
943 | + const int read = bio_data_dir(bio) == READ; | |
944 | + char *p = bio->bi_private; | |
945 | + int i; | |
946 | + | |
947 | + __bio_for_each_segment(bvec, bio, i, 0) { | |
948 | + char *addr = page_address(bvec->bv_page); | |
949 | + | |
950 | + if (read && !err) | |
951 | + memcpy(p, addr, bvec->bv_len); | |
952 | + | |
953 | + __free_page(bvec->bv_page); | |
954 | + p += bvec->bv_len; | |
955 | + } | |
956 | + | |
957 | + bio_put(bio); | |
958 | +} | |
959 | + | |
960 | +/** | |
961 | + * bio_copy_kern - copy kernel address into bio | |
962 | + * @q: the struct request_queue for the bio | |
963 | + * @data: pointer to buffer to copy | |
964 | + * @len: length in bytes | |
965 | + * @gfp_mask: allocation flags for bio and page allocation | |
966 | + * | |
967 | + * copy the kernel address into a bio suitable for io to a block | |
968 | + * device. Returns an error pointer in case of error. | |
969 | + */ | |
970 | +struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, | |
971 | + gfp_t gfp_mask, int reading) | |
972 | +{ | |
973 | + unsigned long kaddr = (unsigned long)data; | |
974 | + unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; | |
975 | + unsigned long start = kaddr >> PAGE_SHIFT; | |
976 | + const int nr_pages = end - start; | |
977 | + struct bio *bio; | |
978 | + struct bio_vec *bvec; | |
979 | + int i, ret; | |
980 | + | |
981 | + bio = bio_alloc(gfp_mask, nr_pages); | |
982 | + if (!bio) | |
983 | + return ERR_PTR(-ENOMEM); | |
984 | + | |
985 | + while (len) { | |
986 | + struct page *page; | |
987 | + unsigned int bytes = PAGE_SIZE; | |
988 | + | |
989 | + if (bytes > len) | |
990 | + bytes = len; | |
991 | + | |
992 | + page = alloc_page(q->bounce_gfp | gfp_mask); | |
993 | + if (!page) { | |
994 | + ret = -ENOMEM; | |
995 | + goto cleanup; | |
996 | + } | |
997 | + | |
998 | + if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) { | |
999 | + ret = -EINVAL; | |
1000 | + goto cleanup; | |
1001 | + } | |
1002 | + | |
1003 | + len -= bytes; | |
1004 | + } | |
1005 | + | |
1006 | + if (!reading) { | |
1007 | + void *p = data; | |
1008 | + | |
1009 | + bio_for_each_segment(bvec, bio, i) { | |
1010 | + char *addr = page_address(bvec->bv_page); | |
1011 | + | |
1012 | + memcpy(addr, p, bvec->bv_len); | |
1013 | + p += bvec->bv_len; | |
1014 | + } | |
1015 | + } | |
1016 | + | |
1017 | + bio->bi_private = data; | |
1018 | + bio->bi_end_io = bio_copy_kern_endio; | |
1019 | + return bio; | |
1020 | +cleanup: | |
1021 | + bio_for_each_segment(bvec, bio, i) | |
1022 | + __free_page(bvec->bv_page); | |
1023 | + | |
1024 | + bio_put(bio); | |
1025 | + | |
1026 | + return ERR_PTR(ret); | |
1027 | +} | |
1028 | + | |
940 | 1029 | /* |
941 | 1030 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions |
942 | 1031 | * for performing direct-IO in BIOs. |
... | ... | @@ -1273,6 +1362,7 @@ |
1273 | 1362 | EXPORT_SYMBOL(bio_map_user); |
1274 | 1363 | EXPORT_SYMBOL(bio_unmap_user); |
1275 | 1364 | EXPORT_SYMBOL(bio_map_kern); |
1365 | +EXPORT_SYMBOL(bio_copy_kern); | |
1276 | 1366 | EXPORT_SYMBOL(bio_pair_release); |
1277 | 1367 | EXPORT_SYMBOL(bio_split); |
1278 | 1368 | EXPORT_SYMBOL(bio_split_pool); |
include/linux/bio.h
... | ... | @@ -324,6 +324,8 @@ |
324 | 324 | extern void bio_unmap_user(struct bio *); |
325 | 325 | extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, |
326 | 326 | gfp_t); |
327 | +extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, | |
328 | + gfp_t, int); | |
327 | 329 | extern void bio_set_pages_dirty(struct bio *bio); |
328 | 330 | extern void bio_check_pages_dirty(struct bio *bio); |
329 | 331 | extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int); |