block/blk-map.c
  /*
   * Functions related to mapping data to requests
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/uio.h>
  
  #include "blk.h"
  /*
   * Return true if appending @cur right after @prv would leave a gap that
   * violates the queue's virt boundary mask, in which case the data cannot
   * be mapped directly and has to be copied instead.
   */
  static bool iovec_gap_to_prv(struct request_queue *q,
  			     struct iovec *prv, struct iovec *cur)
  {
  	unsigned long prev_end;
  
  	if (!queue_virt_boundary(q))
  		return false;
  
  	if (prv->iov_base == NULL && prv->iov_len == 0)
  		/* prv is not set - don't check */
  		return false;
  
  	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
  
  	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
  		prev_end & queue_virt_boundary(q));
  }
  /*
   * Append @bio to @rq: prep the request with it as the first bio, or merge
   * it onto the tail of the existing chain if the queue's merge limits allow.
   */
  int blk_rq_append_bio(struct request_queue *q, struct request *rq,
  		      struct bio *bio)
  {
  	if (!rq->bio)
  		blk_rq_bio_prep(q, rq, bio);
  	else if (!ll_back_merge_fn(q, rq, bio))
  		return -EINVAL;
  	else {
  		rq->biotail->bi_next = bio;
  		rq->biotail = bio;
  		rq->__data_len += bio->bi_iter.bi_size;
  	}
  	return 0;
  }
  
  static int __blk_rq_unmap_user(struct bio *bio)
  {
  	int ret = 0;
  
  	if (bio) {
  		if (bio_flagged(bio, BIO_USER_MAPPED))
  			bio_unmap_user(bio);
  		else
  			ret = bio_uncopy_user(bio);
  	}
  
  	return ret;
  }
  static int __blk_rq_map_user_iov(struct request *rq,
  		struct rq_map_data *map_data, struct iov_iter *iter,
  		gfp_t gfp_mask, bool copy)
  {
  	struct request_queue *q = rq->q;
  	struct bio *bio, *orig_bio;
  	int ret;
  
  	if (copy)
  		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
  	else
  		bio = bio_map_user_iov(q, iter, gfp_mask);
  
  	if (IS_ERR(bio))
  		return PTR_ERR(bio);
  
  	if (map_data && map_data->null_mapped)
  		bio_set_flag(bio, BIO_NULL_MAPPED);
  
  	iov_iter_advance(iter, bio->bi_iter.bi_size);
  	if (map_data)
  		map_data->offset += bio->bi_iter.bi_size;
  
  	orig_bio = bio;
  	blk_queue_bounce(q, &bio);
  
  	/*
  	 * We link the bounce buffer in and could have to traverse it
  	 * later so we have to get a ref to prevent it from being freed
  	 */
  	bio_get(bio);
  
  	ret = blk_rq_append_bio(q, rq, bio);
  	if (ret) {
  		bio_endio(bio);
  		__blk_rq_unmap_user(orig_bio);
  		bio_put(bio);
  		return ret;
  	}
  
  	return 0;
  }
  /**
   * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
   * @q:		request queue where request should be inserted
   * @rq:		request to map data to
   * @map_data:   pointer to the rq_map_data holding pages (if necessary)
   * @iter:	iovec iterator
   * @gfp_mask:	memory allocation flags
   *
   * Description:
   *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
   *    a kernel bounce buffer is used.
   *
   *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
   *    still in process context.
   *
   *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
   *    before being submitted to the device, as pages mapped may be out of
   *    reach. It's the caller's responsibility to make sure this happens. The
   *    original bio must be passed back in to blk_rq_unmap_user() for proper
   *    unmapping.
   */
  int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
  			struct rq_map_data *map_data,
  			const struct iov_iter *iter, gfp_t gfp_mask)
  {
  	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
  	bool copy = (q->dma_pad_mask & iter->count) || map_data;
  	struct bio *bio = NULL;
  	struct iov_iter i;
  	int ret;

  	if (!iter || !iter->count)
  		return -EINVAL;
  	iov_for_each(iov, i, *iter) {
  		unsigned long uaddr = (unsigned long) iov.iov_base;

  		if (!iov.iov_len)
  			return -EINVAL;
  		/*
  		 * Keep going so we check length of all segments
  		 */
  		if ((uaddr & queue_dma_alignment(q)) ||
  		    iovec_gap_to_prv(q, &prv, &iov))
  			copy = true;
  
  		prv.iov_base = iov.iov_base;
  		prv.iov_len = iov.iov_len;
  	}
  	i = *iter;
  	do {
  	ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
  		if (ret)
  			goto unmap_rq;
  		if (!bio)
  			bio = rq->bio;
  	} while (iov_iter_count(&i));

  	if (!bio_flagged(bio, BIO_USER_MAPPED))
  		rq->cmd_flags |= REQ_COPY_USER;
  	return 0;
  
  unmap_rq:
  	__blk_rq_unmap_user(bio);
  	rq->bio = NULL;
  	return -EINVAL;
  }
  EXPORT_SYMBOL(blk_rq_map_user_iov);
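
  /*
   * Illustrative sketch only, compiled out: one way a caller (for example an
   * ioctl handler, similar to the SG_IO path) might feed a user iovec array
   * to blk_rq_map_user_iov(). The function name and the abbreviated error
   * handling are hypothetical, not part of this file's API.
   */
  #if 0
  static int example_map_user_iovec(struct request_queue *q, struct request *rq,
  				  const struct iovec __user *uvec, int nr_segs)
  {
  	struct iovec *iov = NULL;
  	struct iov_iter i;
  	ssize_t bytes;
  	int ret;

  	/* copy in and validate the user iovec array */
  	bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
  	if (bytes < 0)
  		return bytes;

  	/*
  	 * Map (or copy) the user pages into @rq; the mapping must later be
  	 * undone with blk_rq_unmap_user() on the original rq->bio.
  	 */
  	ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
  	kfree(iov);
  	return ret;
  }
  #endif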

  int blk_rq_map_user(struct request_queue *q, struct request *rq,
  		    struct rq_map_data *map_data, void __user *ubuf,
  		    unsigned long len, gfp_t gfp_mask)
  {
  	struct iovec iov;
  	struct iov_iter i;
  	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

  	if (unlikely(ret < 0))
  		return ret;

  	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
  }
  EXPORT_SYMBOL(blk_rq_map_user);
  /**
   * blk_rq_unmap_user - unmap a request with user data
   * @bio:	       start of bio list
   *
   * Description:
   *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
   *    supply the original rq->bio from the blk_rq_map_user() return, since
   *    the I/O completion may have changed rq->bio.
   */
  int blk_rq_unmap_user(struct bio *bio)
  {
  	struct bio *mapped_bio;
  	int ret = 0, ret2;
  
  	while (bio) {
  		mapped_bio = bio;
  		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
  			mapped_bio = bio->bi_private;
  
  		ret2 = __blk_rq_unmap_user(mapped_bio);
  		if (ret2 && !ret)
  			ret = ret2;
  
  		mapped_bio = bio;
  		bio = bio->bi_next;
  		bio_put(mapped_bio);
  	}
  
  	return ret;
  }
  EXPORT_SYMBOL(blk_rq_unmap_user);
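
  /*
   * Illustrative sketch only, compiled out: the typical pairing of
   * blk_rq_map_user() and blk_rq_unmap_user() around synchronous execution.
   * rq->bio is saved before blk_execute_rq() because I/O completion may
   * change it; the saved bio is what gets unmapped. The function name and
   * the abbreviated error handling are hypothetical.
   */
  #if 0
  static int example_rw_user_buffer(struct request_queue *q,
  				  struct gendisk *disk,
  				  void __user *ubuf, unsigned long len)
  {
  	struct request *rq;
  	struct bio *bio;
  	int ret;

  	rq = blk_get_request(q, READ, GFP_KERNEL);
  	if (IS_ERR(rq))
  		return PTR_ERR(rq);
  	blk_rq_set_block_pc(rq);

  	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
  	if (ret)
  		goto out;

  	bio = rq->bio;			/* remember the original bio */
  	blk_execute_rq(q, disk, rq, 0);

  	/* still in process context: undo the mapping using the saved bio */
  	ret = blk_rq_unmap_user(bio);
  out:
  	blk_put_request(rq);
  	return ret;
  }
  #endif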
  
  /**
   * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
   * @q:		request queue where request should be inserted
   * @rq:		request to fill
   * @kbuf:	the kernel buffer
   * @len:	length of kernel data
   * @gfp_mask:	memory allocation flags
   *
   * Description:
   *    Data will be mapped directly if possible. Otherwise a bounce
   *    buffer is used. Can be called multiple times to append multiple
   *    buffers.
   */
  int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
  		    unsigned int len, gfp_t gfp_mask)
  {
  	int reading = rq_data_dir(rq) == READ;
  	unsigned long addr = (unsigned long) kbuf;
  	int do_copy = 0;
  	struct bio *bio;
  	int ret;

  	if (len > (queue_max_hw_sectors(q) << 9))
  		return -EINVAL;
  	if (!len || !kbuf)
  		return -EINVAL;
  	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
  	if (do_copy)
  		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
  	else
  		bio = bio_map_kern(q, kbuf, len, gfp_mask);
  	if (IS_ERR(bio))
  		return PTR_ERR(bio);
  	if (!reading)
  		bio->bi_rw |= REQ_WRITE;

  	if (do_copy)
  		rq->cmd_flags |= REQ_COPY_USER;
  	ret = blk_rq_append_bio(q, rq, bio);
  	if (unlikely(ret)) {
  		/* request is too big */
  		bio_put(bio);
  		return ret;
  	}
  	blk_queue_bounce(q, &rq->bio);
  	return 0;
  }
  EXPORT_SYMBOL(blk_rq_map_kern);
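
  /*
   * Illustrative sketch only, compiled out: mapping a kernel buffer into a
   * REQ_TYPE_BLOCK_PC request with blk_rq_map_kern(), roughly what helpers
   * such as scsi_execute() do. blk_rq_map_kern() can be called again with a
   * further buffer to append another segment. The function name and the
   * abbreviated error handling are hypothetical.
   */
  #if 0
  static int example_map_kernel_buffer(struct request_queue *q,
  				     struct gendisk *disk,
  				     void *kbuf, unsigned int len)
  {
  	struct request *rq;
  	int ret;

  	rq = blk_get_request(q, WRITE, GFP_KERNEL);
  	if (IS_ERR(rq))
  		return PTR_ERR(rq);
  	blk_rq_set_block_pc(rq);

  	/* falls back to a bounce copy if kbuf is unaligned or on the stack */
  	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
  	if (!ret)
  		blk_execute_rq(q, disk, rq, 0);

  	blk_put_request(rq);
  	return ret;
  }
  #endif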