block/blk-map.c

  /*
   * Functions related to mapping data to requests
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <scsi/sg.h>		/* for struct sg_iovec */
  
  #include "blk.h"
  
/*
 * Append a bio to a request: initialize an empty request from the bio, or
 * chain the bio onto rq->biotail once the back merge limits allow it.
 */
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
  		      struct bio *bio)
  {
  	if (!rq->bio)
  		blk_rq_bio_prep(q, rq, bio);
  	else if (!ll_back_merge_fn(q, rq, bio))
  		return -EINVAL;
  	else {
  		rq->biotail->bi_next = bio;
  		rq->biotail = bio;
  
  		rq->data_len += bio->bi_size;
  	}
  	return 0;
  }
  EXPORT_SYMBOL(blk_rq_append_bio);
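
/*
 * Rough usage sketch for blk_rq_append_bio(): a caller that has already
 * built a bio -- for instance with bio_map_kern() on a driver-owned
 * buffer -- attaches it to a request like this.  The queue @q, request
 * @rq, buffer @buf and length @len are hypothetical placeholders.  If the
 * append fails, the bio was not merged and is still the caller's to
 * release.
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *
 *	ret = blk_rq_append_bio(q, rq, bio);
 *	if (ret) {
 *		bio_put(bio);
 *		return ret;
 *	}
 */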
  
/*
 * Undo one mapped bio: unmap user pages, or copy a bounce buffer back to
 * user space and free it.
 */
static int __blk_rq_unmap_user(struct bio *bio)
  {
  	int ret = 0;
  
  	if (bio) {
  		if (bio_flagged(bio, BIO_USER_MAPPED))
  			bio_unmap_user(bio);
  		else
  			ret = bio_uncopy_user(bio);
  	}
  
  	return ret;
  }
  
/*
 * Map (or copy) one chunk of the user buffer into @rq; returns the number
 * of bytes added on success, or a negative error code.
 */
static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
  			     void __user *ubuf, unsigned int len)
  {
  	unsigned long uaddr;
  	unsigned int alignment;
  	struct bio *bio, *orig_bio;
  	int reading, ret;
  
  	reading = rq_data_dir(rq) == READ;
  
  	/*
  	 * if alignment requirement is satisfied, map in user pages for
  	 * direct dma. else, set up kernel bounce buffers
  	 */
  	uaddr = (unsigned long) ubuf;
  	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
  	if (!(uaddr & alignment) && !(len & alignment))
  		bio = bio_map_user(q, NULL, uaddr, len, reading);
  	else
  		bio = bio_copy_user(q, uaddr, len, reading);
  
  	if (IS_ERR(bio))
  		return PTR_ERR(bio);
  
  	orig_bio = bio;
  	blk_queue_bounce(q, &bio);
  
  	/*
  	 * We link the bounce buffer in and could have to traverse it
  	 * later so we have to get a ref to prevent it from being freed
  	 */
  	bio_get(bio);
  
  	ret = blk_rq_append_bio(q, rq, bio);
  	if (!ret)
  		return bio->bi_size;
  
	/* if it was bounced we must call the end io function */
  	bio_endio(bio, 0);
  	__blk_rq_unmap_user(orig_bio);
  	bio_put(bio);
  	return ret;
  }
  
  /**
   * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
   * @q:		request queue where request should be inserted
   * @rq:		request structure to fill
   * @ubuf:	the user buffer
   * @len:	length of user data
   *
   * Description:
   *    Data will be mapped directly for zero copy io, if possible. Otherwise
   *    a kernel bounce buffer is used.
   *
   *    A matching blk_rq_unmap_user() must be issued at the end of io, while
   *    still in process context.
   *
   *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
   *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
   *    original bio must be passed back in to blk_rq_unmap_user() for proper
   *    unmapping.
   */
  int blk_rq_map_user(struct request_queue *q, struct request *rq,
  		    void __user *ubuf, unsigned long len)
  {
  	unsigned long bytes_read = 0;
  	struct bio *bio = NULL;
  	int ret;
  
  	if (len > (q->max_hw_sectors << 9))
  		return -EINVAL;
  	if (!len || !ubuf)
  		return -EINVAL;
  
  	while (bytes_read != len) {
  		unsigned long map_len, end, start;
  
  		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
  		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
  								>> PAGE_SHIFT;
  		start = (unsigned long)ubuf >> PAGE_SHIFT;
  
  		/*
  		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
  		 * pages. If this happens we just lower the requested
  		 * mapping len by a page so that we can fit
  		 */
  		if (end - start > BIO_MAX_PAGES)
  			map_len -= PAGE_SIZE;
  
  		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
  		if (ret < 0)
  			goto unmap_rq;
  		if (!bio)
  			bio = rq->bio;
  		bytes_read += ret;
  		ubuf += ret;
  	}
  	if (!bio_flagged(bio, BIO_USER_MAPPED))
  		rq->cmd_flags |= REQ_COPY_USER;

  	rq->buffer = rq->data = NULL;
  	return 0;
  unmap_rq:
  	blk_rq_unmap_user(bio);
  	rq->bio = NULL;
  	return ret;
  }
  EXPORT_SYMBOL(blk_rq_map_user);
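
/*
 * Rough usage sketch, the SG_IO-style pattern this interface is meant for:
 * map the user buffer, remember the original rq->bio before issuing the
 * request (completion may advance rq->bio), then unmap with that saved
 * pointer while still in process context.  @q, @disk, @ubuf and @len are
 * hypothetical placeholders, and the command setup is left out.
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int ret;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	ret = blk_rq_map_user(q, rq, ubuf, len);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *	bio = rq->bio;
 *
 *	blk_execute_rq(q, disk, rq, 0);
 *
 *	ret = blk_rq_unmap_user(bio);
 *	blk_put_request(rq);
 *	return ret;
 */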
  
  /**
   * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
   * @q:		request queue where request should be inserted
   * @rq:		request to map data to
   * @iov:	pointer to the iovec
   * @iov_count:	number of elements in the iovec
   * @len:	I/O byte count
   *
   * Description:
   *    Data will be mapped directly for zero copy io, if possible. Otherwise
   *    a kernel bounce buffer is used.
   *
   *    A matching blk_rq_unmap_user() must be issued at the end of io, while
   *    still in process context.
   *
   *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
   *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
   *    original bio must be passed back in to blk_rq_unmap_user() for proper
   *    unmapping.
   */
  int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
  			struct sg_iovec *iov, int iov_count, unsigned int len)
  {
  	struct bio *bio;
  	int i, read = rq_data_dir(rq) == READ;
  	int unaligned = 0;
  
  	if (!iov || iov_count <= 0)
  		return -EINVAL;
  	for (i = 0; i < iov_count; i++) {
  		unsigned long uaddr = (unsigned long)iov[i].iov_base;
  
  		if (uaddr & queue_dma_alignment(q)) {
  			unaligned = 1;
  			break;
  		}
  	}
  
  	if (unaligned || (q->dma_pad_mask & len))
  		bio = bio_copy_user_iov(q, iov, iov_count, read);
  	else
  		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
  	if (IS_ERR(bio))
  		return PTR_ERR(bio);
  
  	if (bio->bi_size != len) {
  		bio_endio(bio, 0);
  		bio_unmap_user(bio);
  		return -EINVAL;
  	}
  	if (!bio_flagged(bio, BIO_USER_MAPPED))
  		rq->cmd_flags |= REQ_COPY_USER;
  	blk_queue_bounce(q, &bio);
  	bio_get(bio);
  	blk_rq_bio_prep(q, rq, bio);
  	rq->buffer = rq->data = NULL;
  	return 0;
  }
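
/*
 * Rough usage sketch: callers such as the SG_IO ioctl path hand over a
 * vector of user segments here instead of one flat buffer.  @uiov is a
 * hypothetical sg_iovec array already copied in from user space, @count
 * its element count and @total_len the sum of its iov_len fields; the
 * declarations and the rest of the flow follow the blk_rq_map_user()
 * sketch above.
 *
 *	ret = blk_rq_map_user_iov(q, rq, uiov, count, total_len);
 *	if (ret)
 *		return ret;
 *	bio = rq->bio;
 *
 *	blk_execute_rq(q, disk, rq, 0);
 *	ret = blk_rq_unmap_user(bio);
 */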
  
  /**
   * blk_rq_unmap_user - unmap a request with user data
   * @bio:	       start of bio list
   *
   * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller
 *    must supply rq->bio as it was when blk_rq_map_user() returned, since
 *    io completion may have changed rq->bio since then.
   */
  int blk_rq_unmap_user(struct bio *bio)
  {
  	struct bio *mapped_bio;
  	int ret = 0, ret2;
  
  	while (bio) {
  		mapped_bio = bio;
  		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
  			mapped_bio = bio->bi_private;
  
  		ret2 = __blk_rq_unmap_user(mapped_bio);
  		if (ret2 && !ret)
  			ret = ret2;
  
  		mapped_bio = bio;
  		bio = bio->bi_next;
  		bio_put(mapped_bio);
  	}
  
  	return ret;
  }
  EXPORT_SYMBOL(blk_rq_unmap_user);
  
  /**
   * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
   * @q:		request queue where request should be inserted
   * @rq:		request to fill
   * @kbuf:	the kernel buffer
 * @len:	length of kernel data
   * @gfp_mask:	memory allocation flags
   *
   * Description:
   *    Data will be mapped directly if possible. Otherwise a bounce
   *    buffer is used.
   */
  int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
  		    unsigned int len, gfp_t gfp_mask)
  {
  	unsigned long kaddr;
  	unsigned int alignment;
  	int reading = rq_data_dir(rq) == READ;
  	int do_copy = 0;
  	struct bio *bio;
  
  	if (len > (q->max_hw_sectors << 9))
  		return -EINVAL;
  	if (!len || !kbuf)
  		return -EINVAL;
  	kaddr = (unsigned long)kbuf;
  	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
  	do_copy = ((kaddr & alignment) || (len & alignment) ||
  		   object_is_on_stack(kbuf));

  	if (do_copy)
  		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
  	else
  		bio = bio_map_kern(q, kbuf, len, gfp_mask);
  	if (IS_ERR(bio))
  		return PTR_ERR(bio);
  
  	if (rq_data_dir(rq) == WRITE)
  		bio->bi_rw |= (1 << BIO_RW);
  	if (do_copy)
  		rq->cmd_flags |= REQ_COPY_USER;
  	blk_rq_bio_prep(q, rq, bio);
  	blk_queue_bounce(q, &rq->bio);
  	rq->buffer = rq->data = NULL;
  	return 0;
  }
  EXPORT_SYMBOL(blk_rq_map_kern);
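
/*
 * Rough usage sketch, close to what scsi_execute() does for packet
 * commands with a kernel buffer: map the buffer (a bounce copy is set up
 * automatically if it is misaligned or sits on the stack), execute, then
 * release the request.  @q, @disk, @buf and @buflen are hypothetical
 * placeholders, and the command setup is left out.
 *
 *	struct request *rq;
 *	int ret;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (!rq)
 *		return -ENOMEM;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	if (buflen) {
 *		ret = blk_rq_map_kern(q, rq, buf, buflen, GFP_KERNEL);
 *		if (ret) {
 *			blk_put_request(rq);
 *			return ret;
 *		}
 *	}
 *
 *	blk_execute_rq(q, disk, rq, 0);
 *	blk_put_request(rq);
 */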