block/blk-map.c

  /*
   * Functions related to mapping data to requests
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/uio.h>
  
  #include "blk.h"
  /*
   * Append a bio to a passthrough request.  Only works if the bio can be
   * merged into the request based on the driver constraints.
   */
  int blk_rq_append_bio(struct request *rq, struct bio *bio)
  {
  	if (!rq->bio) {
  		blk_rq_bio_prep(rq->q, rq, bio);
  	} else {
  		if (!ll_back_merge_fn(rq->q, rq, bio))
  			return -EINVAL;
  		rq->biotail->bi_next = bio;
  		rq->biotail = bio;
  		rq->__data_len += bio->bi_iter.bi_size;
  	}

  	return 0;
  }
  EXPORT_SYMBOL(blk_rq_append_bio);
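
  /*
   * Illustrative sketch (not part of the original blk-map.c): a hypothetical
   * caller that has already built its own bio around a single page and wants
   * to attach it to a passthrough request.  The helper name, the single-page
   * layout and the trimmed error handling are assumptions for illustration;
   * op/flags on the bio are left to the caller.
   */
  static int __maybe_unused example_append_one_page(struct request *rq,
  						  struct page *page,
  						  unsigned int len)
  {
  	struct bio *bio;
  	int ret;
  
  	bio = bio_kmalloc(GFP_KERNEL, 1);
  	if (!bio)
  		return -ENOMEM;
  
  	if (bio_add_pc_page(rq->q, bio, page, len, 0) < len) {
  		bio_put(bio);
  		return -EINVAL;
  	}
  
  	/* Fails with -EINVAL if the bio cannot be merged into @rq. */
  	ret = blk_rq_append_bio(rq, bio);
  	if (ret)
  		bio_put(bio);
  	return ret;
  }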
  
  static int __blk_rq_unmap_user(struct bio *bio)
  {
  	int ret = 0;
  
  	if (bio) {
  		if (bio_flagged(bio, BIO_USER_MAPPED))
  			bio_unmap_user(bio);
  		else
  			ret = bio_uncopy_user(bio);
  	}
  
  	return ret;
  }
  static int __blk_rq_map_user_iov(struct request *rq,
  		struct rq_map_data *map_data, struct iov_iter *iter,
  		gfp_t gfp_mask, bool copy)
  {
  	struct request_queue *q = rq->q;
  	struct bio *bio, *orig_bio;
  	int ret;
  
  	if (copy)
  		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
  	else
  		bio = bio_map_user_iov(q, iter, gfp_mask);
  
  	if (IS_ERR(bio))
  		return PTR_ERR(bio);
  
  	if (map_data && map_data->null_mapped)
  		bio_set_flag(bio, BIO_NULL_MAPPED);
  
  	iov_iter_advance(iter, bio->bi_iter.bi_size);
  	if (map_data)
  		map_data->offset += bio->bi_iter.bi_size;
  
  	orig_bio = bio;
  	blk_queue_bounce(q, &bio);
  
  	/*
  	 * We link the bounce buffer in and could have to traverse it
  	 * later so we have to get a ref to prevent it from being freed
  	 */
  	bio_get(bio);
  	ret = blk_rq_append_bio(rq, bio);
  	if (ret) {
  		bio_endio(bio);
  		__blk_rq_unmap_user(orig_bio);
  		bio_put(bio);
  		return ret;
  	}
  
  	return 0;
  }
  /**
   * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
   * @q:		request queue where request should be inserted
   * @rq:		request to map data to
   * @map_data:   pointer to the rq_map_data holding pages (if necessary)
   * @iter:	iovec iterator
   * @gfp_mask:	memory allocation flags
   *
   * Description:
   *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
   *    a kernel bounce buffer is used.
   *
   *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
   *    still in process context.
   *
   *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
   *    before being submitted to the device, as pages mapped may be out of
   *    reach. It's the caller's responsibility to make sure this happens. The
   *    original bio must be passed back in to blk_rq_unmap_user() for proper
   *    unmapping.
   */
  int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
  			struct rq_map_data *map_data,
  			const struct iov_iter *iter, gfp_t gfp_mask)
  {
  	bool copy = false;
  	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
  	struct bio *bio = NULL;
  	struct iov_iter i;
  	int ret;

  	if (map_data)
  		copy = true;
  	else if (iov_iter_alignment(iter) & align)
  		copy = true;
  	else if (queue_virt_boundary(q))
  		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

  	i = *iter;
  	do {
  		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
  		if (ret)
  			goto unmap_rq;
  		if (!bio)
  			bio = rq->bio;
  	} while (iov_iter_count(&i));

  	if (!bio_flagged(bio, BIO_USER_MAPPED))
  		rq->cmd_flags |= REQ_COPY_USER;
  	return 0;
  
  unmap_rq:
  	__blk_rq_unmap_user(bio);
  	rq->bio = NULL;
  	return -EINVAL;
  }
  EXPORT_SYMBOL(blk_rq_map_user_iov);
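
  /*
   * Illustrative sketch (not part of the original blk-map.c): mapping a
   * user-space iovec array into an already-allocated passthrough request,
   * roughly the way sg/bsg-style drivers drive this interface.  The helper
   * name, the GFP_KERNEL/no-map_data choices and the <linux/slab.h>
   * dependency for kfree() are assumptions for illustration.
   */
  static int __maybe_unused example_map_user_iovec(struct request *rq,
  					const struct iovec __user *uvec,
  					unsigned int nr_segs)
  {
  	struct iovec fast_iov[UIO_FASTIOV], *iov = fast_iov;
  	struct iov_iter iter;
  	int ret;
  
  	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs,
  			   ARRAY_SIZE(fast_iov), &iov, &iter);
  	if (ret < 0)
  		return ret;
  
  	/* Zero-copy mapping if alignment allows, otherwise a copy/bounce. */
  	ret = blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
  
  	kfree(iov);	/* import_iovec() may have switched to a heap array */
  	return ret;
  }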

  int blk_rq_map_user(struct request_queue *q, struct request *rq,
  		    struct rq_map_data *map_data, void __user *ubuf,
  		    unsigned long len, gfp_t gfp_mask)
  {
  	struct iovec iov;
  	struct iov_iter i;
  	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

  	if (unlikely(ret < 0))
  		return ret;

  	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
  }
  EXPORT_SYMBOL(blk_rq_map_user);
  /**
   * blk_rq_unmap_user - unmap a request with user data
   * @bio:	       start of bio list
   *
   * Description:
   *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
   *    supply the original rq->bio from the blk_rq_map_user() return, since
   *    the I/O completion may have changed rq->bio.
   */
  int blk_rq_unmap_user(struct bio *bio)
  {
  	struct bio *mapped_bio;
  	int ret = 0, ret2;
  
  	while (bio) {
  		mapped_bio = bio;
  		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
  			mapped_bio = bio->bi_private;
  
  		ret2 = __blk_rq_unmap_user(mapped_bio);
  		if (ret2 && !ret)
  			ret = ret2;
  
  		mapped_bio = bio;
  		bio = bio->bi_next;
  		bio_put(mapped_bio);
  	}
  
  	return ret;
  }
  EXPORT_SYMBOL(blk_rq_unmap_user);
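
  /*
   * Illustrative sketch (not part of the original blk-map.c): the
   * map -> execute -> unmap pairing for a user buffer.  rq->bio is saved
   * *before* the request is executed because completion may change rq->bio,
   * and the saved pointer is what blk_rq_unmap_user() needs.  The helper
   * name is an assumption; blk_get_request(), blk_rq_set_block_pc() and
   * blk_execute_rq() are the passthrough plumbing of this kernel generation
   * and are used here only to frame the example (command setup omitted).
   */
  static int __maybe_unused example_user_passthrough(struct request_queue *q,
  						   void __user *ubuf,
  						   unsigned long len)
  {
  	struct request *rq;
  	struct bio *bio;
  	int ret;
  
  	rq = blk_get_request(q, READ, GFP_KERNEL);
  	if (IS_ERR(rq))
  		return PTR_ERR(rq);
  	blk_rq_set_block_pc(rq);
  
  	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
  	if (ret)
  		goto out_put;
  
  	bio = rq->bio;			/* keep for unmapping */
  	blk_execute_rq(q, NULL, rq, 0);
  
  	ret = blk_rq_unmap_user(bio);
  out_put:
  	blk_put_request(rq);
  	return ret;
  }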
  
  /**
   * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
   * @q:		request queue where request should be inserted
   * @rq:		request to fill
   * @kbuf:	the kernel buffer
   * @len:	length of kernel data
   * @gfp_mask:	memory allocation flags
   *
   * Description:
   *    Data will be mapped directly if possible. Otherwise a bounce
   *    buffer is used. Can be called multiple times to append multiple
   *    buffers.
   */
  int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
  		    unsigned int len, gfp_t gfp_mask)
  {
  	int reading = rq_data_dir(rq) == READ;
  	unsigned long addr = (unsigned long) kbuf;
  	int do_copy = 0;
  	struct bio *bio;
  	int ret;

  	if (len > (queue_max_hw_sectors(q) << 9))
  		return -EINVAL;
  	if (!len || !kbuf)
  		return -EINVAL;
  	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
  	if (do_copy)
  		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
  	else
  		bio = bio_map_kern(q, kbuf, len, gfp_mask);
  	if (IS_ERR(bio))
  		return PTR_ERR(bio);
  	if (!reading)
  		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

  	if (do_copy)
  		rq->cmd_flags |= REQ_COPY_USER;
  	ret = blk_rq_append_bio(rq, bio);
  	if (unlikely(ret)) {
  		/* request is too big */
  		bio_put(bio);
  		return ret;
  	}
  	blk_queue_bounce(q, &rq->bio);
  	return 0;
  }
  EXPORT_SYMBOL(blk_rq_map_kern);
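
  /*
   * Illustrative sketch (not part of the original blk-map.c): mapping a
   * kernel buffer into a passthrough request.  A heap-allocated, properly
   * aligned buffer is normally mapped directly; a stack or misaligned buffer
   * makes blk_rq_map_kern() fall back to the copy path.  The helper name and
   * the READ direction are assumptions; command setup is omitted as above.
   */
  static int __maybe_unused example_kernel_passthrough(struct request_queue *q,
  						     void *kbuf,
  						     unsigned int len)
  {
  	struct request *rq;
  	int ret;
  
  	rq = blk_get_request(q, READ, GFP_KERNEL);
  	if (IS_ERR(rq))
  		return PTR_ERR(rq);
  	blk_rq_set_block_pc(rq);
  
  	ret = blk_rq_map_kern(q, rq, kbuf, len, GFP_KERNEL);
  	if (!ret)
  		blk_execute_rq(q, NULL, rq, 0);
  
  	blk_put_request(rq);
  	return ret;
  }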