block/blk-map.c
  /*
   * Functions related to mapping data to requests
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/uio.h>
  
  #include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
  int blk_rq_append_bio(struct request *rq, struct bio *bio)
  {
  	if (!rq->bio) {
  		blk_rq_bio_prep(rq->q, rq, bio);
  	} else {
  		if (!ll_back_merge_fn(rq->q, rq, bio))
  			return -EINVAL;
  		rq->biotail->bi_next = bio;
  		rq->biotail = bio;
  		rq->__data_len += bio->bi_iter.bi_size;
  	}

  	return 0;
  }
  EXPORT_SYMBOL(blk_rq_append_bio);
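
/*
 * Illustrative sketch, not part of the original file: a caller that chains
 * several pre-built bios onto one passthrough request.  The helper name
 * example_append_bios() and its arguments are hypothetical; the point is
 * that blk_rq_append_bio() may be called repeatedly and returns -EINVAL
 * once a bio can no longer be merged under the driver constraints.
 */
static int example_append_bios(struct request *rq, struct bio **bios, int nr)
{
	int i, ret;

	for (i = 0; i < nr; i++) {
		ret = blk_rq_append_bio(rq, bios[i]);
		if (ret)
			return ret;	/* caller still owns the unmerged bios */
	}
	return 0;
}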
  
  static int __blk_rq_unmap_user(struct bio *bio)
  {
  	int ret = 0;
  
  	if (bio) {
  		if (bio_flagged(bio, BIO_USER_MAPPED))
  			bio_unmap_user(bio);
  		else
  			ret = bio_uncopy_user(bio);
  	}
  
  	return ret;
  }
  static int __blk_rq_map_user_iov(struct request *rq,
  		struct rq_map_data *map_data, struct iov_iter *iter,
  		gfp_t gfp_mask, bool copy)
  {
  	struct request_queue *q = rq->q;
  	struct bio *bio, *orig_bio;
  	int ret;
  
  	if (copy)
  		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
  	else
  		bio = bio_map_user_iov(q, iter, gfp_mask);
  
  	if (IS_ERR(bio))
  		return PTR_ERR(bio);
  
  	if (map_data && map_data->null_mapped)
  		bio_set_flag(bio, BIO_NULL_MAPPED);
  
  	iov_iter_advance(iter, bio->bi_iter.bi_size);
  	if (map_data)
  		map_data->offset += bio->bi_iter.bi_size;
  
  	orig_bio = bio;
  	blk_queue_bounce(q, &bio);
  
  	/*
  	 * We link the bounce buffer in and could have to traverse it
  	 * later so we have to get a ref to prevent it from being freed
  	 */
  	bio_get(bio);
  	ret = blk_rq_append_bio(rq, bio);
  	if (ret) {
  		bio_endio(bio);
  		__blk_rq_unmap_user(orig_bio);
  		bio_put(bio);
  		return ret;
  	}
  
  	return 0;
  }
  /**
   * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
   * @q:		request queue where request should be inserted
   * @rq:		request to map data to
   * @map_data:   pointer to the rq_map_data holding pages (if necessary)
   * @iter:	iovec iterator
   * @gfp_mask:	memory allocation flags
   *
   * Description:
   *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
   *    a kernel bounce buffer is used.
   *
   *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
   *    still in process context.
   *
   *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
   *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
   *    original bio must be passed back in to blk_rq_unmap_user() for proper
   *    unmapping.
   */
  int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
  			struct rq_map_data *map_data,
  			const struct iov_iter *iter, gfp_t gfp_mask)
  {
  	bool copy = false;
  	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
  	struct bio *bio = NULL;
  	struct iov_iter i;
  	int ret;

  	if (!iter_is_iovec(iter))
  		goto fail;
  	if (map_data)
  		copy = true;
  	else if (iov_iter_alignment(iter) & align)
  		copy = true;
  	else if (queue_virt_boundary(q))
  		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

  	i = *iter;
  	do {
	ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
  		if (ret)
  			goto unmap_rq;
  		if (!bio)
  			bio = rq->bio;
  	} while (iov_iter_count(&i));

  	if (!bio_flagged(bio, BIO_USER_MAPPED))
  		rq->cmd_flags |= REQ_COPY_USER;
  	return 0;
  
  unmap_rq:
  	__blk_rq_unmap_user(bio);
  fail:
  	rq->bio = NULL;
  	return -EINVAL;
  }
  EXPORT_SYMBOL(blk_rq_map_user_iov);
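
/*
 * Illustrative sketch, not part of the original file: mapping a user-space
 * iovec into a passthrough request, roughly in the style of sg_io().  The
 * helper name example_map_user_iov() and the uvec/nr_segs arguments are
 * hypothetical; rq is assumed to be an already allocated passthrough
 * request, and a real caller needs <linux/slab.h> for kfree().  Remember
 * rq->bio afterwards so it can be handed to blk_rq_unmap_user() once the
 * request has completed.
 */
static int example_map_user_iov(struct request_queue *q, struct request *rq,
				const struct iovec __user *uvec,
				unsigned int nr_segs)
{
	struct iovec *iov = NULL;
	struct iov_iter i;
	ssize_t ret;

	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
	kfree(iov);
	return ret;
}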

  int blk_rq_map_user(struct request_queue *q, struct request *rq,
  		    struct rq_map_data *map_data, void __user *ubuf,
  		    unsigned long len, gfp_t gfp_mask)
  {
  	struct iovec iov;
  	struct iov_iter i;
  	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

  	if (unlikely(ret < 0))
  		return ret;

  	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
  }
  EXPORT_SYMBOL(blk_rq_map_user);
  /**
   * blk_rq_unmap_user - unmap a request with user data
   * @bio:	       start of bio list
   *
   * Description:
   *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
   *    supply the original rq->bio from the blk_rq_map_user() return, since
   *    the I/O completion may have changed rq->bio.
   */
  int blk_rq_unmap_user(struct bio *bio)
  {
  	struct bio *mapped_bio;
  	int ret = 0, ret2;
  
  	while (bio) {
  		mapped_bio = bio;
  		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
  			mapped_bio = bio->bi_private;
  
  		ret2 = __blk_rq_unmap_user(mapped_bio);
  		if (ret2 && !ret)
  			ret = ret2;
  
  		mapped_bio = bio;
  		bio = bio->bi_next;
  		bio_put(mapped_bio);
  	}
  
  	return ret;
  }
  EXPORT_SYMBOL(blk_rq_unmap_user);
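
/*
 * Illustrative sketch, not part of the original file: the usual
 * map/execute/unmap round trip for a single user buffer.  The helper name
 * example_user_roundtrip() is hypothetical and rq is assumed to be a fully
 * prepared passthrough request; note that rq->bio is saved *before*
 * execution, because completion may change it.
 */
static int example_user_roundtrip(struct request_queue *q, struct request *rq,
				  void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	bio = rq->bio;			/* remember the original bio list */
	blk_execute_rq(q, NULL, rq, 0);	/* synchronous execution, no gendisk */

	return blk_rq_unmap_user(bio);	/* copy back / release the user pages */
}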
  #ifdef CONFIG_AHCI_IMX
  extern void *sg_io_buffer_hack;
  #else
  #define sg_io_buffer_hack NULL
  #endif
  /**
   * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
   * @q:		request queue where request should be inserted
   * @rq:		request to fill
   * @kbuf:	the kernel buffer
   * @len:	length of user data
   * @gfp_mask:	memory allocation flags
   *
   * Description:
   *    Data will be mapped directly if possible. Otherwise a bounce
   *    buffer is used. Can be called multiple times to append multiple
   *    buffers.
   */
  int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
  		    unsigned int len, gfp_t gfp_mask)
  {
  	int reading = rq_data_dir(rq) == READ;
  	unsigned long addr = (unsigned long) kbuf;
  	int do_copy = 0;
  	struct bio *bio;
  	int ret;

  	if (len > (queue_max_hw_sectors(q) << 9))
  		return -EINVAL;
  	if (!len || !kbuf)
  		return -EINVAL;
  #ifdef CONFIG_AHCI_IMX
  	if (kbuf == sg_io_buffer_hack)
  		do_copy = 0;
  	else
  #endif
  		do_copy = !blk_rq_aligned(q, addr, len)
  			|| object_is_on_stack(kbuf);
  	if (do_copy)
  		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
  	else
  		bio = bio_map_kern(q, kbuf, len, gfp_mask);
  	if (IS_ERR(bio))
  		return PTR_ERR(bio);
  	if (!reading)
  		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

  	if (do_copy)
  		rq->cmd_flags |= REQ_COPY_USER;
  	ret = blk_rq_append_bio(rq, bio);
  	if (unlikely(ret)) {
  		/* request is too big */
  		bio_put(bio);
  		return ret;
  	}
  	blk_queue_bounce(q, &rq->bio);
  	return 0;
  }
  EXPORT_SYMBOL(blk_rq_map_kern);
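
/*
 * Illustrative sketch, not part of the original file: attaching a kernel
 * buffer to a passthrough request, as scsi_execute()-style helpers do.  The
 * helper name example_map_kern() is hypothetical; buf should not live on
 * the stack unless the copy path (bio_copy_kern()) is acceptable, since
 * blk_rq_map_kern() falls back to it for stack or misaligned buffers.
 */
static int example_map_kern(struct request_queue *q, struct request *rq,
			    void *buf, unsigned int len)
{
	int ret;

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (ret)
		return ret;	/* too big for the queue, or !len / !buf */

	blk_execute_rq(q, NULL, rq, 0);	/* synchronous execution */
	return 0;
}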