block/blk-merge.c 19.6 KB
  /*
   * Functions related to segment and merge handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  #include <trace/events/block.h>
  #include "blk.h"
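
  /*
   * Split a discard bio so the front piece respects max_discard_sectors and
   * ends on a discard-granularity boundary.  Returns the split-off front
   * piece, or NULL if the bio already fits within the limits.
   */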
  static struct bio *blk_bio_discard_split(struct request_queue *q,
  					 struct bio *bio,
  					 struct bio_set *bs,
  					 unsigned *nsegs)
  {
  	unsigned int max_discard_sectors, granularity;
  	int alignment;
  	sector_t tmp;
  	unsigned split_sectors;
  	*nsegs = 1;
  	/* Zero-sector (unknown) and one-sector granularities are the same.  */
  	granularity = max(q->limits.discard_granularity >> 9, 1U);
  
  	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
  	max_discard_sectors -= max_discard_sectors % granularity;
  
  	if (unlikely(!max_discard_sectors)) {
  		/* XXX: warn */
  		return NULL;
  	}
  
  	if (bio_sectors(bio) <= max_discard_sectors)
  		return NULL;
  
  	split_sectors = max_discard_sectors;
  
  	/*
  	 * If the next starting sector would be misaligned, stop the discard at
  	 * the previous aligned sector.
  	 */
  	alignment = (q->limits.discard_alignment >> 9) % granularity;
  
  	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
  	tmp = sector_div(tmp, granularity);
  
  	if (split_sectors > tmp)
  		split_sectors -= tmp;
  
  	return bio_split(bio, split_sectors, GFP_NOIO, bs);
  }
  
  static struct bio *blk_bio_write_same_split(struct request_queue *q,
  					    struct bio *bio,
  					    struct bio_set *bs,
  					    unsigned *nsegs)
  {
  	*nsegs = 1;
  	if (!q->limits.max_write_same_sectors)
  		return NULL;
  
  	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
  		return NULL;
  
  	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
  }
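
  /*
   * Return the maximum number of sectors a bio starting at this offset may
   * cover, aligned down to the queue's logical block size.
   */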
  static inline unsigned get_max_io_size(struct request_queue *q,
  				       struct bio *bio)
  {
  	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
  	unsigned mask = queue_logical_block_size(q) - 1;
  
  	/* aligned to logical block size */
  	sectors &= ~(mask >> 9);
  
  	return sectors;
  }
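
  /*
   * Walk the bio and split it at the first point where it would exceed the
   * queue's size, segment-count, segment-size or SG-gap limits (or the
   * BIO_MAX_PAGES bvec limit).  *segs is set to the number of physical
   * segments counted; the split-off front piece is returned, or NULL if no
   * split was needed.
   */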
  static struct bio *blk_bio_segment_split(struct request_queue *q,
  					 struct bio *bio,
  					 struct bio_set *bs,
  					 unsigned *segs)
  {
  	struct bio_vec bv, bvprv, *bvprvp = NULL;
  	struct bvec_iter iter;
  	unsigned seg_size = 0, nsegs = 0, sectors = 0;
  	unsigned front_seg_size = bio->bi_seg_front_size;
  	bool do_split = true;
  	struct bio *new = NULL;
  	const unsigned max_sectors = get_max_io_size(q, bio);
  	unsigned bvecs = 0;

  	bio_for_each_segment(bv, bio, iter) {
  		/*
  		 * With arbitrary bio size, the incoming bio may be very
  		 * big. We have to split the bio into small bios so that
  		 * each holds at most BIO_MAX_PAGES bvecs because
  		 * bio_clone() can fail to allocate big bvecs.
  		 *
  		 * It would be better to apply the limit per request
  		 * queue in which bio_clone() is involved, instead of
  		 * globally. The biggest blocker is the bio_clone()
  		 * in bio bounce.
  		 *
  		 * If the bio is split for this reason, we should have
  		 * allowed the resulting bios to keep merging, but we
  		 * don't do that for now to keep the change simple.
  		 *
  		 * TODO: deal with bio bounce's bio_clone() gracefully
  		 * and convert the global limit into per-queue limit.
  		 */
  		if (bvecs++ >= BIO_MAX_PAGES)
  			goto split;
  
  		/*
  		 * If the queue doesn't support SG gaps and adding this
  		 * offset would create a gap, disallow it.
  		 */
  		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
  			goto split;
  		if (sectors + (bv.bv_len >> 9) > max_sectors) {
  			/*
  			 * Consider this a new segment if we're splitting in
  			 * the middle of this vector.
  			 */
  			if (nsegs < queue_max_segments(q) &&
  			    sectors < max_sectors) {
  				nsegs++;
  				sectors = max_sectors;
  			}
  			if (sectors)
  				goto split;
  			/* Make this single bvec the first segment */
  		}
  		if (bvprvp && blk_queue_cluster(q)) {
  			if (seg_size + bv.bv_len > queue_max_segment_size(q))
  				goto new_segment;
  			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
  				goto new_segment;
  			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
  				goto new_segment;
  
  			seg_size += bv.bv_len;
  			bvprv = bv;
  			bvprvp = &bvprv;
  			sectors += bv.bv_len >> 9;
  
  			if (nsegs == 1 && seg_size > front_seg_size)
  				front_seg_size = seg_size;
  			continue;
  		}
  new_segment:
  		if (nsegs == queue_max_segments(q))
  			goto split;
  
  		nsegs++;
  		bvprv = bv;
  		bvprvp = &bvprv;
  		seg_size = bv.bv_len;
  		sectors += bv.bv_len >> 9;
  
  		if (nsegs == 1 && seg_size > front_seg_size)
  			front_seg_size = seg_size;
  	}
  	do_split = false;
  split:
  	*segs = nsegs;
  
  	if (do_split) {
  		new = bio_split(bio, sectors, GFP_NOIO, bs);
  		if (new)
  			bio = new;
  	}
  
  	bio->bi_seg_front_size = front_seg_size;
  	if (seg_size > bio->bi_seg_back_size)
  		bio->bi_seg_back_size = seg_size;
  
  	return do_split ? new : NULL;
  }
  
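  /*
   * Split *bio against the queue's limits if necessary: the remainder is
   * chained and resubmitted via generic_make_request(), and *bio is updated
   * to point at the front piece the caller should continue with.
   */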
  void blk_queue_split(struct request_queue *q, struct bio **bio,
  		     struct bio_set *bs)
  {
  	struct bio *split, *res;
  	unsigned nsegs;

  	switch (bio_op(*bio)) {
  	case REQ_OP_DISCARD:
  	case REQ_OP_SECURE_ERASE:
  		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
  		break;
  	case REQ_OP_WRITE_SAME:
  		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
  		break;
  	default:
  		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
  		break;
  	}
  
  	/* physical segments can be figured out during splitting */
  	res = split ? split : *bio;
  	res->bi_phys_segments = nsegs;
  	bio_set_flag(res, BIO_SEG_VALID);
  
  	if (split) {
  		/* there is no chance to merge the split bio */
  		split->bi_opf |= REQ_NOMERGE;

  		bio_chain(split, *bio);
  		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
  		generic_make_request(*bio);
  		*bio = split;
  	}
  }
  EXPORT_SYMBOL(blk_queue_split);
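
  /*
   * Count the physical segments in a bio chain, honouring the queue's
   * clustering and segment-size limits, and record the front/back segment
   * sizes used by later merge decisions.
   */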
  static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
  					     struct bio *bio,
  					     bool no_sg_merge)
  {
  	struct bio_vec bv, bvprv = { NULL };
  	int cluster, prev = 0;
  	unsigned int seg_size, nr_phys_segs;
  	struct bio *fbio, *bbio;
  	struct bvec_iter iter;

  	if (!bio)
  		return 0;

  	/*
  	 * This should probably be returning 0, but blk_add_request_payload()
  	 * (Christoph!!!!)
  	 */
  	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
  		return 1;
  	if (bio_op(bio) == REQ_OP_WRITE_SAME)
  		return 1;
  	fbio = bio;
  	cluster = blk_queue_cluster(q);
  	seg_size = 0;
  	nr_phys_segs = 0;
  	for_each_bio(bio) {
  		bio_for_each_segment(bv, bio, iter) {
  			/*
  			 * If SG merging is disabled, each bio vector is
  			 * a segment
  			 */
  			if (no_sg_merge)
  				goto new_segment;
  			if (prev && cluster) {
  				if (seg_size + bv.bv_len
  				    > queue_max_segment_size(q))
  					goto new_segment;
  				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
  					goto new_segment;
  				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
  					goto new_segment;

  				seg_size += bv.bv_len;
  				bvprv = bv;
  				continue;
  			}
  new_segment:
  			if (nr_phys_segs == 1 && seg_size >
  			    fbio->bi_seg_front_size)
  				fbio->bi_seg_front_size = seg_size;

  			nr_phys_segs++;
  			bvprv = bv;
  			prev = 1;
  			seg_size = bv.bv_len;
  		}
  		bbio = bio;
  	}
  	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
  		fbio->bi_seg_front_size = seg_size;
  	if (seg_size > bbio->bi_seg_back_size)
  		bbio->bi_seg_back_size = seg_size;
  
  	return nr_phys_segs;
  }
  
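  /* Recompute the cached physical segment count of a request. */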
  void blk_recalc_rq_segments(struct request *rq)
  {
  	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
  			&rq->q->queue_flags);
  
  	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
  			no_sg_merge);
  }
  
  void blk_recount_segments(struct request_queue *q, struct bio *bio)
  {
  	unsigned short seg_cnt;
  
  	/* estimate segment number by bi_vcnt for non-cloned bio */
  	if (bio_flagged(bio, BIO_CLONED))
  		seg_cnt = bio_segments(bio);
  	else
  		seg_cnt = bio->bi_vcnt;

  	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
  			(seg_cnt < queue_max_segments(q)))
  		bio->bi_phys_segments = seg_cnt;
  	else {
  		struct bio *nxt = bio->bi_next;
  
  		bio->bi_next = NULL;
  		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
  		bio->bi_next = nxt;
  	}

  	bio_set_flag(bio, BIO_SEG_VALID);
  }
  EXPORT_SYMBOL(blk_recount_segments);
  
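  /*
   * Return 1 if the last bvec of @bio and the first bvec of @nxt can be
   * merged into a single physical segment under the queue's limits.
   */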
  static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
  				   struct bio *nxt)
  {
  	struct bio_vec end_bv = { NULL }, nxt_bv;

  	if (!blk_queue_cluster(q))
  		return 0;
  	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
  	    queue_max_segment_size(q))
  		return 0;
  	if (!bio_has_data(bio))
  		return 1;
  	bio_get_last_bvec(bio, &end_bv);
  	bio_get_first_bvec(nxt, &nxt_bv);
  
  	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
  		return 0;
  	/*
  	 * bio and nxt are contiguous in memory; check if the queue allows
  	 * these two to be merged into one
  	 */
  	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
  		return 1;
  
  	return 0;
  }
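
  /*
   * Add one bio_vec to the scatterlist, merging it into the current
   * scatterlist entry when the queue's clustering rules allow it.
   */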
  static inline void
  __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
  		     struct scatterlist *sglist, struct bio_vec *bvprv,
  		     struct scatterlist **sg, int *nsegs, int *cluster)
  {
  
  	int nbytes = bvec->bv_len;
  	if (*sg && *cluster) {
  		if ((*sg)->length + nbytes > queue_max_segment_size(q))
  			goto new_segment;
  		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
  			goto new_segment;
  		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
  			goto new_segment;
  
  		(*sg)->length += nbytes;
  	} else {
  new_segment:
  		if (!*sg)
  			*sg = sglist;
  		else {
  			/*
  			 * If the driver previously mapped a shorter
  			 * list, we could see a termination bit
  			 * prematurely unless it fully inits the sg
  			 * table on each mapping. We KNOW that there
  			 * must be more entries here or the driver
  			 * would be buggy, so force clear the
  			 * termination bit to avoid doing a full
  			 * sg_init_table() in drivers for each command.
  			 */
  			sg_unmark_end(*sg);
  			*sg = sg_next(*sg);
  		}
  
  		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
  		(*nsegs)++;
  	}
  	*bvprv = *bvec;
  }
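
  /*
   * Map a chain of bios to a scatterlist; returns the number of scatterlist
   * entries used.
   */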
  static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
  			     struct scatterlist *sglist,
  			     struct scatterlist **sg)
  {
  	struct bio_vec bvec, bvprv = { NULL };
  	struct bvec_iter iter;
  	int nsegs, cluster;
  
  	nsegs = 0;
  	cluster = blk_queue_cluster(q);

  	switch (bio_op(bio)) {
  	case REQ_OP_DISCARD:
  	case REQ_OP_SECURE_ERASE:
  		/*
  		 * This is a hack - drivers should be neither modifying the
  		 * biovec, nor relying on bi_vcnt - but because of
  		 * blk_add_request_payload(), a discard bio may or may not have
  		 * a payload we need to set up here (thank you Christoph) and
  		 * bi_vcnt is really the only way of telling if we need to.
  		 */
  		if (!bio->bi_vcnt)
  			return 0;
  		/* Fall through */
  	case REQ_OP_WRITE_SAME:
  		*sg = sglist;
  		bvec = bio_iovec(bio);
  		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
  		return 1;
  	default:
  		break;
  	}
  
  	for_each_bio(bio)
  		bio_for_each_segment(bvec, bio, iter)
  			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
  					     &nsegs, &cluster);

  	return nsegs;
  }
  
  /*
   * map a request to scatterlist, return number of sg entries setup. Caller
   * must make sure sg can hold rq->nr_phys_segments entries
   */
  int blk_rq_map_sg(struct request_queue *q, struct request *rq,
  		  struct scatterlist *sglist)
  {
  	struct scatterlist *sg = NULL;
  	int nsegs = 0;
  
  	if (rq->bio)
  		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
  
  	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
  	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
  		unsigned int pad_len =
  			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
  
  		sg->length += pad_len;
  		rq->extra_len += pad_len;
  	}
  	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
  		if (op_is_write(req_op(rq)))
  			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
  		sg_unmark_end(sg);
  		sg = sg_next(sg);
  		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
  			    q->dma_drain_size,
  			    ((unsigned long)q->dma_drain_buffer) &
  			    (PAGE_SIZE - 1));
  		nsegs++;
  		rq->extra_len += q->dma_drain_size;
  	}
  
  	if (sg)
  		sg_mark_end(sg);
  	/*
  	 * Something must have gone wrong if the computed number of
  	 * segments is bigger than the number of req's physical segments
  	 */
  	WARN_ON(nsegs > rq->nr_phys_segments);
  	return nsegs;
  }
  EXPORT_SYMBOL(blk_rq_map_sg);
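
  /*
   * Add @bio's segments to @req if the queue's segment and integrity limits
   * allow it; on failure mark the request as not mergeable and return 0.
   */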
  static inline int ll_new_hw_segment(struct request_queue *q,
  				    struct request *req,
  				    struct bio *bio)
  {
  	int nr_phys_segs = bio_phys_segments(q, bio);
  	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
  		goto no_merge;
  	if (blk_integrity_merge_bio(q, req, bio) == false)
  		goto no_merge;
  
  	/*
  	 * This will form the start of a new hw segment.  Bump both
  	 * counters.
  	 */
  	req->nr_phys_segments += nr_phys_segs;
  	return 1;
  
  no_merge:
  	req->cmd_flags |= REQ_NOMERGE;
  	if (req == q->last_merge)
  		q->last_merge = NULL;
  	return 0;
  }
  
  int ll_back_merge_fn(struct request_queue *q, struct request *req,
  		     struct bio *bio)
  {
  	if (req_gap_back_merge(req, bio))
  		return 0;
  	if (blk_integrity_rq(req) &&
  	    integrity_req_gap_back_merge(req, bio))
  		return 0;
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
  		blk_recount_segments(q, req->biotail);
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  int ll_front_merge_fn(struct request_queue *q, struct request *req,
  		      struct bio *bio)
  {
  
  	if (req_gap_front_merge(req, bio))
  		return 0;
  	if (blk_integrity_rq(req) &&
  	    integrity_req_gap_front_merge(req, bio))
  		return 0;
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  	if (!bio_flagged(req->bio, BIO_SEG_VALID))
  		blk_recount_segments(q, req->bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  /*
   * blk-mq uses req->special to carry normal driver per-request payload, it
   * does not indicate a prepared command that we cannot merge with.
   */
  static bool req_no_special_merge(struct request *req)
  {
  	struct request_queue *q = req->q;
  
  	return !q->mq_ops && req->special;
  }
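
  /*
   * Check whether @req and @next can be merged without exceeding the queue's
   * size and segment limits, and fix up the segment accounting if they can.
   */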
  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
  				struct request *next)
  {
  	int total_phys_segments;
  	unsigned int seg_size =
  		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
  
  	/*
  	 * First check whether either of the requests is a re-queued
  	 * request; we can't merge them if so.
  	 */
  	if (req_no_special_merge(req) || req_no_special_merge(next))
  		return 0;
  	if (req_gap_back_merge(req, next->bio))
  		return 0;
  	/*
  	 * Will it become too large?
  	 */
  	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
  		return 0;
  
  	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
  	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
  		if (req->nr_phys_segments == 1)
  			req->bio->bi_seg_front_size = seg_size;
  		if (next->nr_phys_segments == 1)
  			next->biotail->bi_seg_back_size = seg_size;
  		total_phys_segments--;
  	}

  	if (total_phys_segments > queue_max_segments(q))
  		return 0;
  	if (blk_integrity_merge_rq(q, req, next) == false)
  		return 0;
  	/* Merge is OK... */
  	req->nr_phys_segments = total_phys_segments;
  	return 1;
  }
  /**
   * blk_rq_set_mixed_merge - mark a request as mixed merge
   * @rq: request to mark as mixed merge
   *
   * Description:
   *     @rq is about to be mixed merged.  Make sure the attributes
   *     which can be mixed are set in each bio and mark @rq as mixed
   *     merged.
   */
  void blk_rq_set_mixed_merge(struct request *rq)
  {
  	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
  	struct bio *bio;
  
  	if (rq->cmd_flags & REQ_MIXED_MERGE)
  		return;
  
  	/*
  	 * @rq will no longer represent mixable attributes for all the
  	 * contained bios.  It will just track those of the first one.
  	 * Distribute the attributes to each bio.
  	 */
  	for (bio = rq->bio; bio; bio = bio->bi_next) {
  		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
  			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
  		bio->bi_opf |= ff;
  	}
  	rq->cmd_flags |= REQ_MIXED_MERGE;
  }
  static void blk_account_io_merge(struct request *req)
  {
  	if (blk_do_io_stat(req)) {
  		struct hd_struct *part;
  		int cpu;
  
  		cpu = part_stat_lock();
  		part = req->part;
  
  		part_round_stats(cpu, part);
  		part_dec_in_flight(part, rq_data_dir(req));

  		hd_struct_put(part);
  		part_stat_unlock();
  	}
  }
  /*
   * Has to be called with the request spinlock acquired
   */
  static int attempt_merge(struct request_queue *q, struct request *req,
  			  struct request *next)
  {
  	if (!rq_mergeable(req) || !rq_mergeable(next))
  		return 0;
  	if (req_op(req) != req_op(next))
  		return 0;
  	/*
  	 * not contiguous
  	 */
  	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
  		return 0;
  
  	if (rq_data_dir(req) != rq_data_dir(next)
  	    || req->rq_disk != next->rq_disk
  	    || req_no_special_merge(next))
  		return 0;
  	if (req_op(req) == REQ_OP_WRITE_SAME &&
  	    !blk_write_same_mergeable(req->bio, next->bio))
  		return 0;
  	/*
  	 * If we are allowed to merge, then append bio list
  	 * from next to rq and release next. merge_requests_fn
  	 * will have updated segment counts, update sector
  	 * counts here.
  	 */
  	if (!ll_merge_requests_fn(q, req, next))
  		return 0;
  
  	/*
  	 * If failfast settings disagree or any of the two is already
  	 * a mixed merge, mark both as mixed before proceeding.  This
  	 * makes sure that all involved bios have mixable attributes
  	 * set properly.
  	 */
  	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
  	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
  	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
  		blk_rq_set_mixed_merge(req);
  		blk_rq_set_mixed_merge(next);
  	}
  
  	/*
  	 * At this point we have either done a back merge
  	 * or front merge. We need the smaller start_time of
  	 * the merged requests to be the current request
  	 * for accounting purposes.
  	 */
  	if (time_after(req->start_time, next->start_time))
  		req->start_time = next->start_time;
  
  	req->biotail->bi_next = next->bio;
  	req->biotail = next->biotail;
  	req->__data_len += blk_rq_bytes(next);
  
  	elv_merge_requests(q, req, next);
  	/*
  	 * 'next' is going away, so update stats accordingly
  	 */
  	blk_account_io_merge(next);
  
  	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
  	if (blk_rq_cpu_valid(next))
  		req->cpu = next->cpu;

  	/* ownership of bio passed from next to req */
  	next->bio = NULL;
  	__blk_put_request(q, next);
  	return 1;
  }
  
  int attempt_back_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *next = elv_latter_request(q, rq);
  
  	if (next)
  		return attempt_merge(q, rq, next);
  
  	return 0;
  }
  
  int attempt_front_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *prev = elv_former_request(q, rq);
  
  	if (prev)
  		return attempt_merge(q, prev, rq);
  
  	return 0;
  }
  
  int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
  			  struct request *next)
  {
  	struct elevator_queue *e = q->elevator;
  
  	if (e->type->ops.elevator_allow_rq_merge_fn)
  		if (!e->type->ops.elevator_allow_rq_merge_fn(q, rq, next))
  			return 0;
  	return attempt_merge(q, rq, next);
  }
  
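  /*
   * Basic mergeability check: @bio may only be merged into @rq if both are
   * mergeable, use the same operation and data direction, target the same
   * device, and have compatible integrity and write-same payloads.
   */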
  bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
  {
  	if (!rq_mergeable(rq) || !bio_mergeable(bio))
  		return false;
  	if (req_op(rq) != bio_op(bio))
  		return false;
  	/* different data direction or already started, don't merge */
  	if (bio_data_dir(bio) != rq_data_dir(rq))
  		return false;
  
  	/* must be same device and not a special request */
  	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
  		return false;
  
  	/* only merge integrity protected bio into ditto rq */
  	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
  		return false;
  	/* must be using the same buffer */
  	if (req_op(rq) == REQ_OP_WRITE_SAME &&
  	    !blk_write_same_mergeable(rq->bio, bio))
  		return false;
  	return true;
  }
  
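  /*
   * Decide, from sector adjacency alone, whether @bio would be a back merge
   * or a front merge candidate for @rq.
   */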
  int blk_try_merge(struct request *rq, struct bio *bio)
  {
  	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
  		return ELEVATOR_BACK_MERGE;
  	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
  		return ELEVATOR_FRONT_MERGE;
  	return ELEVATOR_NO_MERGE;
  }