block/blk-merge.c
// SPDX-License-Identifier: GPL-2.0
  /*
   * Functions related to segment and merge handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

  static inline bool bio_will_gap(struct request_queue *q,
  		struct request *prev_rq, struct bio *prev, struct bio *next)
  {
  	struct bio_vec pb, nb;
  
  	if (!bio_has_data(prev) || !queue_virt_boundary(q))
  		return false;
  
  	/*
  	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
  	 * is quite difficult to respect the sg gap limit.  We work hard to
  	 * merge a huge number of small single bios in case of mkfs.
  	 */
  	if (prev_rq)
  		bio_get_first_bvec(prev_rq->bio, &pb);
  	else
  		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
  		return true;
  
  	/*
  	 * We don't need to worry about the situation that the merged segment
  	 * ends in unaligned virt boundary:
  	 *
  	 * - if 'pb' ends aligned, the merged segment ends aligned
  	 * - if 'pb' ends unaligned, the next bio must include
  	 *   one single bvec of 'nb', otherwise the 'nb' can't
  	 *   merge with 'pb'
  	 */
  	bio_get_last_bvec(prev, &pb);
  	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
  		return false;
  	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
  }
  
  static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
  {
  	return bio_will_gap(req->q, req, req->biotail, bio);
  }
  
  static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
  {
  	return bio_will_gap(req->q, NULL, bio, req->bio);
  }

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
  	max_discard_sectors -= max_discard_sectors % granularity;
  
  	if (unlikely(!max_discard_sectors)) {
  		/* XXX: warn */
  		return NULL;
  	}
  
  	if (bio_sectors(bio) <= max_discard_sectors)
  		return NULL;
  
  	split_sectors = max_discard_sectors;
  
  	/*
  	 * If the next starting sector would be misaligned, stop the discard at
  	 * the previous aligned sector.
  	 */
  	alignment = (q->limits.discard_alignment >> 9) % granularity;
  
  	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
  	tmp = sector_div(tmp, granularity);
  
  	if (split_sectors > tmp)
  		split_sectors -= tmp;
  
  	return bio_split(bio, split_sectors, GFP_NOIO, bs);
  }
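
/*
 * Worked example for blk_bio_discard_split() above (illustrative numbers,
 * not taken from any particular device): with discard_granularity = 1 MiB
 * (granularity = 2048 sectors), discard_alignment = 0 and
 * max_discard_sectors = 4096, a discard bio starting at sector 1000 gives
 *
 *	split_sectors = 4096
 *	remainder of (1000 + 4096) / 2048 = 1000
 *	split_sectors -= 1000		-> 3096
 *
 * so the split ends at sector 1000 + 3096 = 4096 and the remainder of the
 * discard starts on a granularity-aligned boundary.
 */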

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 0;
  
  	if (!q->limits.max_write_zeroes_sectors)
  		return NULL;
  
  	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
  		return NULL;
  
  	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
  }

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

  	if (!q->limits.max_write_same_sectors)
  		return NULL;
  
  	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
  		return NULL;
  
  	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
  }
  /*
   * Return the maximum number of sectors from the start of a bio that may be
   * submitted as a single request to a block device. If enough sectors remain,
   * align the end to the physical block size. Otherwise align the end to the
   * logical block size. This approach minimizes the number of non-aligned
   * requests that are submitted to a block device if the start of a bio is not
   * aligned to a physical block boundary.
   */
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
	unsigned max_sectors = sectors;
	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

	max_sectors += start_offset;
	max_sectors &= ~(pbs - 1);
	if (max_sectors > start_offset)
		return max_sectors - start_offset;

	return sectors & ~(lbs - 1);
  }
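
/*
 * Illustrative example for get_max_io_size() above (made-up values): with a
 * 4096-byte physical block size (pbs = 8 sectors) and a bio whose starting
 * sector has start_offset = 3, if blk_max_size_offset() allows 255 sectors:
 *
 *	max_sectors = 255 + 3 = 258
 *	max_sectors &= ~7	-> 256
 *	return 256 - 3 = 253
 *
 * so the first request ends on a physical block boundary (3 + 253 = 256).
 */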

static inline unsigned get_max_segment_size(const struct request_queue *q,
					    struct page *start_page,
					    unsigned long offset)
{
	unsigned long mask = queue_segment_boundary(q);

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * overflow may be triggered in case of zero page physical address
	 * on 32bit arch, use queue's max segment size when that happens.
	 */
	return min_not_zero(mask - offset + 1,
			(unsigned long)queue_max_segment_size(q));
  }
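
/*
 * Note on the min_not_zero() above (concrete case of the in-code comment):
 * with a 32-bit segment boundary mask of 0xffffffff and a (page, offset)
 * pair whose physical address is 0, mask - offset + 1 wraps to 0, so the
 * queue's max_segment_size is used instead.
 */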
  /**
   * bvec_split_segs - verify whether or not a bvec should be split in the middle
   * @q:        [in] request queue associated with the bio associated with @bv
   * @bv:       [in] bvec to examine
   * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
   *            by the number of segments from @bv that may be appended to that
   *            bio without exceeding @max_segs
   * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
   *            by the number of sectors from @bv that may be appended to that
   *            bio without exceeding @max_sectors
   * @max_segs: [in] upper bound for *@nsegs
   * @max_sectors: [in] upper bound for *@sectors
   *
   * When splitting a bio, it can happen that a bvec is encountered that is too
   * big to fit in a single segment and hence that it has to be split in the
   * middle. This function verifies whether or not that should happen. The value
   * %true is returned if and only if appending the entire @bv to a bio with
   * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
   * the block driver.
 */
static bool bvec_split_segs(const struct request_queue *q,
			    const struct bio_vec *bv, unsigned *nsegs,
			    unsigned *sectors, unsigned max_segs,
			    unsigned max_sectors)
{
	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	*sectors += total_len >> 9;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
  }
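
/*
 * Illustrative example for bvec_split_segs() above (hypothetical limits): a
 * 64 KiB bvec on a queue with a 32 KiB max_segment_size, no virt boundary
 * and plenty of room in max_segs/max_sectors is counted as two segments:
 * the loop adds two 32 KiB chunks, *nsegs grows by 2, *sectors by 128, and
 * the function returns false because nothing of the bvec is left over.
 */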
  /**
   * blk_bio_segment_split - split a bio in two bios
   * @q:    [in] request queue pointer
   * @bio:  [in] bio to be split
   * @bs:	  [in] bio set to allocate the clone from
   * @segs: [out] number of segments in the bio with the first half of the sectors
   *
   * Clone @bio, update the bi_iter of the clone to represent the first sectors
   * of @bio and update @bio->bi_iter to represent the remaining sectors. The
   * following is guaranteed for the cloned bio:
   * - That it has at most get_max_io_size(@q, @bio) sectors.
   * - That it has at most queue_max_segments(@q) segments.
   *
   * Except for discard requests the cloned bio will point at the bi_io_vec of
   * the original bio. It is the responsibility of the caller to ensure that the
   * original bio is not freed before the cloned bio. The caller is also
   * responsible for ensuring that @bs is only destroyed after processing of the
   * split bio has finished.
   */
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
  		/*
  		 * If the queue doesn't support SG gaps and adding this
  		 * offset would create a gap, disallow it.
  		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;
  		if (nsegs < max_segs &&
  		    sectors + (bv.bv_len >> 9) <= max_sectors &&
  		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
  			nsegs++;
  			sectors += bv.bv_len >> 9;
  		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
  					 max_sectors)) {
			goto split;
		}
		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;
	return bio_split(bio, sectors, GFP_NOIO, bs);
  }

/**
 * __blk_queue_split - split a bio and submit the second half
   * @bio:     [in, out] bio to be split
   * @nr_segs: [out] number of segments in the first bio
   *
   * Split a bio into two bios, chain the two bios, submit the second half and
   * store a pointer to the first half in *@bio. If the second bio is still too
   * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from @bio->bi_disk->queue->bio_split, it is
 * the responsibility of the caller to ensure that
 * @bio->bi_disk->queue->bio_split is only released after processing of the
 * split bio has finished.
 */
  void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
{
	struct request_queue *q = (*bio)->bi_disk->queue;
	struct bio *split = NULL;

  	switch (bio_op(*bio)) {
  	case REQ_OP_DISCARD:
  	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
  		/*
		 * All drivers must accept single-segment bios that are <=
  		 * PAGE_SIZE.  This is a quick and dirty check that relies on
  		 * the fact that bi_io_vec[0] is always valid if a bio has data.
  		 * The check might lead to occasional false negatives when bios
  		 * are cloned, but compared to the performance impact of cloned
  		 * bios themselves the loop below doesn't matter anyway.
  		 */
  		if (!q->limits.chunk_sectors &&
  		    (*bio)->bi_vcnt == 1 &&
		    ((*bio)->bi_io_vec[0].bv_len +
		     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
			*nr_segs = 1;
			break;
		}
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}

  	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

  		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		submit_bio_noacct(*bio);
  		*bio = split;
  	}
  }

/**
 * blk_queue_split - split a bio and submit the second half
   * @bio: [in, out] bio to be split
   *
 * Split a bio into two bios, chain the two bios, submit the second half and
   * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from @bio->bi_disk->queue->bio_split, it is the responsibility of
 * the caller to ensure that @bio->bi_disk->queue->bio_split is only released
 * after processing of the split bio has finished.
 */
  void blk_queue_split(struct bio **bio)
{
	unsigned int nr_segs;

	__blk_queue_split(bio, &nr_segs);
}
  EXPORT_SYMBOL(blk_queue_split);
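
/*
 * Sketch of a typical caller (hypothetical driver, not part of this file):
 * a bio-based driver usually splits incoming bios first thing in its
 * ->submit_bio handler, e.g.
 *
 *	static blk_qc_t mydrv_submit_bio(struct bio *bio)
 *	{
 *		blk_queue_split(&bio);
 *		...			// handle the (possibly trimmed) bio
 *		return BLK_QC_T_NONE;
 *	}
 */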

unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int nr_sectors = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

  static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
  		struct scatterlist *sglist)
  {
  	if (!*sg)
  		return sglist;
  
  	/*
  	 * If the driver previously mapped a shorter list, we could see a
  	 * termination bit prematurely unless it fully inits the sg table
  	 * on each mapping. We KNOW that there must be more entries here
  	 * or the driver would be buggy, so force clear the termination bit
  	 * to avoid doing a full sg_init_table() in drivers for each command.
  	 */
  	sg_unmark_end(*sg);
  	return sg_next(*sg);
  }
  
  static unsigned blk_bvec_map_sg(struct request_queue *q,
  		struct bio_vec *bvec, struct scatterlist *sglist,
  		struct scatterlist **sg)
  {
  	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
					offset), nbytes);
  		struct page *page = bvec->bv_page;
  
  		/*
  		 * Unfortunately a fair number of drivers barf on scatterlists
  		 * that have an offset larger than PAGE_SIZE, despite other
  		 * subsystems dealing with that invariant just fine.  For now
  		 * stick to the legacy format where we never present those from
  		 * the block layer, but the code below should be removed once
  		 * these offenders (mostly MMC/SD drivers) are fixed.
  		 */
  		page += (offset >> PAGE_SHIFT);
  		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
  		nsegs++;
  	}
  
  	return nsegs;
  }
  static inline int __blk_bvec_map_sg(struct bio_vec bv,
  		struct scatterlist *sglist, struct scatterlist **sg)
  {
  	*sg = blk_next_sg(sg, sglist);
  	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
  	return 1;
  }
  /* only try to merge bvecs into one sg if they are from two bios */
  static inline bool
  __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
  			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

  	if ((*sg)->length + nbytes > queue_max_segment_size(q))
  		return false;
  
  	if (!biovec_phys_mergeable(q, bvprv, bvec))
  		return false;
  
  	(*sg)->length += nbytes;
  
  	return true;
}

  static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
  			     struct scatterlist *sglist,
  			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

  	for_each_bio(bio) {
  		bio_for_each_bvec(bvec, bio, iter) {
  			/*
			 * Only try to merge bvecs from two different bios;
			 * bvecs within a single bio have already been merged
			 * internally when pages were added to it.
  			 */
  			if (new_bio &&
  			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
  				goto next_bvec;
  
  			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
  				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
  			else
  				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
   next_bvec:
  			new_bio = false;
  		}
  		if (likely(bio->bi_iter.bi_size)) {
  			bvprv = bvec;
  			new_bio = true;
  		}
	}

  	return nsegs;
  }
  
  /*
   * map a request to scatterlist, return number of sg entries setup. Caller
   * must make sure sg can hold rq->nr_phys_segments entries
   */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

  	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

  	if (*last_sg)
  		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the calculated number of
	 * segments is bigger than the number of req's physical segments.
	 */
  	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
  EXPORT_SYMBOL(__blk_rq_map_sg);
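
/*
 * Sketch of a typical caller (hypothetical driver, not part of this file):
 * request-based drivers normally go through the blk_rq_map_sg() wrapper and
 * then DMA-map the resulting table, e.g.
 *
 *	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
 *	nents = blk_rq_map_sg(rq->q, rq, sgl);
 *	dma_map_sg(dev, sgl, nents, dir);
 */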
  static inline unsigned int blk_rq_get_max_segments(struct request *rq)
  {
  	if (req_op(rq) == REQ_OP_DISCARD)
  		return queue_max_discard_segments(rq->q);
  	return queue_max_segments(rq->q);
  }
  static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
  		unsigned int nr_phys_segs)
{
	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

  int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

  	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
  {
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

  	return ll_new_hw_segment(req, bio, nr_segs);
}

  static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
  		struct request *next)
  {
  	unsigned short segments = blk_rq_nr_discard_segments(req);
  
  	if (segments >= queue_max_discard_segments(q))
  		goto no_merge;
  	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
  		goto no_merge;
  
  	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
  	return true;
  no_merge:
  	req_set_nomerge(q, req);
  	return false;
  }

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

  	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
  	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
  	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

  /**
   * blk_rq_set_mixed_merge - mark a request as mixed merge
   * @rq: request to mark as mixed merge
   *
   * Description:
   *     @rq is about to be mixed merged.  Make sure the attributes
   *     which can be mixed are set in each bio and mark @rq as mixed
   *     merged.
   */
  void blk_rq_set_mixed_merge(struct request *rq)
  {
  	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
  	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
  		return;
  
  	/*
  	 * @rq will no longer represent mixable attributes for all the
  	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
  	 */
  	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

  static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();

		hd_struct_put(req->part);
	}
}

  /*
   * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio
 * as a range and sends them to the controller together. The ranges
 * need not be contiguous.
 * Otherwise, the bios/requests are handled the same as
 * others, which must be contiguous.
   */
  static inline bool blk_discard_mergable(struct request *req)
  {
  	if (req_op(req) == REQ_OP_DISCARD &&
  	    queue_max_discard_segments(req->q) > 1)
  		return true;
  	return false;
  }

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
  {
  	if (blk_discard_mergable(req))
  		return ELEVATOR_DISCARD_MERGE;
  	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
  		return ELEVATOR_BACK_MERGE;
  
  	return ELEVATOR_NO_MERGE;
  }

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
  {
  	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
  	 * If we are allowed to merge, then append bio list
  	 * from next to rq and release next. merge_requests_fn
  	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */
  
  	switch (blk_try_req_merge(req, next)) {
  	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}
  
  	/*
  	 * If failfast settings disagree or any of the two is already
  	 * a mixed merge, mark both as mixed before proceeding.  This
  	 * makes sure that all involved bios have mixable attributes
  	 * set properly.
  	 */
  	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
  	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
  	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
  		blk_rq_set_mixed_merge(req);
  		blk_rq_set_mixed_merge(next);
  	}
  
  	/*
  	 * At this point we have either done a back merge or front merge. We
  	 * need the smaller start_time_ns of the merged requests to be the
  	 * current request for accounting purposes.
  	 */
  	if (next->start_time_ns < req->start_time_ns)
  		req->start_time_ns = next->start_time_ns;
  
  	req->biotail->bi_next = next->bio;
  	req->biotail = next->biotail;
	req->__data_len += blk_rq_bytes(next);

  	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

  	/*
  	 * 'next' is going away, so update stats accordingly
  	 */
	blk_account_io_merge_request(next);

  	trace_block_rq_merge(q, next);
  	/*
  	 * ownership of bio passed from next to req, return 'next' for
  	 * the caller to free
  	 */
	next->bio = NULL;
	return next;
}

  static struct request *attempt_back_merge(struct request_queue *q,
  		struct request *rq)
  {
  	struct request *next = elv_latter_request(q, rq);
  
  	if (next)
  		return attempt_merge(q, rq, next);
	return NULL;
}

  static struct request *attempt_front_merge(struct request_queue *q,
  		struct request *rq)
  {
  	struct request *prev = elv_former_request(q, rq);
  
  	if (prev)
  		return attempt_merge(q, prev, rq);
	return NULL;
}
  
  int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
  			  struct request *next)
  {
	struct request *free;

  	free = attempt_merge(q, rq, next);
  	if (free) {
		blk_put_request(free);
  		return 1;
  	}
  
  	return 0;
}
  
  bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
  {
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

  	/* different data direction or already started, don't merge */
  	if (bio_data_dir(bio) != rq_data_dir(rq))
  		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
  		return false;
  
  	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

  	/* Only merge if the crypt contexts are compatible */
  	if (!bio_crypt_rq_ctx_compatible(rq, bio))
  		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
  	    !blk_write_same_mergeable(rq->bio, bio))
  		return false;
  	/*
  	 * Don't allow merge of different write hints, or for a hint with
  	 * non-hint IO.
  	 */
  	if (rq->write_hint != bio->bi_write_hint)
  		return false;
  	if (rq->ioprio != bio_prio(bio))
  		return false;
  	return true;
  }
  enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
  		return ELEVATOR_FRONT_MERGE;
  	return ELEVATOR_NO_MERGE;
  }
  
  static void blk_account_io_merge_bio(struct request *req)
  {
  	if (!blk_do_io_stat(req))
  		return;
  
  	part_stat_lock();
  	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
  	part_stat_unlock();
  }
  enum bio_merge_status {
  	BIO_MERGE_OK,
  	BIO_MERGE_NONE,
  	BIO_MERGE_FAILED,
  };
  
  static enum bio_merge_status bio_attempt_back_merge(struct request *req,
  		struct bio *bio, unsigned int nr_segs)
  {
  	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
  
  	if (!ll_back_merge_fn(req, bio, nr_segs))
  		return BIO_MERGE_FAILED;
  
  	trace_block_bio_backmerge(req->q, req, bio);
  	rq_qos_merge(req->q, req, bio);
  
  	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
  		blk_rq_set_mixed_merge(req);
  
  	req->biotail->bi_next = bio;
  	req->biotail = bio;
  	req->__data_len += bio->bi_iter.bi_size;
  
  	bio_crypt_free_ctx(bio);
  
  	blk_account_io_merge_bio(req);
  	return BIO_MERGE_OK;
  }
  static enum bio_merge_status bio_attempt_front_merge(struct request *req,
  		struct bio *bio, unsigned int nr_segs)
  {
  	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
  
  	if (!ll_front_merge_fn(req, bio, nr_segs))
  		return BIO_MERGE_FAILED;
  
  	trace_block_bio_frontmerge(req->q, req, bio);
  	rq_qos_merge(req->q, req, bio);
  
  	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
  		blk_rq_set_mixed_merge(req);
  
  	bio->bi_next = req->bio;
  	req->bio = bio;
  
  	req->__sector = bio->bi_iter.bi_sector;
  	req->__data_len += bio->bi_iter.bi_size;
  
  	bio_crypt_do_front_merge(req, bio);
  
  	blk_account_io_merge_bio(req);
  	return BIO_MERGE_OK;
  }
  static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
  		struct request *req, struct bio *bio)
  {
  	unsigned short segments = blk_rq_nr_discard_segments(req);
  
  	if (segments >= queue_max_discard_segments(q))
  		goto no_merge;
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
  		goto no_merge;
  
  	rq_qos_merge(q, req, bio);
  
  	req->biotail->bi_next = bio;
  	req->biotail = bio;
  	req->__data_len += bio->bi_iter.bi_size;
  	req->nr_phys_segments = segments + 1;
  
  	blk_account_io_merge_bio(req);
  	return BIO_MERGE_OK;
  no_merge:
  	req_set_nomerge(q, req);
  	return BIO_MERGE_FAILED;
  }
  
  static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
  						   struct request *rq,
  						   struct bio *bio,
  						   unsigned int nr_segs,
  						   bool sched_allow_merge)
  {
  	if (!blk_rq_merge_ok(rq, bio))
  		return BIO_MERGE_NONE;
  
  	switch (blk_try_merge(rq, bio)) {
  	case ELEVATOR_BACK_MERGE:
  		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
  			return bio_attempt_back_merge(rq, bio, nr_segs);
  		break;
  	case ELEVATOR_FRONT_MERGE:
  		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
  			return bio_attempt_front_merge(rq, bio, nr_segs);
  		break;
  	case ELEVATOR_DISCARD_MERGE:
  		return bio_attempt_discard_merge(q, rq, bio);
  	default:
  		return BIO_MERGE_NONE;
  	}
  
  	return BIO_MERGE_FAILED;
  }
  
  /**
   * blk_attempt_plug_merge - try to merge with %current's plugged list
   * @q: request_queue new bio is being queued at
   * @bio: new bio being queued
   * @nr_segs: number of segments in @bio
   * @same_queue_rq: pointer to &struct request that gets filled in when
   * another request associated with @q is found on the plug list
   * (optional, may be %NULL)
   *
   * Determine whether @bio being queued on @q can be merged with a request
   * on %current's plugged list.  Returns %true if merge was successful,
   * otherwise %false.
   *
   * Plugging coalesces IOs from the same issuer for the same purpose without
   * going through @q->queue_lock.  As such it's more of an issuing mechanism
   * than scheduling, and the request, while may have elvpriv data, is not
   * added on the elevator at this point.  In addition, we don't have
   * reliable access to the elevator outside queue lock.  Only check basic
   * merging parameters without querying the elevator.
   *
   * Caller must ensure !blk_queue_nomerges(q) beforehand.
   */
  bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
  		unsigned int nr_segs, struct request **same_queue_rq)
  {
  	struct blk_plug *plug;
  	struct request *rq;
  	struct list_head *plug_list;
  
  	plug = blk_mq_plug(q, bio);
  	if (!plug)
  		return false;
  
  	plug_list = &plug->mq_list;
  
  	list_for_each_entry_reverse(rq, plug_list, queuelist) {
  		if (rq->q == q && same_queue_rq) {
  			/*
			 * Only the blk-mq multiple-hardware-queues case checks
			 * the request in the same queue; there should be only
			 * one such request on the plug list.
			 */
  			*same_queue_rq = rq;
  		}
  		if (rq->q != q)
  			continue;
  		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
  		    BIO_MERGE_OK)
  			return true;
  	}
  
  	return false;
  }
  
  /*
   * Iterate list of requests and see if we can merge this bio with any
   * of them.
   */
  bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
  			struct bio *bio, unsigned int nr_segs)
  {
  	struct request *rq;
  	int checked = 8;
  
  	list_for_each_entry_reverse(rq, list, queuelist) {
  		if (!checked--)
  			break;
  		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
  		case BIO_MERGE_NONE:
  			continue;
  		case BIO_MERGE_OK:
  			return true;
  		case BIO_MERGE_FAILED:
  			return false;
  		}
  	}
  
  	return false;
  }
  EXPORT_SYMBOL_GPL(blk_bio_list_merge);
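
/*
 * Sketch of a typical use (hypothetical, not part of this file): an I/O
 * scheduler that keeps recently inserted requests on a private list can ask
 * for a merge against the last few of them, e.g.
 *
 *	spin_lock(&sched_lock);			// hypothetical lock and list
 *	merged = blk_bio_list_merge(q, &sched_rq_list, bio, nr_segs);
 *	spin_unlock(&sched_lock);
 */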
  
  bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
  		unsigned int nr_segs, struct request **merged_request)
  {
  	struct request *rq;
  
  	switch (elv_merge(q, &rq, bio)) {
  	case ELEVATOR_BACK_MERGE:
  		if (!blk_mq_sched_allow_merge(q, rq, bio))
  			return false;
  		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
  			return false;
  		*merged_request = attempt_back_merge(q, rq);
  		if (!*merged_request)
  			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
  		return true;
  	case ELEVATOR_FRONT_MERGE:
  		if (!blk_mq_sched_allow_merge(q, rq, bio))
  			return false;
  		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
  			return false;
  		*merged_request = attempt_front_merge(q, rq);
  		if (!*merged_request)
  			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
  		return true;
  	case ELEVATOR_DISCARD_MERGE:
  		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
  	default:
  		return false;
  	}
  }
  EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);