block/blk-merge.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Functions related to segment and merge handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  #include <trace/events/block.h>
  #include "blk.h"
  static inline bool bio_will_gap(struct request_queue *q,
  		struct request *prev_rq, struct bio *prev, struct bio *next)
  {
  	struct bio_vec pb, nb;
  
  	if (!bio_has_data(prev) || !queue_virt_boundary(q))
  		return false;
  
  	/*
  	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
  	 * is quite difficult to respect the sg gap limit.  We work hard to
  	 * merge a huge number of small single bios in case of mkfs.
  	 */
  	if (prev_rq)
  		bio_get_first_bvec(prev_rq->bio, &pb);
  	else
  		bio_get_first_bvec(prev, &pb);
  	if (pb.bv_offset & queue_virt_boundary(q))
  		return true;
  
  	/*
  	 * We don't need to worry about the situation that the merged segment
  	 * ends in unaligned virt boundary:
  	 *
  	 * - if 'pb' ends aligned, the merged segment ends aligned
  	 * - if 'pb' ends unaligned, the next bio must include
  	 *   one single bvec of 'nb', otherwise the 'nb' can't
  	 *   merge with 'pb'
  	 */
  	bio_get_last_bvec(prev, &pb);
  	bio_get_first_bvec(next, &nb);
  	if (biovec_phys_mergeable(q, &pb, &nb))
  		return false;
  	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
  }
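
  /*
   * Example: with a 4 KiB virt boundary (queue_virt_boundary(q) == 0xfff),
   * and when the two boundary bvecs are not physically contiguous, the
   * final check above reports a gap unless the previous bio's last bvec
   * ends on a 4 KiB boundary and the next bio's first bvec starts on one.
   */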
  
  static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
  {
  	return bio_will_gap(req->q, req, req->biotail, bio);
  }
  
  static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
  {
  	return bio_will_gap(req->q, NULL, bio, req->bio);
  }
  static struct bio *blk_bio_discard_split(struct request_queue *q,
  					 struct bio *bio,
  					 struct bio_set *bs,
  					 unsigned *nsegs)
  {
  	unsigned int max_discard_sectors, granularity;
  	int alignment;
  	sector_t tmp;
  	unsigned split_sectors;
  	*nsegs = 1;
  	/* Zero-sector (unknown) and one-sector granularities are the same.  */
  	granularity = max(q->limits.discard_granularity >> 9, 1U);
  	max_discard_sectors = min(q->limits.max_discard_sectors,
  			bio_allowed_max_sectors(q));
  	max_discard_sectors -= max_discard_sectors % granularity;
  
  	if (unlikely(!max_discard_sectors)) {
  		/* XXX: warn */
  		return NULL;
  	}
  
  	if (bio_sectors(bio) <= max_discard_sectors)
  		return NULL;
  
  	split_sectors = max_discard_sectors;
  
  	/*
  	 * If the next starting sector would be misaligned, stop the discard at
  	 * the previous aligned sector.
  	 */
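  	/*
  	 * Example: with an 8-sector granularity, zero discard_alignment,
  	 * bi_sector == 10 and split_sectors == 2048, sector_div() below
  	 * returns (10 + 2048) % 8 == 2, so the split is trimmed to 2046
  	 * sectors and the remainder starts at the aligned sector 2056.
  	 */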
  	alignment = (q->limits.discard_alignment >> 9) % granularity;
  
  	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
  	tmp = sector_div(tmp, granularity);
  
  	if (split_sectors > tmp)
  		split_sectors -= tmp;
  
  	return bio_split(bio, split_sectors, GFP_NOIO, bs);
  }
  static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
  		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
  {
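  	/* a REQ_OP_WRITE_ZEROES bio carries no data payload, hence no segments */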
  	*nsegs = 0;
  
  	if (!q->limits.max_write_zeroes_sectors)
  		return NULL;
  
  	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
  		return NULL;
  
  	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
  }
  static struct bio *blk_bio_write_same_split(struct request_queue *q,
  					    struct bio *bio,
  					    struct bio_set *bs,
  					    unsigned *nsegs)
  {
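  	/* a REQ_OP_WRITE_SAME bio carries a single-page payload: one segment */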
  	*nsegs = 1;
  	if (!q->limits.max_write_same_sectors)
  		return NULL;
  
  	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
  		return NULL;
  
  	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
  }
  /*
   * Return the maximum number of sectors from the start of a bio that may be
   * submitted as a single request to a block device. If enough sectors remain,
   * align the end to the physical block size. Otherwise align the end to the
   * logical block size. This approach minimizes the number of non-aligned
   * requests that are submitted to a block device if the start of a bio is not
   * aligned to a physical block boundary.
   */
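  /*
   * Example: with a 4096-byte physical block size (pbs == 8 sectors), a
   * 512-byte logical block size (lbs == 1) and a bio starting at sector 3,
   * start_offset == 3.  If blk_max_size_offset() allows 255 sectors, then
   * max_sectors == (255 + 3) & ~7 == 256, so 253 sectors are returned and
   * the request ends exactly on the physical block boundary at sector 256.
   */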
  static inline unsigned get_max_io_size(struct request_queue *q,
  				       struct bio *bio)
  {
  	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
  	unsigned max_sectors = sectors;
  	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
  	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
  	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

  	max_sectors += start_offset;
  	max_sectors &= ~(pbs - 1);
  	if (max_sectors > start_offset)
  		return max_sectors - start_offset;

  	return sectors & ~(lbs - 1);
  }
  static unsigned get_max_segment_size(const struct request_queue *q,
  				     unsigned offset)
  {
  	unsigned long mask = queue_segment_boundary(q);
  
  	/* default segment boundary mask means no boundary limit */
  	if (mask == BLK_SEG_BOUNDARY_MASK)
  		return queue_max_segment_size(q);
  
  	return min_t(unsigned long, mask - (mask & offset) + 1,
  		     queue_max_segment_size(q));
  }
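
  /*
   * Example: with a 64 KiB segment boundary (mask == 0xffff), a segment
   * starting at offset 0xf000 within that window may grow by at most
   * 0x1000 bytes before hitting the boundary, further capped by
   * queue_max_segment_size().
   */
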
  /**
   * bvec_split_segs - verify whether or not a bvec should be split in the middle
   * @q:        [in] request queue associated with the bio associated with @bv
   * @bv:       [in] bvec to examine
   * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
   *            by the number of segments from @bv that may be appended to that
   *            bio without exceeding @max_segs
   * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
   *            by the number of sectors from @bv that may be appended to that
   *            bio without exceeding @max_sectors
   * @max_segs: [in] upper bound for *@nsegs
   * @max_sectors: [in] upper bound for *@sectors
   *
   * When splitting a bio, it can happen that a bvec is encountered that is too
   * big to fit in a single segment and hence that it has to be split in the
   * middle. This function verifies whether or not that should happen. The value
   * %true is returned if and only if appending the entire @bv to a bio with
   * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
   * the block driver.
   */
  static bool bvec_split_segs(const struct request_queue *q,
  			    const struct bio_vec *bv, unsigned *nsegs,
  			    unsigned *sectors, unsigned max_segs,
  			    unsigned max_sectors)
  {
  	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
  	unsigned len = min(bv->bv_len, max_len);
  	unsigned total_len = 0;
  	unsigned seg_size = 0;

  	while (len && *nsegs < max_segs) {
  		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
  		seg_size = min(seg_size, len);
  		(*nsegs)++;
  		total_len += seg_size;
  		len -= seg_size;
  
  		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
  			break;
  	}
  	*sectors += total_len >> 9;

  	/* tell the caller to split the bvec if it is too big to fit */
  	return len > 0 || bv->bv_len > max_len;
  }
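
  /*
   * Example: on a queue with a 64 KiB max segment size and the default
   * segment boundary, a single 1 MiB bvec is accounted as 16 segments by
   * the loop above (fewer if max_segs or max_sectors is reached first).
   */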
  /**
   * blk_bio_segment_split - split a bio in two bios
   * @q:    [in] request queue pointer
   * @bio:  [in] bio to be split
   * @bs:	  [in] bio set to allocate the clone from
   * @segs: [out] number of segments in the bio with the first half of the sectors
   *
   * Clone @bio, update the bi_iter of the clone to represent the first sectors
   * of @bio and update @bio->bi_iter to represent the remaining sectors. The
   * following is guaranteed for the cloned bio:
   * - That it has at most get_max_io_size(@q, @bio) sectors.
   * - That it has at most queue_max_segments(@q) segments.
   *
   * Except for discard requests the cloned bio will point at the bi_io_vec of
   * the original bio. It is the responsibility of the caller to ensure that the
   * original bio is not freed before the cloned bio. The caller is also
   * responsible for ensuring that @bs is only destroyed after processing of the
   * split bio has finished.
   */
  static struct bio *blk_bio_segment_split(struct request_queue *q,
  					 struct bio *bio,
  					 struct bio_set *bs,
  					 unsigned *segs)
  {
  	struct bio_vec bv, bvprv, *bvprvp = NULL;
  	struct bvec_iter iter;
  	unsigned nsegs = 0, sectors = 0;
  	const unsigned max_sectors = get_max_io_size(q, bio);
  	const unsigned max_segs = queue_max_segments(q);

  	bio_for_each_bvec(bv, bio, iter) {
  		/*
  		 * If the queue doesn't support SG gaps and adding this
  		 * offset would create a gap, disallow it.
  		 */
  		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
  			goto split;
  		if (nsegs < max_segs &&
  		    sectors + (bv.bv_len >> 9) <= max_sectors &&
  		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
  			nsegs++;
  			sectors += bv.bv_len >> 9;
  		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
  					 max_sectors)) {
  			goto split;
  		}
  		bvprv = bv;
  		bvprvp = &bvprv;
  	}
  	*segs = nsegs;
  	return NULL;
  split:
  	*segs = nsegs;
  	return bio_split(bio, sectors, GFP_NOIO, bs);
  }
  /**
   * __blk_queue_split - split a bio and submit the second half
   * @q:       [in] request queue pointer
   * @bio:     [in, out] bio to be split
   * @nr_segs: [out] number of segments in the first bio
   *
   * Split a bio into two bios, chain the two bios, submit the second half and
   * store a pointer to the first half in *@bio. If the second bio is still too
   * big it will be split by a recursive call to this function. Since this
   * function may allocate a new bio from @q->bio_split, it is the responsibility
   * of the caller to ensure that @q is only released after processing of the
   * split bio has finished.
   */
  void __blk_queue_split(struct request_queue *q, struct bio **bio,
  		unsigned int *nr_segs)
  {
  	struct bio *split;

  	switch (bio_op(*bio)) {
  	case REQ_OP_DISCARD:
  	case REQ_OP_SECURE_ERASE:
  		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
  		break;
  	case REQ_OP_WRITE_ZEROES:
  		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
  				nr_segs);
  		break;
  	case REQ_OP_WRITE_SAME:
  		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
  				nr_segs);
  		break;
  	default:
  		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
  		break;
  	}

  	if (split) {
  		/* there is no chance to merge the split bio */
  		split->bi_opf |= REQ_NOMERGE;

  		/*
  		 * Since we're recursing into make_request here, ensure
  		 * that we mark this bio as already having entered the queue.
  		 * If not, and the queue is going away, we can get stuck
  		 * forever on waiting for the queue reference to drop. But
  		 * that will never happen, as we're already holding a
  		 * reference to it.
  		 */
  		bio_set_flag(*bio, BIO_QUEUE_ENTERED);
  		bio_chain(split, *bio);
  		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
  		generic_make_request(*bio);
  		*bio = split;
  	}
  }

  /**
   * blk_queue_split - split a bio and submit the second half
   * @q:   [in] request queue pointer
   * @bio: [in, out] bio to be split
   *
   * Split a bio into two bios, chain the two bios, submit the second half and
   * store a pointer to the first half in *@bio. Since this function may allocate
   * a new bio from @q->bio_split, it is the responsibility of the caller to
   * ensure that @q is only released after processing of the split bio has
   * finished.
   */
  void blk_queue_split(struct request_queue *q, struct bio **bio)
  {
  	unsigned int nr_segs;
  
  	__blk_queue_split(q, bio, &nr_segs);
  }
  EXPORT_SYMBOL(blk_queue_split);
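
  /*
   * Typical use in a bio-based driver's make_request function (illustrative
   * sketch only; "example_make_request" is a hypothetical driver entry point):
   *
   *	static blk_qc_t example_make_request(struct request_queue *q,
   *					     struct bio *bio)
   *	{
   *		blk_queue_split(q, &bio);
   *		// *bio now fits the queue limits; handle it here
   *		return BLK_QC_T_NONE;
   *	}
   */
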
  unsigned int blk_recalc_rq_segments(struct request *rq)
  {
  	unsigned int nr_phys_segs = 0;
  	unsigned int nr_sectors = 0;
  	struct req_iterator iter;
  	struct bio_vec bv;

  	if (!rq->bio)
  		return 0;

  	switch (bio_op(rq->bio)) {
  	case REQ_OP_DISCARD:
  	case REQ_OP_SECURE_ERASE:
  	case REQ_OP_WRITE_ZEROES:
  		return 0;
  	case REQ_OP_WRITE_SAME:
  		return 1;
  	}

  	rq_for_each_bvec(bv, rq, iter)
  		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
  				UINT_MAX, UINT_MAX);
  	return nr_phys_segs;
  }
  static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
  		struct scatterlist *sglist)
  {
  	if (!*sg)
  		return sglist;
  
  	/*
  	 * If the driver previously mapped a shorter list, we could see a
  	 * termination bit prematurely unless it fully inits the sg table
  	 * on each mapping. We KNOW that there must be more entries here
  	 * or the driver would be buggy, so force clear the termination bit
  	 * to avoid doing a full sg_init_table() in drivers for each command.
  	 */
  	sg_unmark_end(*sg);
  	return sg_next(*sg);
  }
  
  static unsigned blk_bvec_map_sg(struct request_queue *q,
  		struct bio_vec *bvec, struct scatterlist *sglist,
  		struct scatterlist **sg)
  {
  	unsigned nbytes = bvec->bv_len;
  	unsigned nsegs = 0, total = 0;
  
  	while (nbytes > 0) {
  		unsigned offset = bvec->bv_offset + total;
  		unsigned len = min(get_max_segment_size(q, offset), nbytes);
  		struct page *page = bvec->bv_page;
  
  		/*
  		 * Unfortunately a fair number of drivers barf on scatterlists
  		 * that have an offset larger than PAGE_SIZE, despite other
  		 * subsystems dealing with that invariant just fine.  For now
  		 * stick to the legacy format where we never present those from
  		 * the block layer, but the code below should be removed once
  		 * these offenders (mostly MMC/SD drivers) are fixed.
  		 */
  		page += (offset >> PAGE_SHIFT);
  		offset &= ~PAGE_MASK;
  
  		*sg = blk_next_sg(sg, sglist);
  		sg_set_page(*sg, page, len, offset);

  		total += len;
  		nbytes -= len;
  		nsegs++;
  	}
  
  	return nsegs;
  }
  static inline int __blk_bvec_map_sg(struct bio_vec bv,
  		struct scatterlist *sglist, struct scatterlist **sg)
  {
  	*sg = blk_next_sg(sg, sglist);
  	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
  	return 1;
  }
  /* only try to merge bvecs into one sg if they are from two bios */
  static inline bool
  __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
  			   struct bio_vec *bvprv, struct scatterlist **sg)
  {
  
  	int nbytes = bvec->bv_len;
  	if (!*sg)
  		return false;

  	if ((*sg)->length + nbytes > queue_max_segment_size(q))
  		return false;
  
  	if (!biovec_phys_mergeable(q, bvprv, bvec))
  		return false;
  
  	(*sg)->length += nbytes;
  
  	return true;
  }
  static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
  			     struct scatterlist *sglist,
  			     struct scatterlist **sg)
  {
  	struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
  	struct bvec_iter iter;
  	int nsegs = 0;
  	bool new_bio = false;

  	for_each_bio(bio) {
  		bio_for_each_bvec(bvec, bio, iter) {
  			/*
  			 * Only try to merge bvecs from two bios given we
  			 * have done bio internal merge when adding pages
  			 * to bio
  			 */
  			if (new_bio &&
  			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
  				goto next_bvec;
  
  			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
  				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
  			else
  				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
   next_bvec:
  			new_bio = false;
  		}
  		if (likely(bio->bi_iter.bi_size)) {
  			bvprv = bvec;
  			new_bio = true;
  		}
  	}

  	return nsegs;
  }
  
  /*
   * map a request to scatterlist, return number of sg entries setup. Caller
   * must make sure sg can hold rq->nr_phys_segments entries
   */
  int blk_rq_map_sg(struct request_queue *q, struct request *rq,
  		  struct scatterlist *sglist)
  {
  	struct scatterlist *sg = NULL;
  	int nsegs = 0;
  	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
  		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
  	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
  		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
  	else if (rq->bio)
  		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

  	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
  	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
  		unsigned int pad_len =
  			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
  
  		sg->length += pad_len;
  		rq->extra_len += pad_len;
  	}
  	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
  		if (op_is_write(req_op(rq)))
  			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
  		sg_unmark_end(sg);
  		sg = sg_next(sg);
  		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
  			    q->dma_drain_size,
  			    ((unsigned long)q->dma_drain_buffer) &
  			    (PAGE_SIZE - 1));
  		nsegs++;
  		rq->extra_len += q->dma_drain_size;
  	}
  
  	if (sg)
  		sg_mark_end(sg);
  	/*
  	 * Something must have gone wrong if the computed number of
  	 * segments is bigger than the number of the request's physical segments
  	 */
  	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

  	return nsegs;
  }
  EXPORT_SYMBOL(blk_rq_map_sg);
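
  /*
   * Typical use by a driver that has already allocated a scatterlist with
   * at least blk_rq_nr_phys_segments(rq) entries (illustrative sketch;
   * "sgl" is a hypothetical, driver-owned scatterlist):
   *
   *	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
   *	nents = blk_rq_map_sg(rq->q, rq, sgl);
   *	// the first nents entries of sgl can now be handed to dma_map_sg()
   */
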
  static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
  		unsigned int nr_phys_segs)
  {
  	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
  		goto no_merge;
  	if (blk_integrity_merge_bio(req->q, req, bio) == false)
  		goto no_merge;
  
  	/*
  	 * This will form the start of a new hw segment.  Bump both
  	 * counters.
  	 */
  	req->nr_phys_segments += nr_phys_segs;
  	return 1;
  
  no_merge:
  	req_set_nomerge(req->q, req);
  	return 0;
  }
  int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
  {
  	if (req_gap_back_merge(req, bio))
  		return 0;
  	if (blk_integrity_rq(req) &&
  	    integrity_req_gap_back_merge(req, bio))
  		return 0;
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
  		req_set_nomerge(req->q, req);
  		return 0;
  	}

  	return ll_new_hw_segment(req, bio, nr_segs);
  }
  int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
  {
  	if (req_gap_front_merge(req, bio))
  		return 0;
  	if (blk_integrity_rq(req) &&
  	    integrity_req_gap_front_merge(req, bio))
  		return 0;
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
  		req_set_nomerge(req->q, req);
  		return 0;
  	}

  	return ll_new_hw_segment(req, bio, nr_segs);
  }
  static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
  		struct request *next)
  {
  	unsigned short segments = blk_rq_nr_discard_segments(req);
  
  	if (segments >= queue_max_discard_segments(q))
  		goto no_merge;
  	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
  		goto no_merge;
  
  	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
  	return true;
  no_merge:
  	req_set_nomerge(q, req);
  	return false;
  }
  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
  				struct request *next)
  {
  	int total_phys_segments;

  	if (req_gap_back_merge(req, next->bio))
  		return 0;
  	/*
  	 * Will it become too large?
  	 */
  	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
  		return 0;
  
  	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
  	if (total_phys_segments > queue_max_segments(q))
  		return 0;
  	if (blk_integrity_merge_rq(q, req, next) == false)
  		return 0;
  	/* Merge is OK... */
  	req->nr_phys_segments = total_phys_segments;
  	return 1;
  }
  /**
   * blk_rq_set_mixed_merge - mark a request as mixed merge
   * @rq: request to mark as mixed merge
   *
   * Description:
   *     @rq is about to be mixed merged.  Make sure the attributes
   *     which can be mixed are set in each bio and mark @rq as mixed
   *     merged.
   */
  void blk_rq_set_mixed_merge(struct request *rq)
  {
  	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
  	struct bio *bio;
  	if (rq->rq_flags & RQF_MIXED_MERGE)
  		return;
  
  	/*
  	 * @rq will no longer represent mixable attributes for all the
  	 * contained bios.  It will just track those of the first one.
  	 * Distribute the attributes to each bio.
  	 */
  	for (bio = rq->bio; bio; bio = bio->bi_next) {
  		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
  			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
  		bio->bi_opf |= ff;
  	}
  	rq->rq_flags |= RQF_MIXED_MERGE;
  }
  static void blk_account_io_merge(struct request *req)
  {
  	if (blk_do_io_stat(req)) {
  		struct hd_struct *part;

  		part_stat_lock();
  		part = req->part;

  		part_dec_in_flight(req->q, part, rq_data_dir(req));

  		hd_struct_put(part);
  		part_stat_unlock();
  	}
  }
  /*
   * Two cases of handling DISCARD merge:
   * If max_discard_segments > 1, the driver takes every bio
   * as a range and send them to controller together. The ranges
   * needn't to be contiguous.
   * Otherwise, the bios/requests will be handled as same as
   * others which should be contiguous.
   */
  static inline bool blk_discard_mergable(struct request *req)
  {
  	if (req_op(req) == REQ_OP_DISCARD &&
  	    queue_max_discard_segments(req->q) > 1)
  		return true;
  	return false;
  }
  static enum elv_merge blk_try_req_merge(struct request *req,
  					struct request *next)
  {
  	if (blk_discard_mergable(req))
  		return ELEVATOR_DISCARD_MERGE;
  	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
  		return ELEVATOR_BACK_MERGE;
  
  	return ELEVATOR_NO_MERGE;
  }

  /*
   * For non-mq, this has to be called with the request spinlock acquired.
   * For mq with scheduling, the appropriate queue wide lock should be held.
   */
  static struct request *attempt_merge(struct request_queue *q,
  				     struct request *req, struct request *next)
  {
  	if (!rq_mergeable(req) || !rq_mergeable(next))
  		return NULL;

  	if (req_op(req) != req_op(next))
  		return NULL;

  	if (rq_data_dir(req) != rq_data_dir(next)
  	    || req->rq_disk != next->rq_disk)
  		return NULL;

  	if (req_op(req) == REQ_OP_WRITE_SAME &&
  	    !blk_write_same_mergeable(req->bio, next->bio))
  		return NULL;

  	/*
  	 * Don't allow merge of different write hints, or for a hint with
  	 * non-hint IO.
  	 */
  	if (req->write_hint != next->write_hint)
  		return NULL;
  	if (req->ioprio != next->ioprio)
  		return NULL;
  	/*
  	 * If we are allowed to merge, then append bio list
  	 * from next to rq and release next. merge_requests_fn
  	 * will have updated segment counts, update sector
  	 * counts here. Handle DISCARDs separately, as they
  	 * have separate settings.
  	 */
  
  	switch (blk_try_req_merge(req, next)) {
  	case ELEVATOR_DISCARD_MERGE:
  		if (!req_attempt_discard_merge(q, req, next))
  			return NULL;
  		break;
  	case ELEVATOR_BACK_MERGE:
  		if (!ll_merge_requests_fn(q, req, next))
  			return NULL;
  		break;
  	default:
  		return NULL;
  	}
  
  	/*
  	 * If failfast settings disagree or any of the two is already
  	 * a mixed merge, mark both as mixed before proceeding.  This
  	 * makes sure that all involved bios have mixable attributes
  	 * set properly.
  	 */
  	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
  	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
  	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
  		blk_rq_set_mixed_merge(req);
  		blk_rq_set_mixed_merge(next);
  	}
  
  	/*
  	 * At this point we have either done a back merge or front merge. We
  	 * need the smaller start_time_ns of the merged requests to be the
  	 * current request for accounting purposes.
  	 */
  	if (next->start_time_ns < req->start_time_ns)
  		req->start_time_ns = next->start_time_ns;
  
  	req->biotail->bi_next = next->bio;
  	req->biotail = next->biotail;
  	req->__data_len += blk_rq_bytes(next);

  	if (!blk_discard_mergable(req))
  		elv_merge_requests(q, req, next);

  	/*
  	 * 'next' is going away, so update stats accordingly
  	 */
  	blk_account_io_merge(next);

  	/*
  	 * ownership of bio passed from next to req, return 'next' for
  	 * the caller to free
  	 */
  	next->bio = NULL;
  	return next;
  }
  struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *next = elv_latter_request(q, rq);
  
  	if (next)
  		return attempt_merge(q, rq, next);
  	return NULL;
  }
  struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *prev = elv_former_request(q, rq);
  
  	if (prev)
  		return attempt_merge(q, prev, rq);
  	return NULL;
  }
  
  int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
  			  struct request *next)
  {
  	struct request *free;

  	free = attempt_merge(q, rq, next);
  	if (free) {
  		blk_put_request(free);
  		return 1;
  	}
  
  	return 0;
  }
  
  bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
  {
  	if (!rq_mergeable(rq) || !bio_mergeable(bio))
  		return false;
  	if (req_op(rq) != bio_op(bio))
  		return false;
  	/* different data direction or already started, don't merge */
  	if (bio_data_dir(bio) != rq_data_dir(rq))
  		return false;
  	/* must be same device */
  	if (rq->rq_disk != bio->bi_disk)
  		return false;
  
  	/* only merge integrity protected bio into ditto rq */
  	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
  		return false;
  	/* must be using the same buffer */
  	if (req_op(rq) == REQ_OP_WRITE_SAME &&
  	    !blk_write_same_mergeable(rq->bio, bio))
  		return false;
  	/*
  	 * Don't allow merge of different write hints, or for a hint with
  	 * non-hint IO.
  	 */
  	if (rq->write_hint != bio->bi_write_hint)
  		return false;
  	if (rq->ioprio != bio_prio(bio))
  		return false;
  	return true;
  }
  enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
  {
  	if (blk_discard_mergable(rq))
  		return ELEVATOR_DISCARD_MERGE;
  	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
  		return ELEVATOR_BACK_MERGE;
  	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
  		return ELEVATOR_FRONT_MERGE;
  	return ELEVATOR_NO_MERGE;
  }
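
  /*
   * Example: a non-discard request covering sectors [100, 108) back-merges
   * with a bio starting at sector 108 and front-merges with an 8-sector bio
   * starting at sector 92; any other position yields ELEVATOR_NO_MERGE.
   */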