block/blk-merge.c
  // SPDX-License-Identifier: GPL-2.0
  /*
   * Functions related to segment and merge handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  #include <trace/events/block.h>
  #include "blk.h"
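
/*
 * Split a discard bio that exceeds the queue's max_discard_sectors limit,
 * keeping the end of the front part aligned to the discard granularity.
 * Returns the split-off front part, or NULL when no split is needed.
 */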
  static struct bio *blk_bio_discard_split(struct request_queue *q,
  					 struct bio *bio,
  					 struct bio_set *bs,
  					 unsigned *nsegs)
  {
  	unsigned int max_discard_sectors, granularity;
  	int alignment;
  	sector_t tmp;
  	unsigned split_sectors;
  	*nsegs = 1;
  	/* Zero-sector (unknown) and one-sector granularities are the same.  */
  	granularity = max(q->limits.discard_granularity >> 9, 1U);
  	max_discard_sectors = min(q->limits.max_discard_sectors,
  			bio_allowed_max_sectors(q));
  	max_discard_sectors -= max_discard_sectors % granularity;
  
  	if (unlikely(!max_discard_sectors)) {
  		/* XXX: warn */
  		return NULL;
  	}
  
  	if (bio_sectors(bio) <= max_discard_sectors)
  		return NULL;
  
  	split_sectors = max_discard_sectors;
  
  	/*
  	 * If the next starting sector would be misaligned, stop the discard at
  	 * the previous aligned sector.
  	 */
  	alignment = (q->limits.discard_alignment >> 9) % granularity;
  
  	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
  	tmp = sector_div(tmp, granularity);
  
  	if (split_sectors > tmp)
  		split_sectors -= tmp;
  
  	return bio_split(bio, split_sectors, GFP_NOIO, bs);
  }
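
/*
 * Split a write zeroes bio that exceeds the queue's
 * max_write_zeroes_sectors limit.
 */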
  static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
  		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
  {
  	*nsegs = 1;
  
  	if (!q->limits.max_write_zeroes_sectors)
  		return NULL;
  
  	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
  		return NULL;
  
  	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
  }
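
/*
 * Split a write same bio that exceeds the queue's
 * max_write_same_sectors limit.
 */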
  static struct bio *blk_bio_write_same_split(struct request_queue *q,
  					    struct bio *bio,
  					    struct bio_set *bs,
  					    unsigned *nsegs)
  {
  	*nsegs = 1;
  	if (!q->limits.max_write_same_sectors)
  		return NULL;
  
  	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
  		return NULL;
  
  	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
  }
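
/*
 * Return the maximum number of sectors that may be queued at this offset,
 * rounded down to a multiple of the logical block size.
 */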
  static inline unsigned get_max_io_size(struct request_queue *q,
  				       struct bio *bio)
  {
  	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
  	unsigned mask = queue_logical_block_size(q) - 1;
  
  	/* aligned to logical block size */
  	sectors &= ~(mask >> 9);
  
  	return sectors;
  }
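
/*
 * Walk the bio bvec by bvec and split it as soon as another bvec would
 * exceed a queue limit (max sectors, max segments, max segment size, or
 * an SG gap).  *segs is set to the number of physical segments counted
 * for the part that is kept.
 */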
  static struct bio *blk_bio_segment_split(struct request_queue *q,
  					 struct bio *bio,
  					 struct bio_set *bs,
  					 unsigned *segs)
  {
  	struct bio_vec bv, bvprv, *bvprvp = NULL;
  	struct bvec_iter iter;
  	unsigned seg_size = 0, nsegs = 0, sectors = 0;
  	unsigned front_seg_size = bio->bi_seg_front_size;
  	bool do_split = true;
  	struct bio *new = NULL;
  	const unsigned max_sectors = get_max_io_size(q, bio);

  	bio_for_each_segment(bv, bio, iter) {
  		/*
  		 * If the queue doesn't support SG gaps and adding this
  		 * offset would create a gap, disallow it.
  		 */
  		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
  			goto split;
  		if (sectors + (bv.bv_len >> 9) > max_sectors) {
  			/*
  			 * Consider this a new segment if we're splitting in
  			 * the middle of this vector.
  			 */
  			if (nsegs < queue_max_segments(q) &&
  			    sectors < max_sectors) {
  				nsegs++;
  				sectors = max_sectors;
  			}
  			goto split;
  		}
  		if (bvprvp && blk_queue_cluster(q)) {
  			if (seg_size + bv.bv_len > queue_max_segment_size(q))
  				goto new_segment;
  			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
  				goto new_segment;
  			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
  				goto new_segment;
  
  			seg_size += bv.bv_len;
  			bvprv = bv;
  			bvprvp = &bvprv;
  			sectors += bv.bv_len >> 9;

  			continue;
  		}
  new_segment:
  		if (nsegs == queue_max_segments(q))
  			goto split;
  		if (nsegs == 1 && seg_size > front_seg_size)
  			front_seg_size = seg_size;
  		nsegs++;
  		bvprv = bv;
  		bvprvp = &bvprv;
  		seg_size = bv.bv_len;
  		sectors += bv.bv_len >> 9;

  	}
  	do_split = false;
  split:
  	*segs = nsegs;
  
  	if (do_split) {
  		new = bio_split(bio, sectors, GFP_NOIO, bs);
  		if (new)
  			bio = new;
  	}
  	if (nsegs == 1 && seg_size > front_seg_size)
  		front_seg_size = seg_size;
  	bio->bi_seg_front_size = front_seg_size;
  	if (seg_size > bio->bi_seg_back_size)
  		bio->bi_seg_back_size = seg_size;
  
  	return do_split ? new : NULL;
  }
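
/*
 * Split *bio so that it fits the queue limits.  The remainder is
 * resubmitted with generic_make_request() and *bio is updated to point
 * at the front part that fits.
 */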
  void blk_queue_split(struct request_queue *q, struct bio **bio)
  {
  	struct bio *split, *res;
  	unsigned nsegs;

  	switch (bio_op(*bio)) {
  	case REQ_OP_DISCARD:
  	case REQ_OP_SECURE_ERASE:
  		split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
  		break;
  	case REQ_OP_WRITE_ZEROES:
  		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
  		break;
  	case REQ_OP_WRITE_SAME:
  		split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
  		break;
  	default:
  		split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
  		break;
  	}
  
  	/* physical segments can be figured out during splitting */
  	res = split ? split : *bio;
  	res->bi_phys_segments = nsegs;
  	bio_set_flag(res, BIO_SEG_VALID);
  
  	if (split) {
		/* there is no chance to merge the split bio */
  		split->bi_opf |= REQ_NOMERGE;

  		/*
  		 * Since we're recursing into make_request here, ensure
  		 * that we mark this bio as already having entered the queue.
  		 * If not, and the queue is going away, we can get stuck
  		 * forever on waiting for the queue reference to drop. But
  		 * that will never happen, as we're already holding a
  		 * reference to it.
  		 */
  		bio_set_flag(*bio, BIO_QUEUE_ENTERED);
  		bio_chain(split, *bio);
  		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
  		generic_make_request(*bio);
  		*bio = split;
  	}
  }
  EXPORT_SYMBOL(blk_queue_split);
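
/*
 * Count the physical segments of a bio chain, honouring the queue's
 * clustering and segment size limits (or treating every bvec as its own
 * segment when no_sg_merge is set).
 */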
  static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
  					     struct bio *bio,
  					     bool no_sg_merge)
  {
  	struct bio_vec bv, bvprv = { NULL };
  	int cluster, prev = 0;
  	unsigned int seg_size, nr_phys_segs;
  	struct bio *fbio, *bbio;
  	struct bvec_iter iter;

  	if (!bio)
  		return 0;

  	switch (bio_op(bio)) {
  	case REQ_OP_DISCARD:
  	case REQ_OP_SECURE_ERASE:
  	case REQ_OP_WRITE_ZEROES:
  		return 0;
  	case REQ_OP_WRITE_SAME:
  		return 1;
  	}

  	fbio = bio;
  	cluster = blk_queue_cluster(q);
  	seg_size = 0;
  	nr_phys_segs = 0;
  	for_each_bio(bio) {
  		bio_for_each_segment(bv, bio, iter) {
  			/*
  			 * If SG merging is disabled, each bio vector is
  			 * a segment
  			 */
  			if (no_sg_merge)
  				goto new_segment;
  			if (prev && cluster) {
  				if (seg_size + bv.bv_len
  				    > queue_max_segment_size(q))
  					goto new_segment;
  				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
  					goto new_segment;
  				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
  					goto new_segment;

  				seg_size += bv.bv_len;
  				bvprv = bv;
  				continue;
  			}
  new_segment:
  			if (nr_phys_segs == 1 && seg_size >
  			    fbio->bi_seg_front_size)
  				fbio->bi_seg_front_size = seg_size;

  			nr_phys_segs++;
  			bvprv = bv;
  			prev = 1;
  			seg_size = bv.bv_len;
  		}
  		bbio = bio;
  	}
  	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
  		fbio->bi_seg_front_size = seg_size;
  	if (seg_size > bbio->bi_seg_back_size)
  		bbio->bi_seg_back_size = seg_size;
  
  	return nr_phys_segs;
  }
  
  void blk_recalc_rq_segments(struct request *rq)
  {
  	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
  			&rq->q->queue_flags);
  
  	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
  			no_sg_merge);
  }
  
  void blk_recount_segments(struct request_queue *q, struct bio *bio)
  {
  	unsigned short seg_cnt;
  
  	/* estimate segment number by bi_vcnt for non-cloned bio */
  	if (bio_flagged(bio, BIO_CLONED))
  		seg_cnt = bio_segments(bio);
  	else
  		seg_cnt = bio->bi_vcnt;

  	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
  			(seg_cnt < queue_max_segments(q)))
  		bio->bi_phys_segments = seg_cnt;
  	else {
  		struct bio *nxt = bio->bi_next;
  
  		bio->bi_next = NULL;
  		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
  		bio->bi_next = nxt;
  	}

  	bio_set_flag(bio, BIO_SEG_VALID);
  }
  EXPORT_SYMBOL(blk_recount_segments);
  
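/*
 * Check whether the last segment of @bio and the first segment of @nxt
 * are physically contiguous and small enough to form a single segment.
 */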
  static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
  				   struct bio *nxt)
  {
  	struct bio_vec end_bv = { NULL }, nxt_bv;

  	if (!blk_queue_cluster(q))
  		return 0;
  	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
  	    queue_max_segment_size(q))
  		return 0;
  	if (!bio_has_data(bio))
  		return 1;
  	bio_get_last_bvec(bio, &end_bv);
  	bio_get_first_bvec(nxt, &nxt_bv);
  
  	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
  		return 0;
  	/*
  	 * bio and nxt are contiguous in memory; check if the queue allows
  	 * these two to be merged into one
  	 */
  	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
  		return 1;
  
  	return 0;
  }
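
/*
 * Add one bio_vec to the scatterlist: fold it into the current sg entry
 * when the queue's clustering rules allow, otherwise start a new entry.
 */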
  static inline void
  __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
  		     struct scatterlist *sglist, struct bio_vec *bvprv,
  		     struct scatterlist **sg, int *nsegs, int *cluster)
  {
  
  	int nbytes = bvec->bv_len;
  	if (*sg && *cluster) {
  		if ((*sg)->length + nbytes > queue_max_segment_size(q))
  			goto new_segment;
  		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
  			goto new_segment;
  		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
  			goto new_segment;
  
  		(*sg)->length += nbytes;
  	} else {
  new_segment:
  		if (!*sg)
  			*sg = sglist;
  		else {
  			/*
  			 * If the driver previously mapped a shorter
  			 * list, we could see a termination bit
  			 * prematurely unless it fully inits the sg
  			 * table on each mapping. We KNOW that there
  			 * must be more entries here or the driver
  			 * would be buggy, so force clear the
  			 * termination bit to avoid doing a full
  			 * sg_init_table() in drivers for each command.
  			 */
  			sg_unmark_end(*sg);
  			*sg = sg_next(*sg);
  		}
  
  		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
  		(*nsegs)++;
  	}
  	*bvprv = *bvec;
  }
  static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
  		struct scatterlist *sglist, struct scatterlist **sg)
  {
  	*sg = sglist;
  	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
  	return 1;
  }
  static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
  			     struct scatterlist *sglist,
  			     struct scatterlist **sg)
  {
  	struct bio_vec bvec, bvprv = { NULL };
  	struct bvec_iter iter;
  	int cluster = blk_queue_cluster(q), nsegs = 0;
  
  	for_each_bio(bio)
  		bio_for_each_segment(bvec, bio, iter)
  			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
  					     &nsegs, &cluster);

  	return nsegs;
  }
  
  /*
   * map a request to scatterlist, return number of sg entries setup. Caller
   * must make sure sg can hold rq->nr_phys_segments entries
   */
  int blk_rq_map_sg(struct request_queue *q, struct request *rq,
  		  struct scatterlist *sglist)
  {
  	struct scatterlist *sg = NULL;
  	int nsegs = 0;
  	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
  		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
  	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
  		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
  	else if (rq->bio)
  		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

  	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
  	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
  		unsigned int pad_len =
  			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
  
  		sg->length += pad_len;
  		rq->extra_len += pad_len;
  	}
  	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
  		if (op_is_write(req_op(rq)))
  			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
  		sg_unmark_end(sg);
  		sg = sg_next(sg);
  		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
  			    q->dma_drain_size,
  			    ((unsigned long)q->dma_drain_buffer) &
  			    (PAGE_SIZE - 1));
  		nsegs++;
  		rq->extra_len += q->dma_drain_size;
  	}
  
  	if (sg)
  		sg_mark_end(sg);
  	/*
  	 * Something must have been wrong if the figured number of
  	 * segment is bigger than number of req's physical segments
  	 */
  	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

  	return nsegs;
  }
  EXPORT_SYMBOL(blk_rq_map_sg);
  static inline int ll_new_hw_segment(struct request_queue *q,
  				    struct request *req,
  				    struct bio *bio)
  {
  	int nr_phys_segs = bio_phys_segments(q, bio);
  	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
  		goto no_merge;
  	if (blk_integrity_merge_bio(q, req, bio) == false)
  		goto no_merge;
  
  	/*
  	 * This will form the start of a new hw segment.  Bump both
  	 * counters.
  	 */
  	req->nr_phys_segments += nr_phys_segs;
  	return 1;
  
  no_merge:
  	req_set_nomerge(q, req);
  	return 0;
  }
  
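/*
 * Check whether @bio can be appended to the back of @req without
 * violating the queue's size, segment and integrity limits.
 */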
  int ll_back_merge_fn(struct request_queue *q, struct request *req,
  		     struct bio *bio)
  {
  	if (req_gap_back_merge(req, bio))
  		return 0;
  	if (blk_integrity_rq(req) &&
  	    integrity_req_gap_back_merge(req, bio))
  		return 0;
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
  		req_set_nomerge(q, req);
  		return 0;
  	}
  	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
  		blk_recount_segments(q, req->biotail);
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
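
/*
 * Check whether @bio can be merged at the front of @req without
 * violating the queue's size, segment and integrity limits.
 */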
  int ll_front_merge_fn(struct request_queue *q, struct request *req,
  		      struct bio *bio)
  {
  
  	if (req_gap_front_merge(req, bio))
  		return 0;
  	if (blk_integrity_rq(req) &&
  	    integrity_req_gap_front_merge(req, bio))
  		return 0;
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
  		req_set_nomerge(q, req);
  		return 0;
  	}
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  	if (!bio_flagged(req->bio, BIO_SEG_VALID))
  		blk_recount_segments(q, req->bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  /*
   * blk-mq uses req->special to carry normal driver per-request payload, it
   * does not indicate a prepared command that we cannot merge with.
   */
  static bool req_no_special_merge(struct request *req)
  {
  	struct request_queue *q = req->q;
  
  	return !q->mq_ops && req->special;
  }
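
/*
 * Check whether @next's discard bios can be added to @req as extra
 * discard ranges and, if so, update @req's segment count; attaching the
 * bios themselves is left to the caller.
 */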
  static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
  		struct request *next)
  {
  	unsigned short segments = blk_rq_nr_discard_segments(req);
  
  	if (segments >= queue_max_discard_segments(q))
  		goto no_merge;
  	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
  		goto no_merge;
  
  	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
  	return true;
  no_merge:
  	req_set_nomerge(q, req);
  	return false;
  }
  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
  				struct request *next)
  {
  	int total_phys_segments;
  	unsigned int seg_size =
  		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
  
  	/*
	 * First check whether either of the requests is a re-queued
	 * request.  Can't merge them if they are.
  	 */
  	if (req_no_special_merge(req) || req_no_special_merge(next))
  		return 0;
  	if (req_gap_back_merge(req, next->bio))
  		return 0;
  	/*
  	 * Will it become too large?
  	 */
  	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
  		return 0;
  
  	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
  	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
  		if (req->nr_phys_segments == 1)
  			req->bio->bi_seg_front_size = seg_size;
  		if (next->nr_phys_segments == 1)
  			next->biotail->bi_seg_back_size = seg_size;
  		total_phys_segments--;
  	}

  	if (total_phys_segments > queue_max_segments(q))
  		return 0;
  	if (blk_integrity_merge_rq(q, req, next) == false)
  		return 0;
  	/* Merge is OK... */
  	req->nr_phys_segments = total_phys_segments;
  	return 1;
  }
  /**
   * blk_rq_set_mixed_merge - mark a request as mixed merge
   * @rq: request to mark as mixed merge
   *
   * Description:
   *     @rq is about to be mixed merged.  Make sure the attributes
   *     which can be mixed are set in each bio and mark @rq as mixed
   *     merged.
   */
  void blk_rq_set_mixed_merge(struct request *rq)
  {
  	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
  	struct bio *bio;
  	if (rq->rq_flags & RQF_MIXED_MERGE)
  		return;
  
  	/*
  	 * @rq will no longer represent mixable attributes for all the
  	 * contained bios.  It will just track those of the first one.
	 * Distributes the attributes to each bio.
  	 */
  	for (bio = rq->bio; bio; bio = bio->bi_next) {
  		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
  			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
  		bio->bi_opf |= ff;
  	}
  	rq->rq_flags |= RQF_MIXED_MERGE;
  }
  static void blk_account_io_merge(struct request *req)
  {
  	if (blk_do_io_stat(req)) {
  		struct hd_struct *part;
  		int cpu;
  
  		cpu = part_stat_lock();
  		part = req->part;

  		part_round_stats(req->q, cpu, part);
  		part_dec_in_flight(req->q, part, rq_data_dir(req));

  		hd_struct_put(part);
  		part_stat_unlock();
  	}
  }
  /*
   * For non-mq, this has to be called with the request spinlock acquired.
   * For mq with scheduling, the appropriate queue wide lock should be held.
   */
  static struct request *attempt_merge(struct request_queue *q,
  				     struct request *req, struct request *next)
  {
  	if (!q->mq_ops)
  		lockdep_assert_held(q->queue_lock);
  	if (!rq_mergeable(req) || !rq_mergeable(next))
  		return NULL;

  	if (req_op(req) != req_op(next))
  		return NULL;

  	/*
  	 * not contiguous
  	 */
  	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
  		return NULL;
  
  	if (rq_data_dir(req) != rq_data_dir(next)
  	    || req->rq_disk != next->rq_disk
  	    || req_no_special_merge(next))
  		return NULL;

  	if (req_op(req) == REQ_OP_WRITE_SAME &&
  	    !blk_write_same_mergeable(req->bio, next->bio))
  		return NULL;

  	/*
  	 * Don't allow merge of different write hints, or for a hint with
  	 * non-hint IO.
  	 */
  	if (req->write_hint != next->write_hint)
  		return NULL;
  
  	/*
  	 * If we are allowed to merge, then append bio list
  	 * from next to rq and release next. merge_requests_fn
  	 * will have updated segment counts, update sector
  	 * counts here. Handle DISCARDs separately, as they
  	 * have separate settings.
  	 */
  	if (req_op(req) == REQ_OP_DISCARD) {
  		if (!req_attempt_discard_merge(q, req, next))
  			return NULL;
  	} else if (!ll_merge_requests_fn(q, req, next))
  		return NULL;
  
  	/*
  	 * If failfast settings disagree or any of the two is already
  	 * a mixed merge, mark both as mixed before proceeding.  This
  	 * makes sure that all involved bios have mixable attributes
  	 * set properly.
  	 */
  	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
  	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
  	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
  		blk_rq_set_mixed_merge(req);
  		blk_rq_set_mixed_merge(next);
  	}
  
  	/*
  	 * At this point we have either done a back merge or front merge. We
  	 * need the smaller start_time_ns of the merged requests to be the
  	 * current request for accounting purposes.
  	 */
  	if (next->start_time_ns < req->start_time_ns)
  		req->start_time_ns = next->start_time_ns;
  
  	req->biotail->bi_next = next->bio;
  	req->biotail = next->biotail;
  	req->__data_len += blk_rq_bytes(next);

  	if (req_op(req) != REQ_OP_DISCARD)
  		elv_merge_requests(q, req, next);

  	/*
  	 * 'next' is going away, so update stats accordingly
  	 */
  	blk_account_io_merge(next);
  
  	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
  	if (blk_rq_cpu_valid(next))
  		req->cpu = next->cpu;

  	/*
  	 * ownership of bio passed from next to req, return 'next' for
  	 * the caller to free
  	 */
  	next->bio = NULL;
  	return next;
  }
  struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *next = elv_latter_request(q, rq);
  
  	if (next)
  		return attempt_merge(q, rq, next);
  	return NULL;
  }
  struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *prev = elv_former_request(q, rq);
  
  	if (prev)
  		return attempt_merge(q, prev, rq);
  	return NULL;
  }
  
  int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
  			  struct request *next)
  {
  	struct elevator_queue *e = q->elevator;
  	struct request *free;

  	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
  		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
  			return 0;
  	free = attempt_merge(q, rq, next);
  	if (free) {
  		__blk_put_request(q, free);
  		return 1;
  	}
  
  	return 0;
  }
  
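/*
 * Check whether @bio may be merged into @rq at all: same operation, same
 * data direction, same device, and compatible integrity and write hint
 * settings.
 */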
  bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
  {
  	if (!rq_mergeable(rq) || !bio_mergeable(bio))
  		return false;
  	if (req_op(rq) != bio_op(bio))
  		return false;
  	/* different data direction or already started, don't merge */
  	if (bio_data_dir(bio) != rq_data_dir(rq))
  		return false;
  
  	/* must be same device and not a special request */
  	if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
  		return false;
  
  	/* only merge integrity protected bio into ditto rq */
  	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
  		return false;
  	/* must be using the same buffer */
  	if (req_op(rq) == REQ_OP_WRITE_SAME &&
  	    !blk_write_same_mergeable(rq->bio, bio))
  		return false;
  	/*
  	 * Don't allow merge of different write hints, or for a hint with
  	 * non-hint IO.
  	 */
  	if (rq->write_hint != bio->bi_write_hint)
  		return false;
  	return true;
  }
  enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
  {
  	if (req_op(rq) == REQ_OP_DISCARD &&
  	    queue_max_discard_segments(rq->q) > 1)
  		return ELEVATOR_DISCARD_MERGE;
  	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
  		return ELEVATOR_BACK_MERGE;
  	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
  		return ELEVATOR_FRONT_MERGE;
  	return ELEVATOR_NO_MERGE;
  }