block/blk-merge.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Functions related to segment and merge handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  #include <trace/events/block.h>
  #include "blk.h"
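
/*
 * Split a discard/secure-erase bio so it fits the queue's discard
 * granularity and max_discard_sectors limits.  Returns a bio for the
 * front part that fits (@bio is advanced to the remainder), or NULL
 * when no split is needed.
 */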
  static struct bio *blk_bio_discard_split(struct request_queue *q,
  					 struct bio *bio,
  					 struct bio_set *bs,
  					 unsigned *nsegs)
  {
  	unsigned int max_discard_sectors, granularity;
  	int alignment;
  	sector_t tmp;
  	unsigned split_sectors;
  	*nsegs = 1;
  	/* Zero-sector (unknown) and one-sector granularities are the same.  */
  	granularity = max(q->limits.discard_granularity >> 9, 1U);
  
  	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
  	max_discard_sectors -= max_discard_sectors % granularity;
  
  	if (unlikely(!max_discard_sectors)) {
  		/* XXX: warn */
  		return NULL;
  	}
  
  	if (bio_sectors(bio) <= max_discard_sectors)
  		return NULL;
  
  	split_sectors = max_discard_sectors;
  
  	/*
  	 * If the next starting sector would be misaligned, stop the discard at
  	 * the previous aligned sector.
  	 */
  	alignment = (q->limits.discard_alignment >> 9) % granularity;
  
  	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
  	tmp = sector_div(tmp, granularity);
  
  	if (split_sectors > tmp)
  		split_sectors -= tmp;
  
  	return bio_split(bio, split_sectors, GFP_NOIO, bs);
  }
  static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
  		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
  {
  	*nsegs = 1;
  
  	if (!q->limits.max_write_zeroes_sectors)
  		return NULL;
  
  	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
  		return NULL;
  
  	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
  }
  static struct bio *blk_bio_write_same_split(struct request_queue *q,
  					    struct bio *bio,
  					    struct bio_set *bs,
  					    unsigned *nsegs)
  {
  	*nsegs = 1;
  	if (!q->limits.max_write_same_sectors)
  		return NULL;
  
  	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
  		return NULL;
  
  	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
  }
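
/*
 * Maximum number of sectors that may be added to a bio starting at
 * @bio->bi_iter.bi_sector, rounded down so the result is aligned to
 * the queue's logical block size.
 */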
  static inline unsigned get_max_io_size(struct request_queue *q,
  				       struct bio *bio)
  {
  	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
  	unsigned mask = queue_logical_block_size(q) - 1;
  
  	/* aligned to logical block size */
  	sectors &= ~(mask >> 9);
  
  	return sectors;
  }
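
/*
 * Walk @bio segment by segment and split it as soon as adding another
 * bvec would exceed the queue's size, segment-count, segment-size or
 * SG-gap limits.  *segs returns the number of physical segments counted
 * up to the split point (or in the whole bio if no split is needed).
 */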
  static struct bio *blk_bio_segment_split(struct request_queue *q,
  					 struct bio *bio,
  					 struct bio_set *bs,
  					 unsigned *segs)
  {
  	struct bio_vec bv, bvprv, *bvprvp = NULL;
  	struct bvec_iter iter;
  	unsigned seg_size = 0, nsegs = 0, sectors = 0;
  	unsigned front_seg_size = bio->bi_seg_front_size;
  	bool do_split = true;
  	struct bio *new = NULL;
  	const unsigned max_sectors = get_max_io_size(q, bio);

  	bio_for_each_segment(bv, bio, iter) {
  		/*
  		 * If the queue doesn't support SG gaps and adding this
  		 * offset would create a gap, disallow it.
  		 */
  		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
  			goto split;
  		if (sectors + (bv.bv_len >> 9) > max_sectors) {
  			/*
  			 * Consider this a new segment if we're splitting in
  			 * the middle of this vector.
  			 */
  			if (nsegs < queue_max_segments(q) &&
  			    sectors < max_sectors) {
  				nsegs++;
  				sectors = max_sectors;
  			}
  			if (sectors)
  				goto split;
  			/* Make this single bvec as the 1st segment */
  		}
  		if (bvprvp && blk_queue_cluster(q)) {
  			if (seg_size + bv.bv_len > queue_max_segment_size(q))
  				goto new_segment;
  			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
  				goto new_segment;
  			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
  				goto new_segment;
  
  			seg_size += bv.bv_len;
  			bvprv = bv;
  			bvprvp = &bvprv;
  			sectors += bv.bv_len >> 9;
  
  			if (nsegs == 1 && seg_size > front_seg_size)
  				front_seg_size = seg_size;
  			continue;
  		}
  new_segment:
  		if (nsegs == queue_max_segments(q))
  			goto split;
  
  		nsegs++;
  		bvprv = bv;
  		bvprvp = &bvprv;
  		seg_size = bv.bv_len;
  		sectors += bv.bv_len >> 9;
  
  		if (nsegs == 1 && seg_size > front_seg_size)
  			front_seg_size = seg_size;
  	}
  	do_split = false;
  split:
  	*segs = nsegs;
  
  	if (do_split) {
  		new = bio_split(bio, sectors, GFP_NOIO, bs);
  		if (new)
  			bio = new;
  	}
  
  	bio->bi_seg_front_size = front_seg_size;
  	if (seg_size > bio->bi_seg_back_size)
  		bio->bi_seg_back_size = seg_size;
  
  	return do_split ? new : NULL;
  }
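
/*
 * Split *bio if it exceeds the queue's limits for its operation type.
 * The front piece is handed back through *bio for the caller to handle;
 * the remainder is chained to it and resubmitted via
 * generic_make_request().
 */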
  void blk_queue_split(struct request_queue *q, struct bio **bio)
  {
  	struct bio *split, *res;
  	unsigned nsegs;

  	switch (bio_op(*bio)) {
  	case REQ_OP_DISCARD:
  	case REQ_OP_SECURE_ERASE:
  		split = blk_bio_discard_split(q, *bio, q->bio_split, &nsegs);
  		break;
  	case REQ_OP_WRITE_ZEROES:
  		split = blk_bio_write_zeroes_split(q, *bio, q->bio_split, &nsegs);
  		break;
  	case REQ_OP_WRITE_SAME:
  		split = blk_bio_write_same_split(q, *bio, q->bio_split, &nsegs);
  		break;
  	default:
  		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
  		break;
  	}
  
  	/* physical segments can be figured out during splitting */
  	res = split ? split : *bio;
  	res->bi_phys_segments = nsegs;
  	bio_set_flag(res, BIO_SEG_VALID);
  
  	if (split) {
		/* there is no chance to merge the split bio */
  		split->bi_opf |= REQ_NOMERGE;

  		bio_chain(split, *bio);
  		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
  		generic_make_request(*bio);
  		*bio = split;
  	}
  }
  EXPORT_SYMBOL(blk_queue_split);
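
/*
 * Count the physical segments in a bio chain, applying the queue's
 * clustering, segment-size and boundary limits (or treating every bvec
 * as its own segment when @no_sg_merge is set).
 */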
  static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
  					     struct bio *bio,
  					     bool no_sg_merge)
  {
  	struct bio_vec bv, bvprv = { NULL };
  	int cluster, prev = 0;
  	unsigned int seg_size, nr_phys_segs;
  	struct bio *fbio, *bbio;
  	struct bvec_iter iter;

  	if (!bio)
  		return 0;

  	switch (bio_op(bio)) {
  	case REQ_OP_DISCARD:
  	case REQ_OP_SECURE_ERASE:
  	case REQ_OP_WRITE_ZEROES:
  		return 0;
  	case REQ_OP_WRITE_SAME:
  		return 1;
  	}

  	fbio = bio;
  	cluster = blk_queue_cluster(q);
  	seg_size = 0;
  	nr_phys_segs = 0;
  	for_each_bio(bio) {
  		bio_for_each_segment(bv, bio, iter) {
  			/*
  			 * If SG merging is disabled, each bio vector is
  			 * a segment
  			 */
  			if (no_sg_merge)
  				goto new_segment;
  			if (prev && cluster) {
  				if (seg_size + bv.bv_len
  				    > queue_max_segment_size(q))
  					goto new_segment;
  				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
  					goto new_segment;
  				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
  					goto new_segment;

  				seg_size += bv.bv_len;
  				bvprv = bv;
  				continue;
  			}
  new_segment:
  			if (nr_phys_segs == 1 && seg_size >
  			    fbio->bi_seg_front_size)
  				fbio->bi_seg_front_size = seg_size;

  			nr_phys_segs++;
  			bvprv = bv;
  			prev = 1;
  			seg_size = bv.bv_len;
  		}
  		bbio = bio;
  	}
  	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
  		fbio->bi_seg_front_size = seg_size;
  	if (seg_size > bbio->bi_seg_back_size)
  		bbio->bi_seg_back_size = seg_size;
  
  	return nr_phys_segs;
  }
  
  void blk_recalc_rq_segments(struct request *rq)
  {
  	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
  			&rq->q->queue_flags);
  
  	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
  			no_sg_merge);
  }
  
  void blk_recount_segments(struct request_queue *q, struct bio *bio)
  {
  	unsigned short seg_cnt;
  
  	/* estimate segment number by bi_vcnt for non-cloned bio */
  	if (bio_flagged(bio, BIO_CLONED))
  		seg_cnt = bio_segments(bio);
  	else
  		seg_cnt = bio->bi_vcnt;

  	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
  			(seg_cnt < queue_max_segments(q)))
  		bio->bi_phys_segments = seg_cnt;
  	else {
  		struct bio *nxt = bio->bi_next;
  
  		bio->bi_next = NULL;
  		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
  		bio->bi_next = nxt;
  	}

  	bio_set_flag(bio, BIO_SEG_VALID);
  }
  EXPORT_SYMBOL(blk_recount_segments);
  
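/*
 * Check whether the last segment of @bio and the first segment of @nxt
 * are physically contiguous and small enough to be merged into a single
 * hardware segment without crossing the queue's segment boundary.
 */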
  static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
  				   struct bio *nxt)
  {
  	struct bio_vec end_bv = { NULL }, nxt_bv;

  	if (!blk_queue_cluster(q))
  		return 0;
  	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
  	    queue_max_segment_size(q))
  		return 0;
  	if (!bio_has_data(bio))
  		return 1;
  	bio_get_last_bvec(bio, &end_bv);
  	bio_get_first_bvec(nxt, &nxt_bv);
  
  	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
  		return 0;
  	/*
  	 * bio and nxt are contiguous in memory; check if the queue allows
  	 * these two to be merged into one
  	 */
  	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
  		return 1;
  
  	return 0;
  }
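
/*
 * Add one bvec to the scatterlist being built: extend the current sg
 * entry when the queue's clustering rules allow it, otherwise start a
 * new entry.
 */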
  static inline void
  __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
  		     struct scatterlist *sglist, struct bio_vec *bvprv,
  		     struct scatterlist **sg, int *nsegs, int *cluster)
  {
  
  	int nbytes = bvec->bv_len;
  	if (*sg && *cluster) {
  		if ((*sg)->length + nbytes > queue_max_segment_size(q))
  			goto new_segment;
  		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
  			goto new_segment;
  		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
  			goto new_segment;
  
  		(*sg)->length += nbytes;
  	} else {
  new_segment:
  		if (!*sg)
  			*sg = sglist;
  		else {
  			/*
  			 * If the driver previously mapped a shorter
  			 * list, we could see a termination bit
  			 * prematurely unless it fully inits the sg
  			 * table on each mapping. We KNOW that there
  			 * must be more entries here or the driver
  			 * would be buggy, so force clear the
  			 * termination bit to avoid doing a full
  			 * sg_init_table() in drivers for each command.
  			 */
  			sg_unmark_end(*sg);
  			*sg = sg_next(*sg);
  		}
  
  		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
  		(*nsegs)++;
  	}
  	*bvprv = *bvec;
  }
  static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
  		struct scatterlist *sglist, struct scatterlist **sg)
  {
  	*sg = sglist;
  	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
  	return 1;
  }
  static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
  			     struct scatterlist *sglist,
  			     struct scatterlist **sg)
  {
  	struct bio_vec bvec, bvprv = { NULL };
  	struct bvec_iter iter;
  	int cluster = blk_queue_cluster(q), nsegs = 0;
  
  	for_each_bio(bio)
  		bio_for_each_segment(bvec, bio, iter)
  			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
  					     &nsegs, &cluster);

  	return nsegs;
  }
  
  /*
   * map a request to scatterlist, return number of sg entries setup. Caller
   * must make sure sg can hold rq->nr_phys_segments entries
   */
  int blk_rq_map_sg(struct request_queue *q, struct request *rq,
  		  struct scatterlist *sglist)
  {
  	struct scatterlist *sg = NULL;
  	int nsegs = 0;
  	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
  		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
  	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
  		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
  	else if (rq->bio)
  		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

  	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
  	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
  		unsigned int pad_len =
  			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
  
  		sg->length += pad_len;
  		rq->extra_len += pad_len;
  	}
  	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
  		if (op_is_write(req_op(rq)))
  			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
  		sg_unmark_end(sg);
  		sg = sg_next(sg);
  		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
  			    q->dma_drain_size,
  			    ((unsigned long)q->dma_drain_buffer) &
  			    (PAGE_SIZE - 1));
  		nsegs++;
  		rq->extra_len += q->dma_drain_size;
  	}
  
  	if (sg)
  		sg_mark_end(sg);
  	/*
  	 * Something must have been wrong if the figured number of
  	 * segment is bigger than number of req's physical segments
  	 */
  	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

  	return nsegs;
  }
  EXPORT_SYMBOL(blk_rq_map_sg);
  static inline int ll_new_hw_segment(struct request_queue *q,
  				    struct request *req,
  				    struct bio *bio)
  {
  	int nr_phys_segs = bio_phys_segments(q, bio);
  	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
  		goto no_merge;
  	if (blk_integrity_merge_bio(q, req, bio) == false)
  		goto no_merge;
  
  	/*
  	 * This will form the start of a new hw segment.  Bump both
  	 * counters.
  	 */
  	req->nr_phys_segments += nr_phys_segs;
  	return 1;
  
  no_merge:
  	req_set_nomerge(q, req);
  	return 0;
  }
  
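/*
 * Can @bio be appended to the back of @req without violating the gap,
 * integrity, size or segment-count limits of the queue?
 */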
  int ll_back_merge_fn(struct request_queue *q, struct request *req,
  		     struct bio *bio)
  {
  	if (req_gap_back_merge(req, bio))
  		return 0;
  	if (blk_integrity_rq(req) &&
  	    integrity_req_gap_back_merge(req, bio))
  		return 0;
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
  		req_set_nomerge(q, req);
  		return 0;
  	}
  	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
  		blk_recount_segments(q, req->biotail);
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  int ll_front_merge_fn(struct request_queue *q, struct request *req,
  		      struct bio *bio)
  {
  
  	if (req_gap_front_merge(req, bio))
  		return 0;
  	if (blk_integrity_rq(req) &&
  	    integrity_req_gap_front_merge(req, bio))
  		return 0;
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
  		req_set_nomerge(q, req);
  		return 0;
  	}
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  	if (!bio_flagged(req->bio, BIO_SEG_VALID))
  		blk_recount_segments(q, req->bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  /*
   * blk-mq uses req->special to carry normal driver per-request payload, it
   * does not indicate a prepared command that we cannot merge with.
   */
  static bool req_no_special_merge(struct request *req)
  {
  	struct request_queue *q = req->q;
  
  	return !q->mq_ops && req->special;
  }
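
/*
 * Check whether discard request @next can be appended to @req without
 * exceeding the queue's discard segment count or size limits, updating
 * @req's segment count if the merge is allowed.
 */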
  static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
  		struct request *next)
  {
  	unsigned short segments = blk_rq_nr_discard_segments(req);
  
  	if (segments >= queue_max_discard_segments(q))
  		goto no_merge;
  	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
  		goto no_merge;
  
  	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
  	return true;
  no_merge:
  	req_set_nomerge(q, req);
  	return false;
  }
  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
  				struct request *next)
  {
  	int total_phys_segments;
  	unsigned int seg_size =
  		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
  
  	/*
	 * First check if either of the requests is a re-queued
	 * request.  We can't merge them if they are.
  	 */
  	if (req_no_special_merge(req) || req_no_special_merge(next))
  		return 0;
  	if (req_gap_back_merge(req, next->bio))
  		return 0;
  	/*
  	 * Will it become too large?
  	 */
  	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
  	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
  		return 0;
  
  	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
  	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
  		if (req->nr_phys_segments == 1)
  			req->bio->bi_seg_front_size = seg_size;
  		if (next->nr_phys_segments == 1)
  			next->biotail->bi_seg_back_size = seg_size;
  		total_phys_segments--;
  	}

  	if (total_phys_segments > queue_max_segments(q))
  		return 0;
  	if (blk_integrity_merge_rq(q, req, next) == false)
  		return 0;
  	/* Merge is OK... */
  	req->nr_phys_segments = total_phys_segments;
  	return 1;
  }
  /**
   * blk_rq_set_mixed_merge - mark a request as mixed merge
   * @rq: request to mark as mixed merge
   *
   * Description:
   *     @rq is about to be mixed merged.  Make sure the attributes
   *     which can be mixed are set in each bio and mark @rq as mixed
   *     merged.
   */
  void blk_rq_set_mixed_merge(struct request *rq)
  {
  	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
  	struct bio *bio;
  	if (rq->rq_flags & RQF_MIXED_MERGE)
  		return;
  
  	/*
  	 * @rq will no longer represent mixable attributes for all the
  	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
  	 */
  	for (bio = rq->bio; bio; bio = bio->bi_next) {
  		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
  			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
  		bio->bi_opf |= ff;
  	}
  	rq->rq_flags |= RQF_MIXED_MERGE;
  }
  static void blk_account_io_merge(struct request *req)
  {
  	if (blk_do_io_stat(req)) {
  		struct hd_struct *part;
  		int cpu;
  
  		cpu = part_stat_lock();
  		part = req->part;

  		part_round_stats(req->q, cpu, part);
  		part_dec_in_flight(req->q, part, rq_data_dir(req));

  		hd_struct_put(part);
  		part_stat_unlock();
  	}
  }
  /*
   * For non-mq, this has to be called with the request spinlock acquired.
   * For mq with scheduling, the appropriate queue wide lock should be held.
   */
  static struct request *attempt_merge(struct request_queue *q,
  				     struct request *req, struct request *next)
  {
  	if (!q->mq_ops)
  		lockdep_assert_held(q->queue_lock);
  	if (!rq_mergeable(req) || !rq_mergeable(next))
  		return NULL;

  	if (req_op(req) != req_op(next))
  		return NULL;

  	/*
  	 * not contiguous
  	 */
  	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
  		return NULL;
  
  	if (rq_data_dir(req) != rq_data_dir(next)
  	    || req->rq_disk != next->rq_disk
  	    || req_no_special_merge(next))
  		return NULL;

  	if (req_op(req) == REQ_OP_WRITE_SAME &&
  	    !blk_write_same_mergeable(req->bio, next->bio))
  		return NULL;

  	/*
  	 * Don't allow merge of different write hints, or for a hint with
  	 * non-hint IO.
  	 */
  	if (req->write_hint != next->write_hint)
  		return NULL;
  
  	/*
  	 * If we are allowed to merge, then append bio list
  	 * from next to rq and release next. merge_requests_fn
  	 * will have updated segment counts, update sector
  	 * counts here. Handle DISCARDs separately, as they
  	 * have separate settings.
  	 */
  	if (req_op(req) == REQ_OP_DISCARD) {
  		if (!req_attempt_discard_merge(q, req, next))
  			return NULL;
  	} else if (!ll_merge_requests_fn(q, req, next))
  		return NULL;
  
  	/*
  	 * If failfast settings disagree or any of the two is already
  	 * a mixed merge, mark both as mixed before proceeding.  This
  	 * makes sure that all involved bios have mixable attributes
  	 * set properly.
  	 */
  	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
  	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
  	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
  		blk_rq_set_mixed_merge(req);
  		blk_rq_set_mixed_merge(next);
  	}
  
  	/*
  	 * At this point we have either done a back merge
  	 * or front merge. We need the smaller start_time of
  	 * the merged requests to be the current request
  	 * for accounting purposes.
  	 */
  	if (time_after(req->start_time, next->start_time))
  		req->start_time = next->start_time;
  
  	req->biotail->bi_next = next->bio;
  	req->biotail = next->biotail;
  	req->__data_len += blk_rq_bytes(next);

  	if (req_op(req) != REQ_OP_DISCARD)
  		elv_merge_requests(q, req, next);

  	/*
  	 * 'next' is going away, so update stats accordingly
  	 */
  	blk_account_io_merge(next);
  
  	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
  	if (blk_rq_cpu_valid(next))
  		req->cpu = next->cpu;

  	/*
  	 * ownership of bio passed from next to req, return 'next' for
  	 * the caller to free
  	 */
  	next->bio = NULL;
  	return next;
  }
  struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *next = elv_latter_request(q, rq);
  
  	if (next)
  		return attempt_merge(q, rq, next);
  	return NULL;
  }
  struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *prev = elv_former_request(q, rq);
  
  	if (prev)
  		return attempt_merge(q, prev, rq);
  	return NULL;
  }
  
  int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
  			  struct request *next)
  {
  	struct elevator_queue *e = q->elevator;
  	struct request *free;

  	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
  		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
  			return 0;
  	free = attempt_merge(q, rq, next);
  	if (free) {
  		__blk_put_request(q, free);
  		return 1;
  	}
  
  	return 0;
  }
  
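/*
 * Basic checks that @bio is mergeable into @rq at all: same operation
 * and data direction, same device, compatible integrity payload, same
 * WRITE SAME buffer and same write hint.
 */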
  bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
  {
  	if (!rq_mergeable(rq) || !bio_mergeable(bio))
  		return false;
  	if (req_op(rq) != bio_op(bio))
  		return false;
  	/* different data direction or already started, don't merge */
  	if (bio_data_dir(bio) != rq_data_dir(rq))
  		return false;
  
  	/* must be same device and not a special request */
  	if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
  		return false;
  
  	/* only merge integrity protected bio into ditto rq */
  	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
  		return false;
  	/* must be using the same buffer */
  	if (req_op(rq) == REQ_OP_WRITE_SAME &&
  	    !blk_write_same_mergeable(rq->bio, bio))
  		return false;
  	/*
  	 * Don't allow merge of different write hints, or for a hint with
  	 * non-hint IO.
  	 */
  	if (rq->write_hint != bio->bi_write_hint)
  		return false;
  	return true;
  }
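
/*
 * Decide how @bio could be merged with @rq: as another discard range,
 * at the back, at the front, or not at all.
 */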
  enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
  {
  	if (req_op(rq) == REQ_OP_DISCARD &&
  	    queue_max_discard_segments(rq->q) > 1)
  		return ELEVATOR_DISCARD_MERGE;
  	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
  		return ELEVATOR_BACK_MERGE;
  	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
  		return ELEVATOR_FRONT_MERGE;
  	return ELEVATOR_NO_MERGE;
  }