block/blk-merge.c
  /*
   * Functions related to segment and merge handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  
  #include "blk.h"
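
/*
 * Walk a bio chain and count the number of physical segments it maps to,
 * honouring the queue's cluster setting, max segment size and segment
 * boundary mask.  The sizes of the first and last segments are recorded in
 * bi_seg_front_size/bi_seg_back_size so that request merging can later
 * check whether joining two requests would create an oversized segment.
 */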
  static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
  					     struct bio *bio)
  {
  	struct bio_vec bv, bvprv = { NULL };
  	int cluster, high, highprv = 1, no_sg_merge;
  	unsigned int seg_size, nr_phys_segs;
  	struct bio *fbio, *bbio;
  	struct bvec_iter iter;

  	if (!bio)
  		return 0;

  	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * may still attach a payload to a discard, so count one segment
	 * (Christoph!!!!)
  	 */
  	if (bio->bi_rw & REQ_DISCARD)
  		return 1;
  
  	if (bio->bi_rw & REQ_WRITE_SAME)
  		return 1;
  	fbio = bio;
  	cluster = blk_queue_cluster(q);
  	seg_size = 0;
  	nr_phys_segs = 0;
  	no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
  	high = 0;
  	for_each_bio(bio) {
  		bio_for_each_segment(bv, bio, iter) {
  			/*
  			 * If SG merging is disabled, each bio vector is
  			 * a segment
  			 */
  			if (no_sg_merge)
  				goto new_segment;
  
  			/*
  			 * the trick here is making sure that a high page is
  			 * never considered part of another segment, since
  			 * that might change with the bounce page.
  			 */
  			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
  			if (!high && !highprv && cluster) {
  				if (seg_size + bv.bv_len
  				    > queue_max_segment_size(q))
  					goto new_segment;
  				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
  					goto new_segment;
  				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
  					goto new_segment;

  				seg_size += bv.bv_len;
  				bvprv = bv;
  				continue;
  			}
  new_segment:
  			if (nr_phys_segs == 1 && seg_size >
  			    fbio->bi_seg_front_size)
  				fbio->bi_seg_front_size = seg_size;

  			nr_phys_segs++;
  			bvprv = bv;
  			seg_size = bv.bv_len;
  			highprv = high;
  		}
  		bbio = bio;
  	}
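
	/*
	 * Record the size of the first and last segment; request merging
	 * uses these to verify that joining two requests would not create
	 * a segment larger than the queue allows.
	 */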
  	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
  		fbio->bi_seg_front_size = seg_size;
  	if (seg_size > bbio->bi_seg_back_size)
  		bbio->bi_seg_back_size = seg_size;
  
  	return nr_phys_segs;
  }
  
  void blk_recalc_rq_segments(struct request *rq)
  {
  	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
  }
  
  void blk_recount_segments(struct request_queue *q, struct bio *bio)
  {
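	/*
	 * With QUEUE_FLAG_NO_SG_MERGE set, every bio_vec counts as its own
	 * segment, so bi_vcnt can be used directly instead of walking the
	 * bio.
	 */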
  	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags))
  		bio->bi_phys_segments = bio->bi_vcnt;
  	else {
  		struct bio *nxt = bio->bi_next;
  
  		bio->bi_next = NULL;
  		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
  		bio->bi_next = nxt;
  	}

  	bio->bi_flags |= (1 << BIO_SEG_VALID);
  }
  EXPORT_SYMBOL(blk_recount_segments);
  
  static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
  				   struct bio *nxt)
  {
  	struct bio_vec end_bv = { NULL }, nxt_bv;
  	struct bvec_iter iter;
  	if (!blk_queue_cluster(q))
  		return 0;
  	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
  	    queue_max_segment_size(q))
  		return 0;
  	if (!bio_has_data(bio))
  		return 1;
  	bio_for_each_segment(end_bv, bio, iter)
  		if (end_bv.bv_len == iter.bi_size)
  			break;
  
  	nxt_bv = bio_iovec(nxt);
  
  	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
  		return 0;
  	/*
  	 * bio and nxt are contiguous in memory; check if the queue allows
  	 * these two to be merged into one
  	 */
  	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
  		return 1;
  
  	return 0;
  }
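
/*
 * Map a single bio_vec into the scatterlist: either extend the current sg
 * entry, if clustering is enabled and the segment size and boundary limits
 * allow it, or start a new entry.
 */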
  static inline void
  __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
  		     struct scatterlist *sglist, struct bio_vec *bvprv,
  		     struct scatterlist **sg, int *nsegs, int *cluster)
  {
  
  	int nbytes = bvec->bv_len;
  	if (*sg && *cluster) {
  		if ((*sg)->length + nbytes > queue_max_segment_size(q))
  			goto new_segment;
  		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
  			goto new_segment;
  		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
  			goto new_segment;
  
  		(*sg)->length += nbytes;
  	} else {
  new_segment:
  		if (!*sg)
  			*sg = sglist;
  		else {
  			/*
  			 * If the driver previously mapped a shorter
  			 * list, we could see a termination bit
  			 * prematurely unless it fully inits the sg
  			 * table on each mapping. We KNOW that there
  			 * must be more entries here or the driver
  			 * would be buggy, so force clear the
  			 * termination bit to avoid doing a full
  			 * sg_init_table() in drivers for each command.
  			 */
  			sg_unmark_end(*sg);
  			*sg = sg_next(*sg);
  		}
  
  		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
  		(*nsegs)++;
  	}
  	*bvprv = *bvec;
  }
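
/*
 * Map a chain of bios onto @sglist.  Discard and write-same bios are
 * special-cased to at most one payload segment; everything else is mapped
 * bvec by bvec through __blk_segment_map_sg().  Returns the number of sg
 * entries used.
 */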
  static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
  			     struct scatterlist *sglist,
  			     struct scatterlist **sg)
  {
  	struct bio_vec bvec, bvprv = { NULL };
  	struct bvec_iter iter;
  	int nsegs, cluster;
  
  	nsegs = 0;
  	cluster = blk_queue_cluster(q);

  	if (bio->bi_rw & REQ_DISCARD) {
  		/*
  		 * This is a hack - drivers should be neither modifying the
  		 * biovec, nor relying on bi_vcnt - but because of
  		 * blk_add_request_payload(), a discard bio may or may not have
  		 * a payload we need to set up here (thank you Christoph) and
  		 * bi_vcnt is really the only way of telling if we need to.
  		 */
  
  		if (bio->bi_vcnt)
  			goto single_segment;
  
  		return 0;
  	}
  
  	if (bio->bi_rw & REQ_WRITE_SAME) {
  single_segment:
  		*sg = sglist;
  		bvec = bio_iovec(bio);
  		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
  		return 1;
  	}
  
  	for_each_bio(bio)
  		bio_for_each_segment(bvec, bio, iter)
  			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
  					     &nsegs, &cluster);

  	return nsegs;
  }
  
  /*
 * map a request to a scatterlist, return the number of sg entries set up.
 * The caller must make sure sglist can hold rq->nr_phys_segments entries.
   */
  int blk_rq_map_sg(struct request_queue *q, struct request *rq,
  		  struct scatterlist *sglist)
  {
  	struct scatterlist *sg = NULL;
  	int nsegs = 0;
  
  	if (rq->bio)
  		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
  
  	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
  	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
  		unsigned int pad_len =
  			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
  
  		sg->length += pad_len;
  		rq->extra_len += pad_len;
  	}
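
	/*
	 * If the driver asked for a drain buffer, append it as one more sg
	 * entry (e.g. so devices that pad their transfers have somewhere to
	 * put the extra bytes) and account for it in rq->extra_len.
	 */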
  	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
  		if (rq->cmd_flags & REQ_WRITE)
  			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
  		sg->page_link &= ~0x02;
  		sg = sg_next(sg);
  		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
  			    q->dma_drain_size,
  			    ((unsigned long)q->dma_drain_buffer) &
  			    (PAGE_SIZE - 1));
  		nsegs++;
  		rq->extra_len += q->dma_drain_size;
  	}
  
  	if (sg)
  		sg_mark_end(sg);
  
  	return nsegs;
  }
  EXPORT_SYMBOL(blk_rq_map_sg);
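
/*
 * Illustrative use only (not part of this file): a driver would typically
 * size a scatterlist from the queue's segment limit, map the request into
 * it and hand the result to the DMA API.  "dev" and the pre-allocated
 * "sgl" are assumed to be the driver's.
 *
 *	sg_init_table(sgl, queue_max_segments(q));
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *	nents = dma_map_sg(dev, sgl, nents,
 *			   rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE
 *						    : DMA_FROM_DEVICE);
 */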
  /**
   * blk_bio_map_sg - map a bio to a scatterlist
   * @q: request_queue in question
   * @bio: bio being mapped
   * @sglist: scatterlist being mapped
   *
   * Note:
   *    Caller must make sure sg can hold bio->bi_phys_segments entries
   *
   * Will return the number of sg entries setup
   */
  int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
  		   struct scatterlist *sglist)
  {
  	struct scatterlist *sg = NULL;
  	int nsegs;
  	struct bio *next = bio->bi_next;
  	bio->bi_next = NULL;

  	nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
  	bio->bi_next = next;
  	if (sg)
  		sg_mark_end(sg);
  
  	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
  	return nsegs;
  }
  EXPORT_SYMBOL(blk_bio_map_sg);
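
/*
 * Try to account the segments of @bio in @req.  Fails, and marks the
 * request un-mergeable, if the combined segment count would exceed the
 * queue limit or if the integrity payloads cannot be merged.
 */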
  static inline int ll_new_hw_segment(struct request_queue *q,
  				    struct request *req,
  				    struct bio *bio)
  {
  	int nr_phys_segs = bio_phys_segments(q, bio);
  	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
  		goto no_merge;
  
  	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
  		goto no_merge;
  
  	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment count.
  	 */
  	req->nr_phys_segments += nr_phys_segs;
  	return 1;
  
  no_merge:
  	req->cmd_flags |= REQ_NOMERGE;
  	if (req == q->last_merge)
  		q->last_merge = NULL;
  	return 0;
  }
  
  int ll_back_merge_fn(struct request_queue *q, struct request *req,
  		     struct bio *bio)
  {
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req)) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
  		blk_recount_segments(q, req->biotail);
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  int ll_front_merge_fn(struct request_queue *q, struct request *req,
  		      struct bio *bio)
  {
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req)) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  	if (!bio_flagged(req->bio, BIO_SEG_VALID))
  		blk_recount_segments(q, req->bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  /*
   * blk-mq uses req->special to carry normal driver per-request payload, it
   * does not indicate a prepared command that we cannot merge with.
   */
  static bool req_no_special_merge(struct request *req)
  {
  	struct request_queue *q = req->q;
  
  	return !q->mq_ops && req->special;
  }
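
/*
 * Check whether @req and @next fit within the size and segment limits when
 * merged, and update req->nr_phys_segments if they do.  If the segments at
 * the merge point are physically contiguous, the merged request needs one
 * segment less than the sum of the two counts.
 */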
  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
  				struct request *next)
  {
  	int total_phys_segments;
  	unsigned int seg_size =
  		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
  
  	/*
	 * First check whether either of the requests has been re-queued;
	 * we can't merge them if so.
  	 */
  	if (req_no_special_merge(req) || req_no_special_merge(next))
  		return 0;
  
  	/*
  	 * Will it become too large?
  	 */
  	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
  	    blk_rq_get_max_sectors(req))
  		return 0;
  
  	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
  	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
  		if (req->nr_phys_segments == 1)
  			req->bio->bi_seg_front_size = seg_size;
  		if (next->nr_phys_segments == 1)
  			next->biotail->bi_seg_back_size = seg_size;
  		total_phys_segments--;
  	}

  	if (total_phys_segments > queue_max_segments(q))
  		return 0;
  	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
  		return 0;
  	/* Merge is OK... */
  	req->nr_phys_segments = total_phys_segments;
  	return 1;
  }
  /**
   * blk_rq_set_mixed_merge - mark a request as mixed merge
   * @rq: request to mark as mixed merge
   *
   * Description:
   *     @rq is about to be mixed merged.  Make sure the attributes
   *     which can be mixed are set in each bio and mark @rq as mixed
   *     merged.
   */
  void blk_rq_set_mixed_merge(struct request *rq)
  {
  	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
  	struct bio *bio;
  
  	if (rq->cmd_flags & REQ_MIXED_MERGE)
  		return;
  
  	/*
  	 * @rq will no longer represent mixable attributes for all the
  	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
  	 */
  	for (bio = rq->bio; bio; bio = bio->bi_next) {
  		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
  			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
  		bio->bi_rw |= ff;
  	}
  	rq->cmd_flags |= REQ_MIXED_MERGE;
  }
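
/*
 * Called for the request that is going away in a merge: drop its in-flight
 * accounting against the partition it was issued to.
 */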
  static void blk_account_io_merge(struct request *req)
  {
  	if (blk_do_io_stat(req)) {
  		struct hd_struct *part;
  		int cpu;
  
  		cpu = part_stat_lock();
  		part = req->part;
  
  		part_round_stats(cpu, part);
  		part_dec_in_flight(part, rq_data_dir(req));

  		hd_struct_put(part);
  		part_stat_unlock();
  	}
  }
  /*
   * Has to be called with the request spinlock acquired
   */
  static int attempt_merge(struct request_queue *q, struct request *req,
  			  struct request *next)
  {
  	if (!rq_mergeable(req) || !rq_mergeable(next))
  		return 0;
  	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
  		return 0;
  	/*
  	 * not contiguous
  	 */
  	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
  		return 0;
  
  	if (rq_data_dir(req) != rq_data_dir(next)
  	    || req->rq_disk != next->rq_disk
  	    || req_no_special_merge(next))
  		return 0;
  	if (req->cmd_flags & REQ_WRITE_SAME &&
  	    !blk_write_same_mergeable(req->bio, next->bio))
  		return 0;
  	/*
  	 * If we are allowed to merge, then append bio list
  	 * from next to rq and release next. merge_requests_fn
  	 * will have updated segment counts, update sector
  	 * counts here.
  	 */
  	if (!ll_merge_requests_fn(q, req, next))
  		return 0;
  
  	/*
  	 * If failfast settings disagree or any of the two is already
  	 * a mixed merge, mark both as mixed before proceeding.  This
  	 * makes sure that all involved bios have mixable attributes
  	 * set properly.
  	 */
  	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
  	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
  	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
  		blk_rq_set_mixed_merge(req);
  		blk_rq_set_mixed_merge(next);
  	}
  
  	/*
	 * At this point we have either done a back merge or a front merge.
	 * For accounting purposes, the surviving request needs to carry the
	 * smaller start_time of the two merged requests.
  	 */
  	if (time_after(req->start_time, next->start_time))
  		req->start_time = next->start_time;
  
  	req->biotail->bi_next = next->bio;
  	req->biotail = next->biotail;
  	req->__data_len += blk_rq_bytes(next);
  
  	elv_merge_requests(q, req, next);
  	/*
  	 * 'next' is going away, so update stats accordingly
  	 */
  	blk_account_io_merge(next);
  
  	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
  	if (blk_rq_cpu_valid(next))
  		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
  	next->bio = NULL;
  	__blk_put_request(q, next);
  	return 1;
  }
  
  int attempt_back_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *next = elv_latter_request(q, rq);
  
  	if (next)
  		return attempt_merge(q, rq, next);
  
  	return 0;
  }
  
  int attempt_front_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *prev = elv_former_request(q, rq);
  
  	if (prev)
  		return attempt_merge(q, prev, rq);
  
  	return 0;
  }
  
  int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
  			  struct request *next)
  {
  	return attempt_merge(q, rq, next);
  }
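
/*
 * The two helpers below are used when deciding whether a bio can be merged
 * into an existing request: blk_rq_merge_ok() checks whether a merge is
 * permissible at all, and blk_try_merge() classifies it as a back merge,
 * front merge or no merge based on the sector positions.
 */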
  
  bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
  {
  	struct request_queue *q = rq->q;
  	if (!rq_mergeable(rq) || !bio_mergeable(bio))
  		return false;
  	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
  		return false;
  	/* different data direction or already started, don't merge */
  	if (bio_data_dir(bio) != rq_data_dir(rq))
  		return false;
  
  	/* must be same device and not a special request */
  	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
  		return false;
  
  	/* only merge integrity protected bio into ditto rq */
  	if (bio_integrity(bio) != blk_integrity_rq(rq))
  		return false;
  	/* must be using the same buffer */
  	if (rq->cmd_flags & REQ_WRITE_SAME &&
  	    !blk_write_same_mergeable(rq->bio, bio))
  		return false;
  	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
  		struct bio_vec *bprev;
  
		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
  		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
  			return false;
  	}
  	return true;
  }
  
  int blk_try_merge(struct request *rq, struct bio *bio)
  {
  	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
  		return ELEVATOR_BACK_MERGE;
  	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
  		return ELEVATOR_FRONT_MERGE;
  	return ELEVATOR_NO_MERGE;
  }