block/blk-merge.c

  /*
   * Functions related to segment and merge handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  
  #include "blk.h"
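
/*
 * Count the physical segments in a bio chain, honouring the queue's
 * maximum segment size and boundary limits.  Adjacent bvecs are folded
 * into one segment when the queue supports clustering and per-bvec
 * segments are not forced via no_sg_merge.  The front and back segment
 * sizes are recorded in the first and last bio for later merging.
 */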
  static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
  					     struct bio *bio,
  					     bool no_sg_merge)
  {
  	struct bio_vec bv, bvprv = { NULL };
  	int cluster, high, highprv = 1;
  	unsigned int seg_size, nr_phys_segs;
  	struct bio *fbio, *bbio;
  	struct bvec_iter iter;

  	if (!bio)
  		return 0;

  	/*
  	 * This should probably be returning 0, but blk_add_request_payload()
  	 * (Christoph!!!!)
  	 */
  	if (bio->bi_rw & REQ_DISCARD)
  		return 1;
  
  	if (bio->bi_rw & REQ_WRITE_SAME)
  		return 1;
  	fbio = bio;
  	cluster = blk_queue_cluster(q);
  	seg_size = 0;
  	nr_phys_segs = 0;
  	high = 0;
  	for_each_bio(bio) {
  		bio_for_each_segment(bv, bio, iter) {
  			/*
  			 * If SG merging is disabled, each bio vector is
  			 * a segment
  			 */
  			if (no_sg_merge)
  				goto new_segment;
  
  			/*
  			 * the trick here is making sure that a high page is
  			 * never considered part of another segment, since
  			 * that might change with the bounce page.
  			 */
  			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
  			if (!high && !highprv && cluster) {
  				if (seg_size + bv.bv_len
  				    > queue_max_segment_size(q))
  					goto new_segment;
  				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
  					goto new_segment;
  				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
  					goto new_segment;

  				seg_size += bv.bv_len;
  				bvprv = bv;
  				continue;
  			}
  new_segment:
  			if (nr_phys_segs == 1 && seg_size >
  			    fbio->bi_seg_front_size)
  				fbio->bi_seg_front_size = seg_size;

  			nr_phys_segs++;
  			bvprv = bv;
  			seg_size = bv.bv_len;
  			highprv = high;
  		}
  		bbio = bio;
  	}
  	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
  		fbio->bi_seg_front_size = seg_size;
  	if (seg_size > bbio->bi_seg_back_size)
  		bbio->bi_seg_back_size = seg_size;
  
  	return nr_phys_segs;
  }
  
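/*
 * Recalculate and cache the number of physical segments spanned by all
 * bios attached to @rq.
 */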
  void blk_recalc_rq_segments(struct request *rq)
  {
  	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
  			&rq->q->queue_flags);
  
  	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
  			no_sg_merge);
  }
  
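/*
 * Recount bio->bi_phys_segments and mark the count valid.  On queues
 * with SG merging disabled, a non-cloned bio can use bi_vcnt directly
 * as long as it stays below the queue's segment limit.
 */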
  void blk_recount_segments(struct request_queue *q, struct bio *bio)
  {
  	unsigned short seg_cnt;
  
  	/* estimate segment number by bi_vcnt for non-cloned bio */
  	if (bio_flagged(bio, BIO_CLONED))
  		seg_cnt = bio_segments(bio);
  	else
  		seg_cnt = bio->bi_vcnt;

  	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
  			(seg_cnt < queue_max_segments(q)))
  		bio->bi_phys_segments = seg_cnt;
  	else {
  		struct bio *nxt = bio->bi_next;
  
  		bio->bi_next = NULL;
  		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
  		bio->bi_next = nxt;
  	}

  	bio->bi_flags |= (1 << BIO_SEG_VALID);
  }
  EXPORT_SYMBOL(blk_recount_segments);
  
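/*
 * Return 1 if the last segment of @bio and the first segment of @nxt
 * are physically contiguous and small enough to be merged into a
 * single segment without violating the queue's limits.
 */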
  static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
  				   struct bio *nxt)
  {
  	struct bio_vec end_bv = { NULL }, nxt_bv;
  	struct bvec_iter iter;
  	if (!blk_queue_cluster(q))
  		return 0;
  	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
  	    queue_max_segment_size(q))
  		return 0;
  	if (!bio_has_data(bio))
  		return 1;
  	bio_for_each_segment(end_bv, bio, iter)
  		if (end_bv.bv_len == iter.bi_size)
  			break;
  
  	nxt_bv = bio_iovec(nxt);
  
  	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
  		return 0;
  	/*
  	 * bio and nxt are contiguous in memory; check if the queue allows
  	 * these two to be merged into one
  	 */
  	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
  		return 1;
  
  	return 0;
  }
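
/*
 * Add a single bvec to the scatterlist being built: either extend the
 * current sg entry when clustering allows it, or start a new entry.
 */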
  static inline void
  __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
  		     struct scatterlist *sglist, struct bio_vec *bvprv,
  		     struct scatterlist **sg, int *nsegs, int *cluster)
  {
  
  	int nbytes = bvec->bv_len;
  	if (*sg && *cluster) {
  		if ((*sg)->length + nbytes > queue_max_segment_size(q))
  			goto new_segment;
  		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
  			goto new_segment;
  		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
  			goto new_segment;
  
  		(*sg)->length += nbytes;
  	} else {
  new_segment:
  		if (!*sg)
  			*sg = sglist;
  		else {
  			/*
  			 * If the driver previously mapped a shorter
  			 * list, we could see a termination bit
  			 * prematurely unless it fully inits the sg
  			 * table on each mapping. We KNOW that there
  			 * must be more entries here or the driver
  			 * would be buggy, so force clear the
  			 * termination bit to avoid doing a full
  			 * sg_init_table() in drivers for each command.
  			 */
  			sg_unmark_end(*sg);
  			*sg = sg_next(*sg);
  		}
  
  		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
  		(*nsegs)++;
  	}
  	*bvprv = *bvec;
  }
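
/*
 * Map every segment of a bio chain into @sglist.  Discard and write
 * same bios are special-cased to at most one payload segment.
 */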
  static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
  			     struct scatterlist *sglist,
  			     struct scatterlist **sg)
  {
  	struct bio_vec bvec, bvprv = { NULL };
  	struct bvec_iter iter;
  	int nsegs, cluster;
  
  	nsegs = 0;
  	cluster = blk_queue_cluster(q);

  	if (bio->bi_rw & REQ_DISCARD) {
  		/*
  		 * This is a hack - drivers should be neither modifying the
  		 * biovec, nor relying on bi_vcnt - but because of
  		 * blk_add_request_payload(), a discard bio may or may not have
  		 * a payload we need to set up here (thank you Christoph) and
  		 * bi_vcnt is really the only way of telling if we need to.
  		 */
  
  		if (bio->bi_vcnt)
  			goto single_segment;
  
  		return 0;
  	}
  
  	if (bio->bi_rw & REQ_WRITE_SAME) {
  single_segment:
  		*sg = sglist;
  		bvec = bio_iovec(bio);
  		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
  		return 1;
  	}
  
  	for_each_bio(bio)
  		bio_for_each_segment(bvec, bio, iter)
  			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
  					     &nsegs, &cluster);

  	return nsegs;
  }
  
  /*
   * map a request to scatterlist, return number of sg entries setup. Caller
   * must make sure sg can hold rq->nr_phys_segments entries
   */
  int blk_rq_map_sg(struct request_queue *q, struct request *rq,
  		  struct scatterlist *sglist)
  {
  	struct scatterlist *sg = NULL;
  	int nsegs = 0;
  
  	if (rq->bio)
  		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
  
  	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
  	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
  		unsigned int pad_len =
  			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
  
  		sg->length += pad_len;
  		rq->extra_len += pad_len;
  	}
  	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
  		if (rq->cmd_flags & REQ_WRITE)
  			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
  		sg->page_link &= ~0x02;
  		sg = sg_next(sg);
  		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
  			    q->dma_drain_size,
  			    ((unsigned long)q->dma_drain_buffer) &
  			    (PAGE_SIZE - 1));
  		nsegs++;
  		rq->extra_len += q->dma_drain_size;
  	}
  
  	if (sg)
  		sg_mark_end(sg);
  
  	return nsegs;
  }
  EXPORT_SYMBOL(blk_rq_map_sg);
  /**
   * blk_bio_map_sg - map a bio to a scatterlist
   * @q: request_queue in question
   * @bio: bio being mapped
   * @sglist: scatterlist being mapped
   *
   * Note:
   *    Caller must make sure sg can hold bio->bi_phys_segments entries
   *
   * Will return the number of sg entries setup
   */
  int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
  		   struct scatterlist *sglist)
  {
  	struct scatterlist *sg = NULL;
  	int nsegs;
  	struct bio *next = bio->bi_next;
  	bio->bi_next = NULL;

  	nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
  	bio->bi_next = next;
  	if (sg)
  		sg_mark_end(sg);
  
  	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
  	return nsegs;
  }
  EXPORT_SYMBOL(blk_bio_map_sg);
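
/*
 * Check whether @bio may be appended to @req as additional physical
 * segments.  If the segment or integrity limits would be exceeded,
 * mark the request as not mergeable and return 0.
 */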
  static inline int ll_new_hw_segment(struct request_queue *q,
  				    struct request *req,
  				    struct bio *bio)
  {
  	int nr_phys_segs = bio_phys_segments(q, bio);
  	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
  		goto no_merge;
  	if (blk_integrity_merge_bio(q, req, bio) == false)
  		goto no_merge;
  
  	/*
  	 * This will form the start of a new hw segment.  Bump both
  	 * counters.
  	 */
  	req->nr_phys_segments += nr_phys_segs;
  	return 1;
  
  no_merge:
  	req->cmd_flags |= REQ_NOMERGE;
  	if (req == q->last_merge)
  		q->last_merge = NULL;
  	return 0;
  }
  
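/*
 * Check whether @bio can be back merged onto @req, i.e. whether the
 * resulting request stays within the queue's size and segment limits.
 */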
  int ll_back_merge_fn(struct request_queue *q, struct request *req,
  		     struct bio *bio)
  {
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req)) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
  		blk_recount_segments(q, req->biotail);
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
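
/*
 * As above, but for a front merge: @bio would become the new head of
 * @req, so the request's first bio is the one that gets recounted.
 */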
  int ll_front_merge_fn(struct request_queue *q, struct request *req,
  		      struct bio *bio)
  {
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req)) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  	if (!bio_flagged(req->bio, BIO_SEG_VALID))
  		blk_recount_segments(q, req->bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  /*
   * blk-mq uses req->special to carry normal driver per-request payload, it
   * does not indicate a prepared command that we cannot merge with.
   */
  static bool req_no_special_merge(struct request *req)
  {
  	struct request_queue *q = req->q;
  
  	return !q->mq_ops && req->special;
  }
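
/*
 * Check whether @req and @next can be combined into a single request
 * without exceeding the sector and segment limits; on success the
 * merged segment count is written back to @req.
 */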
  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
  				struct request *next)
  {
  	int total_phys_segments;
  	unsigned int seg_size =
  		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
  
  	/*
	 * First check whether either of the requests is a re-queued
	 * request.  Can't merge them if they are.
  	 */
  	if (req_no_special_merge(req) || req_no_special_merge(next))
  		return 0;
  
  	/*
  	 * Will it become too large?
  	 */
  	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
  	    blk_rq_get_max_sectors(req))
  		return 0;
  
  	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
  	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
  		if (req->nr_phys_segments == 1)
  			req->bio->bi_seg_front_size = seg_size;
  		if (next->nr_phys_segments == 1)
  			next->biotail->bi_seg_back_size = seg_size;
  		total_phys_segments--;
  	}

  	if (total_phys_segments > queue_max_segments(q))
  		return 0;
  	if (blk_integrity_merge_rq(q, req, next) == false)
  		return 0;
  	/* Merge is OK... */
  	req->nr_phys_segments = total_phys_segments;
  	return 1;
  }
  /**
   * blk_rq_set_mixed_merge - mark a request as mixed merge
   * @rq: request to mark as mixed merge
   *
   * Description:
   *     @rq is about to be mixed merged.  Make sure the attributes
   *     which can be mixed are set in each bio and mark @rq as mixed
   *     merged.
   */
  void blk_rq_set_mixed_merge(struct request *rq)
  {
  	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
  	struct bio *bio;
  
  	if (rq->cmd_flags & REQ_MIXED_MERGE)
  		return;
  
  	/*
  	 * @rq will no longer represent mixable attributes for all the
  	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
  	 */
  	for (bio = rq->bio; bio; bio = bio->bi_next) {
  		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
  			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
  		bio->bi_rw |= ff;
  	}
  	rq->cmd_flags |= REQ_MIXED_MERGE;
  }
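
/*
 * @req is going away as the result of a merge; drop it from the
 * partition's in-flight accounting.
 */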
  static void blk_account_io_merge(struct request *req)
  {
  	if (blk_do_io_stat(req)) {
  		struct hd_struct *part;
  		int cpu;
  
  		cpu = part_stat_lock();
  		part = req->part;
  
  		part_round_stats(cpu, part);
  		part_dec_in_flight(part, rq_data_dir(req));

  		hd_struct_put(part);
  		part_stat_unlock();
  	}
  }
  /*
   * Has to be called with the request spinlock acquired
   */
  static int attempt_merge(struct request_queue *q, struct request *req,
  			  struct request *next)
  {
  	if (!rq_mergeable(req) || !rq_mergeable(next))
  		return 0;
  	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
  		return 0;
  	/*
  	 * not contiguous
  	 */
  	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
  		return 0;
  
  	if (rq_data_dir(req) != rq_data_dir(next)
  	    || req->rq_disk != next->rq_disk
  	    || req_no_special_merge(next))
  		return 0;
  	if (req->cmd_flags & REQ_WRITE_SAME &&
  	    !blk_write_same_mergeable(req->bio, next->bio))
  		return 0;
  	/*
  	 * If we are allowed to merge, then append bio list
  	 * from next to rq and release next. merge_requests_fn
  	 * will have updated segment counts, update sector
  	 * counts here.
  	 */
  	if (!ll_merge_requests_fn(q, req, next))
  		return 0;
  
  	/*
  	 * If failfast settings disagree or any of the two is already
  	 * a mixed merge, mark both as mixed before proceeding.  This
  	 * makes sure that all involved bios have mixable attributes
  	 * set properly.
  	 */
  	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
  	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
  	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
  		blk_rq_set_mixed_merge(req);
  		blk_rq_set_mixed_merge(next);
  	}
  
  	/*
  	 * At this point we have either done a back merge
  	 * or front merge. We need the smaller start_time of
  	 * the merged requests to be the current request
  	 * for accounting purposes.
  	 */
  	if (time_after(req->start_time, next->start_time))
  		req->start_time = next->start_time;
  
  	req->biotail->bi_next = next->bio;
  	req->biotail = next->biotail;
  	req->__data_len += blk_rq_bytes(next);
  
  	elv_merge_requests(q, req, next);
  	/*
  	 * 'next' is going away, so update stats accordingly
  	 */
  	blk_account_io_merge(next);
  
  	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
  	if (blk_rq_cpu_valid(next))
  		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
  	next->bio = NULL;
  	__blk_put_request(q, next);
  	return 1;
  }
  
  int attempt_back_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *next = elv_latter_request(q, rq);
  
  	if (next)
  		return attempt_merge(q, rq, next);
  
  	return 0;
  }
  
  int attempt_front_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *prev = elv_former_request(q, rq);
  
  	if (prev)
  		return attempt_merge(q, prev, rq);
  
  	return 0;
  }
  
  int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
  			  struct request *next)
  {
  	return attempt_merge(q, rq, next);
  }
  
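/*
 * Check whether @bio is allowed to be merged into @rq at all: merge
 * flags, data direction, target device, integrity profile, WRITE SAME
 * buffer and SG gap constraints must all agree.
 */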
  bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
  {
  	struct request_queue *q = rq->q;
  	if (!rq_mergeable(rq) || !bio_mergeable(bio))
  		return false;
  	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
  		return false;
  	/* different data direction or already started, don't merge */
  	if (bio_data_dir(bio) != rq_data_dir(rq))
  		return false;
  
  	/* must be same device and not a special request */
  	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
  		return false;
  
  	/* only merge integrity protected bio into ditto rq */
  	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
  		return false;
  	/* must be using the same buffer */
  	if (rq->cmd_flags & REQ_WRITE_SAME &&
  	    !blk_write_same_mergeable(rq->bio, bio))
  		return false;
  	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
  		struct bio_vec *bprev;
  
		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
  		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
  			return false;
  	}
  	return true;
  }
  
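/*
 * Classify the merge: a bio that starts right after @rq is a back
 * merge, one that ends right before it is a front merge.
 */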
  int blk_try_merge(struct request *rq, struct bio *bio)
  {
  	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
  		return ELEVATOR_BACK_MERGE;
  	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
  		return ELEVATOR_FRONT_MERGE;
  	return ELEVATOR_NO_MERGE;
  }