block/blk-merge.c

  /*
   * Functions related to segment and merge handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  
  #include "blk.h"
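
  /*
   * Count the physical segments of a bio chain by walking every biovec,
   * honouring the queue's clustering setting, maximum segment size and
   * segment boundary mask.  The size of the first and last segment is
   * cached in bi_seg_front_size/bi_seg_back_size for later merge checks.
   */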
  static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
  					     struct bio *bio)
  {
  	struct bio_vec bv, bvprv = { NULL };
  	int cluster, high, highprv = 1;
  	unsigned int seg_size, nr_phys_segs;
  	struct bio *fbio, *bbio;
  	struct bvec_iter iter;

  	if (!bio)
  		return 0;

  	/*
  	 * This should probably be returning 0, but blk_add_request_payload()
  	 * (Christoph!!!!)
  	 */
  	if (bio->bi_rw & REQ_DISCARD)
  		return 1;
  
  	if (bio->bi_rw & REQ_WRITE_SAME)
  		return 1;
  	fbio = bio;
  	cluster = blk_queue_cluster(q);
  	seg_size = 0;
  	nr_phys_segs = 0;
  	for_each_bio(bio) {
  		bio_for_each_segment(bv, bio, iter) {
  			/*
  			 * the trick here is making sure that a high page is
  			 * never considered part of another segment, since that
  			 * might change with the bounce page.
  			 */
  			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
  			if (!high && !highprv && cluster) {
  				if (seg_size + bv.bv_len
  				    > queue_max_segment_size(q))
  					goto new_segment;
  				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
  					goto new_segment;
  				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
  					goto new_segment;

  				seg_size += bv.bv_len;
  				bvprv = bv;
  				continue;
  			}
  new_segment:
  			if (nr_phys_segs == 1 && seg_size >
  			    fbio->bi_seg_front_size)
  				fbio->bi_seg_front_size = seg_size;

  			nr_phys_segs++;
  			bvprv = bv;
  			seg_size = bv.bv_len;
  			highprv = high;
  		}
  		bbio = bio;
  	}
  	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
  		fbio->bi_seg_front_size = seg_size;
  	if (seg_size > bbio->bi_seg_back_size)
  		bbio->bi_seg_back_size = seg_size;
  
  	return nr_phys_segs;
  }
  
  void blk_recalc_rq_segments(struct request *rq)
  {
  	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
  }
  
  void blk_recount_segments(struct request_queue *q, struct bio *bio)
  {
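  	/*
  	 * Recount the segments of just this bio: break the chain via
  	 * bi_next for the duration of the call so only this bio is
  	 * walked, then mark the cached count valid with BIO_SEG_VALID.
  	 */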
  	struct bio *nxt = bio->bi_next;

  	bio->bi_next = NULL;
  	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
  	bio->bi_next = nxt;
  	bio->bi_flags |= (1 << BIO_SEG_VALID);
  }
  EXPORT_SYMBOL(blk_recount_segments);
  
  static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
  				   struct bio *nxt)
  {
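  	/*
  	 * Decide whether the last segment of @bio and the first segment
  	 * of @nxt could be folded into one physical segment without
  	 * exceeding the queue's segment size limit.
  	 */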
  	struct bio_vec end_bv = { NULL }, nxt_bv;
  	struct bvec_iter iter;

  	if (!blk_queue_cluster(q))
  		return 0;

  	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
  	    queue_max_segment_size(q))
  		return 0;
  	if (!bio_has_data(bio))
  		return 1;
  	bio_for_each_segment(end_bv, bio, iter)
  		if (end_bv.bv_len == iter.bi_size)
  			break;
  
  	nxt_bv = bio_iovec(nxt);
  
  	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
  		return 0;

  	/*
  	 * bio and nxt are contiguous in memory; check if the queue allows
  	 * these two to be merged into one
  	 */
  	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
  		return 1;
  
  	return 0;
  }
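
  /*
   * Add a single biovec to the scatterlist being built: extend the current
   * sg entry when clustering and the queue limits allow it, otherwise start
   * a new entry (clearing any stale termination marker first).
   */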
  static inline void
  __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
  		     struct scatterlist *sglist, struct bio_vec *bvprv,
  		     struct scatterlist **sg, int *nsegs, int *cluster)
  {
  
  	int nbytes = bvec->bv_len;
  	if (*sg && *cluster) {
  		if ((*sg)->length + nbytes > queue_max_segment_size(q))
  			goto new_segment;
  		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
  			goto new_segment;
  		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
  			goto new_segment;
  
  		(*sg)->length += nbytes;
  	} else {
  new_segment:
  		if (!*sg)
  			*sg = sglist;
  		else {
  			/*
  			 * If the driver previously mapped a shorter
  			 * list, we could see a termination bit
  			 * prematurely unless it fully inits the sg
  			 * table on each mapping. We KNOW that there
  			 * must be more entries here or the driver
  			 * would be buggy, so force clear the
  			 * termination bit to avoid doing a full
  			 * sg_init_table() in drivers for each command.
  			 */
  			sg_unmark_end(*sg);
  			*sg = sg_next(*sg);
  		}
  
  		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
  		(*nsegs)++;
  	}
  	*bvprv = *bvec;
  }
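
  /*
   * Map all biovecs of a bio chain onto @sglist.  Discard and write-same
   * bios are special-cased to at most a single payload segment; everything
   * else goes through __blk_segment_map_sg().
   */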
  static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
  			     struct scatterlist *sglist,
  			     struct scatterlist **sg)
  {
  	struct bio_vec bvec, bvprv = { NULL };
  	struct bvec_iter iter;
  	int nsegs, cluster;
  
  	nsegs = 0;
  	cluster = blk_queue_cluster(q);

  	if (bio->bi_rw & REQ_DISCARD) {
  		/*
  		 * This is a hack - drivers should be neither modifying the
  		 * biovec, nor relying on bi_vcnt - but because of
  		 * blk_add_request_payload(), a discard bio may or may not have
  		 * a payload we need to set up here (thank you Christoph) and
  		 * bi_vcnt is really the only way of telling if we need to.
  		 */
  
  		if (bio->bi_vcnt)
  			goto single_segment;
  
  		return 0;
  	}
  
  	if (bio->bi_rw & REQ_WRITE_SAME) {
  single_segment:
  		*sg = sglist;
  		bvec = bio_iovec(bio);
  		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
  		return 1;
  	}
  
  	for_each_bio(bio)
  		bio_for_each_segment(bvec, bio, iter)
  			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
  					     &nsegs, &cluster);

  	return nsegs;
  }
  
  /*
   * map a request to scatterlist, return number of sg entries setup. Caller
   * must make sure sg can hold rq->nr_phys_segments entries
   */
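  /*
   * Typical driver-side usage (illustrative sketch only; sgl, dev and dir
   * are the caller's own scatterlist, device and DMA direction):
   *
   *	sg_init_table(sgl, rq->nr_phys_segments);
   *	nents = blk_rq_map_sg(q, rq, sgl);
   *	if (nents)
   *		nents = dma_map_sg(dev, sgl, nents, dir);
   */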
  int blk_rq_map_sg(struct request_queue *q, struct request *rq,
  		  struct scatterlist *sglist)
  {
  	struct scatterlist *sg = NULL;
  	int nsegs = 0;
  
  	if (rq->bio)
  		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
  
  	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
  	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
  		unsigned int pad_len =
  			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
  
  		sg->length += pad_len;
  		rq->extra_len += pad_len;
  	}
  	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
  		if (rq->cmd_flags & REQ_WRITE)
  			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
  		sg->page_link &= ~0x02;
  		sg = sg_next(sg);
  		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
  			    q->dma_drain_size,
  			    ((unsigned long)q->dma_drain_buffer) &
  			    (PAGE_SIZE - 1));
  		nsegs++;
  		rq->extra_len += q->dma_drain_size;
  	}
  
  	if (sg)
  		sg_mark_end(sg);
  
  	return nsegs;
  }
  EXPORT_SYMBOL(blk_rq_map_sg);

  /**
   * blk_bio_map_sg - map a bio to a scatterlist
   * @q: request_queue in question
   * @bio: bio being mapped
   * @sglist: scatterlist being mapped
   *
   * Note:
   *    Caller must make sure sg can hold bio->bi_phys_segments entries
   *
   * Will return the number of sg entries setup
   */
  int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
  		   struct scatterlist *sglist)
  {
  	struct scatterlist *sg = NULL;
  	int nsegs;
  	struct bio *next = bio->bi_next;
  	bio->bi_next = NULL;

  	nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
  	bio->bi_next = next;
  	if (sg)
  		sg_mark_end(sg);
  
  	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
  	return nsegs;
  }
  EXPORT_SYMBOL(blk_bio_map_sg);
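
  /*
   * Common helper for the ll_*_merge_fn routines below: verify that adding
   * @bio's physical segments to @req stays within the queue's segment
   * limit and, if so, update req->nr_phys_segments; otherwise mark the
   * request non-mergeable.
   */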
  static inline int ll_new_hw_segment(struct request_queue *q,
  				    struct request *req,
  				    struct bio *bio)
  {
  	int nr_phys_segs = bio_phys_segments(q, bio);

  	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
  		goto no_merge;
  
  	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
  		goto no_merge;
  
  	/*
  	 * This will form the start of a new hw segment.  Bump both
  	 * counters.
  	 */
  	req->nr_phys_segments += nr_phys_segs;
  	return 1;
  
  no_merge:
  	req->cmd_flags |= REQ_NOMERGE;
  	if (req == q->last_merge)
  		q->last_merge = NULL;
  	return 0;
  }
  
  int ll_back_merge_fn(struct request_queue *q, struct request *req,
  		     struct bio *bio)
  {
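  	/*
  	 * Back merge: @bio would be appended after req->biotail.  Reject
  	 * the merge if the combined request would exceed its max sectors,
  	 * then let ll_new_hw_segment() enforce the segment limits.
  	 */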
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req)) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
  		blk_recount_segments(q, req->biotail);
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
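
  /*
   * Front merge: @bio would be inserted ahead of req->bio.  Same size and
   * segment checks as ll_back_merge_fn(), but against the head of the
   * request.
   */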
  int ll_front_merge_fn(struct request_queue *q, struct request *req,
  		      struct bio *bio)
  {
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req)) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  	if (!bio_flagged(req->bio, BIO_SEG_VALID))
  		blk_recount_segments(q, req->bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  /*
   * blk-mq uses req->special to carry normal driver per-request payload, it
   * does not indicate a prepared command that we cannot merge with.
   */
  static bool req_no_special_merge(struct request *req)
  {
  	struct request_queue *q = req->q;
  
  	return !q->mq_ops && req->special;
  }
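
  /*
   * Check whether two adjacent requests may be combined into one without
   * violating the queue's size, segment count and integrity constraints;
   * also folds the boundary segments' sizes together when they are
   * physically contiguous.
   */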
  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
  				struct request *next)
  {
  	int total_phys_segments;
  	unsigned int seg_size =
  		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
  
  	/*
  	 * First check if either of the requests is a re-queued
  	 * request.  Can't merge them if they are.
  	 */
  	if (req_no_special_merge(req) || req_no_special_merge(next))
  		return 0;
  
  	/*
  	 * Will it become too large?
  	 */
  	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
  	    blk_rq_get_max_sectors(req))
  		return 0;
  
  	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
  	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
  		if (req->nr_phys_segments == 1)
  			req->bio->bi_seg_front_size = seg_size;
  		if (next->nr_phys_segments == 1)
  			next->biotail->bi_seg_back_size = seg_size;
  		total_phys_segments--;
  	}

  	if (total_phys_segments > queue_max_segments(q))
  		return 0;
  	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
  		return 0;
  	/* Merge is OK... */
  	req->nr_phys_segments = total_phys_segments;
  	return 1;
  }
  /**
   * blk_rq_set_mixed_merge - mark a request as mixed merge
   * @rq: request to mark as mixed merge
   *
   * Description:
   *     @rq is about to be mixed merged.  Make sure the attributes
   *     which can be mixed are set in each bio and mark @rq as mixed
   *     merged.
   */
  void blk_rq_set_mixed_merge(struct request *rq)
  {
  	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
  	struct bio *bio;
  
  	if (rq->cmd_flags & REQ_MIXED_MERGE)
  		return;
  
  	/*
  	 * @rq will no longer represent mixable attributes for all the
  	 * contained bios.  It will just track those of the first one.
  	 * Distributes the attributes to each bio.
  	 */
  	for (bio = rq->bio; bio; bio = bio->bi_next) {
  		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
  			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
  		bio->bi_rw |= ff;
  	}
  	rq->cmd_flags |= REQ_MIXED_MERGE;
  }
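
  /*
   * Drop the I/O accounting contribution of a request that is being
   * merged away: round off the per-partition stats and decrement the
   * partition's in-flight count.
   */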
  static void blk_account_io_merge(struct request *req)
  {
  	if (blk_do_io_stat(req)) {
  		struct hd_struct *part;
  		int cpu;
  
  		cpu = part_stat_lock();
  		part = req->part;
  
  		part_round_stats(cpu, part);
  		part_dec_in_flight(part, rq_data_dir(req));

  		hd_struct_put(part);
  		part_stat_unlock();
  	}
  }
  /*
   * Has to be called with the request spinlock acquired
   */
  static int attempt_merge(struct request_queue *q, struct request *req,
  			  struct request *next)
  {
  	if (!rq_mergeable(req) || !rq_mergeable(next))
  		return 0;
  	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
  		return 0;
  	/*
  	 * not contiguous
  	 */
  	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
  		return 0;
  
  	if (rq_data_dir(req) != rq_data_dir(next)
  	    || req->rq_disk != next->rq_disk
  	    || req_no_special_merge(next))
  		return 0;
  	if (req->cmd_flags & REQ_WRITE_SAME &&
  	    !blk_write_same_mergeable(req->bio, next->bio))
  		return 0;
  	/*
  	 * If we are allowed to merge, then append bio list
  	 * from next to rq and release next. merge_requests_fn
  	 * will have updated segment counts, update sector
  	 * counts here.
  	 */
  	if (!ll_merge_requests_fn(q, req, next))
  		return 0;
  
  	/*
  	 * If failfast settings disagree or any of the two is already
  	 * a mixed merge, mark both as mixed before proceeding.  This
  	 * makes sure that all involved bios have mixable attributes
  	 * set properly.
  	 */
  	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
  	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
  	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
  		blk_rq_set_mixed_merge(req);
  		blk_rq_set_mixed_merge(next);
  	}
  
  	/*
  	 * At this point we have either done a back merge
  	 * or front merge. We need the smaller start_time of
  	 * the merged requests to be the current request
  	 * for accounting purposes.
  	 */
  	if (time_after(req->start_time, next->start_time))
  		req->start_time = next->start_time;
  
  	req->biotail->bi_next = next->bio;
  	req->biotail = next->biotail;
  	req->__data_len += blk_rq_bytes(next);
  
  	elv_merge_requests(q, req, next);
  	/*
  	 * 'next' is going away, so update stats accordingly
  	 */
  	blk_account_io_merge(next);
  
  	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
  	if (blk_rq_cpu_valid(next))
  		req->cpu = next->cpu;

  	/* ownership of bio passed from next to req */
  	next->bio = NULL;
  	__blk_put_request(q, next);
  	return 1;
  }
  
  int attempt_back_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *next = elv_latter_request(q, rq);
  
  	if (next)
  		return attempt_merge(q, rq, next);
  
  	return 0;
  }
  
  int attempt_front_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *prev = elv_former_request(q, rq);
  
  	if (prev)
  		return attempt_merge(q, prev, rq);
  
  	return 0;
  }
  
  int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
  			  struct request *next)
  {
  	return attempt_merge(q, rq, next);
  }
  
  bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
  {
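  	/*
  	 * Cheap early checks on whether @bio could be merged into @rq at
  	 * all: flags, data direction, device, integrity and write-same
  	 * buffer.  Size and segment limits are enforced later by the
  	 * ll_*_merge_fn helpers.
  	 */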
  	if (!rq_mergeable(rq) || !bio_mergeable(bio))
  		return false;
  	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
  		return false;
  	/* different data direction or already started, don't merge */
  	if (bio_data_dir(bio) != rq_data_dir(rq))
  		return false;
  
  	/* must be same device and not a special request */
  	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
  		return false;
  
  	/* only merge integrity protected bio into ditto rq */
  	if (bio_integrity(bio) != blk_integrity_rq(rq))
  		return false;
  	/* must be using the same buffer */
  	if (rq->cmd_flags & REQ_WRITE_SAME &&
  	    !blk_write_same_mergeable(rq->bio, bio))
  		return false;
  	return true;
  }
  
  int blk_try_merge(struct request *rq, struct bio *bio)
  {
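  	/*
  	 * Classify the merge: a bio starting right where the request ends
  	 * is a back merge, one ending right where the request starts is a
  	 * front merge, anything else cannot be merged by position.
  	 */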
  	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
  		return ELEVATOR_BACK_MERGE;
  	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
  		return ELEVATOR_FRONT_MERGE;
  	return ELEVATOR_NO_MERGE;
  }