block/blk-merge.c
  /*
   * Functions related to segment and merge handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  
  #include "blk.h"
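
  /*
   * Walk every bio in the chain and count the physical segments it maps
   * to, honouring the queue's clustering, segment size and boundary limits.
   */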
  static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
  					     struct bio *bio)
  {
  	struct bio_vec *bv, *bvprv = NULL;
  	int cluster, i, high, highprv = 1;
  	unsigned int seg_size, nr_phys_segs;
  	struct bio *fbio, *bbio;

  	if (!bio)
  		return 0;

  	fbio = bio;
  	cluster = blk_queue_cluster(q);
  	seg_size = 0;
  	nr_phys_segs = 0;
  	for_each_bio(bio) {
  		bio_for_each_segment(bv, bio, i) {
  			/*
  			 * the trick here is making sure that a high page is
  			 * never considered part of another segment, since that
  			 * might change with the bounce page.
  			 */
  			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
  			if (high || highprv)
  				goto new_segment;
  			if (cluster) {
  				if (seg_size + bv->bv_len
  				    > queue_max_segment_size(q))
  					goto new_segment;
  				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
  					goto new_segment;
  				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
  					goto new_segment;

  				seg_size += bv->bv_len;
  				bvprv = bv;
  				continue;
  			}
  new_segment:
  			if (nr_phys_segs == 1 && seg_size >
  			    fbio->bi_seg_front_size)
  				fbio->bi_seg_front_size = seg_size;

  			nr_phys_segs++;
  			bvprv = bv;
  			seg_size = bv->bv_len;
  			highprv = high;
  		}
  		bbio = bio;
  	}
  	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
  		fbio->bi_seg_front_size = seg_size;
  	if (seg_size > bbio->bi_seg_back_size)
  		bbio->bi_seg_back_size = seg_size;
  
  	return nr_phys_segs;
  }
  
  void blk_recalc_rq_segments(struct request *rq)
  {
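  	/* Recompute rq->nr_phys_segments from the request's current bio chain. */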
  	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
  }
  
  void blk_recount_segments(struct request_queue *q, struct bio *bio)
  {
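  	/* Recount this bio on its own (bi_next detached) and mark the count valid. */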
  	struct bio *nxt = bio->bi_next;

  	bio->bi_next = NULL;
  	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
  	bio->bi_next = nxt;
  	bio->bi_flags |= (1 << BIO_SEG_VALID);
  }
  EXPORT_SYMBOL(blk_recount_segments);
  
  static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
  				   struct bio *nxt)
  {
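  	/*
  	 * Return 1 if the last segment of @bio and the first segment of @nxt
  	 * can be folded into a single physical segment on this queue.
  	 */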
  	if (!blk_queue_cluster(q))
  		return 0;
  	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
  	    queue_max_segment_size(q))
  		return 0;
  	if (!bio_has_data(bio))
  		return 1;
  
  	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
  		return 0;
  	/*
  	 * bio and nxt are contiguous in memory; check if the queue allows
  	 * these two to be merged into one
  	 */
  	if (BIO_SEG_BOUNDARY(q, bio, nxt))
  		return 1;
  
  	return 0;
  }
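
  /*
   * Add a single bio_vec to the scatterlist: either fold it into the current
   * sg entry (when clustering and the queue limits allow) or start a new one.
   */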
  static void
  __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
  		     struct scatterlist *sglist, struct bio_vec **bvprv,
  		     struct scatterlist **sg, int *nsegs, int *cluster)
  {
  	int nbytes = bvec->bv_len;
  
  	if (*bvprv && *cluster) {
  		if ((*sg)->length + nbytes > queue_max_segment_size(q))
  			goto new_segment;
  
  		if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
  			goto new_segment;
  		if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
  			goto new_segment;
  
  		(*sg)->length += nbytes;
  	} else {
  new_segment:
  		if (!*sg)
  			*sg = sglist;
  		else {
  			/*
  			 * If the driver previously mapped a shorter
  			 * list, we could see a termination bit
  			 * prematurely unless it fully inits the sg
  			 * table on each mapping. We KNOW that there
  			 * must be more entries here or the driver
  			 * would be buggy, so force clear the
  			 * termination bit to avoid doing a full
  			 * sg_init_table() in drivers for each command.
  			 */
  			(*sg)->page_link &= ~0x02;
  			*sg = sg_next(*sg);
  		}
  
  		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
  		(*nsegs)++;
  	}
  	*bvprv = bvec;
  }
  /*
   * map a request to scatterlist, return number of sg entries set up. Caller
   * must make sure sg can hold rq->nr_phys_segments entries
   */
  int blk_rq_map_sg(struct request_queue *q, struct request *rq,
  		  struct scatterlist *sglist)
  {
  	struct bio_vec *bvec, *bvprv;
  	struct req_iterator iter;
  	struct scatterlist *sg;
  	int nsegs, cluster;
  
  	nsegs = 0;
  	cluster = blk_queue_cluster(q);
  
  	/*
  	 * for each bio in rq
  	 */
  	bvprv = NULL;
  	sg = NULL;
  	rq_for_each_segment(bvec, rq, iter) {
  		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
  				     &nsegs, &cluster);
  	} /* segments in rq */
  
  	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
  	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
  		unsigned int pad_len =
  			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
  
  		sg->length += pad_len;
  		rq->extra_len += pad_len;
  	}
  	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
  		if (rq->cmd_flags & REQ_WRITE)
  			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
  		sg->page_link &= ~0x02;
  		sg = sg_next(sg);
  		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
  			    q->dma_drain_size,
  			    ((unsigned long)q->dma_drain_buffer) &
  			    (PAGE_SIZE - 1));
  		nsegs++;
  		rq->extra_len += q->dma_drain_size;
  	}
  
  	if (sg)
  		sg_mark_end(sg);
  
  	return nsegs;
  }
  EXPORT_SYMBOL(blk_rq_map_sg);
  /**
   * blk_bio_map_sg - map a bio to a scatterlist
   * @q: request_queue in question
   * @bio: bio being mapped
   * @sglist: scatterlist being mapped
   *
   * Note:
   *    Caller must make sure sg can hold bio->bi_phys_segments entries
   *
   * Will return the number of sg entries setup
   */
  int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
  		   struct scatterlist *sglist)
  {
  	struct bio_vec *bvec, *bvprv;
  	struct scatterlist *sg;
  	int nsegs, cluster;
  	unsigned long i;
  
  	nsegs = 0;
  	cluster = blk_queue_cluster(q);
  
  	bvprv = NULL;
  	sg = NULL;
  	bio_for_each_segment(bvec, bio, i) {
  		__blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
  				     &nsegs, &cluster);
  	} /* segments in bio */
  
  	if (sg)
  		sg_mark_end(sg);
  
  	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
  	return nsegs;
  }
  EXPORT_SYMBOL(blk_bio_map_sg);
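
  /*
   * Check that adding @bio's segments to @req stays within the queue's
   * segment limit and integrity merging rules; bump the segment count if so.
   */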
  static inline int ll_new_hw_segment(struct request_queue *q,
  				    struct request *req,
  				    struct bio *bio)
  {
  	int nr_phys_segs = bio_phys_segments(q, bio);
  	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
  		goto no_merge;
  
  	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
  		goto no_merge;
  
  	/*
  	 * This will form the start of a new hw segment.  Bump the
  	 * physical segment counter.
  	 */
  	req->nr_phys_segments += nr_phys_segs;
  	return 1;
  
  no_merge:
  	req->cmd_flags |= REQ_NOMERGE;
  	if (req == q->last_merge)
  		q->last_merge = NULL;
  	return 0;
  }
  
  int ll_back_merge_fn(struct request_queue *q, struct request *req,
  		     struct bio *bio)
  {
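  	/* Can @bio be appended to the tail of @req (a back merge)? */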
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req)) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
  		blk_recount_segments(q, req->biotail);
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  int ll_front_merge_fn(struct request_queue *q, struct request *req,
  		      struct bio *bio)
  {
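  	/* Can @bio be prepended to the head of @req (a front merge)? */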
  	if (blk_rq_sectors(req) + bio_sectors(bio) >
  	    blk_rq_get_max_sectors(req)) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  	if (!bio_flagged(req->bio, BIO_SEG_VALID))
  		blk_recount_segments(q, req->bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  
  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
  				struct request *next)
  {
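  	/*
  	 * Decide whether @req and @next can be combined without exceeding
  	 * the queue's size and segment limits; fix up the segment counts.
  	 */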
  	int total_phys_segments;
  	unsigned int seg_size =
  		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
  
  	/*
  	 * First check if either of the requests is a re-queued
  	 * request.  Can't merge them if they are.
  	 */
  	if (req->special || next->special)
  		return 0;
  
  	/*
  	 * Will it become too large?
  	 */
  	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
  	    blk_rq_get_max_sectors(req))
  		return 0;
  
  	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
  	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
  		if (req->nr_phys_segments == 1)
  			req->bio->bi_seg_front_size = seg_size;
  		if (next->nr_phys_segments == 1)
  			next->biotail->bi_seg_back_size = seg_size;
  		total_phys_segments--;
  	}

  	if (total_phys_segments > queue_max_segments(q))
  		return 0;
  	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
  		return 0;
  	/* Merge is OK... */
  	req->nr_phys_segments = total_phys_segments;
  	return 1;
  }
  /**
   * blk_rq_set_mixed_merge - mark a request as mixed merge
   * @rq: request to mark as mixed merge
   *
   * Description:
   *     @rq is about to be mixed merged.  Make sure the attributes
   *     which can be mixed are set in each bio and mark @rq as mixed
   *     merged.
   */
  void blk_rq_set_mixed_merge(struct request *rq)
  {
  	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
  	struct bio *bio;
  
  	if (rq->cmd_flags & REQ_MIXED_MERGE)
  		return;
  
  	/*
  	 * @rq will no longer represent mixable attributes for all the
  	 * contained bios.  It will just track those of the first one.
  	 * Distribute the attributes to each bio.
  	 */
  	for (bio = rq->bio; bio; bio = bio->bi_next) {
  		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
  			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
  		bio->bi_rw |= ff;
  	}
  	rq->cmd_flags |= REQ_MIXED_MERGE;
  }
  static void blk_account_io_merge(struct request *req)
  {
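  	/* @req is being merged away; drop it from its partition's in-flight stats. */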
  	if (blk_do_io_stat(req)) {
  		struct hd_struct *part;
  		int cpu;
  
  		cpu = part_stat_lock();
  		part = req->part;
  
  		part_round_stats(cpu, part);
  		part_dec_in_flight(part, rq_data_dir(req));

  		hd_struct_put(part);
  		part_stat_unlock();
  	}
  }
  /*
   * Has to be called with the request spinlock acquired
   */
  static int attempt_merge(struct request_queue *q, struct request *req,
  			  struct request *next)
  {
  	if (!rq_mergeable(req) || !rq_mergeable(next))
  		return 0;
  	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
  		return 0;
  	/*
  	 * not contiguous
  	 */
  	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
  		return 0;
  
  	if (rq_data_dir(req) != rq_data_dir(next)
  	    || req->rq_disk != next->rq_disk
  	    || next->special)
  		return 0;
  	if (req->cmd_flags & REQ_WRITE_SAME &&
  	    !blk_write_same_mergeable(req->bio, next->bio))
  		return 0;
  	/*
  	 * If we are allowed to merge, then append bio list
  	 * from next to rq and release next. merge_requests_fn
  	 * will have updated segment counts, update sector
  	 * counts here.
  	 */
  	if (!ll_merge_requests_fn(q, req, next))
  		return 0;
  
  	/*
  	 * If failfast settings disagree or any of the two is already
  	 * a mixed merge, mark both as mixed before proceeding.  This
  	 * makes sure that all involved bios have mixable attributes
  	 * set properly.
  	 */
  	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
  	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
  	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
  		blk_rq_set_mixed_merge(req);
  		blk_rq_set_mixed_merge(next);
  	}
  
  	/*
  	 * At this point we have either done a back merge
  	 * or front merge. We need the smaller start_time of
  	 * the merged requests to be the current request
  	 * for accounting purposes.
  	 */
  	if (time_after(req->start_time, next->start_time))
  		req->start_time = next->start_time;
  
  	req->biotail->bi_next = next->bio;
  	req->biotail = next->biotail;
  	req->__data_len += blk_rq_bytes(next);
  
  	elv_merge_requests(q, req, next);
  	/*
  	 * 'next' is going away, so update stats accordingly
  	 */
  	blk_account_io_merge(next);
  
  	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
  	if (blk_rq_cpu_valid(next))
  		req->cpu = next->cpu;

  	/* ownership of bio passed from next to req */
  	next->bio = NULL;
  	__blk_put_request(q, next);
  	return 1;
  }
  
  int attempt_back_merge(struct request_queue *q, struct request *rq)
  {
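  	/* Try to merge @rq with the request that follows it in the elevator. */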
  	struct request *next = elv_latter_request(q, rq);
  
  	if (next)
  		return attempt_merge(q, rq, next);
  
  	return 0;
  }
  
  int attempt_front_merge(struct request_queue *q, struct request *rq)
  {
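  	/* Try to merge @rq with the request that precedes it in the elevator. */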
  	struct request *prev = elv_former_request(q, rq);
  
  	if (prev)
  		return attempt_merge(q, prev, rq);
  
  	return 0;
  }
  
  int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
  			  struct request *next)
  {
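  	/* Non-static wrapper so code outside this file can attempt a request merge. */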
  	return attempt_merge(q, rq, next);
  }
  
  bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
  {
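  	/* Basic checks for whether @bio is allowed to be merged into @rq at all. */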
  	if (!rq_mergeable(rq) || !bio_mergeable(bio))
  		return false;
  	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
  		return false;
  	/* different data direction or already started, don't merge */
  	if (bio_data_dir(bio) != rq_data_dir(rq))
  		return false;
  
  	/* must be same device and not a special request */
  	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
  		return false;
  
  	/* only merge integrity protected bio into ditto rq */
  	if (bio_integrity(bio) != blk_integrity_rq(rq))
  		return false;
  	/* must be using the same buffer */
  	if (rq->cmd_flags & REQ_WRITE_SAME &&
  	    !blk_write_same_mergeable(rq->bio, bio))
  		return false;
  	return true;
  }
  
  int blk_try_merge(struct request *rq, struct bio *bio)
  {
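  	/* Pick the merge direction (back, front or none) for @bio against @rq. */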
  	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
  		return ELEVATOR_BACK_MERGE;
  	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
  		return ELEVATOR_FRONT_MERGE;
  	return ELEVATOR_NO_MERGE;
  }