/* block/blk-merge.c */
  /*
   * Functions related to segment and merge handling
   */
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/bio.h>
  #include <linux/blkdev.h>
  #include <linux/scatterlist.h>
  
  #include "blk.h"
  static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
  					     struct bio *bio)
  {
  	struct bio_vec *bv, *bvprv = NULL;
  	int cluster, i, high, highprv = 1;
  	unsigned int seg_size, nr_phys_segs;
  	struct bio *fbio, *bbio;

  	if (!bio)
  		return 0;

  	fbio = bio;
  	cluster = blk_queue_cluster(q);
  	seg_size = 0;
  	nr_phys_segs = 0;
  	for_each_bio(bio) {
  		bio_for_each_segment(bv, bio, i) {
  			/*
  			 * the trick here is making sure that a high page is
  			 * never considered part of another segment, since that
  			 * might change with the bounce page.
  			 */
  			high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
  			if (high || highprv)
  				goto new_segment;
  			if (cluster) {
  				if (seg_size + bv->bv_len
  				    > queue_max_segment_size(q))
  					goto new_segment;
  				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
  					goto new_segment;
  				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
  					goto new_segment;

  				seg_size += bv->bv_len;
  				bvprv = bv;
  				continue;
  			}
  new_segment:
  			if (nr_phys_segs == 1 && seg_size >
  			    fbio->bi_seg_front_size)
  				fbio->bi_seg_front_size = seg_size;

  			nr_phys_segs++;
  			bvprv = bv;
  			seg_size = bv->bv_len;
  			highprv = high;
  		}
  		bbio = bio;
  	}
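
	/*
	 * Record the size of the first and last physical segment; request
	 * merging uses these to check the size of a combined boundary
	 * segment.
	 */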
  	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
  		fbio->bi_seg_front_size = seg_size;
  	if (seg_size > bbio->bi_seg_back_size)
  		bbio->bi_seg_back_size = seg_size;
  
  	return nr_phys_segs;
  }
  
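/*
 * Recompute and store the number of physical segments spanned by a
 * request.
 */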
  void blk_recalc_rq_segments(struct request *rq)
  {
  	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
  }
  
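/*
 * Recount the physical segments of a single bio, ignoring any bios
 * chained after it, and mark the count as valid.
 */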
  void blk_recount_segments(struct request_queue *q, struct bio *bio)
  {
  	struct bio *nxt = bio->bi_next;

  	bio->bi_next = NULL;
  	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
  	bio->bi_next = nxt;
  	bio->bi_flags |= (1 << BIO_SEG_VALID);
  }
  EXPORT_SYMBOL(blk_recount_segments);
  
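/*
 * Check whether the tail of @bio and the head of @nxt could be folded
 * into one physical segment without exceeding the queue's segment size
 * and boundary limits.
 */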
  static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
  				   struct bio *nxt)
  {
  	if (!blk_queue_cluster(q))
  		return 0;
  	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
  	    queue_max_segment_size(q))
  		return 0;
  	if (!bio_has_data(bio))
  		return 1;
  
  	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
  		return 0;
  	/*
  	 * bio and nxt are contiguous in memory; check if the queue allows
  	 * these two to be merged into one
  	 */
  	if (BIO_SEG_BOUNDARY(q, bio, nxt))
  		return 1;
  
  	return 0;
  }
  /*
 * map a request to scatterlist, return number of sg entries set up. Caller
   * must make sure sg can hold rq->nr_phys_segments entries
   */
  int blk_rq_map_sg(struct request_queue *q, struct request *rq,
  		  struct scatterlist *sglist)
  {
  	struct bio_vec *bvec, *bvprv;
  	struct req_iterator iter;
  	struct scatterlist *sg;
  	int nsegs, cluster;
  
  	nsegs = 0;
  	cluster = blk_queue_cluster(q);
  
  	/*
  	 * for each bio in rq
  	 */
  	bvprv = NULL;
  	sg = NULL;
  	rq_for_each_segment(bvec, rq, iter) {
  		int nbytes = bvec->bv_len;
  
  		if (bvprv && cluster) {
  			if (sg->length + nbytes > queue_max_segment_size(q))
  				goto new_segment;
  
  			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
  				goto new_segment;
  			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
  				goto new_segment;
  
  			sg->length += nbytes;
  		} else {
  new_segment:
  			if (!sg)
  				sg = sglist;
  			else {
  				/*
  				 * If the driver previously mapped a shorter
  				 * list, we could see a termination bit
  				 * prematurely unless it fully inits the sg
  				 * table on each mapping. We KNOW that there
  				 * must be more entries here or the driver
  				 * would be buggy, so force clear the
  				 * termination bit to avoid doing a full
  				 * sg_init_table() in drivers for each command.
  				 */
  				sg->page_link &= ~0x02;
  				sg = sg_next(sg);
  			}
  
  			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
  			nsegs++;
  		}
  		bvprv = bvec;
  	} /* segments in rq */
  
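	/*
	 * Requests built from copied user buffers may need the last sg
	 * entry padded out to the queue's dma_pad_mask alignment.
	 */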
  	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
  	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
  		unsigned int pad_len =
  			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
  
  		sg->length += pad_len;
  		rq->extra_len += pad_len;
  	}
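	/*
	 * If the device requires a drain buffer for this request, append
	 * it as one more sg entry (zeroed first for writes).
	 */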
  	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
  		if (rq->cmd_flags & REQ_WRITE)
  			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
  		sg->page_link &= ~0x02;
  		sg = sg_next(sg);
  		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
  			    q->dma_drain_size,
  			    ((unsigned long)q->dma_drain_buffer) &
  			    (PAGE_SIZE - 1));
  		nsegs++;
  		rq->extra_len += q->dma_drain_size;
  	}
  
  	if (sg)
  		sg_mark_end(sg);
  
  	return nsegs;
  }
  EXPORT_SYMBOL(blk_rq_map_sg);
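
/*
 * Typical use, as a rough sketch only (the driver "mydev" and its
 * preallocated sg table below are hypothetical, not part of this file):
 *
 *	sg_init_table(mydev->sgl, mydev->sg_tablesize);
 *	nents = blk_rq_map_sg(q, rq, mydev->sgl);
 *
 * where mydev->sgl must have room for rq->nr_phys_segments entries.
 */

/*
 * Check whether @bio fits into @req as additional physical segments.
 * On success the request's segment count is bumped; on failure the
 * request is flagged REQ_NOMERGE.
 */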
  static inline int ll_new_hw_segment(struct request_queue *q,
  				    struct request *req,
  				    struct bio *bio)
  {
  	int nr_phys_segs = bio_phys_segments(q, bio);
  	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
  		goto no_merge;
  
  	if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
  		goto no_merge;
  
  	/*
	 * This will form the start of a new physical segment.  Bump the
	 * request's segment count.
  	 */
  	req->nr_phys_segments += nr_phys_segs;
  	return 1;
  
  no_merge:
  	req->cmd_flags |= REQ_NOMERGE;
  	if (req == q->last_merge)
  		q->last_merge = NULL;
  	return 0;
  }
  
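/*
 * Can @bio be appended to the tail of @req?  Check the resulting size
 * against the queue limit, then let ll_new_hw_segment() check the
 * segment limits.
 */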
  int ll_back_merge_fn(struct request_queue *q, struct request *req,
  		     struct bio *bio)
  {
  	unsigned short max_sectors;

  	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
  		max_sectors = queue_max_hw_sectors(q);
  	else
  		max_sectors = queue_max_sectors(q);

  	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
  		blk_recount_segments(q, req->biotail);
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
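
/*
 * Can @bio be prepended to the head of @req?  Same checks as
 * ll_back_merge_fn(), but the segment counts are refreshed for @bio
 * and the request's first bio.
 */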
  int ll_front_merge_fn(struct request_queue *q, struct request *req,
  		      struct bio *bio)
  {
  	unsigned short max_sectors;

  	if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
  		max_sectors = queue_max_hw_sectors(q);
  	else
  		max_sectors = queue_max_sectors(q);

  	if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
  		req->cmd_flags |= REQ_NOMERGE;
  		if (req == q->last_merge)
  			q->last_merge = NULL;
  		return 0;
  	}
  	if (!bio_flagged(bio, BIO_SEG_VALID))
  		blk_recount_segments(q, bio);
  	if (!bio_flagged(req->bio, BIO_SEG_VALID))
  		blk_recount_segments(q, req->bio);
  
  	return ll_new_hw_segment(q, req, bio);
  }
  
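/*
 * Check whether @req and @next can be combined into one request without
 * exceeding the queue's size, segment and integrity limits; on success
 * @req's physical segment count is updated to cover both.
 */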
  static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
  				struct request *next)
  {
  	int total_phys_segments;
  	unsigned int seg_size =
  		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
  
  	/*
	 * First check whether either of the requests is a re-queued
	 * request; we can't merge them if so.
  	 */
  	if (req->special || next->special)
  		return 0;
  
  	/*
  	 * Will it become too large?
  	 */
  	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
  		return 0;
  
  	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
  	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
  		if (req->nr_phys_segments == 1)
  			req->bio->bi_seg_front_size = seg_size;
  		if (next->nr_phys_segments == 1)
  			next->biotail->bi_seg_back_size = seg_size;
  		total_phys_segments--;
  	}

  	if (total_phys_segments > queue_max_segments(q))
  		return 0;
  	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
  		return 0;
  	/* Merge is OK... */
  	req->nr_phys_segments = total_phys_segments;
  	return 1;
  }
  /**
   * blk_rq_set_mixed_merge - mark a request as mixed merge
   * @rq: request to mark as mixed merge
   *
   * Description:
   *     @rq is about to be mixed merged.  Make sure the attributes
   *     which can be mixed are set in each bio and mark @rq as mixed
   *     merged.
   */
  void blk_rq_set_mixed_merge(struct request *rq)
  {
  	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
  	struct bio *bio;
  
  	if (rq->cmd_flags & REQ_MIXED_MERGE)
  		return;
  
  	/*
  	 * @rq will no longer represent mixable attributes for all the
  	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
  	 */
  	for (bio = rq->bio; bio; bio = bio->bi_next) {
  		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
  			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
  		bio->bi_rw |= ff;
  	}
  	rq->cmd_flags |= REQ_MIXED_MERGE;
  }
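
/*
 * @req is disappearing into another request through a merge; take it
 * out of the in-flight accounting of its partition.
 */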
  static void blk_account_io_merge(struct request *req)
  {
  	if (blk_do_io_stat(req)) {
  		struct hd_struct *part;
  		int cpu;
  
  		cpu = part_stat_lock();
  		part = req->part;
  
  		part_round_stats(cpu, part);
  		part_dec_in_flight(part, rq_data_dir(req));

  		hd_struct_put(part);
  		part_stat_unlock();
  	}
  }
  /*
   * Has to be called with the request spinlock acquired
   */
  static int attempt_merge(struct request_queue *q, struct request *req,
  			  struct request *next)
  {
  	if (!rq_mergeable(req) || !rq_mergeable(next))
  		return 0;
  
  	/*
  	 * Don't merge file system requests and discard requests
  	 */
  	if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
  		return 0;
  
  	/*
  	 * Don't merge discard requests and secure discard requests
  	 */
  	if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
  		return 0;
  
  	/*
  	 * not contiguous
  	 */
  	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
  		return 0;
  
  	if (rq_data_dir(req) != rq_data_dir(next)
  	    || req->rq_disk != next->rq_disk
  	    || next->special)
  		return 0;
  
  	/*
  	 * If we are allowed to merge, then append bio list
	 * from next to req and release next.  ll_merge_requests_fn()
	 * will have updated the segment counts; update the data byte
	 * count here.
  	 */
  	if (!ll_merge_requests_fn(q, req, next))
  		return 0;
  
  	/*
  	 * If failfast settings disagree or any of the two is already
  	 * a mixed merge, mark both as mixed before proceeding.  This
  	 * makes sure that all involved bios have mixable attributes
  	 * set properly.
  	 */
  	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
  	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
  	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
  		blk_rq_set_mixed_merge(req);
  		blk_rq_set_mixed_merge(next);
  	}
  
  	/*
  	 * At this point we have either done a back merge
	 * or a front merge.  For accounting purposes, the surviving
	 * request must carry the smaller start_time of the two merged
	 * requests.
  	 */
  	if (time_after(req->start_time, next->start_time))
  		req->start_time = next->start_time;
  
  	req->biotail->bi_next = next->bio;
  	req->biotail = next->biotail;
  	req->__data_len += blk_rq_bytes(next);
  
  	elv_merge_requests(q, req, next);
  	/*
  	 * 'next' is going away, so update stats accordingly
  	 */
  	blk_account_io_merge(next);
  
  	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
  	if (blk_rq_cpu_valid(next))
  		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
  	next->bio = NULL;
  	__blk_put_request(q, next);
  	return 1;
  }
  
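/*
 * Try to merge @rq with the request that the elevator considers to
 * follow it, if any.
 */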
  int attempt_back_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *next = elv_latter_request(q, rq);
  
  	if (next)
  		return attempt_merge(q, rq, next);
  
  	return 0;
  }
  
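/*
 * Try to merge @rq with the request that the elevator considers to
 * precede it, if any.
 */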
  int attempt_front_merge(struct request_queue *q, struct request *rq)
  {
  	struct request *prev = elv_former_request(q, rq);
  
  	if (prev)
  		return attempt_merge(q, prev, rq);
  
  	return 0;
  }
  
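/*
 * Wrapper around attempt_merge() for callers that have already located
 * a candidate pair of requests; the same locking rules apply.
 */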
  int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
  			  struct request *next)
  {
  	return attempt_merge(q, rq, next);
  }