Commit 2fb98e8414c42cb14698833aac640b143b9ade4f

Authored by Tejun Heo
Committed by Jens Axboe
1 parent 6b00769fe1

block: implement request_queue->dma_drain_needed

Draining shouldn't be done for commands where overflow may indicate
data integrity issues.  Add a dma_drain_needed callback to
request_queue.  The drain buffer is appended iff this function returns
non-zero.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
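
For illustration, a minimal sketch of a driver-side drain predicate
against this 2.6.25-era API follows.  The function name and its policy
(drain only BLOCK_PC packet commands, never filesystem requests, whose
transfer lengths are exact) are assumptions for the example, not code
from this commit:

static int example_drain_needed(struct request *rq)
{
	/*
	 * Hypothetical policy: fs requests have exact transfer
	 * lengths, so an overrun there may mean corruption and must
	 * not be papered over by draining.
	 */
	if (!blk_pc_request(rq))
		return 0;

	/* opt in for packet commands, where a short overrun is harmless */
	return 1;
}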

Showing 3 changed files with 11 additions and 5 deletions

block/blk-merge.c
@@ -220,7 +220,7 @@
 		bvprv = bvec;
 	} /* segments in rq */
 
-	if (q->dma_drain_size) {
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
 		sg->page_link &= ~0x02;
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
block/blk-settings.c
@@ -296,6 +296,7 @@
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  *
  * @q:  the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf:  physically contiguous buffer
  * @size:  size of the buffer in bytes
  *
@@ -315,14 +316,16 @@
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-			unsigned int size)
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size)
 {
 	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
 	--q->max_hw_segments;
 	--q->max_phys_segments;
+	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
 
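
Given the new signature, driver setup might look like the following
sketch.  The buffer size, the allocation, and the example_drain_needed
predicate shown earlier are illustrative assumptions; only
blk_queue_dma_drain() and its argument order come from this patch:

#define EXAMPLE_DRAIN_SIZE	256	/* assumed driver-chosen size */

static int example_setup_drain(struct request_queue *q)
{
	/* kmalloc memory is physically contiguous, as required */
	void *buf = kmalloc(EXAMPLE_DRAIN_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* the predicate is passed ahead of the buffer and size */
	return blk_queue_dma_drain(q, example_drain_needed, buf,
				   EXAMPLE_DRAIN_SIZE);
}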
include/linux/blkdev.h
@@ -259,6 +259,7 @@
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
+typedef int (dma_drain_needed_fn)(struct request *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -295,6 +296,7 @@
 	merge_bvec_fn *merge_bvec_fn;
 	prepare_flush_fn *prepare_flush_fn;
 	softirq_done_fn *softirq_done_fn;
+	dma_drain_needed_fn *dma_drain_needed;
 
 	/*
 	 * Dispatch queue sorting
@@ -699,8 +701,9 @@
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
-extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
-			       unsigned int size);
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);