Commit f355265571440a7db16e784b6edf4e7d26971a03

Authored by Ming Lei
Committed by Jens Axboe
1 parent 1bcb1eada4

block: introduce blk_init_flush and its pair

These two temporary functions are introduced for holding flush
initialization and de-initialization, so that we can
introduce the 'flush queue' more easily in the following patch. And
once the 'flush queue' and its allocation/free functions are ready,
they will be removed for the sake of code readability.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>

Showing 6 changed files with 26 additions and 8 deletions Side-by-side Diff

... ... @@ -705,8 +705,7 @@
705 705 if (!q)
706 706 return NULL;
707 707  
708   - q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
709   - if (!q->flush_rq)
  708 + if (blk_init_flush(q))
710 709 return NULL;
711 710  
712 711 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
... ... @@ -742,7 +741,7 @@
742 741 return q;
743 742  
744 743 fail:
745   - kfree(q->flush_rq);
  744 + blk_exit_flush(q);
746 745 return NULL;
747 746 }
748 747 EXPORT_SYMBOL(blk_init_allocated_queue);
... ... @@ -472,7 +472,7 @@
472 472 }
473 473 EXPORT_SYMBOL(blkdev_issue_flush);
474 474  
475   -int blk_mq_init_flush(struct request_queue *q)
  475 +static int blk_mq_init_flush(struct request_queue *q)
476 476 {
477 477 struct blk_mq_tag_set *set = q->tag_set;
478 478  
... ... @@ -484,5 +484,22 @@
484 484 if (!q->flush_rq)
485 485 return -ENOMEM;
486 486 return 0;
  487 +}
  488 +
  489 +int blk_init_flush(struct request_queue *q)
  490 +{
  491 + if (q->mq_ops)
  492 + return blk_mq_init_flush(q);
  493 +
  494 + q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
  495 + if (!q->flush_rq)
  496 + return -ENOMEM;
  497 +
  498 + return 0;
  499 +}
  500 +
  501 +void blk_exit_flush(struct request_queue *q)
  502 +{
  503 + kfree(q->flush_rq);
487 504 }
... ... @@ -1859,7 +1859,7 @@
1859 1859  
1860 1860 blk_mq_add_queue_tag_set(set, q);
1861 1861  
1862   - if (blk_mq_init_flush(q))
  1862 + if (blk_init_flush(q))
1863 1863 goto err_hw_queues;
1864 1864  
1865 1865 blk_mq_map_swqueue(q);
... ... @@ -27,7 +27,6 @@
27 27  
28 28 void __blk_mq_complete_request(struct request *rq);
29 29 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
30   -int blk_mq_init_flush(struct request_queue *q);
31 30 void blk_mq_freeze_queue(struct request_queue *q);
32 31 void blk_mq_free_queue(struct request_queue *q);
33 32 void blk_mq_clone_flush_request(struct request *flush_rq,
... ... @@ -517,10 +517,10 @@
517 517 if (q->queue_tags)
518 518 __blk_queue_free_tags(q);
519 519  
  520 + blk_exit_flush(q);
  521 +
520 522 if (q->mq_ops)
521 523 blk_mq_free_queue(q);
522   -
523   - kfree(q->flush_rq);
524 524  
525 525 blk_trace_shutdown(q);
526 526  
... ... @@ -22,6 +22,9 @@
22 22 kobject_get(&q->kobj);
23 23 }
24 24  
  25 +int blk_init_flush(struct request_queue *q);
  26 +void blk_exit_flush(struct request_queue *q);
  27 +
25 28 int blk_init_rl(struct request_list *rl, struct request_queue *q,
26 29 gfp_t gfp_mask);
27 30 void blk_exit_rl(struct request_list *rl);