Commit 30a91cb4ef385fe1b260df204ef314d86fff2850

Authored by Christoph Hellwig
Committed by Jens Axboe
1 parent c4540a7d8c

blk-mq: rework I/O completions

Rework I/O completions to work more like the old code path.  blk_mq_end_io
now stays out of the business of deferring completions to other CPUs
and calling blk_mark_rq_complete.  The latter is very important to allow
completing requests that have timed out and thus are already marked completed;
the former allows using the IPI callout even for driver-specific completions
instead of having to reimplement them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>

Showing 4 changed files with 37 additions and 24 deletions
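From a driver's point of view, the new split looks roughly as follows. This is a minimal sketch based on the interfaces introduced below; the mydrv_* names are invented for illustration and are not part of this commit.

/*
 * Hypothetical driver-side sketch: the hard-IRQ path calls
 * blk_mq_complete_request(), which marks the request complete and routes
 * the rest of the work to the queue's ->complete hook (via IPI when
 * ctx->ipi_redirect asks for it); the hook then finishes the request
 * with blk_mq_end_io().
 */
#include <linux/blk-mq.h>

/* softirq_done_fn: runs on the submitting CPU when redirected */
static void mydrv_complete_rq(struct request *rq)
{
	blk_mq_end_io(rq, rq->errors);
}

/* called from the driver's interrupt handler on whichever CPU took the IRQ */
static void mydrv_handle_irq_completion(struct request *rq)
{
	blk_mq_complete_request(rq);
}

static struct blk_mq_ops mydrv_mq_ops = {
	/* .queue_rq, .map_queue, ... elided */
	.complete	= mydrv_complete_rq,	/* wired up via blk_queue_softirq_done() */
};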

block/blk-mq.c
... ... @@ -326,7 +326,7 @@
326 326 bio_endio(bio, error);
327 327 }
328 328  
329   -void blk_mq_complete_request(struct request *rq, int error)
  329 +void blk_mq_end_io(struct request *rq, int error)
330 330 {
331 331 struct bio *bio = rq->bio;
332 332 unsigned int bytes = 0;
... ... @@ -351,47 +351,54 @@
351 351 else
352 352 blk_mq_free_request(rq);
353 353 }
  354 +EXPORT_SYMBOL(blk_mq_end_io);
354 355  
355   -void __blk_mq_end_io(struct request *rq, int error)
  356 +static void __blk_mq_complete_request_remote(void *data)
356 357 {
357   - if (!blk_mark_rq_complete(rq))
358   - blk_mq_complete_request(rq, error);
359   -}
360   -
361   -static void blk_mq_end_io_remote(void *data)
362   -{
363 358 struct request *rq = data;
364 359  
365   - __blk_mq_end_io(rq, rq->errors);
  360 + rq->q->softirq_done_fn(rq);
366 361 }
367 362  
368   -/*
369   - * End IO on this request on a multiqueue enabled driver. We'll either do
370   - * it directly inline, or punt to a local IPI handler on the matching
371   - * remote CPU.
372   - */
373   -void blk_mq_end_io(struct request *rq, int error)
  363 +void __blk_mq_complete_request(struct request *rq)
374 364 {
375 365 struct blk_mq_ctx *ctx = rq->mq_ctx;
376 366 int cpu;
377 367  
378   - if (!ctx->ipi_redirect)
379   - return __blk_mq_end_io(rq, error);
  368 + if (!ctx->ipi_redirect) {
  369 + rq->q->softirq_done_fn(rq);
  370 + return;
  371 + }
380 372  
381 373 cpu = get_cpu();
382 374 if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
383   - rq->errors = error;
384   - rq->csd.func = blk_mq_end_io_remote;
  375 + rq->csd.func = __blk_mq_complete_request_remote;
385 376 rq->csd.info = rq;
386 377 rq->csd.flags = 0;
387 378 __smp_call_function_single(ctx->cpu, &rq->csd, 0);
388 379 } else {
389   - __blk_mq_end_io(rq, error);
  380 + rq->q->softirq_done_fn(rq);
390 381 }
391 382 put_cpu();
392 383 }
393   -EXPORT_SYMBOL(blk_mq_end_io);
394 384  
  385 +/**
  386 + * blk_mq_complete_request - end I/O on a request
  387 + * @rq: the request being processed
  388 + *
  389 + * Description:
  390 + * Ends all I/O on a request. It does not handle partial completions.
  390 + * The actual completion happens out-of-order, through an IPI handler.
  392 + **/
  393 +void blk_mq_complete_request(struct request *rq)
  394 +{
  395 + if (unlikely(blk_should_fake_timeout(rq->q)))
  396 + return;
  397 + if (!blk_mark_rq_complete(rq))
  398 + __blk_mq_complete_request(rq);
  399 +}
  400 +EXPORT_SYMBOL(blk_mq_complete_request);
  401 +
395 402 static void blk_mq_start_request(struct request *rq)
396 403 {
397 404 struct request_queue *q = rq->q;
... ... @@ -1398,6 +1405,9 @@
1398 1405 blk_queue_rq_timed_out(q, reg->ops->timeout);
1399 1406 if (reg->timeout)
1400 1407 blk_queue_rq_timeout(q, reg->timeout);
  1408 +
  1409 + if (reg->ops->complete)
  1410 + blk_queue_softirq_done(q, reg->ops->complete);
1401 1411  
1402 1412 blk_mq_init_flush(q);
1403 1413 blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
block/blk-mq.h
... ... @@ -22,8 +22,7 @@
22 22 struct kobject kobj;
23 23 };
24 24  
25   -void __blk_mq_end_io(struct request *rq, int error);
26   -void blk_mq_complete_request(struct request *rq, int error);
  25 +void __blk_mq_complete_request(struct request *rq);
27 26 void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
28 27 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
29 28 void blk_mq_init_flush(struct request_queue *q);
block/blk-timeout.c
... ... @@ -91,7 +91,7 @@
91 91 case BLK_EH_HANDLED:
92 92 /* Can we use req->errors here? */
93 93 if (q->mq_ops)
94   - blk_mq_complete_request(req, req->errors);
  94 + __blk_mq_complete_request(req);
95 95 else
96 96 __blk_complete_request(req);
97 97 break;
include/linux/blk-mq.h
... ... @@ -86,6 +86,8 @@
86 86 */
87 87 rq_timed_out_fn *timeout;
88 88  
  89 + softirq_done_fn *complete;
  90 +
89 91 /*
90 92 * Override for hctx allocations (should probably go)
91 93 */
... ... @@ -136,6 +138,8 @@
136 138 void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
137 139  
138 140 void blk_mq_end_io(struct request *rq, int error);
  141 +
  142 +void blk_mq_complete_request(struct request *rq);
139 143  
140 144 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
141 145 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);