Commit 336cdb4003200a90f4fc52a4e9ccc2baa570fffb

Authored by Kiyoshi Ueda
Committed by Jens Axboe
1 parent 91525300ba

blk_end_request: add new request completion interface (take 4)

This patch adds 2 new interfaces for request completion:
  o blk_end_request()   : called without queue lock
  o __blk_end_request() : called with queue lock held

blk_end_request takes 'error' as an argument instead of 'uptodate',
which the current end_that_request_* functions take.
The meanings of values are below and the value is used when bio is
completed.
    0 : success
  < 0 : error

Some device drivers call some generic functions below between
end_that_request_{first/chunk} and end_that_request_last().
  o add_disk_randomness()
  o blk_queue_end_tag()
  o blkdev_dequeue_request()
These are called in the blk_end_request interfaces as a part of
generic request completion.
So all device drivers end up calling the above functions.
To decide whether to call blkdev_dequeue_request(), blk_end_request
uses list_empty(&rq->queuelist) (blk_queued_rq() macro is added for it).
So drivers must re-initialize it (e.g. with INIT_LIST_HEAD()) before calling
blk_end_request if they have used it for their own specific purposes.
(Currently, there is no driver that completes a request without
 re-initializing the queuelist after using it, so rq->queuelist
 can be used for the purpose above.)

"Normal" drivers can be converted to use blk_end_request()
in a standard way shown below.

 a) end_that_request_{chunk/first}
    spin_lock_irqsave()
    (add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
    end_that_request_last()
    spin_unlock_irqrestore()
    => blk_end_request()

 b) spin_lock_irqsave()
    end_that_request_{chunk/first}
    (add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
    end_that_request_last()
    spin_unlock_irqrestore()
    => spin_lock_irqsave()
       __blk_end_request()
       spin_unlock_irqrestore()

 c) spin_lock_irqsave()
    (add_disk_randomness(), blk_queue_end_tag(), blkdev_dequeue_request())
    end_that_request_last()
    spin_unlock_irqrestore()
    => blk_end_request()   or   spin_lock_irqsave()
                                __blk_end_request()
                                spin_unlock_irqrestore()

Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

Showing 2 changed files with 100 additions and 0 deletions Side-by-side Diff

... ... @@ -3791,6 +3791,102 @@
3791 3791 }
3792 3792 EXPORT_SYMBOL(end_request);
3793 3793  
  3794 +static void complete_request(struct request *rq, int error)
  3795 +{
  3796 + /*
  3797 + * REMOVEME: This conversion is transitional and will be removed
  3798 + * when old end_that_request_* are unexported.
  3799 + */
  3800 + int uptodate = 1;
  3801 + if (error)
  3802 + uptodate = (error == -EIO) ? 0 : error;
  3803 +
  3804 + if (blk_rq_tagged(rq))
  3805 + blk_queue_end_tag(rq->q, rq);
  3806 +
  3807 + if (blk_queued_rq(rq))
  3808 + blkdev_dequeue_request(rq);
  3809 +
  3810 + end_that_request_last(rq, uptodate);
  3811 +}
  3812 +
  3813 +/**
  3814 + * blk_end_request - Helper function for drivers to complete the request.
  3815 + * @rq: the request being processed
  3816 + * @error: 0 for success, < 0 for error
  3817 + * @nr_bytes: number of bytes to complete
  3818 + *
  3819 + * Description:
  3820 + * Ends I/O on a number of bytes attached to @rq.
  3821 + * If @rq has leftover, sets it up for the next range of segments.
  3822 + *
  3823 + * Return:
  3824 + * 0 - we are done with this request
  3825 + * 1 - still buffers pending for this request
  3826 + **/
  3827 +int blk_end_request(struct request *rq, int error, int nr_bytes)
  3828 +{
  3829 + struct request_queue *q = rq->q;
  3830 + unsigned long flags = 0UL;
  3831 + /*
  3832 + * REMOVEME: This conversion is transitional and will be removed
  3833 + * when old end_that_request_* are unexported.
  3834 + */
  3835 + int uptodate = 1;
  3836 + if (error)
  3837 + uptodate = (error == -EIO) ? 0 : error;
  3838 +
  3839 + if (blk_fs_request(rq) || blk_pc_request(rq)) {
  3840 + if (__end_that_request_first(rq, uptodate, nr_bytes))
  3841 + return 1;
  3842 + }
  3843 +
  3844 + add_disk_randomness(rq->rq_disk);
  3845 +
  3846 + spin_lock_irqsave(q->queue_lock, flags);
  3847 + complete_request(rq, error);
  3848 + spin_unlock_irqrestore(q->queue_lock, flags);
  3849 +
  3850 + return 0;
  3851 +}
  3852 +EXPORT_SYMBOL_GPL(blk_end_request);
  3853 +
  3854 +/**
  3855 + * __blk_end_request - Helper function for drivers to complete the request.
  3856 + * @rq: the request being processed
  3857 + * @error: 0 for success, < 0 for error
  3858 + * @nr_bytes: number of bytes to complete
  3859 + *
  3860 + * Description:
  3861 + * Must be called with queue lock held unlike blk_end_request().
  3862 + *
  3863 + * Return:
  3864 + * 0 - we are done with this request
  3865 + * 1 - still buffers pending for this request
  3866 + **/
  3867 +int __blk_end_request(struct request *rq, int error, int nr_bytes)
  3868 +{
  3869 + /*
  3870 + * REMOVEME: This conversion is transitional and will be removed
  3871 + * when old end_that_request_* are unexported.
  3872 + */
  3873 + int uptodate = 1;
  3874 + if (error)
  3875 + uptodate = (error == -EIO) ? 0 : error;
  3876 +
  3877 + if (blk_fs_request(rq) || blk_pc_request(rq)) {
  3878 + if (__end_that_request_first(rq, uptodate, nr_bytes))
  3879 + return 1;
  3880 + }
  3881 +
  3882 + add_disk_randomness(rq->rq_disk);
  3883 +
  3884 + complete_request(rq, error);
  3885 +
  3886 + return 0;
  3887 +}
  3888 +EXPORT_SYMBOL_GPL(__blk_end_request);
  3889 +
3794 3890 static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
3795 3891 struct bio *bio)
3796 3892 {
include/linux/blkdev.h
... ... @@ -537,6 +537,8 @@
537 537 #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
538 538 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
539 539 #define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
  540 +/* rq->queuelist of dequeued request must be list_empty() */
  541 +#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
540 542  
541 543 #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
542 544  
... ... @@ -724,6 +726,8 @@
724 726 * for parts of the original function. This prevents
725 727 * code duplication in drivers.
726 728 */
  729 +extern int blk_end_request(struct request *rq, int error, int nr_bytes);
  730 +extern int __blk_end_request(struct request *rq, int error, int nr_bytes);
727 731 extern int end_that_request_first(struct request *, int, int);
728 732 extern int end_that_request_chunk(struct request *, int, int);
729 733 extern void end_that_request_last(struct request *, int);