Commit 158dbda0068e63c7cce7bd47c123bd1dfa5a902c
Committed by: Jens Axboe
1 parent: 5efccd17ce
Exists in: master and 7 other branches
block: reorganize request fetching functions
Impact: code reorganization

elv_next_request() and elv_dequeue_request() are part of the public
block layer interface rather than the actual elevator implementation.
They mostly deal with how requests interact with the block layer and
low-level drivers at the beginning of request processing, whereas
__elv_next_request() is the actual elevator request fetching interface.

Move the two functions to blk-core.c. This prepares for further
interface cleanup.

Signed-off-by: Tejun Heo <tj@kernel.org>
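For orientation, these two functions are the driver-facing half of request
fetching: a pre-blk-mq driver pulls work off its queue from its request_fn
roughly as in the sketch below. This is an illustrative sketch only, not part
of the commit; my_request_fn and the hardware handoff are hypothetical, and
blkdev_dequeue_request() is the blkdev.h wrapper around elv_dequeue_request().

/* Hypothetical driver request_fn (called with q->queue_lock held),
 * showing how the two moved functions are consumed by a driver. */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* Fetch the next started-and-prepped request, or NULL. */
	while ((rq = elv_next_request(q)) != NULL) {
		/* Take it off the queue; from here it is accounted as
		 * in-flight on the driver side (see elv_dequeue_request()). */
		blkdev_dequeue_request(rq);

		/* ... issue rq to the hardware; complete it later via
		 * __blk_end_request() or similar ... */
	}
}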
Showing 3 changed files with 132 additions and 128 deletions
block/blk-core.c
@@ -1712,6 +1712,101 @@
 }
 EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
+struct request *elv_next_request(struct request_queue *q)
+{
+	struct request *rq;
+	int ret;
+
+	while ((rq = __elv_next_request(q)) != NULL) {
+		if (!(rq->cmd_flags & REQ_STARTED)) {
+			/*
+			 * This is the first time the device driver
+			 * sees this request (possibly after
+			 * requeueing).  Notify IO scheduler.
+			 */
+			if (blk_sorted_rq(rq))
+				elv_activate_rq(q, rq);
+
+			/*
+			 * just mark as started even if we don't start
+			 * it, a request that has been delayed should
+			 * not be passed by new incoming requests
+			 */
+			rq->cmd_flags |= REQ_STARTED;
+			trace_block_rq_issue(q, rq);
+		}
+
+		if (!q->boundary_rq || q->boundary_rq == rq) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = NULL;
+		}
+
+		if (rq->cmd_flags & REQ_DONTPREP)
+			break;
+
+		if (q->dma_drain_size && rq->data_len) {
+			/*
+			 * make sure space for the drain appears we
+			 * know we can do this because max_hw_segments
+			 * has been adjusted to be one fewer than the
+			 * device can handle
+			 */
+			rq->nr_phys_segments++;
+		}
+
+		if (!q->prep_rq_fn)
+			break;
+
+		ret = q->prep_rq_fn(q, rq);
+		if (ret == BLKPREP_OK) {
+			break;
+		} else if (ret == BLKPREP_DEFER) {
+			/*
+			 * the request may have been (partially) prepped.
+			 * we need to keep this request in the front to
+			 * avoid resource deadlock. REQ_STARTED will
+			 * prevent other fs requests from passing this one.
+			 */
+			if (q->dma_drain_size && rq->data_len &&
+			    !(rq->cmd_flags & REQ_DONTPREP)) {
+				/*
+				 * remove the space for the drain we added
+				 * so that we don't add it again
+				 */
+				--rq->nr_phys_segments;
+			}
+
+			rq = NULL;
+			break;
+		} else if (ret == BLKPREP_KILL) {
+			rq->cmd_flags |= REQ_QUIET;
+			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+		} else {
+			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
+			break;
+		}
+	}
+
+	return rq;
+}
+EXPORT_SYMBOL(elv_next_request);
+
+void elv_dequeue_request(struct request_queue *q, struct request *rq)
+{
+	BUG_ON(list_empty(&rq->queuelist));
+	BUG_ON(ELV_ON_HASH(rq));
+
+	list_del_init(&rq->queuelist);
+
+	/*
+	 * the time frame between a request being removed from the lists
+	 * and to it is freed is accounted as io that is in progress at
+	 * the driver side.
+	 */
+	if (blk_account_rq(rq))
+		q->in_flight++;
+}
+
 /**
  * __end_that_request_first - end I/O on a request
  * @req:      the request being processed
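The BLKPREP_* branching in elv_next_request() above is driven by whatever hook
the driver registered with blk_queue_prep_rq(). A minimal sketch of such a
hook follows; my_prep_fn, my_alloc_cmd() and my_build_command() are
hypothetical stand-ins for real per-request command setup.

/* Hypothetical prep hook, registered with blk_queue_prep_rq(q, my_prep_fn).
 * elv_next_request() interprets the return value as shown in the hunk above. */
static int my_prep_fn(struct request_queue *q, struct request *rq)
{
	struct my_cmd *cmd;

	cmd = my_alloc_cmd(rq);		/* hypothetical per-request resource */
	if (!cmd)
		return BLKPREP_DEFER;	/* out of resources: leave rq at the
					 * front of the queue, retry later */

	if (my_build_command(cmd, rq) < 0)
		return BLKPREP_KILL;	/* unservable: request ends with -EIO */

	rq->special = cmd;
	rq->cmd_flags |= REQ_DONTPREP;	/* skip prep again after a requeue */
	return BLKPREP_OK;		/* ready to hand to the driver */
}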
block/blk.h
@@ -43,6 +43,43 @@
 	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
 }
 
+/*
+ * Internal elevator interface
+ */
+#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
+
+static inline struct request *__elv_next_request(struct request_queue *q)
+{
+	struct request *rq;
+
+	while (1) {
+		while (!list_empty(&q->queue_head)) {
+			rq = list_entry_rq(q->queue_head.next);
+			if (blk_do_ordered(q, &rq))
+				return rq;
+		}
+
+		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+			return NULL;
+	}
+}
+
+static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (e->ops->elevator_activate_req_fn)
+		e->ops->elevator_activate_req_fn(q, rq);
+}
+
+static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
+{
+	struct elevator_queue *e = q->elevator;
+
+	if (e->ops->elevator_deactivate_req_fn)
+		e->ops->elevator_deactivate_req_fn(q, rq);
+}
+
 #ifdef CONFIG_FAIL_IO_TIMEOUT
 int blk_should_fake_timeout(struct request_queue *);
 ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
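__elv_next_request() above keeps calling the scheduler's elevator_dispatch_fn
until something lands on q->queue_head or the scheduler runs dry. The simplest
real implementation of that hook is the noop scheduler's; the sketch below is
paraphrased from block/noop-iosched.c of the same era.

/* Move one request from the scheduler's internal FIFO onto the dispatch
 * queue. Returns nonzero if a request was dispatched, 0 when empty, which
 * makes __elv_next_request() return NULL. */
static int noop_dispatch(struct request_queue *q, int force)
{
	struct noop_data *nd = q->elevator->elevator_data;

	if (!list_empty(&nd->queue)) {
		struct request *rq;

		rq = list_entry(nd->queue.next, struct request, queuelist);
		list_del_init(&rq->queuelist);
		elv_dispatch_sort(q, rq);
		return 1;
	}
	return 0;
}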
block/elevator.c
@@ -53,7 +53,6 @@
 	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
 #define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
-#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
 DEFINE_TRACE(block_rq_insert);
 DEFINE_TRACE(block_rq_issue);
@@ -310,22 +309,6 @@
 }
 EXPORT_SYMBOL(elevator_exit);
 
-static void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_activate_req_fn)
-		e->ops->elevator_activate_req_fn(q, rq);
-}
-
-static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
-	struct elevator_queue *e = q->elevator;
-
-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
-}
-
 static inline void __elv_rqhash_del(struct request *rq)
 {
 	hlist_del_init(&rq->hash);
@@ -757,117 +740,6 @@
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(elv_add_request);
-
-static inline struct request *__elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-
-	while (1) {
-		while (!list_empty(&q->queue_head)) {
-			rq = list_entry_rq(q->queue_head.next);
-			if (blk_do_ordered(q, &rq))
-				return rq;
-		}
-
-		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
-			return NULL;
-	}
-}
-
-struct request *elv_next_request(struct request_queue *q)
-{
-	struct request *rq;
-	int ret;
-
-	while ((rq = __elv_next_request(q)) != NULL) {
-		if (!(rq->cmd_flags & REQ_STARTED)) {
-			/*
-			 * This is the first time the device driver
-			 * sees this request (possibly after
-			 * requeueing).  Notify IO scheduler.
-			 */
-			if (blk_sorted_rq(rq))
-				elv_activate_rq(q, rq);
-
-			/*
-			 * just mark as started even if we don't start
-			 * it, a request that has been delayed should
-			 * not be passed by new incoming requests
-			 */
-			rq->cmd_flags |= REQ_STARTED;
-			trace_block_rq_issue(q, rq);
-		}
-
-		if (!q->boundary_rq || q->boundary_rq == rq) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = NULL;
-		}
-
-		if (rq->cmd_flags & REQ_DONTPREP)
-			break;
-
-		if (q->dma_drain_size && rq->data_len) {
-			/*
-			 * make sure space for the drain appears we
-			 * know we can do this because max_hw_segments
-			 * has been adjusted to be one fewer than the
-			 * device can handle
-			 */
-			rq->nr_phys_segments++;
-		}
-
-		if (!q->prep_rq_fn)
-			break;
-
-		ret = q->prep_rq_fn(q, rq);
-		if (ret == BLKPREP_OK) {
-			break;
-		} else if (ret == BLKPREP_DEFER) {
-			/*
-			 * the request may have been (partially) prepped.
-			 * we need to keep this request in the front to
-			 * avoid resource deadlock. REQ_STARTED will
-			 * prevent other fs requests from passing this one.
-			 */
-			if (q->dma_drain_size && rq->data_len &&
-			    !(rq->cmd_flags & REQ_DONTPREP)) {
-				/*
-				 * remove the space for the drain we added
-				 * so that we don't add it again
-				 */
-				--rq->nr_phys_segments;
-			}
-
-			rq = NULL;
-			break;
-		} else if (ret == BLKPREP_KILL) {
-			rq->cmd_flags |= REQ_QUIET;
-			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
-		} else {
-			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
-			break;
-		}
-	}
-
-	return rq;
-}
-EXPORT_SYMBOL(elv_next_request);
-
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
-{
-	BUG_ON(list_empty(&rq->queuelist));
-	BUG_ON(ELV_ON_HASH(rq));
-
-	list_del_init(&rq->queuelist);
-
-	/*
-	 * the time frame between a request being removed from the lists
-	 * and to it is freed is accounted as io that is in progress at
-	 * the driver side.
-	 */
-	if (blk_account_rq(rq))
-		q->in_flight++;
-}
 
 int elv_queue_empty(struct request_queue *q)
 {