Commit a7f557923441186a3cdbabc54f1bcacf42b63bf5
Committed by: Jens Axboe
Parent: a538cd03be
block: kill blk_start_queueing()
blk_start_queueing() is identical to __blk_run_queue() except that it doesn't check for recursion. None of the current users depends on blk_start_queueing() running request_fn directly. Replace usages of blk_start_queueing() with [__]blk_run_queue() and kill it.

[ Impact: removal of mostly duplicate interface function ]

Signed-off-by: Tejun Heo <tj@kernel.org>
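For context, the recursion check the message refers to looks roughly like this in the era's block/blk-core.c. This is a paraphrased sketch reconstructed from memory of the tree around this commit, not part of the diff below; exact details may differ. The QUEUE_FLAG_REENTER guard punts to kblockd instead of re-entering ->request_fn():

void __blk_run_queue(struct request_queue *q)
{
	blk_remove_plug(q);

	/* nothing to do if the queue is stopped or empty */
	if (unlikely(blk_queue_stopped(q)))
		return;
	if (elv_queue_empty(q))
		return;

	/*
	 * Recurse at most once to avoid overrunning the stack; if we
	 * are already inside ->request_fn(), let kblockd re-run the
	 * queue shortly instead.
	 */
	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else {
		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
		kblockd_schedule_work(q, &q->unplug_work);
	}
}

Compared with the blk_start_queueing() body removed below, the visible difference is only how an in-progress ->request_fn() is handled, which no caller depended on.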
Showing 5 changed files with 9 additions and 39 deletions
block/as-iosched.c
@@ -1312,12 +1312,8 @@
 static void as_work_handler(struct work_struct *work)
 {
 	struct as_data *ad = container_of(work, struct as_data, antic_work);
-	struct request_queue *q = ad->q;
-	unsigned long flags;
 
-	spin_lock_irqsave(q->queue_lock, flags);
-	blk_start_queueing(q);
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	blk_run_queue(ad->q);
 }
 
 static int as_may_queue(struct request_queue *q, int rw)
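as_work_handler() runs from a workqueue and does not hold the queue lock, so the open-coded save/lock/run/unlock sequence collapses into blk_run_queue(), which in this era's blk-core.c is just the locking wrapper around __blk_run_queue() (again a paraphrased sketch, not part of this diff):

void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	/* take the queue lock, then defer to the lock-held variant */
	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}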
block/blk-core.c
@@ -433,9 +433,7 @@
  *
  * Description:
  *    Invoke request handling on this queue, if it has pending work to do.
- *    May be used to restart queueing when a request has completed. Also
- *    See @blk_start_queueing.
- *
+ *    May be used to restart queueing when a request has completed.
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -895,28 +893,6 @@
 EXPORT_SYMBOL(blk_get_request);
 
 /**
- * blk_start_queueing - initiate dispatch of requests to device
- * @q: request queue to kick into gear
- *
- * This is basically a helper to remove the need to know whether a queue
- * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue. Should be used to start queueing on a device outside
- * of ->request_fn() context. Also see @blk_run_queue.
- *
- * The queue lock must be held with interrupts disabled.
- */
-void blk_start_queueing(struct request_queue *q)
-{
-	if (!blk_queue_plugged(q)) {
-		if (unlikely(blk_queue_stopped(q)))
-			return;
-		q->request_fn(q);
-	} else
-		__generic_unplug_device(q);
-}
-EXPORT_SYMBOL(blk_start_queueing);
-
-/**
  * blk_requeue_request - put a request back on queue
  * @q: request queue where request should be inserted
  * @rq: request to be inserted
@@ -984,7 +960,7 @@
 
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	blk_start_queueing(q);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
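blk_insert_request() already holds q->queue_lock with interrupts disabled at this point, as the spin_unlock_irqrestore() in the hunk shows, so the caller-locked __blk_run_queue() is the right substitution here rather than blk_run_queue().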
block/cfq-iosched.c
@@ -2088,7 +2088,7 @@
 		if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
 		    cfqd->busy_queues > 1) {
 			del_timer(&cfqd->idle_slice_timer);
-			blk_start_queueing(cfqd->queue);
+			__blk_run_queue(cfqd->queue);
 		}
 		cfq_mark_cfqq_must_dispatch(cfqq);
 	}
@@ -2100,7 +2100,7 @@
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		blk_start_queueing(cfqd->queue);
+		__blk_run_queue(cfqd->queue);
 	}
 }
 
@@ -2345,7 +2345,7 @@
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
-	blk_start_queueing(q);
+	__blk_run_queue(cfqd->queue);
 	spin_unlock_irq(q->queue_lock);
 }
block/elevator.c
@@ -599,7 +599,7 @@
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		blk_start_queueing(q);
+		__blk_run_queue(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -643,8 +643,7 @@
 		 * with anything. There's no point in delaying queue
 		 * processing.
 		 */
-		blk_remove_plug(q);
-		blk_start_queueing(q);
+		__blk_run_queue(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT:
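The explicit blk_remove_plug() can be dropped here because __blk_run_queue() removes the plug itself as its first step (see the sketch after the commit message), so ELEVATOR_INSERT_BACK keeps the same unplug-then-dispatch behavior.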
@@ -971,7 +970,7 @@
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
 		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-			blk_start_queueing(q);
+			__blk_run_queue(q);
 		}
 	}
 }
include/linux/blkdev.h
@@ -797,7 +797,6 @@
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);