Commit 23e018a1b083ecb4b8bb2fb43d58e7c19b5d7959
Parent: 48e025e63a
block: get rid of kblockd_schedule_delayed_work()
It was briefly introduced to allow CFQ to do delayed scheduling, but we ended up removing that feature again. So let's kill the function and export, and just switch CFQ back to the normal work schedule, since it is now passing in a '0' delay from all call sites.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
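For illustration only (not part of this commit): a minimal sketch, using made-up names (example_data, example_kick_queue, example_schedule_dispatch), of the pattern the change returns to — a plain work_struct queued immediately instead of a delayed_work queued with a 0 delay.

#include <linux/printk.h>
#include <linux/workqueue.h>

/* Hypothetical container, standing in for something like cfq_data. */
struct example_data {
        struct work_struct unplug_work;         /* was: struct delayed_work */
};

static void example_kick_queue(struct work_struct *work)
{
        /* With a plain work_struct, container_of() takes the member itself,
         * not the .work field embedded in a delayed_work. */
        struct example_data *ed = container_of(work, struct example_data,
                                               unplug_work);

        pr_debug("dispatching pending work for %p\n", ed);
}

static void example_init(struct example_data *ed)
{
        /* was: INIT_DELAYED_WORK(&ed->unplug_work, example_kick_queue); */
        INIT_WORK(&ed->unplug_work, example_kick_queue);
}

static void example_schedule_dispatch(struct example_data *ed,
                                      struct workqueue_struct *wq)
{
        /* was: queue_delayed_work(wq, &ed->unplug_work, 0);
         * a 0 delay defers nothing, so plain queue_work() is equivalent. */
        queue_work(wq, &ed->unplug_work);
}

In the commit itself this corresponds to CFQ's unplug_work going back to INIT_WORK()/kblockd_schedule_work() and cancel_work_sync(), as the diff below shows.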
3 changed files with 11 additions and 25 deletions
block/blk-core.c
@@ -2492,14 +2492,6 @@
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                                  struct delayed_work *work,
-                                  unsigned long delay)
-{
-        return queue_delayed_work(kblockd_workqueue, work, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
         BUILD_BUG_ON(__REQ_NR_BITS > 8 *
block/cfq-iosched.c
@@ -150,7 +150,7 @@
          * idle window management
          */
         struct timer_list idle_slice_timer;
-        struct delayed_work unplug_work;
+        struct work_struct unplug_work;
 
         struct cfq_queue *active_queue;
         struct cfq_io_context *active_cic;
 
@@ -268,13 +268,11 @@
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
-                                         unsigned long delay)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
         if (cfqd->busy_queues) {
                 cfq_log(cfqd, "schedule dispatch");
-                kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
-                                                delay);
+                kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
         }
 }
 
@@ -1400,7 +1398,7 @@
 
         if (unlikely(cfqd->active_queue == cfqq)) {
                 __cfq_slice_expired(cfqd, cfqq, 0);
-                cfq_schedule_dispatch(cfqd, 0);
+                cfq_schedule_dispatch(cfqd);
         }
 
         kmem_cache_free(cfq_pool, cfqq);
@@ -1495,7 +1493,7 @@
 {
         if (unlikely(cfqq == cfqd->active_queue)) {
                 __cfq_slice_expired(cfqd, cfqq, 0);
-                cfq_schedule_dispatch(cfqd, 0);
+                cfq_schedule_dispatch(cfqd);
         }
 
         cfq_put_queue(cfqq);
@@ -2213,7 +2211,7 @@
         }
 
         if (!rq_in_driver(cfqd))
-                cfq_schedule_dispatch(cfqd, 0);
+                cfq_schedule_dispatch(cfqd);
 }
 
 /*
@@ -2343,7 +2341,7 @@
         if (cic)
                 put_io_context(cic->ioc);
 
-        cfq_schedule_dispatch(cfqd, 0);
+        cfq_schedule_dispatch(cfqd);
         spin_unlock_irqrestore(q->queue_lock, flags);
         cfq_log(cfqd, "set_request fail");
         return 1;
@@ -2352,7 +2350,7 @@
 static void cfq_kick_queue(struct work_struct *work)
 {
         struct cfq_data *cfqd =
-                container_of(work, struct cfq_data, unplug_work.work);
+                container_of(work, struct cfq_data, unplug_work);
         struct request_queue *q = cfqd->queue;
 
         spin_lock_irq(q->queue_lock);
@@ -2406,7 +2404,7 @@
 expire:
         cfq_slice_expired(cfqd, timed_out);
 out_kick:
-        cfq_schedule_dispatch(cfqd, 0);
+        cfq_schedule_dispatch(cfqd);
 out_cont:
         spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
@@ -2414,7 +2412,7 @@
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
         del_timer_sync(&cfqd->idle_slice_timer);
-        cancel_delayed_work_sync(&cfqd->unplug_work);
+        cancel_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2496,7 +2494,7 @@
         cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
         cfqd->idle_slice_timer.data = (unsigned long) cfqd;
 
-        INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
+        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
         cfqd->cfq_quantum = cfq_quantum;
         cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
include/linux/blkdev.h
@@ -1172,11 +1172,7 @@
 }
 
 struct work_struct;
-struct delayed_work;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                                  struct delayed_work *work,
-                                  unsigned long delay);
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
         MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))