Commit 24ecfbe27f65563909b14492afda2f1c21f7c044
Committed by: Jens Axboe
Parent:       4521cc4ed5
Exists in:    master and 4 other branches
block: add blk_run_queue_async

Instead of overloading __blk_run_queue to force an offload to kblockd,
add a new blk_run_queue_async helper to do it explicitly. I've kept the
blk_queue_stopped check for now, but I suspect it's not needed, as the
check we do when the workqueue item runs should be enough.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Showing 9 changed files with 36 additions and 23 deletions
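The caller-side conversion is mechanical. As a rough sketch (illustrative only, not
part of the patch), a hypothetical call site that previously requested the kblockd
offload through the bool argument now states that intent in the function name:

        /* Before: the second argument chose direct vs. kblockd execution. */
        spin_lock_irqsave(q->queue_lock, flags);
        __blk_run_queue(q, true);       /* offload to kblockd */
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* After: the async variant schedules q->delay_work with 0 delay. */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);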
block/blk-core.c
@@ -204,7 +204,7 @@
 
         q = container_of(work, struct request_queue, delay_work.work);
         spin_lock_irq(q->queue_lock);
-        __blk_run_queue(q, false);
+        __blk_run_queue(q);
         spin_unlock_irq(q->queue_lock);
 }
 
@@ -239,7 +239,7 @@
         WARN_ON(!irqs_disabled());
 
         queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-        __blk_run_queue(q, false);
+        __blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -296,11 +296,9 @@
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled. If force_kblockd is true, then it is
- *    safe to call this without holding the queue lock.
- *
+ *    held and interrupts disabled.
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
         if (unlikely(blk_queue_stopped(q)))
                 return;
@@ -309,7 +307,7 @@
          * Only recurse once to avoid overrunning the stack, let the unplug
          * handling reinvoke the handler shortly if we already got there.
          */
-        if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
                 q->request_fn(q);
                 queue_flag_clear(QUEUE_FLAG_REENTER, q);
         } else
@@ -318,6 +316,20 @@
 EXPORT_SYMBOL(__blk_run_queue);
 
 /**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+        if (likely(!blk_queue_stopped(q)))
+                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
+/**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  *
@@ -330,7 +342,7 @@
         unsigned long flags;
 
         spin_lock_irqsave(q->queue_lock, flags);
-        __blk_run_queue(q, false);
+        __blk_run_queue(q);
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -979,7 +991,7 @@
                 blk_queue_end_tag(q, rq);
 
         add_acct_request(q, rq, where);
-        __blk_run_queue(q, false);
+        __blk_run_queue(q);
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1323,7 +1335,7 @@
         } else {
                 spin_lock_irq(q->queue_lock);
                 add_acct_request(q, req, where);
-                __blk_run_queue(q, false);
+                __blk_run_queue(q);
 out_unlock:
                 spin_unlock_irq(q->queue_lock);
         }
@@ -2684,9 +2696,9 @@
          */
         if (from_schedule) {
                 spin_unlock(q->queue_lock);
-                __blk_run_queue(q, true);
+                blk_run_queue_async(q);
         } else {
-                __blk_run_queue(q, false);
+                __blk_run_queue(q);
                 spin_unlock(q->queue_lock);
         }
 
block/blk-exec.c
@@ -55,7 +55,7 @@
         WARN_ON(irqs_disabled());
         spin_lock_irq(q->queue_lock);
         __elv_add_request(q, rq, where);
-        __blk_run_queue(q, false);
+        __blk_run_queue(q);
         /* the queue is stopped so it won't be plugged+unplugged */
         if (rq->cmd_type == REQ_TYPE_PM_RESUME)
                 q->request_fn(q);
block/blk-flush.c
@@ -218,7 +218,7 @@
          * request_fn may confuse the driver. Always use kblockd.
          */
         if (queued)
-                __blk_run_queue(q, true);
+                blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@
          * the comment in flush_end_io().
          */
         if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-                __blk_run_queue(q, true);
+                blk_run_queue_async(q);
 }
 
 /**
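The two blk-flush.c hunks above show the intended use of the new helper: they run
in the request completion path, where calling back into ->request_fn directly may
confuse the driver, so the queue kick is always deferred to kblockd. A minimal
sketch of that pattern (hypothetical completion hook, not from this commit):

        /* Hypothetical: invoked from a request completion path. */
        static void my_flush_done(struct request_queue *q)
        {
                /*
                 * Never reenter ->request_fn from completion context;
                 * let kblockd run the queue on our behalf instead.
                 */
                blk_run_queue_async(q);
        }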
block/blk.h
block/cfq-iosched.c
@@ -3368,7 +3368,7 @@
                     cfqd->busy_queues > 1) {
                         cfq_del_timer(cfqd, cfqq);
                         cfq_clear_cfqq_wait_request(cfqq);
-                        __blk_run_queue(cfqd->queue, false);
+                        __blk_run_queue(cfqd->queue);
                 } else {
                         cfq_blkiocg_update_idle_time_stats(
                                 &cfqq->cfqg->blkg);
@@ -3383,7 +3383,7 @@
                  * this new queue is RT and the current one is BE
                  */
                 cfq_preempt_queue(cfqd, cfqq);
-                __blk_run_queue(cfqd->queue, false);
+                __blk_run_queue(cfqd->queue);
         }
 }
 
@@ -3743,7 +3743,7 @@
         struct request_queue *q = cfqd->queue;
 
         spin_lock_irq(q->queue_lock);
-        __blk_run_queue(cfqd->queue, false);
+        __blk_run_queue(cfqd->queue);
         spin_unlock_irq(q->queue_lock);
 }
 
block/elevator.c
@@ -642,7 +642,7 @@
          */
         elv_drain_elevator(q);
         while (q->rq.elvpriv) {
-                __blk_run_queue(q, false);
+                __blk_run_queue(q);
                 spin_unlock_irq(q->queue_lock);
                 msleep(10);
                 spin_lock_irq(q->queue_lock);
@@ -695,7 +695,7 @@
                  * with anything. There's no point in delaying queue
                  * processing.
                  */
-                __blk_run_queue(q, false);
+                __blk_run_queue(q);
                 break;
 
         case ELEVATOR_INSERT_SORT_MERGE:
drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@
                                 &sdev->request_queue->queue_flags);
                 if (flagset)
                         queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-                __blk_run_queue(sdev->request_queue, false);
+                __blk_run_queue(sdev->request_queue);
                 if (flagset)
                         queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
                 spin_unlock(sdev->request_queue->queue_lock);
drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@
                 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
         if (flagset)
                 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-        __blk_run_queue(rport->rqst_q, false);
+        __blk_run_queue(rport->rqst_q);
         if (flagset)
                 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
         spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
include/linux/blkdev.h
@@ -697,7 +697,7 @@
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
+extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                         struct rq_map_data *, void __user *, unsigned long,