Commit 99e22598e9a8e0a996d69c8c0f6b7027cb57720a
Parent b4cb290e0a
block: drop queue lock before calling __blk_run_queue() for kblockd punt
If we know we are going to punt to kblockd, we can drop the queue lock before calling into __blk_run_queue(), since it only does a safe bit test and a workqueue call. Since kblockd needs to grab this very lock as one of the first things it does, it's a good optimization to drop the lock before waking kblockd.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
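The idea generalizes beyond the block layer: whenever the thread being woken will take the same lock as its first action, releasing that lock before the wakeup spares the worker an immediate stall on a held lock. A minimal userspace sketch of the pattern using pthreads (every name below is illustrative; none of this is kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool work_pending;

/* Stand-in for kblockd: grabs queue_lock as its first act. */
static void *worker_fn(void *arg)
{
        pthread_mutex_lock(&queue_lock);
        while (!work_pending)
                pthread_cond_wait(&wake, &queue_lock);
        work_pending = false;
        /* ... run the queue ... */
        pthread_mutex_unlock(&queue_lock);
        return NULL;
}

/* Stand-in for the from_schedule path: unlock, then wake. */
static void punt_to_worker(void)
{
        pthread_mutex_lock(&queue_lock);
        work_pending = true;
        /*
         * Drop the lock before signalling; the wakeup itself needs no
         * lock, and holding it would only make the worker contend.
         */
        pthread_mutex_unlock(&queue_lock);
        pthread_cond_signal(&wake);
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, worker_fn, NULL);
        punt_to_worker();
        pthread_join(tid, NULL);
        puts("worker ran");
        return 0;
}

Build with -lpthread. The predicate loop around pthread_cond_wait() is what makes the unlock-then-signal ordering safe even if the worker has not reached the wait yet.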
1 changed file with 25 additions and 8 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -295,7 +295,8 @@
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled.
+ *    held and interrupts disabled. If force_kblockd is true, then it is
+ *    safe to call this without holding the queue lock.
  *
  */
 void __blk_run_queue(struct request_queue *q, bool force_kblockd)
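For context, the force_kblockd path that the new comment blesses boils down to a stopped-queue bit test plus a workqueue punt, neither of which needs q->queue_lock. A simplified paraphrase of the function as it looked around this commit (not the verbatim source; the real direct-dispatch path also carries a QUEUE_FLAG_REENTER recursion guard, omitted here):

void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
        /* Lockless: blk_queue_stopped() is a plain test_bit(). */
        if (unlikely(blk_queue_stopped(q)))
                return;

        if (force_kblockd) {
                /* Lockless too: just schedules q->delay_work on kblockd. */
                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
                return;
        }

        /* Direct dispatch; this path does need q->queue_lock held. */
        q->request_fn(q);
}

This is what makes the unlock-before-run reordering in queue_unplugged() below safe for the from_schedule case.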
@@ -2671,9 +2672,23 @@
  */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
                             bool from_schedule)
+        __releases(q->queue_lock)
 {
         trace_block_unplug(q, depth, !from_schedule);
-        __blk_run_queue(q, from_schedule);
+
+        /*
+         * If we are punting this to kblockd, then we can safely drop
+         * the queue_lock before waking kblockd (which needs to take
+         * this lock).
+         */
+        if (from_schedule) {
+                spin_unlock(q->queue_lock);
+                __blk_run_queue(q, true);
+        } else {
+                __blk_run_queue(q, false);
+                spin_unlock(q->queue_lock);
+        }
+
 }

 static void flush_plug_callbacks(struct blk_plug *plug)
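The __releases(q->queue_lock) annotation added above is a sparse annotation: it declares that queue_unplugged() is entered with the lock held and returns with it released, so a checker build (make C=1) can flag callers whose locking no longer balances. Outside checker builds it compiles away to nothing; its definition in include/linux/compiler.h is roughly:

#ifdef __CHECKER__
# define __releases(x)  __attribute__((context(x, 1, 0)))
#else
# define __releases(x)
#endif

The context(x, 1, 0) triple tells sparse the function begins with one reference to the lock context and ends with zero.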
@@ -2729,10 +2744,11 @@
                 BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
                 BUG_ON(!rq->q);
                 if (rq->q != q) {
-                        if (q) {
+                        /*
+                         * This drops the queue lock
+                         */
+                        if (q)
                                 queue_unplugged(q, depth, from_schedule);
-                                spin_unlock(q->queue_lock);
-                        }
                         q = rq->q;
                         depth = 0;
                         spin_lock(q->queue_lock);
@@ -2750,10 +2766,11 @@
                 depth++;
         }

-        if (q) {
+        /*
+         * This drops the queue lock
+         */
+        if (q)
                 queue_unplugged(q, depth, from_schedule);
-                spin_unlock(q->queue_lock);
-        }

         local_irq_restore(flags);
 }