Commit 8ba61435d73f2274e12d4d823fde06735e8f6a54

Authored by Tejun Heo
Committed by Jens Axboe
1 parent 481a7d6479

block: add missing blk_queue_dead() checks

blk_insert_cloned_request(), blk_execute_rq_nowait() and
blk_flush_plug_list() either didn't check whether the queue was dead
or did it without holding queue_lock.  Update them so that dead state
is checked while holding queue_lock.

AFAICS, this plugs all holes (requeue doesn't matter as the request is
transitioning atomically from in_flight to queued).

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
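
For reference, the pattern applied at each call site looks roughly like the sketch below. This is a condensed illustration rather than the exact kernel code: example_submit() is a made-up name, and the body is reduced to the -ENODEV case used by blk_insert_cloned_request(). The key point is that blk_queue_dead() is only trustworthy while queue_lock is held, because QUEUE_FLAG_DEAD is set under that same lock and can appear the moment the lock is dropped.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical call site: test the dead flag only under queue_lock. */
static int example_submit(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (unlikely(blk_queue_dead(q))) {
		/* Teardown has started; fail instead of touching the queue. */
		spin_unlock_irqrestore(q->queue_lock, flags);
		return -ENODEV;
	}

	/* While we hold the lock, the queue cannot become dead under us. */
	/* ... insert or run the request here ... */
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 0;
}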

Showing 2 changed files with 25 additions and 2 deletions

block/blk-core.c

@@ -1731,6 +1731,10 @@
 		return -EIO;
 
 	spin_lock_irqsave(q->queue_lock, flags);
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		return -ENODEV;
+	}
 
 	/*
 	 * Submitting request must be dequeued before calling this function
@@ -2705,6 +2709,14 @@
 	trace_block_unplug(q, depth, !from_schedule);
 
 	/*
+	 * Don't mess with dead queue.
+	 */
+	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock(q->queue_lock);
+		return;
+	}
+
+	/*
 	 * If we are punting this to kblockd, then we can safely drop
 	 * the queue_lock before waking kblockd (which needs to take
 	 * this lock).
@@ -2780,6 +2792,15 @@
 			depth = 0;
 			spin_lock(q->queue_lock);
 		}
+
+		/*
+		 * Short-circuit if @q is dead
+		 */
+		if (unlikely(blk_queue_dead(q))) {
+			__blk_end_request_all(rq, -ENODEV);
+			continue;
+		}
+
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
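
In blk_flush_plug_list(), the check above sits inside the per-request loop, so a request bound for a dead queue is completed with an error instead of being inserted into a queue that is going away. A condensed sketch of the loop shape, with depth accounting and queue switching elided, and ELEVATOR_INSERT_SORT_MERGE standing in for the actual insertion type:

while (!list_empty(&list)) {
	struct request *rq = list_entry_rq(list.next);

	list_del_init(&rq->queuelist);
	/* ... acquire rq->q->queue_lock when the queue changes ... */

	if (unlikely(blk_queue_dead(rq->q))) {
		/* Complete with an error so the submitter is notified. */
		__blk_end_request_all(rq, -ENODEV);
		continue;
	}

	/* rq is already accounted, so use raw insert */
	__elv_add_request(rq->q, rq, ELEVATOR_INSERT_SORT_MERGE);
}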
block/blk-exec.c

@@ -50,7 +50,11 @@
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
+	WARN_ON(irqs_disabled());
+	spin_lock_irq(q->queue_lock);
+
 	if (unlikely(blk_queue_dead(q))) {
+		spin_unlock_irq(q->queue_lock);
 		rq->errors = -ENXIO;
 		if (rq->end_io)
 			rq->end_io(rq, rq->errors);
@@ -59,8 +63,6 @@
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
-	WARN_ON(irqs_disabled());
-	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
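
One caller-visible consequence of the blk_execute_rq_nowait() reordering: a dead queue still completes the request through rq->end_io (with rq->errors set to -ENXIO) rather than queueing it, so an asynchronous submitter's completion callback runs either way. A hedged usage sketch, assuming an already-allocated struct request; wait_ctx, my_rq_done and my_submit are illustrative names, and rq->end_io_data carries the context, the same mechanism blk_execute_rq() uses for its own completion:

#include <linux/blkdev.h>
#include <linux/completion.h>

struct wait_ctx {
	struct completion done;
	int error;
};

static void my_rq_done(struct request *rq, int error)
{
	struct wait_ctx *ctx = rq->end_io_data;

	ctx->error = error;	/* -ENXIO if the queue was already dead */
	complete(&ctx->done);
}

static int my_submit(struct request_queue *q, struct gendisk *bd_disk,
		     struct request *rq)
{
	struct wait_ctx ctx;

	init_completion(&ctx.done);
	rq->end_io_data = &ctx;
	blk_execute_rq_nowait(q, bd_disk, rq, 1 /* at_head */, my_rq_done);
	wait_for_completion(&ctx.done);
	return ctx.error;
}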