Commit 49cac01e1fa74174d72adb0e872504a7fefd7c01

Authored by Jens Axboe
1 parent a237c1c5bc

block: make unplug timer trace event correspond to the schedule() unplug

It's a pretty close match to what we had before: the timer triggering
meant that nobody unplugged the plug in due time, and in the new scheme
that corresponds closely to the schedule() unplug. It's essentially the
difference between an explicit unplug (IO unplug) and an implicit
unplug (timer unplug: we scheduled with pending IO queued).

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
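
For context, a minimal sketch of the two paths, assuming the on-stack
plugging API (blk_start_plug()/blk_finish_plug()) that this series is
built on; the submit_batch() wrapper below is illustrative, not kernel
code:

    /* Explicit unplug: the plugger flushes its own plug. */
    void submit_batch(struct bio **bios, int nr)
    {
            struct blk_plug plug;
            int i;

            blk_start_plug(&plug);          /* queue IO on the on-stack plug */
            for (i = 0; i < nr; i++)
                    submit_bio(READ, bios[i]);
            blk_finish_plug(&plug);         /* flushes via
                                             * blk_flush_plug_list(&plug, false) */
    }

    /*
     * Implicit unplug: the plugger blocks with IO still pending, and
     * schedule() flushes the plug on its behalf with
     * blk_flush_plug_list(plug, true).
     */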

Showing 3 changed files with 31 additions and 18 deletions

block/blk-core.c
... ... @@ -2662,17 +2662,23 @@
2662 2662 return !(rqa->q <= rqb->q);
2663 2663 }
2664 2664  
  2665 +/*
  2666 + * If 'from_schedule' is true, then postpone the dispatch of requests
  2667 + * until a safe kblockd context. We do this to avoid accidental big
  2668 + * additional stack usage in driver dispatch, in places where the original
  2669 + * plugger did not intend it.
  2670 + */
2665 2671 static void queue_unplugged(struct request_queue *q, unsigned int depth,
2666   - bool force_kblockd)
  2672 + bool from_schedule)
2667 2673 {
2668   - trace_block_unplug_io(q, depth);
2669   - __blk_run_queue(q, force_kblockd);
  2674 + trace_block_unplug(q, depth, !from_schedule);
  2675 + __blk_run_queue(q, from_schedule);
2670 2676  
2671 2677 if (q->unplugged_fn)
2672 2678 q->unplugged_fn(q);
2673 2679 }
2674 2680  
2675   -void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
  2681 +void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2676 2682 {
2677 2683 struct request_queue *q;
2678 2684 unsigned long flags;
... ... @@ -2707,7 +2713,7 @@
2707 2713 BUG_ON(!rq->q);
2708 2714 if (rq->q != q) {
2709 2715 if (q) {
2710   - queue_unplugged(q, depth, force_kblockd);
  2716 + queue_unplugged(q, depth, from_schedule);
2711 2717 spin_unlock(q->queue_lock);
2712 2718 }
2713 2719 q = rq->q;
... ... @@ -2728,7 +2734,7 @@
2728 2734 }
2729 2735  
2730 2736 if (q) {
2731   - queue_unplugged(q, depth, force_kblockd);
  2737 + queue_unplugged(q, depth, from_schedule);
2732 2738 spin_unlock(q->queue_lock);
2733 2739 }
2734 2740  
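
Note that the tracepoint's third argument is !from_schedule, so an
explicit flush by the plugger traces as an explicit unplug. The
scheduler side ends up looking roughly like the helper below (a sketch
of blk_flush_plug() as it exists around this series; details may differ
slightly in-tree):

    static inline void blk_flush_plug(struct task_struct *tsk)
    {
            struct blk_plug *plug = tsk->plug;

            /*
             * Called from schedule(): flush pending IO, but punt the
             * actual queue runs to kblockd so driver dispatch does not
             * pile extra usage onto this task's stack.
             */
            if (plug)
                    blk_flush_plug_list(plug, true);
    }
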
include/trace/events/block.h
... ... @@ -401,9 +401,9 @@
401 401  
402 402 DECLARE_EVENT_CLASS(block_unplug,
403 403  
404   - TP_PROTO(struct request_queue *q, unsigned int depth),
  404 + TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
405 405  
406   - TP_ARGS(q, depth),
  406 + TP_ARGS(q, depth, explicit),
407 407  
408 408 TP_STRUCT__entry(
409 409 __field( int, nr_rq )
... ... @@ -419,18 +419,19 @@
419 419 );
420 420  
421 421 /**
422   - * block_unplug_io - release of operations requests in request queue
  422 + * block_unplug - release of operations requests in request queue
423 423 * @q: request queue to unplug
424 424 * @depth: number of requests just added to the queue
  425 + * @explicit: whether this was an explicit unplug, or one from schedule()
425 426 *
426 427 * Unplug request queue @q because device driver is scheduled to work
427 428 * on elements in the request queue.
428 429 */
429   -DEFINE_EVENT(block_unplug, block_unplug_io,
  430 +DEFINE_EVENT(block_unplug, block_unplug,
430 431  
431   - TP_PROTO(struct request_queue *q, unsigned int depth),
  432 + TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
432 433  
433   - TP_ARGS(q, depth)
  434 + TP_ARGS(q, depth, explicit)
434 435 );
435 436  
436 437 /**
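
Note that the event class itself still records only the request count
(and, in this kernel, the task comm); the new explicit flag steers the
blktrace probe below but is not stored in the ftrace ring buffer. An
ftrace consumer would therefore still see a line like this
(illustrative values):

    fio-4012  [001]  1012.345678: block_unplug: [fio] 8
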
kernel/trace/blktrace.c
... ... @@ -850,16 +850,21 @@
850 850 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
851 851 }
852 852  
853   -static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
854   - unsigned int depth)
  853 +static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
  854 + unsigned int depth, bool explicit)
855 855 {
856 856 struct blk_trace *bt = q->blk_trace;
857 857  
858 858 if (bt) {
859 859 __be64 rpdu = cpu_to_be64(depth);
  860 + u32 what;
860 861  
861   - __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
862   - sizeof(rpdu), &rpdu);
  862 + if (explicit)
  863 + what = BLK_TA_UNPLUG_IO;
  864 + else
  865 + what = BLK_TA_UNPLUG_TIMER;
  866 +
  867 + __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
863 868 }
864 869 }
865 870  
... ... @@ -1002,7 +1007,7 @@
1002 1007 WARN_ON(ret);
1003 1008 ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1004 1009 WARN_ON(ret);
1005   - ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
  1010 + ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1006 1011 WARN_ON(ret);
1007 1012 ret = register_trace_block_split(blk_add_trace_split, NULL);
1008 1013 WARN_ON(ret);
... ... @@ -1017,7 +1022,7 @@
1017 1022 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1018 1023 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1019 1024 unregister_trace_block_split(blk_add_trace_split, NULL);
1020   - unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
  1025 + unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1021 1026 unregister_trace_block_plug(blk_add_trace_plug, NULL);
1022 1027 unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1023 1028 unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
... ... @@ -1332,6 +1337,7 @@
1332 1337 [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error },
1333 1338 [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug },
1334 1339 [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug },
  1340 + [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
1335 1341 [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic },
1336 1342 [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split },
1337 1343 [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic },
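
With the new action in the table, the in-kernel blk tracer renders the
two unplug flavors distinctly; illustrative (not captured) output might
look like:

    8,0    1     0.014561234  4012  U   N [fio] 8
    8,0    1     0.031872011  4012 UT   N [fio] 4

where the trailing number is the request depth carried in the trace
payload (rpdu above).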