Commit e98ef89b30b8a2e882b11d4965347015770f3627

Authored by Vivek Goyal
Committed by Jens Axboe
1 parent c10b61f091

cfq-iosched: Fixed boot warning with BLK_CGROUP=y and CFQ_GROUP_IOSCHED=n

Hi Jens,

A few days back Ingo noticed a CFQ boot-time warning. This patch fixes it.
The issue is that with CFQ_GROUP_IOSCHED=n, CFQ should not be making blkio
stat calls at all: in that configuration CFQ never registers its groups
with the blkio controller, so the embedded blkio_group's stats spinlock is
never initialized, and the very first stat update trips the spinlock debug
check quoted below.
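
The fix routes every such call through cfq_-prefixed static inline wrappers
in a new block/cfq.h: with CONFIG_CFQ_GROUP_IOSCHED set they forward to the
blk-cgroup code, otherwise they have empty bodies and compile away. A
minimal, self-contained sketch of that pattern (CONFIG_GROUP_STATS,
io_group and grp_update_add_stats are illustrative names, not the
kernel's):

/*
 * Sketch of the config-gated wrapper pattern used by the patch.
 * Build with -DCONFIG_GROUP_STATS to get the "enabled" variant.
 */
#include <stdio.h>

struct io_group {
	unsigned long queued;
};

#ifdef CONFIG_GROUP_STATS
/* Stats enabled: the wrapper forwards to the real accounting code. */
static inline void grp_update_add_stats(struct io_group *g)
{
	g->queued++;	/* would take the group's stats lock, etc. */
	printf("queued=%lu\n", g->queued);
}
#else
/* Stats disabled: same signature, empty body; the call compiles away
 * and the callers need no #ifdefs of their own. */
static inline void grp_update_add_stats(struct io_group *g) {}
#endif

int main(void)
{
	struct io_group g = { 0 };

	grp_update_add_stats(&g);	/* no-op unless CONFIG_GROUP_STATS */
	return 0;
}

The same idea keeps every call site in cfq-iosched.c free of #ifdefs, as
the diff below shows.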

> Hm, it's still not entirely fixed, as of 2.6.35-rc2-00131-g7908a9e. With
> some configs I get bad spinlock warnings during bootup:
>
> [   28.968013] initcall net_olddevs_init+0x0/0x82 returned 0 after 93750 usecs
> [   28.972003] calling  b44_init+0x0/0x55 @ 1
> [   28.976009] bus: 'pci': add driver b44
> [   28.976374]  sda:
> [   28.978157] BUG: spinlock bad magic on CPU#1, async/0/117
> [   28.980000]  lock: 7e1c5bbc, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
> [   28.980000] Pid: 117, comm: async/0 Not tainted 2.6.35-rc2-tip-01092-g010e7ef-dirty #8183
> [   28.980000] Call Trace:
> [   28.980000]  [<41ba6d55>] ? printk+0x20/0x24
> [   28.980000]  [<4134b7b7>] spin_bug+0x7c/0x87
> [   28.980000]  [<4134b853>] do_raw_spin_lock+0x1e/0x123
> [   28.980000]  [<41ba92ca>] ? _raw_spin_lock_irqsave+0x12/0x20
> [   28.980000]  [<41ba92d2>] _raw_spin_lock_irqsave+0x1a/0x20
> [   28.980000]  [<4133476f>] blkiocg_update_io_add_stats+0x25/0xfb
> [   28.980000]  [<41335dae>] ? cfq_prio_tree_add+0xb1/0xc1
> [   28.980000]  [<41337bc7>] cfq_insert_request+0x8c/0x425
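
The ".magic: 00000000" above is the tell-tale: with CONFIG_DEBUG_SPINLOCK,
spin_lock_init() stamps each lock with SPINLOCK_MAGIC (0xdead4ead), and the
debug path warns when it finds anything else, e.g. a zero-filled lock that
was never initialized. A simplified, userspace-compilable model of that
check (only SPINLOCK_MAGIC's value is the kernel's; the rest is
illustrative, not the actual source):

#include <stdio.h>

#define SPINLOCK_MAGIC 0xdead4eadU	/* stamped by spin_lock_init() */

struct dbg_spinlock {
	unsigned int magic;	/* stays 0 in zeroed, never-initialized memory */
};

static void spin_bug_check(const struct dbg_spinlock *lock)
{
	if (lock->magic != SPINLOCK_MAGIC)
		printf("BUG: spinlock bad magic, .magic: %08x\n", lock->magic);
}

int main(void)
{
	struct dbg_spinlock never_inited = { 0 };	/* like the unregistered blkg's lock */

	spin_bug_check(&never_inited);	/* prints ".magic: 00000000" */
	return 0;
}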

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

Showing 2 changed files with 142 additions and 27 deletions

--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -14,7 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
-#include "blk-cgroup.h"
+#include "cfq.h"
 
 /*
  * tunables
@@ -879,7 +879,7 @@
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
 	cfqg->saved_workload_slice = 0;
-	blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
+	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@@ -939,8 +939,8 @@
 
 	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
 			st->min_vdisktime);
-	blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
-	blkiocg_set_start_empty_time(&cfqg->blkg);
+	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
+	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
@@ -995,7 +995,7 @@
 
 	/* Add group onto cgroup list */
 	sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-	blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
+	cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
 					MKDEV(major, minor));
 	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
 
@@ -1079,7 +1079,7 @@
 		 * it from cgroup list, then it will take care of destroying
 		 * cfqg also.
 		 */
-		if (!blkiocg_del_blkio_group(&cfqg->blkg))
+		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
 			cfq_destroy_cfqg(cfqd, cfqg);
 	}
 }
 
@@ -1421,10 +1421,10 @@
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-						rq_is_sync(rq));
+	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(rq), rq_is_sync(rq));
 	cfq_add_rq_rb(rq);
-	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
 			&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
 			rq_is_sync(rq));
 }
@@ -1482,8 +1482,8 @@
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
-						rq_is_sync(rq));
+	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(rq), rq_is_sync(rq));
 	if (rq_is_meta(rq)) {
 		WARN_ON(!cfqq->meta_pending);
 		cfqq->meta_pending--;
@@ -1518,8 +1518,8 @@
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 			   struct bio *bio)
 {
-	blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
-					cfq_bio_sync(bio));
+	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
+					bio_data_dir(bio), cfq_bio_sync(bio));
 }
 
 static void
@@ -1539,8 +1539,8 @@
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
-					rq_is_sync(next));
+	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
+					rq_data_dir(next), rq_is_sync(next));
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -1571,7 +1571,7 @@
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	del_timer(&cfqd->idle_slice_timer);
-	blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -1580,7 +1580,7 @@
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
-		blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
+		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -1911,7 +1911,7 @@
 		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
+	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
 }
 
@@ -1931,7 +1931,7 @@
 	elv_dispatch_sort(q, rq);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
-	blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
+	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
 					rq_data_dir(rq), rq_is_sync(rq));
 }
 
@@ -3257,7 +3257,7 @@
 			cfq_clear_cfqq_wait_request(cfqq);
 			__blk_run_queue(cfqd->queue);
 		} else {
-			blkiocg_update_idle_time_stats(
+			cfq_blkiocg_update_idle_time_stats(
 						&cfqq->cfqg->blkg);
 			cfq_mark_cfqq_must_dispatch(cfqq);
 		}
@@ -3285,7 +3285,7 @@
 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-	blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
 			&cfqd->serving_group->blkg, rq_data_dir(rq),
 			rq_is_sync(rq));
 	cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -3373,9 +3373,9 @@
 	WARN_ON(!cfqq->dispatched);
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
-	blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq),
-			rq_io_start_time_ns(rq), rq_data_dir(rq),
-			rq_is_sync(rq));
+	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
+			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
+			rq_data_dir(rq), rq_is_sync(rq));
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -3739,7 +3739,7 @@
 
 	cfq_put_async_queues(cfqd);
 	cfq_release_cfq_groups(cfqd);
-	blkiocg_del_blkio_group(&cfqd->root_group.blkg);
+	cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
 
 	spin_unlock_irq(q->queue_lock);
 
@@ -3807,8 +3807,8 @@
 	 */
 	atomic_set(&cfqg->ref, 1);
 	rcu_read_lock();
-	blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
-					0);
+	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
+					(void *)cfqd, 0);
 	rcu_read_unlock();
 #endif
 	/*
--- /dev/null
+++ b/block/cfq.h
+#ifndef _CFQ_H
+#define _CFQ_H
+#include "blk-cgroup.h"
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+			struct blkio_group *curr_blkg, bool direction, bool sync)
+{
+	blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+			unsigned long dequeue)
+{
+	blkiocg_update_dequeue_stats(blkg, dequeue);
+}
+
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+			unsigned long time)
+{
+	blkiocg_update_timeslice_used(blkg, time);
+}
+
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
+{
+	blkiocg_set_start_empty_time(blkg);
+}
+
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+			bool direction, bool sync)
+{
+	blkiocg_update_io_remove_stats(blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+			bool direction, bool sync)
+{
+	blkiocg_update_io_merged_stats(blkg, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_idle_time_stats(blkg);
+}
+
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_avg_queue_size_stats(blkg);
+}
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
+{
+	blkiocg_update_set_idle_time_stats(blkg);
+}
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+			uint64_t bytes, bool direction, bool sync)
+{
+	blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
+}
+
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
+			uint64_t start_time, uint64_t io_start_time,
+			bool direction, bool sync)
+{
+	blkiocg_update_completion_stats(blkg, start_time, io_start_time,
+				direction, sync);
+}
+
+static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+			struct blkio_group *blkg, void *key, dev_t dev)
+{
+	blkiocg_add_blkio_group(blkcg, blkg, key, dev);
+}
+
+static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+	return blkiocg_del_blkio_group(blkg);
+}
+
+#else /* CFQ_GROUP_IOSCHED */
+static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
+			struct blkio_group *curr_blkg, bool direction, bool sync) {}
+
+static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+			unsigned long dequeue) {}
+
+static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
+			unsigned long time) {}
+static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
+static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
+			bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
+			bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
+static inline void
+cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
+
+static inline void
+cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
+
+static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
+			uint64_t bytes, bool direction, bool sync) {}
+static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
+			uint64_t start_time, uint64_t io_start_time,
+			bool direction, bool sync) {}
+
+static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
+			struct blkio_group *blkg, void *key, dev_t dev) {}
+static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
+{
+	return 0;
+}
+
+#endif /* CFQ_GROUP_IOSCHED */
+#endif