Commit af75cd3c67845ebe31d2df9a780889a5ebecef11

Authored by Vivek Goyal
Committed by Jens Axboe
1 parent f0bdc8cdd9

blk-throttle: Make no throttling rule group processing lockless

Currently we take a queue lock on each bio to check if there are any
throttling rules associated with the group and also update the stats.
Now we access the group under RCU and update the stats without taking
the queue lock. The queue lock is taken only if there are throttling rules
associated with the group.

So in the common case of the root group, when there are no rules, we save
the unnecessary pounding of the request queue lock.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

Showing 1 changed file with 49 additions and 4 deletions Side-by-side Diff

block/blk-throttle.c
... ... @@ -229,6 +229,22 @@
229 229 }
230 230 }
231 231  
  232 +/*
  233 + * Should be called without queue lock held. Here queue lock will be
  234 + * taken rarely. It will be taken only once during the lifetime of a group,
  235 + * if need be.
  236 + */
  237 +static void
  238 +throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
  239 +{
  240 + if (!tg || tg->blkg.dev)
  241 + return;
  242 +
  243 + spin_lock_irq(td->queue->queue_lock);
  244 + __throtl_tg_fill_dev_details(td, tg);
  245 + spin_unlock_irq(td->queue->queue_lock);
  246 +}
  247 +
232 248 static void throtl_init_add_tg_lists(struct throtl_data *td,
233 249 struct throtl_grp *tg, struct blkio_cgroup *blkcg)
234 250 {
... ... @@ -666,6 +682,12 @@
666 682 return 0;
667 683 }
668 684  
  685 +static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
  686 + if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
  687 + return 1;
  688 + return 0;
  689 +}
  690 +
669 691 /*
670 692 * Returns whether one can dispatch a bio or not. Also returns approx number
671 693 * of jiffies to wait before this bio is with-in IO rate and can be dispatched
... ... @@ -730,10 +752,6 @@
730 752 tg->bytes_disp[rw] += bio->bi_size;
731 753 tg->io_disp[rw]++;
732 754  
733   - /*
734   - * TODO: This will take blkg->stats_lock. Figure out a way
735   - * to avoid this cost.
736   - */
737 755 blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
738 756 }
739 757  
740 758  
... ... @@ -1111,11 +1129,38 @@
1111 1129 struct throtl_grp *tg;
1112 1130 struct bio *bio = *biop;
1113 1131 bool rw = bio_data_dir(bio), update_disptime = true;
  1132 + struct blkio_cgroup *blkcg;
1114 1133  
1115 1134 if (bio->bi_rw & REQ_THROTTLED) {
1116 1135 bio->bi_rw &= ~REQ_THROTTLED;
1117 1136 return 0;
1118 1137 }
  1138 +
  1139 + /*
  1140 + * A throtl_grp pointer retrieved under rcu can be used to access
  1141 + * basic fields like stats and io rates. If a group has no rules,
  1142 + * just update the dispatch stats in lockless manner and return.
  1143 + */
  1144 +
  1145 + rcu_read_lock();
  1146 + blkcg = task_blkio_cgroup(current);
  1147 + tg = throtl_find_tg(td, blkcg);
  1148 + if (tg) {
  1149 + throtl_tg_fill_dev_details(td, tg);
  1150 +
  1151 + if (tg_no_rule_group(tg, rw)) {
  1152 + blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
  1153 + rw, bio->bi_rw & REQ_SYNC);
  1154 + rcu_read_unlock();
  1155 + return 0;
  1156 + }
  1157 + }
  1158 + rcu_read_unlock();
  1159 +
  1160 + /*
  1161 + * Either group has not been allocated yet or it is not an unlimited
  1162 + * IO group
  1163 + */
1119 1164  
1120 1165 spin_lock_irq(q->queue_lock);
1121 1166 tg = throtl_get_tg(td);