Commit 73f0d49a9637a7ec3448a62a0042e35b14ba18a3

Authored by Tejun Heo
1 parent 49a2f1e3f2

blk-throttle: move bio_lists[] and friends to throtl_service_queue

throtl_service_queues will eventually form a tree which is anchored at
throtl_data->service_queue and queued bios will climb the tree to the
top service_queue to be executed.

This patch moves bio_lists[] and nr_queued[] from throtl_grp to its
service_queue to prepare for that.  As currently only the
throtl_data->service_queue is in use, this patch just ends up moving
throtl_grp->bio_lists[] and ->nr_queued[] to
throtl_grp->service_queue.bio_lists[] and ->nr_queued[] without making
any functional differences.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>

Showing 1 changed file with 39 additions and 24 deletions

block/blk-throttle.c
@@ -27,6 +27,17 @@
 static struct workqueue_struct *kthrotld_workqueue;
 
 struct throtl_service_queue {
+	/*
+	 * Bios queued directly to this service_queue or dispatched from
+	 * children throtl_grp's.
+	 */
+	struct bio_list		bio_lists[2];	/* queued bios [READ/WRITE] */
+	unsigned int		nr_queued[2];	/* number of queued bios */
+
+	/*
+	 * RB tree of active children throtl_grp's, which are sorted by
+	 * their ->disptime.
+	 */
 	struct rb_root		pending_tree;	/* RB tree of active tgs */
 	struct rb_node		*first_pending;	/* first node in the tree */
 	unsigned int		nr_pending;	/* # queued in the tree */
@@ -69,12 +80,6 @@
 
 	unsigned int flags;
 
-	/* Two lists for READ and WRITE */
-	struct bio_list bio_lists[2];
-
-	/* Number of queued bios on READ and WRITE lists */
-	unsigned int nr_queued[2];
-
 	/* bytes per second rate limits */
 	uint64_t bps[2];
 
@@ -193,6 +198,8 @@
 /* init a service_queue, assumes the caller zeroed it */
 static void throtl_service_queue_init(struct throtl_service_queue *sq)
 {
+	bio_list_init(&sq->bio_lists[0]);
+	bio_list_init(&sq->bio_lists[1]);
 	sq->pending_tree = RB_ROOT;
 }
 
@@ -204,8 +211,6 @@
 	throtl_service_queue_init(&tg->service_queue);
 	RB_CLEAR_NODE(&tg->rb_node);
 	tg->td = blkg->q->td;
-	bio_list_init(&tg->bio_lists[0]);
-	bio_list_init(&tg->bio_lists[1]);
 
 	tg->bps[READ] = -1;
 	tg->bps[WRITE] = -1;
@@ -624,7 +629,8 @@
 	 * this function with a different bio if there are other bios
 	 * queued.
 	 */
-	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));
+	BUG_ON(tg->service_queue.nr_queued[rw] &&
+	       bio != bio_list_peek(&tg->service_queue.bio_lists[rw]));
 
 	/* If tg->bps = -1, then BW is unlimited */
 	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
@@ -703,12 +709,13 @@
 static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg,
 			      struct throtl_service_queue *parent_sq)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	bool rw = bio_data_dir(bio);
 
-	bio_list_add(&tg->bio_lists[rw], bio);
+	bio_list_add(&sq->bio_lists[rw], bio);
 	/* Take a bio reference on tg */
 	blkg_get(tg_to_blkg(tg));
-	tg->nr_queued[rw]++;
+	sq->nr_queued[rw]++;
 	tg->td->nr_queued[rw]++;
 	throtl_enqueue_tg(tg, parent_sq);
 }
@@ -716,13 +723,14 @@
 static void tg_update_disptime(struct throtl_grp *tg,
 			       struct throtl_service_queue *parent_sq)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
 	struct bio *bio;
 
-	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
+	if ((bio = bio_list_peek(&sq->bio_lists[READ])))
 		tg_may_dispatch(tg, bio, &read_wait);
 
-	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+	if ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
 		tg_may_dispatch(tg, bio, &write_wait);
 
 	min_wait = min(read_wait, write_wait);
@@ -737,10 +745,11 @@
 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
 				struct bio_list *bl)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	struct bio *bio;
 
-	bio = bio_list_pop(&tg->bio_lists[rw]);
-	tg->nr_queued[rw]--;
+	bio = bio_list_pop(&sq->bio_lists[rw]);
+	sq->nr_queued[rw]--;
 	/* Drop bio reference on blkg */
 	blkg_put(tg_to_blkg(tg));
 
@@ -756,6 +765,7 @@
 
 static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
 {
+	struct throtl_service_queue *sq = &tg->service_queue;
 	unsigned int nr_reads = 0, nr_writes = 0;
 	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
 	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
@@ -763,7 +773,7 @@
 
 	/* Try to dispatch 75% READS and 25% WRITES */
 
-	while ((bio = bio_list_peek(&tg->bio_lists[READ])) &&
+	while ((bio = bio_list_peek(&sq->bio_lists[READ])) &&
 	       tg_may_dispatch(tg, bio, NULL)) {
 
 		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
@@ -773,7 +783,7 @@
 			break;
 	}
 
-	while ((bio = bio_list_peek(&tg->bio_lists[WRITE])) &&
+	while ((bio = bio_list_peek(&sq->bio_lists[WRITE])) &&
 	       tg_may_dispatch(tg, bio, NULL)) {
 
 		tg_dispatch_one_bio(tg, bio_data_dir(bio), bl);
@@ -790,10 +800,10 @@
 				  struct bio_list *bl)
 {
 	unsigned int nr_disp = 0;
-	struct throtl_grp *tg;
 
 	while (1) {
-		tg = throtl_rb_first(parent_sq);
+		struct throtl_grp *tg = throtl_rb_first(parent_sq);
+		struct throtl_service_queue *sq = &tg->service_queue;
 
 		if (!tg)
 			break;
@@ -805,7 +815,7 @@
 
 		nr_disp += throtl_dispatch_tg(tg, bl);
 
-		if (tg->nr_queued[0] || tg->nr_queued[1])
+		if (sq->nr_queued[0] || sq->nr_queued[1])
 			tg_update_disptime(tg, parent_sq);
 
 		if (nr_disp >= throtl_quantum)
@@ -1043,6 +1053,7 @@
 {
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg;
+	struct throtl_service_queue *sq;
 	bool rw = bio_data_dir(bio), update_disptime = true;
 	struct blkcg *blkcg;
 	bool throttled = false;
@@ -1077,7 +1088,9 @@
 	if (unlikely(!tg))
 		goto out_unlock;
 
-	if (tg->nr_queued[rw]) {
+	sq = &tg->service_queue;
+
+	if (sq->nr_queued[rw]) {
 		/*
 		 * There is already another bio queued in same dir. No
 		 * need to update dispatch time.
@@ -1112,7 +1125,7 @@
 			rw == READ ? 'R' : 'W',
 			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
 			tg->io_disp[rw], tg->iops[rw],
-			tg->nr_queued[READ], tg->nr_queued[WRITE]);
+			sq->nr_queued[READ], sq->nr_queued[WRITE]);
 
 	bio_associate_current(bio);
 	throtl_add_bio_tg(bio, tg, &q->td->service_queue);
@@ -1151,11 +1164,13 @@
 	bio_list_init(&bl);
 
 	while ((tg = throtl_rb_first(parent_sq))) {
+		struct throtl_service_queue *sq = &tg->service_queue;
+
 		throtl_dequeue_tg(tg, parent_sq);
 
-		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
+		while ((bio = bio_list_peek(&sq->bio_lists[READ])))
 			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
-		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
+		while ((bio = bio_list_peek(&sq->bio_lists[WRITE])))
 			tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);
 	}
 	spin_unlock_irq(q->queue_lock);