Commit 0049af73bb4b74d1407db59caefc5fe057ee434a

Authored by Tejun Heo
1 parent e2d57e6019

blk-throttle: reorganize throtl_service_queue passed around as argument

throtl_service_queue will be the building block of hierarchy support
and will form a tree.  This patch updates its usages as arguments to
reduce confusion.

* When a service queue is used as the parent role - the host of the
  rbtree - use @parent_sq instead of @sq.

* For functions taking both @tg and @parent_sq, reorder them so that
  the order is (@tg, @parent_sq) not the other way around.  This makes
  the code follow the usual convention of specifying the primary
  target of the operation as the first argument.

This patch doesn't introduce any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>

Showing 1 changed file with 51 additions and 49 deletions Side-by-side Diff

block/blk-throttle.c
... ... @@ -284,17 +284,18 @@
284 284 return tg;
285 285 }
286 286  
287   -static struct throtl_grp *throtl_rb_first(struct throtl_service_queue *sq)
  287 +static struct throtl_grp *
  288 +throtl_rb_first(struct throtl_service_queue *parent_sq)
288 289 {
289 290 /* Service tree is empty */
290   - if (!sq->nr_pending)
  291 + if (!parent_sq->nr_pending)
291 292 return NULL;
292 293  
293   - if (!sq->first_pending)
294   - sq->first_pending = rb_first(&sq->pending_tree);
  294 + if (!parent_sq->first_pending)
  295 + parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
295 296  
296   - if (sq->first_pending)
297   - return rb_entry_tg(sq->first_pending);
  297 + if (parent_sq->first_pending)
  298 + return rb_entry_tg(parent_sq->first_pending);
298 299  
299 300 return NULL;
300 301 }
301 302  
302 303  
303 304  
304 305  
305 306  
306 307  
... ... @@ -305,29 +306,30 @@
305 306 RB_CLEAR_NODE(n);
306 307 }
307 308  
308   -static void throtl_rb_erase(struct rb_node *n, struct throtl_service_queue *sq)
  309 +static void throtl_rb_erase(struct rb_node *n,
  310 + struct throtl_service_queue *parent_sq)
309 311 {
310   - if (sq->first_pending == n)
311   - sq->first_pending = NULL;
312   - rb_erase_init(n, &sq->pending_tree);
313   - --sq->nr_pending;
  312 + if (parent_sq->first_pending == n)
  313 + parent_sq->first_pending = NULL;
  314 + rb_erase_init(n, &parent_sq->pending_tree);
  315 + --parent_sq->nr_pending;
314 316 }
315 317  
316   -static void update_min_dispatch_time(struct throtl_service_queue *sq)
  318 +static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
317 319 {
318 320 struct throtl_grp *tg;
319 321  
320   - tg = throtl_rb_first(sq);
  322 + tg = throtl_rb_first(parent_sq);
321 323 if (!tg)
322 324 return;
323 325  
324   - sq->first_pending_disptime = tg->disptime;
  326 + parent_sq->first_pending_disptime = tg->disptime;
325 327 }
326 328  
327   -static void tg_service_queue_add(struct throtl_service_queue *sq,
328   - struct throtl_grp *tg)
  329 +static void tg_service_queue_add(struct throtl_grp *tg,
  330 + struct throtl_service_queue *parent_sq)
329 331 {
330   - struct rb_node **node = &sq->pending_tree.rb_node;
  332 + struct rb_node **node = &parent_sq->pending_tree.rb_node;
331 333 struct rb_node *parent = NULL;
332 334 struct throtl_grp *__tg;
333 335 unsigned long key = tg->disptime;
334 336  
335 337  
336 338  
337 339  
338 340  
339 341  
340 342  
341 343  
342 344  
343 345  
... ... @@ -346,39 +348,39 @@
346 348 }
347 349  
348 350 if (left)
349   - sq->first_pending = &tg->rb_node;
  351 + parent_sq->first_pending = &tg->rb_node;
350 352  
351 353 rb_link_node(&tg->rb_node, parent, node);
352   - rb_insert_color(&tg->rb_node, &sq->pending_tree);
  354 + rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
353 355 }
354 356  
355   -static void __throtl_enqueue_tg(struct throtl_service_queue *sq,
356   - struct throtl_grp *tg)
  357 +static void __throtl_enqueue_tg(struct throtl_grp *tg,
  358 + struct throtl_service_queue *parent_sq)
357 359 {
358   - tg_service_queue_add(sq, tg);
  360 + tg_service_queue_add(tg, parent_sq);
359 361 tg->flags |= THROTL_TG_PENDING;
360   - sq->nr_pending++;
  362 + parent_sq->nr_pending++;
361 363 }
362 364  
363   -static void throtl_enqueue_tg(struct throtl_service_queue *sq,
364   - struct throtl_grp *tg)
  365 +static void throtl_enqueue_tg(struct throtl_grp *tg,
  366 + struct throtl_service_queue *parent_sq)
365 367 {
366 368 if (!(tg->flags & THROTL_TG_PENDING))
367   - __throtl_enqueue_tg(sq, tg);
  369 + __throtl_enqueue_tg(tg, parent_sq);
368 370 }
369 371  
370   -static void __throtl_dequeue_tg(struct throtl_service_queue *sq,
371   - struct throtl_grp *tg)
  372 +static void __throtl_dequeue_tg(struct throtl_grp *tg,
  373 + struct throtl_service_queue *parent_sq)
372 374 {
373   - throtl_rb_erase(&tg->rb_node, sq);
  375 + throtl_rb_erase(&tg->rb_node, parent_sq);
374 376 tg->flags &= ~THROTL_TG_PENDING;
375 377 }
376 378  
377   -static void throtl_dequeue_tg(struct throtl_service_queue *sq,
378   - struct throtl_grp *tg)
  379 +static void throtl_dequeue_tg(struct throtl_grp *tg,
  380 + struct throtl_service_queue *parent_sq)
379 381 {
380 382 if (tg->flags & THROTL_TG_PENDING)
381   - __throtl_dequeue_tg(sq, tg);
  383 + __throtl_dequeue_tg(tg, parent_sq);
382 384 }
383 385  
384 386 /* Call with queue lock held */
... ... @@ -691,8 +693,8 @@
691 693 throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
692 694 }
693 695  
694   -static void throtl_add_bio_tg(struct throtl_service_queue *sq,
695   - struct throtl_grp *tg, struct bio *bio)
  696 +static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg,
  697 + struct throtl_service_queue *parent_sq)
696 698 {
697 699 bool rw = bio_data_dir(bio);
698 700  
699 701  
... ... @@ -701,11 +703,11 @@
701 703 blkg_get(tg_to_blkg(tg));
702 704 tg->nr_queued[rw]++;
703 705 tg->td->nr_queued[rw]++;
704   - throtl_enqueue_tg(sq, tg);
  706 + throtl_enqueue_tg(tg, parent_sq);
705 707 }
706 708  
707   -static void tg_update_disptime(struct throtl_service_queue *sq,
708   - struct throtl_grp *tg)
  709 +static void tg_update_disptime(struct throtl_grp *tg,
  710 + struct throtl_service_queue *parent_sq)
709 711 {
710 712 unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
711 713 struct bio *bio;
712 714  
... ... @@ -720,9 +722,9 @@
720 722 disptime = jiffies + min_wait;
721 723  
722 724 /* Update dispatch time */
723   - throtl_dequeue_tg(sq, tg);
  725 + throtl_dequeue_tg(tg, parent_sq);
724 726 tg->disptime = disptime;
725   - throtl_enqueue_tg(sq, tg);
  727 + throtl_enqueue_tg(tg, parent_sq);
726 728 }
727 729  
728 730 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
729 731  
... ... @@ -777,14 +779,14 @@
777 779 return nr_reads + nr_writes;
778 780 }
779 781  
780   -static int throtl_select_dispatch(struct throtl_service_queue *sq,
  782 +static int throtl_select_dispatch(struct throtl_service_queue *parent_sq,
781 783 struct bio_list *bl)
782 784 {
783 785 unsigned int nr_disp = 0;
784 786 struct throtl_grp *tg;
785 787  
786 788 while (1) {
787   - tg = throtl_rb_first(sq);
  789 + tg = throtl_rb_first(parent_sq);
788 790  
789 791 if (!tg)
790 792 break;
791 793  
... ... @@ -792,12 +794,12 @@
792 794 if (time_before(jiffies, tg->disptime))
793 795 break;
794 796  
795   - throtl_dequeue_tg(sq, tg);
  797 + throtl_dequeue_tg(tg, parent_sq);
796 798  
797 799 nr_disp += throtl_dispatch_tg(tg, bl);
798 800  
799 801 if (tg->nr_queued[0] || tg->nr_queued[1])
800   - tg_update_disptime(sq, tg);
  802 + tg_update_disptime(tg, parent_sq);
801 803  
802 804 if (nr_disp >= throtl_quantum)
803 805 break;
... ... @@ -952,7 +954,7 @@
952 954 throtl_start_new_slice(tg, 1);
953 955  
954 956 if (tg->flags & THROTL_TG_PENDING) {
955   - tg_update_disptime(&td->service_queue, tg);
  957 + tg_update_disptime(tg, &td->service_queue);
956 958 throtl_schedule_next_dispatch(td);
957 959 }
958 960  
959 961  
... ... @@ -1106,11 +1108,11 @@
1106 1108 tg->nr_queued[READ], tg->nr_queued[WRITE]);
1107 1109  
1108 1110 bio_associate_current(bio);
1109   - throtl_add_bio_tg(&q->td->service_queue, tg, bio);
  1111 + throtl_add_bio_tg(bio, tg, &q->td->service_queue);
1110 1112 throttled = true;
1111 1113  
1112 1114 if (update_disptime) {
1113   - tg_update_disptime(&td->service_queue, tg);
  1115 + tg_update_disptime(tg, &td->service_queue);
1114 1116 throtl_schedule_next_dispatch(td);
1115 1117 }
1116 1118  
... ... @@ -1132,7 +1134,7 @@
1132 1134 __releases(q->queue_lock) __acquires(q->queue_lock)
1133 1135 {
1134 1136 struct throtl_data *td = q->td;
1135   - struct throtl_service_queue *sq = &td->service_queue;
  1137 + struct throtl_service_queue *parent_sq = &td->service_queue;
1136 1138 struct throtl_grp *tg;
1137 1139 struct bio_list bl;
1138 1140 struct bio *bio;
... ... @@ -1141,8 +1143,8 @@
1141 1143  
1142 1144 bio_list_init(&bl);
1143 1145  
1144   - while ((tg = throtl_rb_first(sq))) {
1145   - throtl_dequeue_tg(sq, tg);
  1146 + while ((tg = throtl_rb_first(parent_sq))) {
  1147 + throtl_dequeue_tg(tg, parent_sq);
1146 1148  
1147 1149 while ((bio = bio_list_peek(&tg->bio_lists[READ])))
1148 1150 tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);