Commit 6a525600ffeb9e0d6cbbebda49eb89d6d3408c2b (1 parent: a9131a27e2)
Exists in smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
blk-throttle: remove pointless throtl_nr_queued() optimizations
throtl_nr_queued() is used in several places to avoid performing certain operations when the throtl_data is empty. This is usually useless, as those paths usually aren't traveled if there's no bio queued.

* throtl_schedule_delayed_work() skips scheduling the dispatch work item if @td doesn't have any bios queued; however, the only case it can be called when @td is empty is from tg_set_conf(), which isn't something we should be optimizing for.

* throtl_schedule_next_dispatch() takes a quick exit if @td is empty; however, right after that it triggers BUG if the service tree is empty. The two conditions are equivalent, and it can just test @st->count for the quick exit.

* blk_throtl_dispatch_work_fn() skips dispatch if @td is empty. This work function isn't usually invoked when @td is empty. The only possibility is from tg_set_conf(), and when that happens the normal dispatching path can handle an empty @td fine. No need to add a special skip path.

This patch removes the above three unnecessary optimizations, which leaves the throtl_log() call in blk_throtl_dispatch_work_fn() as the only user of throtl_nr_queued(). Remove throtl_nr_queued() and open code it in throtl_log().

I don't think we need td->nr_queued[] at all. Maybe we can remove it later.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
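For quick reference, a sketch of how the two scheduling helpers read after this patch, reconstructed from the hunks below. The signature of throtl_schedule_delayed_work() and the elided tail of throtl_schedule_next_dispatch() fall outside the diff and are assumed from surrounding context, so treat this as an illustration rather than a buildable excerpt:

static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay)
{
	struct delayed_work *dwork = &td->dispatch_work;

	/* unconditional: mod_delayed_work() re-arms an already-pending
	 * work item instead of queueing a duplicate */
	mod_delayed_work(kthrotld_workqueue, dwork, delay);
	throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
}

static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/* quick exit on an empty service tree; per the commit message
	 * this is equivalent to the old !total_nr_queued(td) test */
	if (!st->count)
		return;

	update_min_dispatch_time(st);

	/* dispatch now or re-arm the timer for min_disptime
	 * (tail unchanged by this patch and elided here) */
}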
Showing 1 changed file with 7 additions and 22 deletions
block/blk-throttle.c
@@ -166,11 +166,6 @@
 #define throtl_log(td, fmt, args...)	\
 	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
 
-static inline unsigned int total_nr_queued(struct throtl_data *td)
-{
-	return td->nr_queued[0] + td->nr_queued[1];
-}
-
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
  * system_wq once there are some groups on the alloc_list waiting for
@@ -402,25 +397,18 @@
 {
 	struct delayed_work *dwork = &td->dispatch_work;
 
-	if (total_nr_queued(td)) {
-		mod_delayed_work(kthrotld_workqueue, dwork, delay);
-		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
-			   delay, jiffies);
-	}
+	mod_delayed_work(kthrotld_workqueue, dwork, delay);
+	throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
 }
 
 static void throtl_schedule_next_dispatch(struct throtl_data *td)
 {
 	struct throtl_rb_root *st = &td->tg_service_tree;
 
-	/*
-	 * If there are more bios pending, schedule more work.
-	 */
-	if (!total_nr_queued(td))
+	/* any pending children left? */
+	if (!st->count)
 		return;
 
-	BUG_ON(!st->count);
-
 	update_min_dispatch_time(st);
 
 	if (time_before_eq(st->min_disptime, jiffies))
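Two facts make the hunk above safe. First, mod_delayed_work() modifies the timer of an already-pending delayed work item and queues it otherwise, so the now-unconditional call in throtl_schedule_delayed_work() cannot double-schedule the dispatch work. Second, per the equivalence noted in the commit message, a bio is queued in @td only while its group sits on the service tree, so !st->count implies the old !total_nr_queued(td) condition and the quick exit subsumes the removed BUG_ON(!st->count).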
@@ -844,14 +832,11 @@
 
 	spin_lock_irq(q->queue_lock);
 
-	if (!total_nr_queued(td))
-		goto out;
-
 	bio_list_init(&bio_list_on_stack);
 
 	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
-		   total_nr_queued(td), td->nr_queued[READ],
-		   td->nr_queued[WRITE]);
+		   td->nr_queued[READ] + td->nr_queued[WRITE],
+		   td->nr_queued[READ], td->nr_queued[WRITE]);
 
 	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
 
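A note on the open-coded sum in the throtl_log() call above: the removed helper returned td->nr_queued[0] + td->nr_queued[1], and since READ and WRITE are defined as 0 and 1 in the kernel, td->nr_queued[READ] + td->nr_queued[WRITE] computes the same total using the symbolic indices. (The helper is spelled total_nr_queued() in this tree, while the commit message refers to it as throtl_nr_queued().)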
@@ -859,7 +844,7 @@
 	throtl_log(td, "bios disp=%u", nr_disp);
 
 	throtl_schedule_next_dispatch(td);
-out:
+
 	spin_unlock_irq(q->queue_lock);
 
 	/*