Commit 926f75f6a9ef503d45dced061e304d0324beeba1

Authored by Baolin Wang
Committed by Jens Axboe
1 parent 2474787a75

blk-iocost: Factor out the base vrate change into a separate function

Factor out the base vrate change code into a separate function
to simplify the ioc_timer_fn().

No functional change.

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
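
Note (not part of the patch): the one subtle piece of this otherwise mechanical move is the inverted guard. ioc_timer_fn() used to adjust only when busy_level > 0 || (busy_level < 0 && !nr_lagging), while the new helper instead early-returns (emitting only the tracepoint) on the complement of that condition. A throwaway standalone check, plain C rather than kernel code, that the two predicates really are complements for every relevant sign combination:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Compare the old ioc_timer_fn() guard with the negation of the
	 * new early-return condition for every busy_level sign and
	 * nr_lagging value. */
	for (int busy_level = -2; busy_level <= 2; busy_level++) {
		for (int nr_lagging = 0; nr_lagging <= 1; nr_lagging++) {
			int old_adjusts = busy_level > 0 ||
					  (busy_level < 0 && !nr_lagging);
			int new_returns_early = !busy_level ||
						(busy_level < 0 && nr_lagging);

			assert(old_adjusts == !new_returns_early);
		}
	}
	printf("old guard and new early return are complements\n");
	return 0;
}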

Showing 1 changed file with 54 additions and 45 deletions

... ... @@ -971,6 +971,58 @@
971 971 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
972 972 }
973 973  
  974 +static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
  975 + int nr_lagging, int nr_shortages,
  976 + int prev_busy_level, u32 *missed_ppm)
  977 +{
  978 + u64 vrate = ioc->vtime_base_rate;
  979 + u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
  980 +
  981 + if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
  982 + if (ioc->busy_level != prev_busy_level || nr_lagging)
  983 + trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
  984 + missed_ppm, rq_wait_pct,
  985 + nr_lagging, nr_shortages);
  986 +
  987 + return;
  988 + }
  989 +
  990 + /* rq_wait signal is always reliable, ignore user vrate_min */
  991 + if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
  992 + vrate_min = VRATE_MIN;
  993 +
  994 + /*
  995 + * If vrate is out of bounds, apply clamp gradually as the
  996 + * bounds can change abruptly. Otherwise, apply busy_level
  997 + * based adjustment.
  998 + */
  999 + if (vrate < vrate_min) {
  1000 + vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
  1001 + vrate = min(vrate, vrate_min);
  1002 + } else if (vrate > vrate_max) {
  1003 + vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
  1004 + vrate = max(vrate, vrate_max);
  1005 + } else {
  1006 + int idx = min_t(int, abs(ioc->busy_level),
  1007 + ARRAY_SIZE(vrate_adj_pct) - 1);
  1008 + u32 adj_pct = vrate_adj_pct[idx];
  1009 +
  1010 + if (ioc->busy_level > 0)
  1011 + adj_pct = 100 - adj_pct;
  1012 + else
  1013 + adj_pct = 100 + adj_pct;
  1014 +
  1015 + vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
  1016 + vrate_min, vrate_max);
  1017 + }
  1018 +
  1019 + trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
  1020 + nr_lagging, nr_shortages);
  1021 +
  1022 + ioc->vtime_base_rate = vrate;
  1023 + ioc_refresh_margins(ioc);
  1024 +}
  1025 +
974 1026 /* take a snapshot of the current [v]time and vrate */
975 1027 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
976 1028 {
... ... @@ -2323,51 +2375,8 @@
2323 2375  
2324 2376 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2325 2377  
2326   - if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
2327   - u64 vrate = ioc->vtime_base_rate;
2328   - u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
2329   -
2330   - /* rq_wait signal is always reliable, ignore user vrate_min */
2331   - if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
2332   - vrate_min = VRATE_MIN;
2333   -
2334   - /*
2335   - * If vrate is out of bounds, apply clamp gradually as the
2336   - * bounds can change abruptly. Otherwise, apply busy_level
2337   - * based adjustment.
2338   - */
2339   - if (vrate < vrate_min) {
2340   - vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
2341   - 100);
2342   - vrate = min(vrate, vrate_min);
2343   - } else if (vrate > vrate_max) {
2344   - vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
2345   - 100);
2346   - vrate = max(vrate, vrate_max);
2347   - } else {
2348   - int idx = min_t(int, abs(ioc->busy_level),
2349   - ARRAY_SIZE(vrate_adj_pct) - 1);
2350   - u32 adj_pct = vrate_adj_pct[idx];
2351   -
2352   - if (ioc->busy_level > 0)
2353   - adj_pct = 100 - adj_pct;
2354   - else
2355   - adj_pct = 100 + adj_pct;
2356   -
2357   - vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
2358   - vrate_min, vrate_max);
2359   - }
2360   -
2361   - trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
2362   - nr_lagging, nr_shortages);
2363   -
2364   - ioc->vtime_base_rate = vrate;
2365   - ioc_refresh_margins(ioc);
2366   - } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
2367   - trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
2368   - missed_ppm, rq_wait_pct, nr_lagging,
2369   - nr_shortages);
2370   - }
  2378 + ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
  2379 + prev_busy_level, missed_ppm);
2371 2380  
2372 2381 ioc_refresh_params(ioc, false);
2373 2382
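
For context on the math being moved (a reading aid, not part of the patch): when vrate is already inside [vrate_min, vrate_max], the helper scales it by a percentage looked up from vrate_adj_pct[] by |busy_level|, downward when the device is busy (busy_level > 0) and upward when it is idle, then clamps the result to the bounds. A rough standalone sketch of that branch, with a made-up adjustment table (the real vrate_adj_pct[] values are not shown in this diff) and plain 64-bit arithmetic standing in for div64_u64()/DIV64_U64_ROUND_UP():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the kernel's vrate_adj_pct[] table. */
static const uint32_t vrate_adj_pct[] = { 0, 1, 2, 4, 8, 16 };
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Mirrors the in-bounds branch of ioc_adjust_base_vrate(): pick an
 * adjustment percentage from |busy_level|, shrink vrate when busy
 * (busy_level > 0), grow it when idle, and clamp to the bounds. */
static uint64_t adjust_vrate(uint64_t vrate, int busy_level,
			     uint64_t vrate_min, uint64_t vrate_max)
{
	int idx = abs(busy_level);
	uint32_t adj_pct;

	if (idx > (int)ARRAY_SIZE(vrate_adj_pct) - 1)
		idx = ARRAY_SIZE(vrate_adj_pct) - 1;
	adj_pct = vrate_adj_pct[idx];

	if (busy_level > 0)
		adj_pct = 100 - adj_pct;
	else
		adj_pct = 100 + adj_pct;

	/* round-up division, like DIV64_U64_ROUND_UP(vrate * adj_pct, 100) */
	vrate = (vrate * adj_pct + 99) / 100;
	if (vrate < vrate_min)
		vrate = vrate_min;
	if (vrate > vrate_max)
		vrate = vrate_max;
	return vrate;
}

int main(void)
{
	/* With the table above, busy_level = 3 shrinks vrate by 4%,
	 * busy_level = -3 grows it by 4%. */
	printf("%llu\n", (unsigned long long)adjust_vrate(1000000, 3, 100000, 10000000));
	printf("%llu\n", (unsigned long long)adjust_vrate(1000000, -3, 100000, 10000000));
	return 0;
}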