Commit 2d72376b3af1e7d4d4515ebfd0f4383f2e92c343

Authored by Ingo Molnar
Parent: 2b1e315dd2

sched: clean up schedstats, cnt -> count

rename all 'cnt' fields and variables to the less yucky 'count' name.

yuckage noticed by Andrew Morton.

no change in code, other than that the /proc/sched_debug bkl_count string
got a bit larger:

   text    data     bss     dec     hex filename
  38236    3506      24   41766    a326 sched.o.before
  38240    3506      24   41770    a32a sched.o.after

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>

Showing 6 changed files with 36 additions and 36 deletions

fs/proc/base.c
@@ -304,7 +304,7 @@
 	return sprintf(buffer, "%llu %llu %lu\n",
 			task->sched_info.cpu_time,
 			task->sched_info.run_delay,
-			task->sched_info.pcnt);
+			task->sched_info.pcount);
 }
 #endif
 
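This hunk is the body of the /proc/<pid>/schedstat handler, so the rename is
invisible to userspace: the file still prints the same three numbers in the
same order. As a minimal consumer sketch (not part of this commit; the units
of the first two fields depend on the kernel's clock granularity):

	/* Read the three fields printed by the handler above. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long cpu_time, run_delay;
		unsigned long pcount;
		FILE *f = fopen("/proc/self/schedstat", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%llu %llu %lu",
			   &cpu_time, &run_delay, &pcount) == 3)
			printf("cpu_time=%llu run_delay=%llu pcount=%lu\n",
			       cpu_time, run_delay, pcount);
		fclose(f);
		return 0;
	}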
include/linux/sched.h
@@ -614,7 +614,7 @@
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
 	/* cumulative counters */
-	unsigned long pcnt;		/* # of times run on this cpu */
+	unsigned long pcount;		/* # of times run on this cpu */
 	unsigned long long cpu_time,	/* time spent on the cpu */
 			   run_delay;	/* time spent waiting on a runqueue */
 
@@ -623,7 +623,7 @@
 			   last_queued;	/* when we were last queued to run */
 #ifdef CONFIG_SCHEDSTATS
 	/* BKL stats */
-	unsigned long bkl_cnt;
+	unsigned long bkl_count;
 #endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@ -759,7 +759,7 @@
 
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_count[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
 	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
@@ -769,17 +769,17 @@
 	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
 	/* Active load balancing */
-	unsigned long alb_cnt;
+	unsigned long alb_count;
 	unsigned long alb_failed;
 	unsigned long alb_pushed;
 
 	/* SD_BALANCE_EXEC stats */
-	unsigned long sbe_cnt;
+	unsigned long sbe_count;
 	unsigned long sbe_balanced;
 	unsigned long sbe_pushed;
 
 	/* SD_BALANCE_FORK stats */
-	unsigned long sbf_cnt;
+	unsigned long sbf_count;
 	unsigned long sbf_balanced;
 	unsigned long sbf_pushed;
 
kernel/delayacct.c
@@ -119,7 +119,7 @@
 	 * No locking available for sched_info (and too expensive to add one)
 	 * Mitigate by taking snapshot of values
 	 */
-	t1 = tsk->sched_info.pcnt;
+	t1 = tsk->sched_info.pcount;
 	t2 = tsk->sched_info.run_delay;
 	t3 = tsk->sched_info.cpu_time;
 
kernel/sched.c
@@ -349,19 +349,19 @@
 	unsigned long yld_exp_empty;
 	unsigned long yld_act_empty;
 	unsigned long yld_both_empty;
-	unsigned long yld_cnt;
+	unsigned long yld_count;
 
 	/* schedule() stats */
 	unsigned long sched_switch;
-	unsigned long sched_cnt;
+	unsigned long sched_count;
 	unsigned long sched_goidle;
 
 	/* try_to_wake_up() stats */
-	unsigned long ttwu_cnt;
+	unsigned long ttwu_count;
 	unsigned long ttwu_local;
 
 	/* BKL stats */
-	unsigned long bkl_cnt;
+	unsigned long bkl_count;
 #endif
 	struct lock_class_key rq_lock_key;
 };
@@ -1481,7 +1481,7 @@
 
 	new_cpu = cpu;
 
-	schedstat_inc(rq, ttwu_cnt);
+	schedstat_inc(rq, ttwu_count);
 	if (cpu == this_cpu) {
 		schedstat_inc(rq, ttwu_local);
 		goto out_set_cpu;
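The schedstat_inc() helper used in these hunks is not touched by this patch.
For context, at this point in the tree it is, approximately, a plain field
increment in kernel/sched_stats.h that compiles to nothing when
CONFIG_SCHEDSTATS is off (paraphrased for illustration, not part of this diff):

	/* approximate definition, shown for context only */
	#ifdef CONFIG_SCHEDSTATS
	# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
	# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
	#else
	# define schedstat_inc(rq, field)	do { } while (0)
	# define schedstat_add(rq, field, amt)	do { } while (0)
	#endif

This is why the rename is purely cosmetic: schedstat_inc(rq, ttwu_count)
expands to rq->ttwu_count++, so only the field name changes.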
@@ -2637,7 +2637,7 @@
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[idle]);
+	schedstat_inc(sd, lb_count[idle]);
 
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
@@ -2790,7 +2790,7 @@
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
+	schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
 	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
 				   &sd_idle, &cpus, NULL);
@@ -2924,7 +2924,7 @@
 	}
 
 	if (likely(sd)) {
-		schedstat_inc(sd, alb_cnt);
+		schedstat_inc(sd, alb_count);
 
 		if (move_one_task(target_rq, target_cpu, busiest_rq,
 				  sd, CPU_IDLE))
@@ -3414,11 +3414,11 @@
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
-	schedstat_inc(this_rq(), sched_cnt);
+	schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
 	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), bkl_cnt);
-		schedstat_inc(prev, sched_info.bkl_cnt);
+		schedstat_inc(this_rq(), bkl_count);
+		schedstat_inc(prev, sched_info.bkl_count);
 	}
 #endif
 }
@@ -4558,7 +4558,7 @@
 {
 	struct rq *rq = this_rq_lock();
 
-	schedstat_inc(rq, yld_cnt);
+	schedstat_inc(rq, yld_count);
 	current->sched_class->yield_task(rq);
 
 	/*
kernel/sched_debug.c
@@ -137,8 +137,8 @@
 	SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SCHEDSTATS
-	SEQ_printf(m, " .%-30s: %ld\n", "bkl_cnt",
-			rq->bkl_cnt);
+	SEQ_printf(m, " .%-30s: %ld\n", "bkl_count",
+			rq->bkl_count);
 #endif
 	SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
 			cfs_rq->nr_spread_over);
@@ -342,7 +342,7 @@
 	PN(se.exec_max);
 	PN(se.slice_max);
 	PN(se.wait_max);
-	P(sched_info.bkl_cnt);
+	P(sched_info.bkl_count);
 #endif
 	SEQ_printf(m, "%-25s:%20Ld\n",
 		   "nr_switches", (long long)(p->nvcsw + p->nivcsw));
@@ -370,7 +370,7 @@
 	p->se.exec_max = 0;
 	p->se.slice_max = 0;
 	p->se.wait_max = 0;
-	p->sched_info.bkl_cnt = 0;
+	p->sched_info.bkl_count = 0;
 #endif
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
kernel/sched_stats.h
@@ -16,18 +16,18 @@
 	struct rq *rq = cpu_rq(cpu);
 #ifdef CONFIG_SMP
 	struct sched_domain *sd;
-	int dcnt = 0;
+	int dcount = 0;
 #endif
 
 	/* runqueue-specific stats */
 	seq_printf(seq,
 		"cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
 		cpu, rq->yld_both_empty,
-		rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
-		rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
-		rq->ttwu_cnt, rq->ttwu_local,
+		rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
+		rq->sched_switch, rq->sched_count, rq->sched_goidle,
+		rq->ttwu_count, rq->ttwu_local,
 		rq->rq_sched_info.cpu_time,
-		rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
+		rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
 
 	seq_printf(seq, "\n");
 
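Reading the format string off the seq_printf() above, the per-CPU line of
/proc/schedstat is laid out as follows after the rename (a schema derived
from the hunk, not literal output):

	cpu<N> yld_both_empty yld_act_empty yld_exp_empty yld_count
	       sched_switch sched_count sched_goidle
	       ttwu_count ttwu_local
	       rq_sched_info.cpu_time rq_sched_info.run_delay rq_sched_info.pcount

(in the file itself all twelve values appear on a single line.)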
@@ -39,12 +39,12 @@
 		char mask_str[NR_CPUS];
 
 		cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
-		seq_printf(seq, "domain%d %s", dcnt++, mask_str);
+		seq_printf(seq, "domain%d %s", dcount++, mask_str);
 		for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 				itype++) {
 			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
 					"%lu",
-			    sd->lb_cnt[itype],
+			    sd->lb_count[itype],
 			    sd->lb_balanced[itype],
 			    sd->lb_failed[itype],
 			    sd->lb_imbalance[itype],
@@ -55,9 +55,9 @@
 		}
 		seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
 				" %lu %lu %lu\n",
-		    sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
-		    sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
-		    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
+		    sd->alb_count, sd->alb_failed, sd->alb_pushed,
+		    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
+		    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
 		    sd->ttwu_wake_remote, sd->ttwu_move_affine,
 		    sd->ttwu_move_balance);
 	}
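Likewise, the domain<N> lines keep their shape; only the names of the counters
feeding them change. Per the two seq_printf() calls above, with the per-idle-type
fields that this hunk elides shown as "...":

	domain<N> <cpumask>
		{ lb_count lb_balanced lb_failed lb_imbalance ... }
			(one block per idle type, CPU_IDLE .. CPU_MAX_IDLE_TYPES)
		alb_count alb_failed alb_pushed
		sbe_count sbe_balanced sbe_pushed
		sbf_count sbf_balanced sbf_pushed
		ttwu_wake_remote ttwu_move_affine ttwu_move_balance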
@@ -101,7 +101,7 @@
 {
 	if (rq) {
 		rq->rq_sched_info.run_delay += delta;
-		rq->rq_sched_info.pcnt++;
+		rq->rq_sched_info.pcount++;
 	}
 }
 
@@ -164,7 +164,7 @@
 	sched_info_dequeued(t);
 	t->sched_info.run_delay += delta;
 	t->sched_info.last_arrival = now;
-	t->sched_info.pcnt++;
+	t->sched_info.pcount++;
 
 	rq_sched_info_arrive(task_rq(t), delta);
 }