Commit 6378ddb592158db4b42197f1bc8666228800e379

Authored by Venki Pallipadi
Committed by Ingo Molnar
1 parent bbe4d18ac2

time: track accurate idle time with tick_sched.idle_sleeptime

The current idle time in kstat is based on jiffies and is coarse grained.
tick_sched.idle_sleeptime makes some attempt to keep track of idle time in a
fine-grained manner, but it does not fully account for the time spent in
interrupts.

Make tick_sched.idle_sleeptime accurate with respect to the time spent
handling interrupts, and add tick_sched.idle_lastupdate, which keeps track of
the last time idle_sleeptime was updated.
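
The accounting added here is simple enough to model outside the kernel. Below
is a minimal user-space sketch of the same logic, with
clock_gettime(CLOCK_MONOTONIC) standing in for ktime_get() and a plain struct
standing in for the tick_sched fields; it is an illustration of the technique,
not kernel code.

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* User-space stand-in for the tick_sched fields added by this patch. */
    struct idle_stats {
        int      idle_active;     /* is the CPU currently counted as idle? */
        uint64_t idle_entrytime;  /* ns: start of the current idle period  */
        uint64_t idle_sleeptime;  /* ns: accumulated idle time             */
        uint64_t idle_lastupdate; /* ns: last time idle_sleeptime changed  */
    };

    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    /* Mirrors tick_nohz_stop_idle(): close out the current idle period. */
    static void stop_idle(struct idle_stats *s)
    {
        if (s->idle_active) {
            uint64_t now = now_ns();
            s->idle_sleeptime += now - s->idle_entrytime;
            s->idle_lastupdate = now;
            s->idle_active = 0;
        }
    }

    /* Mirrors tick_nohz_start_idle(): open a new idle period. */
    static void start_idle(struct idle_stats *s)
    {
        stop_idle(s);  /* fold in any period that is still open */
        s->idle_entrytime = now_ns();
        s->idle_active = 1;
    }

    int main(void)
    {
        struct idle_stats s = { 0 };

        start_idle(&s);   /* CPU enters idle                      */
        usleep(20000);    /* 20 ms of genuine idle                */
        stop_idle(&s);    /* interrupt arrives: stop accounting   */
        usleep(5000);     /* 5 ms of handler work, must not count */
        start_idle(&s);   /* handler done, CPU is idle again      */
        usleep(10000);    /* 10 ms more idle                      */
        stop_idle(&s);

        /* Prints roughly 30 ms: the 5 ms in the "handler" is excluded. */
        printf("idle_sleeptime = %.1f ms\n", s.idle_sleeptime / 1e6);
        return 0;
    }

The interrupt step in this model is exactly why irq_enter() in the diff below
calls tick_nohz_stop_idle() before __irq_enter(): time spent in hardirq
context is folded out of idle_sleeptime rather than counted as sleep.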

These statistics will be crucial for the cpufreq ondemand governor, which can
shed some of the conservative guard band that it uses today when setting the
frequency.  The ondemand changes that use the exact idle time are coming
soon.
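
Those ondemand changes are not part of this commit, but the intended read side
can be sketched from the get_cpu_idle_time_us() interface added here. The stub
below replays two canned samples in place of the real kernel call; the
sampling pattern and the load formula are assumptions about how a governor
might use the counter, not code from this patch.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Stub for the kernel's get_cpu_idle_time_us(): returns cumulative idle
     * time in microseconds and stores the timestamp of the last update in
     * *last_update_time.  Here it replays two canned samples 100 ms apart.
     */
    static uint64_t get_cpu_idle_time_us(int cpu, uint64_t *last_update_time)
    {
        static const uint64_t walls[] = { 5000000, 5100000 }; /* us */
        static const uint64_t idles[] = { 4200000, 4230000 }; /* us */
        static int i;

        (void)cpu;
        *last_update_time = walls[i];
        return idles[i++];
    }

    int main(void)
    {
        uint64_t wall_prev, wall_now, idle_prev, idle_now;

        idle_prev = get_cpu_idle_time_us(0, &wall_prev);
        /* ... one sampling interval passes ... */
        idle_now = get_cpu_idle_time_us(0, &wall_now);

        /* 100 ms elapsed, 30 ms of it idle -> roughly 70% busy. */
        uint64_t wall_delta = wall_now - wall_prev;
        uint64_t idle_delta = idle_now - idle_prev;
        unsigned int load = (unsigned int)
            (100 * (wall_delta - idle_delta) / wall_delta);

        printf("load = %u%%\n", load);
        return 0;
    }

A governor could compare such a load figure directly against its up-threshold,
without the extra slack needed when idle time is only known to jiffies
resolution.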

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Showing 3 changed files with 60 additions and 23 deletions

include/linux/tick.h
... ... @@ -51,8 +51,10 @@
51 51 unsigned long idle_jiffies;
52 52 unsigned long idle_calls;
53 53 unsigned long idle_sleeps;
  54 + int idle_active;
54 55 ktime_t idle_entrytime;
55 56 ktime_t idle_sleeptime;
  57 + ktime_t idle_lastupdate;
56 58 ktime_t sleep_length;
57 59 unsigned long last_jiffies;
58 60 unsigned long next_jiffies;
... ... @@ -103,6 +105,8 @@
103 105 extern void tick_nohz_restart_sched_tick(void);
104 106 extern void tick_nohz_update_jiffies(void);
105 107 extern ktime_t tick_nohz_get_sleep_length(void);
  108 +extern void tick_nohz_stop_idle(int cpu);
  109 +extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
106 110 # else
107 111 static inline void tick_nohz_stop_sched_tick(void) { }
108 112 static inline void tick_nohz_restart_sched_tick(void) { }
... ... @@ -113,6 +117,8 @@
113 117  
114 118 return len;
115 119 }
  120 +static inline void tick_nohz_stop_idle(int cpu) { }
  121 +static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return 0; }
116 122 # endif /* !NO_HZ */
117 123  
118 124 #endif
kernel/softirq.c
... ... @@ -280,9 +280,14 @@
280 280 */
281 281 void irq_enter(void)
282 282 {
  283 +#ifdef CONFIG_NO_HZ
  284 + int cpu = smp_processor_id();
  285 + if (idle_cpu(cpu) && !in_interrupt())
  286 + tick_nohz_stop_idle(cpu);
  287 +#endif
283 288 __irq_enter();
284 289 #ifdef CONFIG_NO_HZ
285   - if (idle_cpu(smp_processor_id()))
  290 + if (idle_cpu(cpu))
286 291 tick_nohz_update_jiffies();
287 292 #endif
288 293 }
kernel/time/tick-sched.c
... ... @@ -143,6 +143,44 @@
143 143 local_irq_restore(flags);
144 144 }
145 145  
  146 +void tick_nohz_stop_idle(int cpu)
  147 +{
  148 + struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  149 +
  150 + if (ts->idle_active) {
  151 + ktime_t now, delta;
  152 + now = ktime_get();
  153 + delta = ktime_sub(now, ts->idle_entrytime);
  154 + ts->idle_lastupdate = now;
  155 + ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
  156 + ts->idle_active = 0;
  157 + }
  158 +}
  159 +
  160 +static ktime_t tick_nohz_start_idle(int cpu)
  161 +{
  162 + struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  163 + ktime_t now, delta;
  164 +
  165 + now = ktime_get();
  166 + if (ts->idle_active) {
  167 + delta = ktime_sub(now, ts->idle_entrytime);
  168 + ts->idle_lastupdate = now;
  169 + ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
  170 + }
  171 + ts->idle_entrytime = now;
  172 + ts->idle_active = 1;
  173 + return now;
  174 +}
  175 +
  176 +u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
  177 +{
  178 + struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  179 +
  180 + *last_update_time = ktime_to_us(ts->idle_lastupdate);
  181 + return ktime_to_us(ts->idle_sleeptime);
  182 +}
  183 +
146 184 /**
147 185 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
148 186 *
149 187  
... ... @@ -155,13 +193,14 @@
155 193 unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
156 194 unsigned long rt_jiffies;
157 195 struct tick_sched *ts;
158   - ktime_t last_update, expires, now, delta;
  196 + ktime_t last_update, expires, now;
159 197 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
160 198 int cpu;
161 199  
162 200 local_irq_save(flags);
163 201  
164 202 cpu = smp_processor_id();
  203 + now = tick_nohz_start_idle(cpu);
165 204 ts = &per_cpu(tick_cpu_sched, cpu);
166 205  
167 206 /*
168 207  
... ... @@ -193,19 +232,7 @@
193 232 }
194 233 }
195 234  
196   - now = ktime_get();
197   - /*
198   - * When called from irq_exit we need to account the idle sleep time
199   - * correctly.
200   - */
201   - if (ts->tick_stopped) {
202   - delta = ktime_sub(now, ts->idle_entrytime);
203   - ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
204   - }
205   -
206   - ts->idle_entrytime = now;
207 235 ts->idle_calls++;
208   -
209 236 /* Read jiffies and the time when jiffies were updated last */
210 237 do {
211 238 seq = read_seqbegin(&xtime_lock);
212 239  
213 240  
214 241  
215 242  
216 243  
... ... @@ -337,22 +364,21 @@
337 364 int cpu = smp_processor_id();
338 365 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
339 366 unsigned long ticks;
340   - ktime_t now, delta;
  367 + ktime_t now;
341 368  
342   - if (!ts->tick_stopped)
  369 + local_irq_disable();
  370 + tick_nohz_stop_idle(cpu);
  371 +
  372 + if (!ts->tick_stopped) {
  373 + local_irq_enable();
343 374 return;
  375 + }
344 376  
345 377 /* Update jiffies first */
346   - now = ktime_get();
347   -
348   - local_irq_disable();
349 378 select_nohz_load_balancer(0);
  379 + now = ktime_get();
350 380 tick_do_update_jiffies64(now);
351 381 cpu_clear(cpu, nohz_cpu_mask);
352   -
353   - /* Account the idle time */
354   - delta = ktime_sub(now, ts->idle_entrytime);
355   - ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
356 382  
357 383 /*
358 384 * We stopped the tick in idle. Update process times would miss the