Commit 833883d9ac4cfb31c1c4419335e68e6895a05b6b

Authored by Dimitri Sivanich
Committed by Thomas Gleixner
Parent: 833df317f9

hrtimer: reduce calls to hrtimer_get_softirq_time()

It seems that hrtimer_run_queues() calls hrtimer_get_softirq_time() more
often than it needs to.  This can cause frequent contention on systems with
large numbers of processors/cores.
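
The contention is plausible because each call samples shared timekeeping
state (in kernels of this vintage, a seqlock-protected read of xtime).
Below is a toy model of such a seqlock read side, purely illustrative:
struct toy_seqlock, toy_read_time() and the other names are invented for
this sketch and are not the kernel's seqlock API.

	#include <stdatomic.h>
	#include <stdio.h>

	/*
	 * Toy seqlock read side. Every reader must load the shared
	 * sequence counter (twice), so frequent readers on many CPUs
	 * keep bouncing that cacheline between cores.
	 */
	struct toy_seqlock {
		_Atomic unsigned int seq;	/* odd = write in progress */
		long long time_ns;		/* the protected timestamp */
	};

	static long long toy_read_time(struct toy_seqlock *sl)
	{
		unsigned int seq;
		long long t;

		do {
			seq = atomic_load(&sl->seq);	/* shared-cacheline read */
			t = sl->time_ns;		/* snapshot the payload */
		} while ((seq & 1) || atomic_load(&sl->seq) != seq);

		return t;
	}

	int main(void)
	{
		struct toy_seqlock sl = { .seq = 0, .time_ns = 123456789 };

		printf("%lld\n", toy_read_time(&sl));
		return 0;
	}

Readers never block, but every read still touches the shared counter, so
doing it once per clock base on every CPU every jiffy adds up.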

With this patch, hrtimer_run_queues() calls hrtimer_get_softirq_time() only if
there is a pending timer in one of the hrtimer clock bases, and then at most
once per invocation.
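
As an illustration of the new shape of the loop, here is a minimal
standalone sketch of the pattern (NUM_BASES, struct clock_base,
fetch_softirq_time() and the fetch counter are placeholders invented for
this example, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_BASES 2	/* stands in for HRTIMER_MAX_CLOCK_BASES */

	struct clock_base {
		bool has_timers;	/* stands in for base->first != NULL */
	};

	static int fetches;	/* counts the expensive time reads */

	static void fetch_softirq_time(void)	/* stands in for hrtimer_get_softirq_time() */
	{
		fetches++;
	}

	static void run_queues_sketch(struct clock_base *bases)
	{
		int i, gettime = 1;

		for (i = 0; i < NUM_BASES; i++) {
			if (!bases[i].has_timers)
				continue;	/* empty base: no need to know the time */

			if (gettime) {
				fetch_softirq_time();	/* done at most once per call */
				gettime = 0;
			}
			/* ... expire the pending timers of bases[i] ... */
		}
	}

	int main(void)
	{
		struct clock_base bases[NUM_BASES] = { { true }, { true } };

		run_queues_sketch(bases);
		printf("time fetched %d time(s)\n", fetches);	/* prints 1, not NUM_BASES */
		return 0;
	}

With every base empty, fetch_softirq_time() is never called at all; with
any number of busy bases it is called exactly once, which is the point of
the gettime flag.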

This also folds the inline helper run_hrtimer_queue() into
hrtimer_run_queues(), combining the two into a single function.

[ tglx@linutronix.de: coding style ]

Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Showing 1 changed file with 32 additions and 32 deletions

@@ -1238,51 +1238,51 @@
 /*
  * Called from hardirq context every jiffy
  */
-static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
-				     int index)
+void hrtimer_run_queues(void)
 {
 	struct rb_node *node;
-	struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
+	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_clock_base *base;
+	int index, gettime = 1;
 
-	if (!base->first)
+	if (hrtimer_hres_active())
 		return;
 
-	if (base->get_softirq_time)
-		base->softirq_time = base->get_softirq_time();
+	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+		base = &cpu_base->clock_base[index];
 
-	spin_lock(&cpu_base->lock);
-
-	while ((node = base->first)) {
-		struct hrtimer *timer;
-
-		timer = rb_entry(node, struct hrtimer, node);
-		if (base->softirq_time.tv64 <= timer->expires.tv64)
-			break;
-
-		if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-			__remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
-			list_add_tail(&timer->cb_entry,
-				&base->cpu_base->cb_pending);
+		if (!base->first)
 			continue;
+
+		if (gettime) {
+			hrtimer_get_softirq_time(cpu_base);
+			gettime = 0;
 		}
 
-		__run_hrtimer(timer);
-	}
-	spin_unlock(&cpu_base->lock);
-}
+		if (base->get_softirq_time)
+			base->softirq_time = base->get_softirq_time();
 
-void hrtimer_run_queues(void)
-{
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-	int i;
+		spin_lock(&cpu_base->lock);
 
-	if (hrtimer_hres_active())
-		return;
+		while ((node = base->first)) {
+			struct hrtimer *timer;
 
-	hrtimer_get_softirq_time(cpu_base);
+			timer = rb_entry(node, struct hrtimer, node);
+			if (base->softirq_time.tv64 <= timer->expires.tv64)
+				break;
 
-	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
-		run_hrtimer_queue(cpu_base, i);
+			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
+				__remove_hrtimer(timer, base,
+						 HRTIMER_STATE_PENDING, 0);
+				list_add_tail(&timer->cb_entry,
+					      &base->cpu_base->cb_pending);
+				continue;
+			}
+
+			__run_hrtimer(timer);
+		}
+		spin_unlock(&cpu_base->lock);
+	}
 }
 
 /*