Commit e8fcaa5c54e3b0371230e5d43a6f650c667da9c5

Authored by Frederic Weisbecker
1 parent dc1ccc4815

nohz: Convert a few places to use local per cpu accesses

A few functions use remote per CPU access APIs when they
deal with local values.

Convert them to local per CPU accessors to improve performance,
code readability and debug checks.

While at it, let's add a *_this_cpu() suffix to some of these
function names to make their purpose clearer.
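
For illustration, this is the shape of the conversion (a hypothetical
sketch, not code from this patch; the foo_state names are made up):

#include <linux/ktime.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct foo_state {
	ktime_t last_update;
};

static DEFINE_PER_CPU(struct foo_state, foo_state);

/* Before: a remote access API used for a purely local value. */
static void foo_touch_remote_style(ktime_t now)
{
	struct foo_state *fs = &per_cpu(foo_state, smp_processor_id());

	fs->last_update = now;
}

/* After: the local accessor is shorter, may compile down to
 * optimized per cpu instructions, and is covered by the
 * CONFIG_DEBUG_PREEMPT sanity checks. */
static void foo_touch_local_style(ktime_t now)
{
	__this_cpu_write(foo_state.last_update, now);
}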

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>

Showing 5 changed files with 25 additions and 34 deletions

include/linux/tick.h
... ... @@ -104,7 +104,7 @@
104 104 extern void tick_clock_notify(void);
105 105 extern int tick_check_oneshot_change(int allow_nohz);
106 106 extern struct tick_sched *tick_get_tick_sched(int cpu);
107   -extern void tick_check_idle(int cpu);
  107 +extern void tick_check_idle(void);
108 108 extern int tick_oneshot_mode_active(void);
109 109 # ifndef arch_needs_cpu
110 110 # define arch_needs_cpu(cpu) (0)
... ... @@ -112,7 +112,7 @@
112 112 # else
113 113 static inline void tick_clock_notify(void) { }
114 114 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
115   -static inline void tick_check_idle(int cpu) { }
  115 +static inline void tick_check_idle(void) { }
116 116 static inline int tick_oneshot_mode_active(void) { return 0; }
117 117 # endif
118 118  
... ... @@ -121,7 +121,7 @@
121 121 static inline void tick_cancel_sched_timer(int cpu) { }
122 122 static inline void tick_clock_notify(void) { }
123 123 static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
124   -static inline void tick_check_idle(int cpu) { }
  124 +static inline void tick_check_idle(void) { }
125 125 static inline int tick_oneshot_mode_active(void) { return 0; }
126 126 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
127 127  
kernel/softirq.c
... ... @@ -311,8 +311,6 @@
311 311 */
312 312 void irq_enter(void)
313 313 {
314   - int cpu = smp_processor_id();
315   -
316 314 rcu_irq_enter();
317 315 if (is_idle_task(current) && !in_interrupt()) {
318 316 /*
... ... @@ -320,7 +318,7 @@
320 318 * here, as softirq will be serviced on return from interrupt.
321 319 */
322 320 local_bh_disable();
323   - tick_check_idle(cpu);
  321 + tick_check_idle();
324 322 _local_bh_enable();
325 323 }
326 324  
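A side note on the irq_enter() hunk above: dropping the cpu variable is
safe because hardirq entry runs with preemption disabled, so the callee
can resolve the CPU itself. A minimal sketch of that invariant
(example_check_idle is a made-up name):

#include <linux/printk.h>
#include <linux/smp.h>

static void example_check_idle(void)
{
	/* Stable: callers such as irq_enter() run with preemption
	 * disabled, so CONFIG_DEBUG_PREEMPT will not warn here. */
	int cpu = smp_processor_id();

	pr_debug("interrupted idle on CPU %d\n", cpu);
}
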
kernel/time/tick-broadcast.c
... ... @@ -538,10 +538,10 @@
538 538 * Called from irq_enter() when idle was interrupted to reenable the
539 539 * per cpu device.
540 540 */
541   -void tick_check_oneshot_broadcast(int cpu)
  541 +void tick_check_oneshot_broadcast_this_cpu(void)
542 542 {
543   - if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
544   - struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
  543 + if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
  544 + struct tick_device *td = &__get_cpu_var(tick_cpu_device);
545 545  
546 546 /*
547 547 * We might be in the middle of switching over from
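
For context on the accessor swap above (assumed semantics of the
v3.13-era API; __get_cpu_var() was later replaced by this_cpu_ptr() in
mainline), a hypothetical helper contrasting the two forms:

#include <linux/bug.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct bar_device {
	int mode;
};

static DEFINE_PER_CPU(struct bar_device, bar_device);

static struct bar_device *bar_local_device(void)
{
	/* Remote API: the caller supplies the CPU number explicitly. */
	struct bar_device *via_per_cpu = &per_cpu(bar_device, smp_processor_id());

	/* Local API: the same instance when preemption is disabled,
	 * but shorter and sanity-checked under CONFIG_DEBUG_PREEMPT. */
	struct bar_device *via_local = &__get_cpu_var(bar_device);

	WARN_ON_ONCE(via_per_cpu != via_local);
	return via_local;
}
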
kernel/time/tick-internal.h
... ... @@ -51,7 +51,7 @@
51 51 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
52 52 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
53 53 extern int tick_broadcast_oneshot_active(void);
54   -extern void tick_check_oneshot_broadcast(int cpu);
  54 +extern void tick_check_oneshot_broadcast_this_cpu(void);
55 55 bool tick_broadcast_oneshot_available(void);
56 56 # else /* BROADCAST */
57 57 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
... ... @@ -62,7 +62,7 @@
62 62 static inline void tick_broadcast_switch_to_oneshot(void) { }
63 63 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
64 64 static inline int tick_broadcast_oneshot_active(void) { return 0; }
65   -static inline void tick_check_oneshot_broadcast(int cpu) { }
  65 +static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
66 66 static inline bool tick_broadcast_oneshot_available(void) { return true; }
67 67 # endif /* !BROADCAST */
68 68  
kernel/time/tick-sched.c
... ... @@ -391,11 +391,9 @@
391 391 */
392 392 static void tick_nohz_update_jiffies(ktime_t now)
393 393 {
394   - int cpu = smp_processor_id();
395   - struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
396 394 unsigned long flags;
397 395  
398   - ts->idle_waketime = now;
  396 + __this_cpu_write(tick_cpu_sched.idle_waketime, now);
399 397  
400 398 local_irq_save(flags);
401 399 tick_do_update_jiffies64(now);
... ... @@ -426,17 +424,15 @@
426 424  
427 425 }
428 426  
429   -static void tick_nohz_stop_idle(int cpu, ktime_t now)
  427 +static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
430 428 {
431   - struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
432   -
433   - update_ts_time_stats(cpu, ts, now, NULL);
  429 + update_ts_time_stats(smp_processor_id(), ts, now, NULL);
434 430 ts->idle_active = 0;
435 431  
436 432 sched_clock_idle_wakeup_event(0);
437 433 }
438 434  
439   -static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
  435 +static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
440 436 {
441 437 ktime_t now = ktime_get();
442 438  
... ... @@ -752,7 +748,7 @@
752 748 ktime_t now, expires;
753 749 int cpu = smp_processor_id();
754 750  
755   - now = tick_nohz_start_idle(cpu, ts);
  751 + now = tick_nohz_start_idle(ts);
756 752  
757 753 if (can_stop_idle_tick(cpu, ts)) {
758 754 int was_stopped = ts->tick_stopped;
... ... @@ -914,8 +910,7 @@
914 910 */
915 911 void tick_nohz_idle_exit(void)
916 912 {
917   - int cpu = smp_processor_id();
918   - struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  913 + struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
919 914 ktime_t now;
920 915  
921 916 local_irq_disable();
... ... @@ -928,7 +923,7 @@
928 923 now = ktime_get();
929 924  
930 925 if (ts->idle_active)
931   - tick_nohz_stop_idle(cpu, now);
  926 + tick_nohz_stop_idle(ts, now);
932 927  
933 928 if (ts->tick_stopped) {
934 929 tick_nohz_restart_sched_tick(ts, now);
... ... @@ -1012,12 +1007,10 @@
1012 1007 * timer and do not touch the other magic bits which need to be done
1013 1008 * when idle is left.
1014 1009 */
1015   -static void tick_nohz_kick_tick(int cpu, ktime_t now)
  1010 +static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
1016 1011 {
1017 1012 #if 0
1018 1013 /* Switch back to 2.6.27 behaviour */
1019   -
1020   - struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
1021 1014 ktime_t delta;
1022 1015  
1023 1016 /*
... ... @@ -1032,36 +1025,36 @@
1032 1025 #endif
1033 1026 }
1034 1027  
1035   -static inline void tick_check_nohz(int cpu)
  1028 +static inline void tick_check_nohz_this_cpu(void)
1036 1029 {
1037   - struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  1030 + struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
1038 1031 ktime_t now;
1039 1032  
1040 1033 if (!ts->idle_active && !ts->tick_stopped)
1041 1034 return;
1042 1035 now = ktime_get();
1043 1036 if (ts->idle_active)
1044   - tick_nohz_stop_idle(cpu, now);
  1037 + tick_nohz_stop_idle(ts, now);
1045 1038 if (ts->tick_stopped) {
1046 1039 tick_nohz_update_jiffies(now);
1047   - tick_nohz_kick_tick(cpu, now);
  1040 + tick_nohz_kick_tick(ts, now);
1048 1041 }
1049 1042 }
1050 1043  
1051 1044 #else
1052 1045  
1053 1046 static inline void tick_nohz_switch_to_nohz(void) { }
1054   -static inline void tick_check_nohz(int cpu) { }
  1047 +static inline void tick_check_nohz_this_cpu(void) { }
1055 1048  
1056 1049 #endif /* CONFIG_NO_HZ_COMMON */
1057 1050  
1058 1051 /*
1059 1052 * Called from irq_enter to notify about the possible interruption of idle()
1060 1053 */
1061   -void tick_check_idle(int cpu)
  1054 +void tick_check_idle(void)
1062 1055 {
1063   - tick_check_oneshot_broadcast(cpu);
1064   - tick_check_nohz(cpu);
  1056 + tick_check_oneshot_broadcast_this_cpu();
  1057 + tick_check_nohz_this_cpu();
1065 1058 }
1066 1059  
1067 1060 /*