kernel/sched_clock.c
  /*
   * sched_clock for unstable cpu clocks
   *
   *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   *
   *  Updates and enhancements:
   *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
   *
   * Based on code by:
   *   Ingo Molnar <mingo@redhat.com>
   *   Guillaume Chazarain <guichaz@gmail.com>
   *
   * Create a semi stable clock from a mixture of other events, including:
   *  - gtod
   *  - sched_clock()
   *  - explicit idle events
   *
   * We use gtod as base and the unstable clock deltas. The deltas are filtered,
   * making it monotonic and keeping it within an expected window.
   *
   * Furthermore, explicit sleep and wakeup hooks allow us to account for time
   * that is otherwise invisible (TSC gets stopped).
   *
   * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
   * consistent between cpus (never more than 2 jiffies difference).
   */
  #include <linux/sched.h>
  #include <linux/percpu.h>
  #include <linux/spinlock.h>
  #include <linux/ktime.h>
  #include <linux/module.h>
  /*
   * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
   * Architectures and sub-architectures can override this.
   */
  unsigned long long __attribute__((weak)) sched_clock(void)
  {
  	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
  }
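
/*
 * Illustrative sketch only (not part of this file): an architecture override
 * of the weak default above typically reads a fast hardware counter and
 * scales it to nanoseconds.  The helper names below are placeholders, not
 * real kernel symbols.
 */
#if 0
unsigned long long sched_clock(void)
{
	u64 cycles = read_hw_counter();	/* placeholder: raw cycle-counter read */

	return cycles * ns_per_cycle;	/* placeholder: precomputed cycles->ns scale */
}
#endif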

  static __read_mostly int sched_clock_running;
  #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  
  struct sched_clock_data {
  	/*
  	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
  	 * instrumentation ourselves.
  	 */
  	raw_spinlock_t		lock;
	u64			tick_raw;	/* sched_clock() value at the last tick */
	u64			tick_gtod;	/* GTOD (ktime) value, in ns, at the last tick */
	u64			clock;		/* last computed, filtered clock value */
  };
  
  static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
  
  static inline struct sched_clock_data *this_scd(void)
  {
  	return &__get_cpu_var(sched_clock_data);
  }
  
  static inline struct sched_clock_data *cpu_sdc(int cpu)
  {
  	return &per_cpu(sched_clock_data, cpu);
  }
  
  void sched_clock_init(void)
  {
  	u64 ktime_now = ktime_to_ns(ktime_get());
  	int cpu;
  
  	for_each_possible_cpu(cpu) {
  		struct sched_clock_data *scd = cpu_sdc(cpu);
  
  		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
  		scd->tick_raw = 0;
  		scd->tick_gtod = ktime_now;
  		scd->clock = ktime_now;
  	}
  
  	sched_clock_running = 1;
  }
  
  /*
   * min,max except they take wrapping into account
   */
  
  static inline u64 wrap_min(u64 x, u64 y)
  {
  	return (s64)(x - y) < 0 ? x : y;
  }
  
  static inline u64 wrap_max(u64 x, u64 y)
  {
  	return (s64)(x - y) > 0 ? x : y;
  }
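
/*
 * Worked example (illustrative values): with x = 2 and y = ULLONG_MAX - 5,
 * x - y wraps around to 8, so (s64)(x - y) > 0 and wrap_max() returns x.
 * That is the right answer: x is 8 "after" y in modular arithmetic, even
 * though x < y as a plain unsigned comparison, which is exactly the case
 * ordinary min()/max() would get wrong.
 */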
  
  /*
   * update the percpu scd from the raw @now value
   *
   *  - filter out backward motion
   *  - use the GTOD tick value to create a window to filter crazy TSC values
   */
  static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
  {
  	s64 delta = now - scd->tick_raw;
  	u64 clock, min_clock, max_clock;
  
  	WARN_ON_ONCE(!irqs_disabled());

  	if (unlikely(delta < 0))
  		delta = 0;

  	/*
  	 * scd->clock = clamp(scd->tick_gtod + delta,
  	 * 		      max(scd->tick_gtod, scd->clock),
  	 * 		      scd->tick_gtod + TICK_NSEC);
  	 */

  	clock = scd->tick_gtod + delta;
  	min_clock = wrap_max(scd->tick_gtod, scd->clock);
  	max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);

  	clock = wrap_max(clock, min_clock);
  	clock = wrap_min(clock, max_clock);

  	scd->clock = clock;

  	return scd->clock;
  }
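
/*
 * Worked example (illustrative numbers, assuming HZ=1000 so TICK_NSEC is
 * 1,000,000 ns): with scd->tick_gtod = 1,000,000 and scd->clock = 1,002,000,
 * min_clock = 1,002,000 and max_clock = 2,000,000.  A raw delta of 0 gives
 * clock = 1,000,000 and is pulled up to 1,002,000, so the clock never moves
 * backwards; a wild delta of 10,000,000 gives 11,000,000 and is capped at
 * 2,000,000, i.e. at most one tick beyond the last GTOD sample.
 */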
  
/*
 * Take both per-cpu clock locks in a fixed (address) order so that two cpus
 * cross-reading each other's clocks cannot deadlock on the lock pair.
 */
static void lock_double_clock(struct sched_clock_data *data1,
  				struct sched_clock_data *data2)
  {
  	if (data1 < data2) {
  		__raw_spin_lock(&data1->lock);
  		__raw_spin_lock(&data2->lock);
  	} else {
  		__raw_spin_lock(&data2->lock);
  		__raw_spin_lock(&data1->lock);
  	}
  }
  
  u64 sched_clock_cpu(int cpu)
  {
  	struct sched_clock_data *scd = cpu_sdc(cpu);
  	u64 now, clock, this_clock, remote_clock;

  	if (unlikely(!sched_clock_running))
  		return 0ull;
  	WARN_ON_ONCE(!irqs_disabled());
  	now = sched_clock();
  
  	if (cpu != raw_smp_processor_id()) {
  		struct sched_clock_data *my_scd = this_scd();
  
  		lock_double_clock(scd, my_scd);
  		this_clock = __update_sched_clock(my_scd, now);
  		remote_clock = scd->clock;
  
  		/*
  		 * Use the opportunity that we have both locks
  		 * taken to couple the two clocks: we take the
  		 * larger time as the latest time for both
  		 * runqueues. (this creates monotonic movement)
  		 */
  		if (likely((s64)(remote_clock - this_clock) < 0)) {
  			clock = this_clock;
  			scd->clock = clock;
  		} else {
  			/*
  			 * Should be rare, but possible:
  			 */
  			clock = remote_clock;
  			my_scd->clock = remote_clock;
  		}
  
  		__raw_spin_unlock(&my_scd->lock);
  	} else {
  		__raw_spin_lock(&scd->lock);
  		clock = __update_sched_clock(scd, now);
  	}
  	__raw_spin_unlock(&scd->lock);
  	return clock;
  }
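
/*
 * Worked example (illustrative numbers): if this cpu computes
 * this_clock = 5,000,000 ns while the requested cpu's scd->clock still reads
 * 4,990,000 ns, the remote value is advanced to 5,000,000; in the rarer
 * opposite case this cpu's clock is advanced instead.  Both cpus end up on
 * the larger value, which is what keeps cross-cpu readings monotonic.
 */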
  
/*
 * Called from the timer tick with interrupts disabled: refresh this cpu's
 * raw sched_clock() and GTOD reference values and recompute the filtered
 * clock.
 */
void sched_clock_tick(void)
  {
  	struct sched_clock_data *scd = this_scd();
  	u64 now, now_gtod;
  	if (unlikely(!sched_clock_running))
  		return;
  	WARN_ON_ONCE(!irqs_disabled());
  	now_gtod = ktime_to_ns(ktime_get());
  	now = sched_clock();
  
  	__raw_spin_lock(&scd->lock);
  	scd->tick_raw = now;
  	scd->tick_gtod = now_gtod;
  	__update_sched_clock(scd, now);
  	__raw_spin_unlock(&scd->lock);
  }
  
  /*
   * We are going deep-idle (irqs are disabled):
   */
  void sched_clock_idle_sleep_event(void)
  {
  	sched_clock_cpu(smp_processor_id());
  }
  EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
  
/*
 * We just idled @delta_ns nanoseconds (called with irqs disabled); the
 * argument is not used here, we simply resync against GTOD via
 * sched_clock_tick() and kick the softlockup watchdog.
 */
  void sched_clock_idle_wakeup_event(u64 delta_ns)
  {
  	if (timekeeping_suspended)
  		return;
  	sched_clock_tick();
  	touch_softlockup_watchdog();
  }
  EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
  #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
  
  void sched_clock_init(void)
  {
  	sched_clock_running = 1;
  }
  
  u64 sched_clock_cpu(int cpu)
  {
  	if (unlikely(!sched_clock_running))
  		return 0;
  
  	return sched_clock();
  }
  #endif

/*
 * Interrupt-safe wrapper around sched_clock_cpu(): disables local interrupts
 * so callers do not have to.
 */
unsigned long long cpu_clock(int cpu)
  {
  	unsigned long long clock;
  	unsigned long flags;
  	local_irq_save(flags);
  	clock = sched_clock_cpu(cpu);
  	local_irq_restore(flags);
  
  	return clock;
  }
  EXPORT_SYMBOL_GPL(cpu_clock);
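
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * wants a per-cpu monotonic timestamp in nanoseconds, e.g. for ad-hoc
 * tracing.  example_trace_event() is a hypothetical function.
 */
#if 0
static void example_trace_event(void)
{
	u64 t = cpu_clock(raw_smp_processor_id());

	printk(KERN_DEBUG "event at %llu ns\n", (unsigned long long)t);
}
#endif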