kernel/sched_clock.c

  /*
   * sched_clock for unstable cpu clocks
   *
   *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   *
   *  Updates and enhancements:
   *    Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
   *
   * Based on code by:
   *   Ingo Molnar <mingo@redhat.com>
   *   Guillaume Chazarain <guichaz@gmail.com>
   *
   * Create a semi-stable clock from a mixture of other events, including:
   *  - gtod
   *  - sched_clock()
   *  - explicit idle events
   *
   * We use gtod as the base and apply the unstable clock deltas on top. The
   * deltas are filtered, keeping the clock monotonic and within an expected
   * window.
   *
   * Furthermore, explicit sleep and wakeup hooks allow us to account for time
   * that is otherwise invisible (TSC gets stopped).
   *
   * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
   * consistent between cpus (never more than 2 jiffies difference).
   */
  #include <linux/spinlock.h>
  #include <linux/hardirq.h>
  #include <linux/module.h>
  #include <linux/percpu.h>
  #include <linux/ktime.h>
  #include <linux/sched.h>

  /*
   * Scheduler clock - returns current time in nanosec units.
   * This is the default implementation.
   * Architectures and sub-architectures can override this.
   */
  unsigned long long __attribute__((weak)) sched_clock(void)
  {
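  	/*
  	 * jiffies is initialised to INITIAL_JIFFIES (just below the wrap
  	 * point, to flush out wrap bugs early), so subtracting it makes
  	 * this clock start near zero at boot.
  	 */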
  	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
  					* (NSEC_PER_SEC / HZ);
  }

  static __read_mostly int sched_clock_running;
  #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  __read_mostly int sched_clock_stable;
  
  struct sched_clock_data {
  	/*
  	 * Raw spinlock - this is a special case: this might be called
  	 * from within instrumentation code, so we don't want to do any
  	 * instrumentation ourselves.
  	 */
  	raw_spinlock_t		lock;
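
  	/*
  	 * tick_raw/tick_gtod are the sched_clock() and gtod (ktime)
  	 * samples taken at the last tick; clock is the filtered,
  	 * monotonic per-cpu time built from them.
  	 */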
  	u64			tick_raw;
  	u64			tick_gtod;
  	u64			clock;
  };
  
  static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
  
  static inline struct sched_clock_data *this_scd(void)
  {
  	return &__get_cpu_var(sched_clock_data);
  }
  
  static inline struct sched_clock_data *cpu_sdc(int cpu)
  {
  	return &per_cpu(sched_clock_data, cpu);
  }
  
  void sched_clock_init(void)
  {
  	u64 ktime_now = ktime_to_ns(ktime_get());
  	int cpu;
  
  	for_each_possible_cpu(cpu) {
  		struct sched_clock_data *scd = cpu_sdc(cpu);
  
  		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
  		scd->tick_raw = 0;
  		scd->tick_gtod = ktime_now;
  		scd->clock = ktime_now;
  	}
  
  	sched_clock_running = 1;
  }
  
  /*
   * min, max, except they take u64 wrapping into account
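   *
   * The (s64) cast of the difference gives a wrap-safe comparison: with
   * x == 1 and y == ULLONG_MAX (y has just wrapped past x), x - y == 2,
   * so (s64)(x - y) > 0 and wrap_max() correctly picks x even though y
   * is numerically larger.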
   */
  
  static inline u64 wrap_min(u64 x, u64 y)
  {
  	return (s64)(x - y) < 0 ? x : y;
  }
  
  static inline u64 wrap_max(u64 x, u64 y)
  {
  	return (s64)(x - y) > 0 ? x : y;
  }
  
  /*
   * update the percpu scd from the raw @now value
   *
   *  - filter out backward motion
   *  - use the GTOD tick value to create a window to filter crazy TSC values
   */
  static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
  {
  	s64 delta = now - scd->tick_raw;
  	u64 clock, min_clock, max_clock;

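  	/*
  	 * The raw clock can appear to go backwards between ticks (e.g. on
  	 * unsynchronised TSCs); treat a negative delta as zero progress.
  	 */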
  	if (unlikely(delta < 0))
  		delta = 0;

  	/*
  	 * scd->clock = clamp(scd->tick_gtod + delta,
  	 *		      max(scd->tick_gtod, scd->clock),
  	 *		      scd->tick_gtod + TICK_NSEC);
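  	 *
  	 * The lower bound keeps the clock monotonic and never behind the
  	 * gtod value of the last tick; the upper bound stops a wild TSC
  	 * from running more than one tick ahead.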
  	 */

  	clock = scd->tick_gtod + delta;
  	min_clock = wrap_max(scd->tick_gtod, scd->clock);
  	max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);

  	clock = wrap_max(clock, min_clock);
  	clock = wrap_min(clock, max_clock);

  	scd->clock = clock;

  	return scd->clock;
  }
  
  static void lock_double_clock(struct sched_clock_data *data1,
  				struct sched_clock_data *data2)
  {
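  	/*
  	 * Always take the lower-addressed lock first, so two cpus locking
  	 * the same pair from opposite ends cannot deadlock on each
  	 * other's half-taken lock.
  	 */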
  	if (data1 < data2) {
  		__raw_spin_lock(&data1->lock);
  		__raw_spin_lock(&data2->lock);
  	} else {
  		__raw_spin_lock(&data2->lock);
  		__raw_spin_lock(&data1->lock);
  	}
  }
  
  u64 sched_clock_cpu(int cpu)
  {
  	u64 now, clock, this_clock, remote_clock;
  	struct sched_clock_data *scd;

  	if (sched_clock_stable)
  		return sched_clock();

  	scd = cpu_sdc(cpu);

  	/*
  	 * Normally this is not called in NMI context - but if it is,
  	 * trying to do any locking here is totally lethal.
  	 */
  	if (unlikely(in_nmi()))
  		return scd->clock;
  	if (unlikely(!sched_clock_running))
  		return 0ull;
  	WARN_ON_ONCE(!irqs_disabled());
  	now = sched_clock();
  
  	if (cpu != raw_smp_processor_id()) {
  		struct sched_clock_data *my_scd = this_scd();
  
  		lock_double_clock(scd, my_scd);
  		this_clock = __update_sched_clock(my_scd, now);
  		remote_clock = scd->clock;
  
  		/*
  		 * Use the opportunity that we have both locks
  		 * taken to couple the two clocks: we take the
  		 * larger time as the latest time for both
  		 * runqueues. (This keeps the coupled clocks monotonic.)
  		 */
  		if (likely((s64)(remote_clock - this_clock) < 0)) {
  			clock = this_clock;
  			scd->clock = clock;
  		} else {
  			/*
  			 * Should be rare, but possible:
  			 */
  			clock = remote_clock;
  			my_scd->clock = remote_clock;
  		}
  
  		__raw_spin_unlock(&my_scd->lock);
  	} else {
  		__raw_spin_lock(&scd->lock);
  		clock = __update_sched_clock(scd, now);
  	}
  	__raw_spin_unlock(&scd->lock);
  	return clock;
  }
  
  void sched_clock_tick(void)
  {
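  	/*
  	 * Called from the scheduler tick with irqs disabled: resample the
  	 * raw and gtod reference clocks and re-filter this cpu's clock.
  	 */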
  	struct sched_clock_data *scd;
  	u64 now, now_gtod;
  	if (sched_clock_stable)
  		return;
  	if (unlikely(!sched_clock_running))
  		return;
  	WARN_ON_ONCE(!irqs_disabled());
  	scd = this_scd();
  	now_gtod = ktime_to_ns(ktime_get());
  	now = sched_clock();
  
  	__raw_spin_lock(&scd->lock);
  	scd->tick_raw = now;
  	scd->tick_gtod = now_gtod;
  	__update_sched_clock(scd, now);
  	__raw_spin_unlock(&scd->lock);
  }
  
  /*
   * We are going deep-idle (irqs are disabled):
   */
  void sched_clock_idle_sleep_event(void)
  {
  	sched_clock_cpu(smp_processor_id());
  }
  EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
  
  /*
   * We just idled 'delta_ns' nanoseconds (called with irqs disabled):
   */
  void sched_clock_idle_wakeup_event(u64 delta_ns)
  {
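  	/*
  	 * sched_clock_tick() ends up in ktime_get(), which must not be
  	 * used while timekeeping is suspended (suspend/resume path).
  	 */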
  	if (timekeeping_suspended)
  		return;
  	sched_clock_tick();
  	touch_softlockup_watchdog();
  }
  EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
  #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
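
  /*
   * Trivial fallback for architectures that do not select
   * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK: sched_clock() is used as-is and
   * no per-cpu filtering is needed.
   */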
  
  void sched_clock_init(void)
  {
  	sched_clock_running = 1;
  }
  
  u64 sched_clock_cpu(int cpu)
  {
  	if (unlikely(!sched_clock_running))
  		return 0;
  
  	return sched_clock();
  }
  #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

  unsigned long long cpu_clock(int cpu)
  {
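  	/*
  	 * Irq-safe wrapper around sched_clock_cpu(): disable interrupts
  	 * locally so this can be called whatever the caller's irq state.
  	 */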
  	unsigned long long clock;
  	unsigned long flags;
  	local_irq_save(flags);
  	clock = sched_clock_cpu(cpu);
  	local_irq_restore(flags);
  
  	return clock;
  }
  EXPORT_SYMBOL_GPL(cpu_clock);