  /*
   *  linux/kernel/hrtimer.c
   *
   *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
   *
   *  High-resolution kernel timers
   *
   *  In contrast to the low-resolution timeout API implemented in
   *  kernel/timer.c, hrtimers provide finer resolution and accuracy
   *  depending on system configuration and capabilities.
   *
   *  These timers are currently used for:
   *   - itimers
   *   - POSIX timers
   *   - nanosleep
   *   - precise in-kernel timing
   *
   *  Started by: Thomas Gleixner and Ingo Molnar
   *
   *  Credits:
   *	based on kernel/timer.c
   *
   *	Help, testing, suggestions, bugfixes, improvements were
   *	provided by:
   *
   *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
   *	et. al.
   *
   *  For licensing details see kernel-base/COPYING
   */
  
  #include <linux/cpu.h>
  #include <linux/export.h>
  #include <linux/percpu.h>
  #include <linux/hrtimer.h>
  #include <linux/notifier.h>
  #include <linux/syscalls.h>
  #include <linux/kallsyms.h>
  #include <linux/interrupt.h>
  #include <linux/tick.h>
  #include <linux/seq_file.h>
  #include <linux/err.h>
  #include <linux/debugobjects.h>
  #include <linux/sched.h>
  #include <linux/sched/sysctl.h>
  #include <linux/sched/rt.h>
  #include <linux/sched/deadline.h>
  #include <linux/timer.h>
  #include <linux/freezer.h>
  
  #include <asm/uaccess.h>
  #include <trace/events/timer.h>
  /*
   * The timer bases:
   *
   * There are more clockids than hrtimer bases. Thus, we index
   * into the timer bases by the hrtimer_base_type enum. When trying
   * to reach a base using a clockid, hrtimer_clockid_to_base()
   * is used to convert from clockid to the proper hrtimer_base_type.
   */
  DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
  {

  	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
  	.clock_base =
  	{
  		{
  			.index = HRTIMER_BASE_MONOTONIC,
  			.clockid = CLOCK_MONOTONIC,
  			.get_time = &ktime_get,
  			.resolution = KTIME_LOW_RES,
  		},
  		{
  			.index = HRTIMER_BASE_REALTIME,
  			.clockid = CLOCK_REALTIME,
  			.get_time = &ktime_get_real,
  			.resolution = KTIME_LOW_RES,
  		},
  		{
  			.index = HRTIMER_BASE_BOOTTIME,
  			.clockid = CLOCK_BOOTTIME,
  			.get_time = &ktime_get_boottime,
  			.resolution = KTIME_LOW_RES,
  		},
  		{
  			.index = HRTIMER_BASE_TAI,
  			.clockid = CLOCK_TAI,
  			.get_time = &ktime_get_clocktai,
  			.resolution = KTIME_LOW_RES,
  		},
  	}
  };
  static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
  	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
  	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
  	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
  	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
  };
  
  static inline int hrtimer_clockid_to_base(clockid_t clock_id)
  {
  	return hrtimer_clock_to_base_table[clock_id];
  }
  /*
   * Get the coarse grained time at the softirq based on xtime and
   * wall_to_monotonic.
   */
  static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
  {
  	ktime_t xtim, mono, boot;
  	struct timespec xts, tom, slp;
  	s32 tai_offset;

  	get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);
  	tai_offset = timekeeping_get_tai_offset();

  	xtim = timespec_to_ktime(xts);
  	mono = ktime_add(xtim, timespec_to_ktime(tom));
  	boot = ktime_add(mono, timespec_to_ktime(slp));
  	base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
  	base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
  	base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
  	base->clock_base[HRTIMER_BASE_TAI].softirq_time =
  				ktime_add(xtim,	ktime_set(tai_offset, 0));
  }
  
  /*
   * Functions and macros which are different for UP/SMP systems are kept in a
   * single place
   */
  #ifdef CONFIG_SMP
  /*
   * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
   * means that all timers which are tied to this base via timer->base are
   * locked, and the base itself is locked too.
   *
   * So __run_timers/migrate_timers can safely modify all timers which could
   * be found on the lists/queues.
   *
   * When the timer's base is locked, and the timer removed from list, it is
   * possible to set timer->base = NULL and drop the lock: the timer remains
   * locked.
   */
  static
  struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
  					     unsigned long *flags)
  {
  	struct hrtimer_clock_base *base;
  
  	for (;;) {
  		base = timer->base;
  		if (likely(base != NULL)) {
  			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
  			if (likely(base == timer->base))
  				return base;
  			/* The timer has migrated to another CPU: */
  			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
  		}
  		cpu_relax();
  	}
  }
  /*
   * With HIGHRES=y we do not migrate the timer when it is expiring
   * before the next event on the target cpu because we cannot reprogram
   * the target cpu hardware and we would cause it to fire late.
   *
   * Called with cpu_base->lock of target cpu held.
   */
  static int
  hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
  {
  #ifdef CONFIG_HIGH_RES_TIMERS
  	ktime_t expires;
  
  	if (!new_base->cpu_base->hres_active)
  		return 0;
  
  	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
  	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
  #else
  	return 0;
  #endif
  }
  /*
   * Switch the timer base to the current CPU when possible.
   */
  static inline struct hrtimer_clock_base *
  switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
  		    int pinned)
  {
  	struct hrtimer_clock_base *new_base;
  	struct hrtimer_cpu_base *new_cpu_base;
  	int this_cpu = smp_processor_id();
  	int cpu = get_nohz_timer_target(pinned);
  	int basenum = base->index;

  again:
  	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
  	new_base = &new_cpu_base->clock_base[basenum];
  
  	if (base != new_base) {
  		/*
  		 * We are trying to move timer to new_base.
  		 * However we can't change timer's base while it is running,
  		 * so we keep it on the same CPU. No hassle vs. reprogramming
  		 * the event source in the high resolution case. The softirq
  		 * code will take care of this when the timer function has
  		 * completed. There is no conflict as we hold the lock until
  		 * the timer is enqueued.
  		 */
  		if (unlikely(hrtimer_callback_running(timer)))
  			return base;
  
  		/* See the comment in lock_timer_base() */
  		timer->base = NULL;
  		raw_spin_unlock(&base->cpu_base->lock);
  		raw_spin_lock(&new_base->cpu_base->lock);

  		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
  			cpu = this_cpu;
  			raw_spin_unlock(&new_base->cpu_base->lock);
  			raw_spin_lock(&base->cpu_base->lock);
  			timer->base = base;
  			goto again;
  		}
  		timer->base = new_base;
  	}
  	return new_base;
  }
  
  #else /* CONFIG_SMP */
  static inline struct hrtimer_clock_base *
  lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
  {
  	struct hrtimer_clock_base *base = timer->base;

  	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
  
  	return base;
  }
  # define switch_hrtimer_base(t, b, p)	(b)
  
  #endif	/* !CONFIG_SMP */
  
  /*
   * Functions for the union type storage format of ktime_t which are
   * too large for inlining:
   */
  #if BITS_PER_LONG < 64
  # ifndef CONFIG_KTIME_SCALAR
  /**
   * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
   * @kt:		addend
   * @nsec:	the scalar nsec value to add
   *
   * Returns the sum of kt and nsec in ktime_t format
   */
  ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
  {
  	ktime_t tmp;
  
  	if (likely(nsec < NSEC_PER_SEC)) {
  		tmp.tv64 = nsec;
  	} else {
  		unsigned long rem = do_div(nsec, NSEC_PER_SEC);
  		/* Make sure nsec fits into long */
  		if (unlikely(nsec > KTIME_SEC_MAX))
  			return (ktime_t){ .tv64 = KTIME_MAX };
  		tmp = ktime_set((long)nsec, rem);
  	}
  
  	return ktime_add(kt, tmp);
  }
  
  EXPORT_SYMBOL_GPL(ktime_add_ns);
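
  /*
   * Worked example (illustrative comment, not part of the original file):
   * for nsec = 2700000000, do_div() above splits the value into 2 seconds
   * with a remainder of 700000000 ns, so the value folded into @kt via
   * ktime_add() is ktime_set(2, 700000000).
   */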
  
  /**
   * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
   * @kt:		minuend
   * @nsec:	the scalar nsec value to subtract
   *
   * Returns the subtraction of @nsec from @kt in ktime_t format
   */
  ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
  {
  	ktime_t tmp;
  
  	if (likely(nsec < NSEC_PER_SEC)) {
  		tmp.tv64 = nsec;
  	} else {
  		unsigned long rem = do_div(nsec, NSEC_PER_SEC);
  
  		tmp = ktime_set((long)nsec, rem);
  	}
  
  	return ktime_sub(kt, tmp);
  }
  
  EXPORT_SYMBOL_GPL(ktime_sub_ns);
  # endif /* !CONFIG_KTIME_SCALAR */
  
  /*
   * Divide a ktime value by a nanosecond value
   */
  u64 ktime_divns(const ktime_t kt, s64 div)
  {
  	u64 dclc;
  	int sft = 0;
  	dclc = ktime_to_ns(kt);
  	/* Make sure the divisor is less than 2^32: */
  	while (div >> 32) {
  		sft++;
  		div >>= 1;
  	}
  	dclc >>= sft;
  	do_div(dclc, (unsigned long) div);
  	return dclc;
  }
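
  /*
   * Illustrative note (added comment): for a divisor wider than 32 bits,
   * e.g. div = 1ULL << 32, the loop above shifts the divisor right once
   * (sft = 1) so that it fits do_div(), shifts the dividend by the same
   * amount and then divides. The result closely approximates kt / div and
   * is exact whenever the discarded low bits are zero.
   */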
  #endif /* BITS_PER_LONG < 64 */
  /*
   * Add two ktime values and do a safety check for overflow:
   */
  ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
  {
  	ktime_t res = ktime_add(lhs, rhs);
  
  	/*
  	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
  	 * return to user space in a timespec:
  	 */
  	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
  		res = ktime_set(KTIME_SEC_MAX, 0);
  
  	return res;
  }
  EXPORT_SYMBOL_GPL(ktime_add_safe);
  #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
  
  static struct debug_obj_descr hrtimer_debug_descr;
  static void *hrtimer_debug_hint(void *addr)
  {
  	return ((struct hrtimer *) addr)->function;
  }
  /*
   * fixup_init is called when:
   * - an active object is initialized
   */
  static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
  {
  	struct hrtimer *timer = addr;
  
  	switch (state) {
  	case ODEBUG_STATE_ACTIVE:
  		hrtimer_cancel(timer);
  		debug_object_init(timer, &hrtimer_debug_descr);
  		return 1;
  	default:
  		return 0;
  	}
  }
  
  /*
   * fixup_activate is called when:
   * - an active object is activated
   * - an unknown object is activated (might be a statically initialized object)
   */
  static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
  {
  	switch (state) {
  
  	case ODEBUG_STATE_NOTAVAILABLE:
  		WARN_ON_ONCE(1);
  		return 0;
  
  	case ODEBUG_STATE_ACTIVE:
  		WARN_ON(1);
  
  	default:
  		return 0;
  	}
  }
  
  /*
   * fixup_free is called when:
   * - an active object is freed
   */
  static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
  {
  	struct hrtimer *timer = addr;
  
  	switch (state) {
  	case ODEBUG_STATE_ACTIVE:
  		hrtimer_cancel(timer);
  		debug_object_free(timer, &hrtimer_debug_descr);
  		return 1;
  	default:
  		return 0;
  	}
  }
  
  static struct debug_obj_descr hrtimer_debug_descr = {
  	.name		= "hrtimer",
  	.debug_hint	= hrtimer_debug_hint,
  	.fixup_init	= hrtimer_fixup_init,
  	.fixup_activate	= hrtimer_fixup_activate,
  	.fixup_free	= hrtimer_fixup_free,
  };
  
  static inline void debug_hrtimer_init(struct hrtimer *timer)
  {
  	debug_object_init(timer, &hrtimer_debug_descr);
  }
  
  static inline void debug_hrtimer_activate(struct hrtimer *timer)
  {
  	debug_object_activate(timer, &hrtimer_debug_descr);
  }
  
  static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
  {
  	debug_object_deactivate(timer, &hrtimer_debug_descr);
  }
  
  static inline void debug_hrtimer_free(struct hrtimer *timer)
  {
  	debug_object_free(timer, &hrtimer_debug_descr);
  }
  
  static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
  			   enum hrtimer_mode mode);
  
  void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
  			   enum hrtimer_mode mode)
  {
  	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
  	__hrtimer_init(timer, clock_id, mode);
  }
  EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);
  
  void destroy_hrtimer_on_stack(struct hrtimer *timer)
  {
  	debug_object_free(timer, &hrtimer_debug_descr);
  }
  
  #else
  static inline void debug_hrtimer_init(struct hrtimer *timer) { }
  static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
  static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
  #endif
  static inline void
  debug_init(struct hrtimer *timer, clockid_t clockid,
  	   enum hrtimer_mode mode)
  {
  	debug_hrtimer_init(timer);
  	trace_hrtimer_init(timer, clockid, mode);
  }
  
  static inline void debug_activate(struct hrtimer *timer)
  {
  	debug_hrtimer_activate(timer);
  	trace_hrtimer_start(timer);
  }
  
  static inline void debug_deactivate(struct hrtimer *timer)
  {
  	debug_hrtimer_deactivate(timer);
  	trace_hrtimer_cancel(timer);
  }
  /* High resolution timer related functions */
  #ifdef CONFIG_HIGH_RES_TIMERS
  
  /*
   * High resolution timer enabled ?
   */
  static int hrtimer_hres_enabled __read_mostly  = 1;
  
  /*
   * Enable / Disable high resolution mode
   */
  static int __init setup_hrtimer_hres(char *str)
  {
  	if (!strcmp(str, "off"))
  		hrtimer_hres_enabled = 0;
  	else if (!strcmp(str, "on"))
  		hrtimer_hres_enabled = 1;
  	else
  		return 0;
  	return 1;
  }
  
  __setup("highres=", setup_hrtimer_hres);
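
  /*
   * Example (illustrative, added comment): booting with "highres=off" on
   * the kernel command line clears hrtimer_hres_enabled and keeps the
   * system in low resolution mode; "highres=on" restores the default.
   * Any other value is rejected by setup_hrtimer_hres() above.
   */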
  
  /*
   * hrtimer_is_hres_enabled - query whether high resolution mode is enabled
   */
  static inline int hrtimer_is_hres_enabled(void)
  {
  	return hrtimer_hres_enabled;
  }
  
  /*
   * Is the high resolution mode active ?
   */
  static inline int hrtimer_hres_active(void)
  {
  	return __this_cpu_read(hrtimer_bases.hres_active);
  }
  
  /*
   * Reprogram the event source with checking both queues for the
   * next event
   * Called with interrupts disabled and base->lock held
   */
  static void
  hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
  {
  	int i;
  	struct hrtimer_clock_base *base = cpu_base->clock_base;
  	ktime_t expires, expires_next;

  	expires_next.tv64 = KTIME_MAX;
  
  	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
  		struct hrtimer *timer;
  		struct timerqueue_node *next;

  		next = timerqueue_getnext(&base->active);
  		if (!next)
  			continue;
  		timer = container_of(next, struct hrtimer, node);
  		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
  		/*
  		 * clock_was_set() has changed base->offset so the
  		 * result might be negative. Fix it up to prevent a
  		 * false positive in clockevents_program_event()
  		 */
  		if (expires.tv64 < 0)
  			expires.tv64 = 0;
  		if (expires.tv64 < expires_next.tv64)
  			expires_next = expires;
  	}
  	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
  		return;
  
  	cpu_base->expires_next.tv64 = expires_next.tv64;
  	if (cpu_base->expires_next.tv64 != KTIME_MAX)
  		tick_program_event(cpu_base->expires_next, 1);
  }
  
  /*
   * Shared reprogramming for clock_realtime and clock_monotonic
   *
   * When a timer is enqueued and expires earlier than the already enqueued
   * timers, we have to check, whether it expires earlier than the timer for
   * which the clock event device was armed.
   *
   * Called with interrupts disabled and base->cpu_base.lock held
   */
  static int hrtimer_reprogram(struct hrtimer *timer,
  			     struct hrtimer_clock_base *base)
  {
  	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
  	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
  	int res;
  	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

  	/*
  	 * When the callback is running, we do not reprogram the clock event
  	 * device. The timer callback is either running on a different CPU or
  	 * the callback is executed in the hrtimer_interrupt context. The
  	 * reprogramming is handled either by the softirq, which called the
  	 * callback or at the end of the hrtimer_interrupt.
  	 */
  	if (hrtimer_callback_running(timer))
  		return 0;
  	/*
  	 * CLOCK_REALTIME timer might be requested with an absolute
  	 * expiry time which is less than base->offset. Nothing wrong
  	 * about that, just avoid calling into the tick code, which
  	 * now objects to negative expiry values.
  	 */
  	if (expires.tv64 < 0)
  		return -ETIME;
  	if (expires.tv64 >= cpu_base->expires_next.tv64)
  		return 0;
  
  	/*
  	 * If a hang was detected in the last timer interrupt then we
  	 * do not schedule a timer which is earlier than the expiry
  	 * which we enforced in the hang detection. We want the system
  	 * to make progress.
  	 */
  	if (cpu_base->hang_detected)
  		return 0;
  
  	/*
  	 * Clockevents returns -ETIME, when the event was in the past.
  	 */
  	res = tick_program_event(expires, 0);
  	if (!IS_ERR_VALUE(res))
  		cpu_base->expires_next = expires;
  	return res;
  }
  /*
   * Initialize the high resolution related parts of cpu_base
   */
  static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
  {
  	base->expires_next.tv64 = KTIME_MAX;
  	base->hres_active = 0;
  }
  
  /*
   * When High resolution timers are active, try to reprogram. Note, that in case
   * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
   * check happens. The timer gets enqueued into the rbtree. The reprogramming
   * and expiry check is done in the hrtimer_interrupt or in the softirq.
   */
  static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
  					    struct hrtimer_clock_base *base)
  {
  	return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
  }
  static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
  {
  	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
  	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
  	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

  	return ktime_get_update_offsets(offs_real, offs_boot, offs_tai);
  }
  /*
   * Retrigger next event is called after clock was set
   *
   * Called with interrupts disabled via on_each_cpu()
   */
  static void retrigger_next_event(void *arg)
  {
  	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
  
  	if (!hrtimer_hres_active())
  		return;
  	raw_spin_lock(&base->lock);
  	hrtimer_update_base(base);
  	hrtimer_force_reprogram(base, 0);
  	raw_spin_unlock(&base->lock);
  }

  /*
   * Switch to high resolution mode
   */
  static int hrtimer_switch_to_hres(void)
  {
  	int i, cpu = smp_processor_id();
  	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
  	unsigned long flags;
  
  	if (base->hres_active)
  		return 1;
  
  	local_irq_save(flags);
  
  	if (tick_init_highres()) {
  		local_irq_restore(flags);
  		printk(KERN_WARNING "Could not switch to high resolution "
  				    "mode on CPU %d\n", cpu);
  		return 0;
  	}
  	base->hres_active = 1;
  	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
  		base->clock_base[i].resolution = KTIME_HIGH_RES;
  
  	tick_setup_sched_timer();
  	/* "Retrigger" the interrupt to get things going */
  	retrigger_next_event(NULL);
  	local_irq_restore(flags);
  	return 1;
  }
  static void clock_was_set_work(struct work_struct *work)
  {
  	clock_was_set();
  }
  
  static DECLARE_WORK(hrtimer_work, clock_was_set_work);
  /*
   * Called from timekeeping and resume code to reprogram the hrtimer
   * interrupt device on all cpus.
   */
  void clock_was_set_delayed(void)
  {
  	schedule_work(&hrtimer_work);
  }
  #else
  
  static inline int hrtimer_hres_active(void) { return 0; }
  static inline int hrtimer_is_hres_enabled(void) { return 0; }
  static inline int hrtimer_switch_to_hres(void) { return 0; }
  static inline void
  hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
  static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
  					    struct hrtimer_clock_base *base)
  {
  	return 0;
  }
  static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
  static inline void retrigger_next_event(void *arg) { }
  
  #endif /* CONFIG_HIGH_RES_TIMERS */
  /*
   * Clock realtime was set
   *
   * Change the offset of the realtime clock vs. the monotonic
   * clock.
   *
   * We might have to reprogram the high resolution timer interrupt. On
   * SMP we call the architecture specific code to retrigger _all_ high
   * resolution timer interrupts. On UP we just disable interrupts and
   * call the high resolution interrupt code.
   */
  void clock_was_set(void)
  {
  #ifdef CONFIG_HIGH_RES_TIMERS
  	/* Retrigger the CPU local events everywhere */
  	on_each_cpu(retrigger_next_event, NULL, 1);
  #endif
  	timerfd_clock_was_set();
  }
  
  /*
   * During resume we might have to reprogram the high resolution timer
   * interrupt on all online CPUs.  However, all other CPUs will be
   * stopped with interrupts disabled so the clock_was_set() call
   * must be deferred.
   */
  void hrtimers_resume(void)
  {
  	WARN_ONCE(!irqs_disabled(),
  		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");
  	/* Retrigger on the local CPU */
  	retrigger_next_event(NULL);
  	/* And schedule a retrigger for all others */
  	clock_was_set_delayed();
  }
  static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
  {
  #ifdef CONFIG_TIMER_STATS
  	if (timer->start_site)
  		return;
  	timer->start_site = __builtin_return_address(0);
  	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
  	timer->start_pid = current->pid;
  #endif
  }
  
  static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
  {
  #ifdef CONFIG_TIMER_STATS
  	timer->start_site = NULL;
  #endif
  }
  
  static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
  {
  #ifdef CONFIG_TIMER_STATS
  	if (likely(!timer_stats_active))
  		return;
  	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
  				 timer->function, timer->start_comm, 0);
  #endif
  }

  /*
   * Counterpart to lock_hrtimer_base above:
   */
  static inline
  void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
  {
  	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
  }
  
  /**
   * hrtimer_forward - forward the timer expiry
   * @timer:	hrtimer to forward
   * @now:	forward past this time
   * @interval:	the interval to forward
   *
   * Forward the timer expiry so it will expire in the future.
   * Returns the number of overruns.
   */
  u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
  {
  	u64 orun = 1;
  	ktime_t delta;

  	delta = ktime_sub(now, hrtimer_get_expires(timer));
  
  	if (delta.tv64 < 0)
  		return 0;
  	if (interval.tv64 < timer->base->resolution.tv64)
  		interval.tv64 = timer->base->resolution.tv64;
  	if (unlikely(delta.tv64 >= interval.tv64)) {
  		s64 incr = ktime_to_ns(interval);
  
  		orun = ktime_divns(delta, incr);
  		hrtimer_add_expires_ns(timer, incr * orun);
  		if (hrtimer_get_expires_tv64(timer) > now.tv64)
  			return orun;
  		/*
  		 * This (and the ktime_add() below) is the
  		 * correction for exact:
  		 */
  		orun++;
  	}
  	hrtimer_add_expires(timer, interval);
  
  	return orun;
  }
  EXPORT_SYMBOL_GPL(hrtimer_forward);
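
  /*
   * Illustrative usage sketch (added comment, not part of the original
   * file): a periodic callback typically forwards its own expiry past the
   * current time and asks to be restarted. The 10 ms interval and the
   * function name below are made-up examples.
   *
   *	static enum hrtimer_restart my_periodic_cb(struct hrtimer *timer)
   *	{
   *		ktime_t interval = ktime_set(0, 10 * NSEC_PER_MSEC);
   *
   *		hrtimer_forward(timer, timer->base->get_time(), interval);
   *		return HRTIMER_RESTART;
   *	}
   *
   * The returned overrun count can be used to detect missed periods.
   */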
  
  /*
   * enqueue_hrtimer - internal function to (re)start a timer
   *
   * The timer is inserted in expiry order. Insertion into the
   * red black tree is O(log(n)). Must hold the base lock.
   *
   * Returns 1 when the new timer is the leftmost timer in the tree.
   */
  static int enqueue_hrtimer(struct hrtimer *timer,
  			   struct hrtimer_clock_base *base)
  {
  	debug_activate(timer);

  	timerqueue_add(&base->active, &timer->node);
  	base->cpu_base->active_bases |= 1 << base->index;

  	/*
  	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
  	 * state of a possibly running callback.
  	 */
  	timer->state |= HRTIMER_STATE_ENQUEUED;

  	return (&timer->node == base->active.next);
  }
  
  /*
   * __remove_hrtimer - internal function to remove a timer
   *
   * Caller must hold the base lock.
   *
   * High resolution timer mode reprograms the clock event device when the
   * timer is the one which expires next. The caller can disable this by setting
   * reprogram to zero. This is useful, when the context does a reprogramming
   * anyway (e.g. timer interrupt)
   */
  static void __remove_hrtimer(struct hrtimer *timer,
  			     struct hrtimer_clock_base *base,
  			     unsigned long newstate, int reprogram)
  {
  	struct timerqueue_node *next_timer;
  	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
  		goto out;
  	next_timer = timerqueue_getnext(&base->active);
  	timerqueue_del(&base->active, &timer->node);
  	if (&timer->node == next_timer) {
  #ifdef CONFIG_HIGH_RES_TIMERS
  		/* Reprogram the clock event device, if enabled */
  		if (reprogram && hrtimer_hres_active()) {
  			ktime_t expires;
  
  			expires = ktime_sub(hrtimer_get_expires(timer),
  					    base->offset);
  			if (base->cpu_base->expires_next.tv64 == expires.tv64)
  				hrtimer_force_reprogram(base->cpu_base, 1);
  		}
  #endif
  	}
  	if (!timerqueue_getnext(&base->active))
  		base->cpu_base->active_bases &= ~(1 << base->index);
  out:
  	timer->state = newstate;
  }
  
  /*
   * remove hrtimer, called with base lock held
   */
  static inline int
  remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
  {
  	if (hrtimer_is_queued(timer)) {
  		unsigned long state;
  		int reprogram;
  
  		/*
  		 * Remove the timer and force reprogramming when high
  		 * resolution mode is active and the timer is on the current
  		 * CPU. If we remove a timer on another CPU, reprogramming is
  		 * skipped. The interrupt event on this CPU is fired and
  		 * reprogramming happens in the interrupt handler. This is a
  		 * rare case and less expensive than a smp call.
  		 */
  		debug_deactivate(timer);
  		timer_stats_hrtimer_clear_start_info(timer);
  		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
  		/*
  		 * We must preserve the CALLBACK state flag here,
  		 * otherwise we could move the timer base in
  		 * switch_hrtimer_base.
  		 */
  		state = timer->state & HRTIMER_STATE_CALLBACK;
  		__remove_hrtimer(timer, base, state, reprogram);
  		return 1;
  	}
  	return 0;
  }
  int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
  		unsigned long delta_ns, const enum hrtimer_mode mode,
  		int wakeup)
  {
  	struct hrtimer_clock_base *base, *new_base;
  	unsigned long flags;
  	int ret, leftmost;
  
  	base = lock_hrtimer_base(timer, &flags);
  
  	/* Remove an active timer from the queue: */
  	ret = remove_hrtimer(timer, base);
  
  	/* Switch the timer base, if necessary: */
  	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

  	if (mode & HRTIMER_MODE_REL) {
  		tim = ktime_add_safe(tim, new_base->get_time());
  		/*
  		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
  		 * to signal that they simply return xtime in
  		 * do_gettimeoffset(). In this case we want to round up by
  		 * resolution when starting a relative timer, to avoid short
  		 * timeouts. This will go away with the GTOD framework.
  		 */
  #ifdef CONFIG_TIME_LOW_RES
  		tim = ktime_add_safe(tim, base->resolution);
  #endif
  	}

  	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

  	timer_stats_hrtimer_set_start_info(timer);
  	leftmost = enqueue_hrtimer(timer, new_base);
  	/*
  	 * Only allow reprogramming if the new base is on this CPU.
  	 * (it might still be on another CPU if the timer was pending)
  	 *
  	 * XXX send_remote_softirq() ?
  	 */
  	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
  		&& hrtimer_enqueue_reprogram(timer, new_base)) {
  		if (wakeup) {
  			/*
  			 * We need to drop cpu_base->lock to avoid a
  			 * lock ordering issue vs. rq->lock.
  			 */
  			raw_spin_unlock(&new_base->cpu_base->lock);
  			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
  			local_irq_restore(flags);
  			return ret;
  		} else {
  			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
  		}
  	}
  
  	unlock_hrtimer_base(timer, &flags);
  
  	return ret;
  }
  
  /**
   * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
   * @timer:	the timer to be added
   * @tim:	expiry time
   * @delta_ns:	"slack" range for the timer
   * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
   *		relative (HRTIMER_MODE_REL)
   *
   * Returns:
   *  0 on success
   *  1 when the timer was active
   */
  int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
  		unsigned long delta_ns, const enum hrtimer_mode mode)
  {
  	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
  }
  EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
  
  /**
   * hrtimer_start - (re)start an hrtimer on the current CPU
   * @timer:	the timer to be added
   * @tim:	expiry time
   * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
   *		relative (HRTIMER_MODE_REL)
   *
   * Returns:
   *  0 on success
   *  1 when the timer was active
   */
  int
  hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
  {
  	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
  }
  EXPORT_SYMBOL_GPL(hrtimer_start);
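
  /*
   * Illustrative usage sketch (added comment, not part of the original
   * file): arming a one-shot timer 100 ms in the future. The timer,
   * callback and interval below are made-up examples.
   *
   *	static struct hrtimer my_timer;
   *
   *	static enum hrtimer_restart my_cb(struct hrtimer *timer)
   *	{
   *		return HRTIMER_NORESTART;
   *	}
   *
   *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
   *	my_timer.function = my_cb;
   *	hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
   *		      HRTIMER_MODE_REL);
   */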


  /**
   * hrtimer_try_to_cancel - try to deactivate a timer
   * @timer:	hrtimer to stop
   *
   * Returns:
   *  0 when the timer was not active
   *  1 when the timer was active
   * -1 when the timer is currently executing the callback function and
   *    cannot be stopped
   */
  int hrtimer_try_to_cancel(struct hrtimer *timer)
  {
  	struct hrtimer_clock_base *base;
  	unsigned long flags;
  	int ret = -1;
  
  	base = lock_hrtimer_base(timer, &flags);
  	if (!hrtimer_callback_running(timer))
  		ret = remove_hrtimer(timer, base);
  
  	unlock_hrtimer_base(timer, &flags);
  
  	return ret;
  
  }
  EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
  
  /**
   * hrtimer_cancel - cancel a timer and wait for the handler to finish.
   * @timer:	the timer to be cancelled
   *
   * Returns:
   *  0 when the timer was not active
   *  1 when the timer was active
   */
  int hrtimer_cancel(struct hrtimer *timer)
  {
  	for (;;) {
  		int ret = hrtimer_try_to_cancel(timer);
  
  		if (ret >= 0)
  			return ret;
  		cpu_relax();
  	}
  }
  EXPORT_SYMBOL_GPL(hrtimer_cancel);
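
  /*
   * Usage note (added comment): hrtimer_cancel() spins in the loop above
   * until a running callback has finished, so it must not be called from
   * the timer's own callback or while holding a lock that the callback
   * also takes; hrtimer_try_to_cancel() is the non-blocking alternative
   * for such contexts.
   */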
  
  /**
   * hrtimer_get_remaining - get remaining time for the timer
   * @timer:	the timer to read
   */
  ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
  {
  	unsigned long flags;
  	ktime_t rem;
  	lock_hrtimer_base(timer, &flags);
  	rem = hrtimer_expires_remaining(timer);
  	unlock_hrtimer_base(timer, &flags);
  
  	return rem;
  }
  EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
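
  /*
   * Illustrative usage (added comment): the remaining time is an ordinary
   * ktime_t delta, e.g.
   *
   *	ktime_t rem = hrtimer_get_remaining(&my_timer);
   *	s64 rem_ns = ktime_to_ns(rem);
   *
   * A non-positive value means the timer has already expired or is about
   * to; "my_timer" is a made-up example name.
   */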

  #ifdef CONFIG_NO_HZ_COMMON
  /**
   * hrtimer_get_next_event - get the time until next expiry event
   *
   * Returns the delta to the next expiry event or KTIME_MAX if no timer
   * is pending.
   */
  ktime_t hrtimer_get_next_event(void)
  {
  	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
  	struct hrtimer_clock_base *base = cpu_base->clock_base;
  	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
  	unsigned long flags;
  	int i;
  	raw_spin_lock_irqsave(&cpu_base->lock, flags);

  	if (!hrtimer_hres_active()) {
  		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
  			struct hrtimer *timer;
  			struct timerqueue_node *next;

  			next = timerqueue_getnext(&base->active);
  			if (!next)
  				continue;

  			timer = container_of(next, struct hrtimer, node);
  			delta.tv64 = hrtimer_get_expires_tv64(timer);
  			delta = ktime_sub(delta, base->get_time());
  			if (delta.tv64 < mindelta.tv64)
  				mindelta.tv64 = delta.tv64;
  		}
  	}

  	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

  	if (mindelta.tv64 < 0)
  		mindelta.tv64 = 0;
  	return mindelta;
  }
  #endif
  static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
  			   enum hrtimer_mode mode)
  {
  	struct hrtimer_cpu_base *cpu_base;
  	int base;

  	memset(timer, 0, sizeof(struct hrtimer));
  	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

  	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
  		clock_id = CLOCK_MONOTONIC;
  	base = hrtimer_clockid_to_base(clock_id);
  	timer->base = &cpu_base->clock_base[base];
998adc3dd   John Stultz   hrtimers: Convert...
1133
  	timerqueue_init(&timer->node);
82f67cd9f   Ingo Molnar   [PATCH] Add debug...
1134
1135
1136
1137
1138
1139
  
  #ifdef CONFIG_TIMER_STATS
  	timer->start_site = NULL;
  	timer->start_pid = -1;
  	memset(timer->start_comm, 0, TASK_COMM_LEN);
  #endif
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1140
  }
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
  
  /**
   * hrtimer_init - initialize a timer to the given clock
   * @timer:	the timer to be initialized
   * @clock_id:	the clock to be used
   * @mode:	timer mode abs/rel
   */
  void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
  		  enum hrtimer_mode mode)
  {
c6a2a1770   Xiao Guangrong   hrtimer: Add trac...
1151
  	debug_init(timer, clock_id, mode);
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1152
1153
  	__hrtimer_init(timer, clock_id, mode);
  }
8d16b7642   Stephen Hemminger   [PATCH] hrtimer: ...
1154
  EXPORT_SYMBOL_GPL(hrtimer_init);
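  /*
   * Illustrative sketch only (not part of hrtimer.c): a typical driver-style
   * use of hrtimer_init(). The names my_timeout_fn and my_arm_timeout are
   * hypothetical. The callback runs in hardirq context and must return
   * HRTIMER_NORESTART (one-shot) or HRTIMER_RESTART (requeue).
   */
  #if 0
  static enum hrtimer_restart my_timeout_fn(struct hrtimer *timer)
  {
  	/* react to the timeout, e.g. flag an error or wake a worker */
  	return HRTIMER_NORESTART;
  }
  
  static void my_arm_timeout(struct hrtimer *timer)
  {
  	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  	timer->function = my_timeout_fn;
  	/* fire 10ms from now, relative to CLOCK_MONOTONIC */
  	hrtimer_start(timer, ktime_set(0, 10 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
  }
  #endif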
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1155
1156
1157
  
  /**
   * hrtimer_get_res - get the timer resolution for a clock
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1158
1159
1160
   * @which_clock: which clock to query
   * @tp:		 pointer to timespec variable to store the resolution
   *
72fd4a35a   Robert P. J. Day   [PATCH] Numerous ...
1161
1162
   * Store the resolution of the clock selected by @which_clock in the
   * variable pointed to by @tp.
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1163
1164
1165
   */
  int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
  {
3c8aa39d7   Thomas Gleixner   [PATCH] hrtimers:...
1166
  	struct hrtimer_cpu_base *cpu_base;
e06383db9   John Stultz   hrtimers: extend ...
1167
  	int base = hrtimer_clockid_to_base(which_clock);
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1168

3c8aa39d7   Thomas Gleixner   [PATCH] hrtimers:...
1169
  	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
e06383db9   John Stultz   hrtimers: extend ...
1170
  	*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1171
1172
1173
  
  	return 0;
  }
8d16b7642   Stephen Hemminger   [PATCH] hrtimer: ...
1174
  EXPORT_SYMBOL_GPL(hrtimer_get_res);
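  /*
   * Note: this is what clock_getres() ends up reporting for the hrtimer
   * based clocks (the posix-timers code uses it as the .clock_getres
   * method), i.e. typically 1 nsec when high resolution mode is active
   * and one tick otherwise.
   */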
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1175

c6a2a1770   Xiao Guangrong   hrtimer: Add trac...
1176
  static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1177
1178
1179
1180
1181
  {
  	struct hrtimer_clock_base *base = timer->base;
  	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
  	enum hrtimer_restart (*fn)(struct hrtimer *);
  	int restart;
ca109491f   Peter Zijlstra   hrtimer: removing...
1182
  	WARN_ON(!irqs_disabled());
c6a2a1770   Xiao Guangrong   hrtimer: Add trac...
1183
  	debug_deactivate(timer);
d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1184
1185
  	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
  	timer_stats_account_hrtimer(timer);
d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1186
  	fn = timer->function;
ca109491f   Peter Zijlstra   hrtimer: removing...
1187
1188
1189
1190
1191
1192
  
  	/*
  	 * Because we run timers from hardirq context, there is no chance
  	 * they get migrated to another cpu, therefore it's safe to unlock
  	 * the timer base.
  	 */
ecb49d1a6   Thomas Gleixner   hrtimers: Convert...
1193
  	raw_spin_unlock(&cpu_base->lock);
c6a2a1770   Xiao Guangrong   hrtimer: Add trac...
1194
  	trace_hrtimer_expire_entry(timer, now);
ca109491f   Peter Zijlstra   hrtimer: removing...
1195
  	restart = fn(timer);
c6a2a1770   Xiao Guangrong   hrtimer: Add trac...
1196
  	trace_hrtimer_expire_exit(timer);
ecb49d1a6   Thomas Gleixner   hrtimers: Convert...
1197
  	raw_spin_lock(&cpu_base->lock);
d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1198
1199
  
  	/*
e3f1d8837   Thomas Gleixner   hrtimer: fixup co...
1200
1201
1202
  	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
  	 * we do not reprogram the event hardware. This happens either in
  	 * hrtimer_start_range_ns() or in hrtimer_interrupt().
d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1203
1204
1205
  	 */
  	if (restart != HRTIMER_NORESTART) {
  		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
a6037b61c   Peter Zijlstra   hrtimer: fix recu...
1206
  		enqueue_hrtimer(timer, base);
d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1207
  	}
f13d4f979   Salman Qazi   hrtimer: Preserve...
1208
1209
  
  	WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1210
1211
  	timer->state &= ~HRTIMER_STATE_CALLBACK;
  }
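  /*
   * Illustrative sketch only (not part of hrtimer.c) of the HRTIMER_RESTART
   * path handled above: a self-rearming callback forwards its expiry past
   * now and returns HRTIMER_RESTART, so __run_hrtimer() enqueues it again.
   * The name and the 10ms period are hypothetical.
   */
  #if 0
  static enum hrtimer_restart my_periodic_fn(struct hrtimer *timer)
  {
  	/* push the expiry forward by 10ms, skipping any missed periods */
  	hrtimer_forward_now(timer, ktime_set(0, 10 * NSEC_PER_MSEC));
  	return HRTIMER_RESTART;	/* __run_hrtimer() requeues the timer */
  }
  #endif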
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1212
1213
1214
1215
1216
1217
1218
1219
1220
  #ifdef CONFIG_HIGH_RES_TIMERS
  
  /*
   * High resolution timer interrupt
   * Called with interrupts disabled
   */
  void hrtimer_interrupt(struct clock_event_device *dev)
  {
  	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
41d2e4949   Thomas Gleixner   hrtimer: Tune hrt...
1221
1222
  	ktime_t expires_next, now, entry_time, delta;
  	int i, retries = 0;
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1223
1224
1225
1226
  
  	BUG_ON(!cpu_base->hres_active);
  	cpu_base->nr_events++;
  	dev->next_event.tv64 = KTIME_MAX;
196951e91   Thomas Gleixner   hrtimers: Move lo...
1227
  	raw_spin_lock(&cpu_base->lock);
5baefd6d8   John Stultz   hrtimer: Update h...
1228
  	entry_time = now = hrtimer_update_base(cpu_base);
41d2e4949   Thomas Gleixner   hrtimer: Tune hrt...
1229
  retry:
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1230
  	expires_next.tv64 = KTIME_MAX;
6ff7041db   Thomas Gleixner   hrtimer: Fix migr...
1231
1232
1233
1234
1235
1236
1237
1238
  	/*
  	 * We set expires_next to KTIME_MAX here with cpu_base->lock
  	 * held to prevent a timer from being enqueued in our queue via
  	 * the migration code. This does not affect enqueueing of
  	 * timers which run their callback and need to be requeued on
  	 * this CPU.
  	 */
  	cpu_base->expires_next.tv64 = KTIME_MAX;
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1239
  	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
ab8177bc5   Thomas Gleixner   hrtimers: Avoid t...
1240
  		struct hrtimer_clock_base *base;
998adc3dd   John Stultz   hrtimers: Convert...
1241
  		struct timerqueue_node *node;
ab8177bc5   Thomas Gleixner   hrtimers: Avoid t...
1242
1243
1244
1245
  		ktime_t basenow;
  
  		if (!(cpu_base->active_bases & (1 << i)))
  			continue;
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1246

ab8177bc5   Thomas Gleixner   hrtimers: Avoid t...
1247
  		base = cpu_base->clock_base + i;
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1248
  		basenow = ktime_add(now, base->offset);
998adc3dd   John Stultz   hrtimers: Convert...
1249
  		while ((node = timerqueue_getnext(&base->active))) {
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1250
  			struct hrtimer *timer;
998adc3dd   John Stultz   hrtimers: Convert...
1251
  			timer = container_of(node, struct hrtimer, node);
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1252

654c8e0b1   Arjan van de Ven   hrtimer: turn hrt...
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
  			/*
  			 * The immediate goal for using the softexpires is
  			 * minimizing wakeups, not running timers at the
  			 * earliest interrupt after their soft expiration.
  			 * This allows us to avoid using a Priority Search
  			 * Tree, which can answer a stabbing query for
  			 * overlapping intervals and instead use the simple
  			 * BST we already have.
  			 * We don't add extra wakeups by delaying timers that
  			 * are to the right of a not yet expired timer, because that
  			 * timer will have to trigger a wakeup anyway.
  			 */
  
  			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1267
  				ktime_t expires;
cc584b213   Arjan van de Ven   hrtimer: convert ...
1268
  				expires = ktime_sub(hrtimer_get_expires(timer),
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1269
  						    base->offset);
8f294b5a1   Prarit Bhargava   hrtimer: Add expi...
1270
1271
  				if (expires.tv64 < 0)
  					expires.tv64 = KTIME_MAX;
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1272
1273
1274
1275
  				if (expires.tv64 < expires_next.tv64)
  					expires_next = expires;
  				break;
  			}
c6a2a1770   Xiao Guangrong   hrtimer: Add trac...
1276
  			__run_hrtimer(timer, &basenow);
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1277
  		}
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1278
  	}
6ff7041db   Thomas Gleixner   hrtimer: Fix migr...
1279
1280
1281
1282
  	/*
  	 * Store the new expiry value so the migration code can verify
  	 * against it.
  	 */
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1283
  	cpu_base->expires_next = expires_next;
ecb49d1a6   Thomas Gleixner   hrtimers: Convert...
1284
  	raw_spin_unlock(&cpu_base->lock);
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1285
1286
  
  	/* Reprogramming necessary ? */
41d2e4949   Thomas Gleixner   hrtimer: Tune hrt...
1287
1288
1289
1290
  	if (expires_next.tv64 == KTIME_MAX ||
  	    !tick_program_event(expires_next, 0)) {
  		cpu_base->hang_detected = 0;
  		return;
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1291
  	}
41d2e4949   Thomas Gleixner   hrtimer: Tune hrt...
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
  
  	/*
  	 * The next timer was already expired due to:
  	 * - tracing
  	 * - long lasting callbacks
  	 * - being scheduled away when running in a VM
  	 *
  	 * We need to prevent the hrtimer interrupt routine from looping
  	 * forever. We give it 3 attempts to avoid overreacting to some
  	 * spurious event.
5baefd6d8   John Stultz   hrtimer: Update h...
1302
1303
1304
  	 *
  	 * Acquire base lock for updating the offsets and retrieving
  	 * the current time.
41d2e4949   Thomas Gleixner   hrtimer: Tune hrt...
1305
  	 */
196951e91   Thomas Gleixner   hrtimers: Move lo...
1306
  	raw_spin_lock(&cpu_base->lock);
5baefd6d8   John Stultz   hrtimer: Update h...
1307
  	now = hrtimer_update_base(cpu_base);
41d2e4949   Thomas Gleixner   hrtimer: Tune hrt...
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
  	cpu_base->nr_retries++;
  	if (++retries < 3)
  		goto retry;
  	/*
  	 * Give the system a chance to do something other than looping
  	 * here. We stored the entry time, so we know exactly how long
  	 * we spent here. We schedule the next event this amount of
  	 * time away.
  	 */
  	cpu_base->nr_hangs++;
  	cpu_base->hang_detected = 1;
196951e91   Thomas Gleixner   hrtimers: Move lo...
1319
  	raw_spin_unlock(&cpu_base->lock);
41d2e4949   Thomas Gleixner   hrtimer: Tune hrt...
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
  	delta = ktime_sub(now, entry_time);
  	if (delta.tv64 > cpu_base->max_hang_time.tv64)
  		cpu_base->max_hang_time = delta;
  	/*
  	 * Limit the enforced delay to a sensible value: give the CPU
  	 * up to 100ms to catch up.
  	 */
  	if (delta.tv64 > 100 * NSEC_PER_MSEC)
  		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
  	else
  		expires_next = ktime_add(now, delta);
  	tick_program_event(expires_next, 1);
  	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns
  ",
  		    ktime_to_ns(delta));
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1335
  }
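  /*
   * Worked example for the hang path above: if the interrupt, including its
   * retries, took delta = 5ms, the next event is programmed 5ms from now;
   * if it took 250ms, the delay is capped and the next event is programmed
   * 100ms from now.
   */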
8bdec955b   Thomas Gleixner   hrtimer: splitout...
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
  /*
   * local version of hrtimer_peek_ahead_timers() called with interrupts
   * disabled.
   */
  static void __hrtimer_peek_ahead_timers(void)
  {
  	struct tick_device *td;
  
  	if (!hrtimer_hres_active())
  		return;
  
  	td = &__get_cpu_var(tick_cpu_device);
  	if (td && td->evtdev)
  		hrtimer_interrupt(td->evtdev);
  }
2e94d1f71   Arjan van de Ven   hrtimer: peek at ...
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
  /**
   * hrtimer_peek_ahead_timers -- run soft-expired timers now
   *
   * hrtimer_peek_ahead_timers will peek at the timer queue of
   * the current cpu and check if there are any timers for which
   * the soft expiry time has passed. If any such timers exist,
   * they are run immediately and then removed from the timer queue.
   *
   */
  void hrtimer_peek_ahead_timers(void)
  {
643bdf68f   Thomas Gleixner   hrtimers: simplif...
1362
  	unsigned long flags;
dc4304f7d   Arjan van de Ven   rangetimers: fix ...
1363

2e94d1f71   Arjan van de Ven   hrtimer: peek at ...
1364
  	local_irq_save(flags);
8bdec955b   Thomas Gleixner   hrtimer: splitout...
1365
  	__hrtimer_peek_ahead_timers();
2e94d1f71   Arjan van de Ven   hrtimer: peek at ...
1366
1367
  	local_irq_restore(flags);
  }
a6037b61c   Peter Zijlstra   hrtimer: fix recu...
1368
1369
1370
1371
  static void run_hrtimer_softirq(struct softirq_action *h)
  {
  	hrtimer_peek_ahead_timers();
  }
82c5b7b52   Ingo Molnar   hrtimer: splitout...
1372
1373
1374
1375
1376
  #else /* CONFIG_HIGH_RES_TIMERS */
  
  static inline void __hrtimer_peek_ahead_timers(void) { }
  
  #endif	/* !CONFIG_HIGH_RES_TIMERS */
82f67cd9f   Ingo Molnar   [PATCH] Add debug...
1377

d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1378
1379
1380
1381
1382
1383
1384
1385
1386
  /*
   * Called from timer softirq every jiffy, expire hrtimers:
   *
   * For HRT it is the fallback code to run the softirq in the timer
   * softirq context in case the hrtimer initialization failed or has
   * not been done yet.
   */
  void hrtimer_run_pending(void)
  {
d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1387
1388
  	if (hrtimer_hres_active())
  		return;
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1389

d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
  	/*
  	 * This _is_ ugly: We have to check in the softirq context
  	 * whether we can switch to highres and/or nohz mode. The
  	 * clocksource switch happens in the timer interrupt with
  	 * xtime_lock held. Notification from there only sets the
  	 * check bit in the tick_oneshot code, otherwise we might
  	 * deadlock vs. xtime_lock.
  	 */
  	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
  		hrtimer_switch_to_hres();
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1400
  }
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1401
  /*
d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1402
   * Called from hardirq context every jiffy
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1403
   */
833883d9a   Dimitri Sivanich   hrtimer: reduce c...
1404
  void hrtimer_run_queues(void)
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1405
  {
998adc3dd   John Stultz   hrtimers: Convert...
1406
  	struct timerqueue_node *node;
833883d9a   Dimitri Sivanich   hrtimer: reduce c...
1407
1408
1409
  	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
  	struct hrtimer_clock_base *base;
  	int index, gettime = 1;
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1410

833883d9a   Dimitri Sivanich   hrtimer: reduce c...
1411
  	if (hrtimer_hres_active())
3055addad   Dimitri Sivanich   [PATCH] hrtimer: ...
1412
  		return;
833883d9a   Dimitri Sivanich   hrtimer: reduce c...
1413
1414
  	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
  		base = &cpu_base->clock_base[index];
b007c389d   John Stultz   hrtimer: fix time...
1415
  		if (!timerqueue_getnext(&base->active))
d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1416
  			continue;
833883d9a   Dimitri Sivanich   hrtimer: reduce c...
1417

d7cfb60c5   Mark McLoughlin   hrtimer: remove h...
1418
  		if (gettime) {
833883d9a   Dimitri Sivanich   hrtimer: reduce c...
1419
1420
  			hrtimer_get_softirq_time(cpu_base);
  			gettime = 0;
b75f7a51c   Roman Zippel   [PATCH] hrtimers:...
1421
  		}
d3d74453c   Peter Zijlstra   hrtimer: fixup th...
1422

ecb49d1a6   Thomas Gleixner   hrtimers: Convert...
1423
  		raw_spin_lock(&cpu_base->lock);
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1424

b007c389d   John Stultz   hrtimer: fix time...
1425
  		while ((node = timerqueue_getnext(&base->active))) {
833883d9a   Dimitri Sivanich   hrtimer: reduce c...
1426
  			struct hrtimer *timer;
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1427

998adc3dd   John Stultz   hrtimers: Convert...
1428
  			timer = container_of(node, struct hrtimer, node);
cc584b213   Arjan van de Ven   hrtimer: convert ...
1429
1430
  			if (base->softirq_time.tv64 <=
  					hrtimer_get_expires_tv64(timer))
833883d9a   Dimitri Sivanich   hrtimer: reduce c...
1431
  				break;
c6a2a1770   Xiao Guangrong   hrtimer: Add trac...
1432
  			__run_hrtimer(timer, &base->softirq_time);
833883d9a   Dimitri Sivanich   hrtimer: reduce c...
1433
  		}
ecb49d1a6   Thomas Gleixner   hrtimers: Convert...
1434
  		raw_spin_unlock(&cpu_base->lock);
833883d9a   Dimitri Sivanich   hrtimer: reduce c...
1435
  	}
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1436
1437
1438
  }
  
  /*
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1439
1440
   * Sleep related functions:
   */
c9cb2e3d7   Thomas Gleixner   [PATCH] hrtimers:...
1441
  static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
00362e33f   Thomas Gleixner   [PATCH] hrtimer: ...
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
  {
  	struct hrtimer_sleeper *t =
  		container_of(timer, struct hrtimer_sleeper, timer);
  	struct task_struct *task = t->task;
  
  	t->task = NULL;
  	if (task)
  		wake_up_process(task);
  
  	return HRTIMER_NORESTART;
  }
36c8b5868   Ingo Molnar   [PATCH] sched: cl...
1453
  void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
00362e33f   Thomas Gleixner   [PATCH] hrtimer: ...
1454
1455
1456
1457
  {
  	sl->timer.function = hrtimer_wakeup;
  	sl->task = task;
  }
2bc481cf4   Stephen Hemminger   pktgen: spin usin...
1458
  EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
00362e33f   Thomas Gleixner   [PATCH] hrtimer: ...
1459

669d7868a   Thomas Gleixner   [PATCH] hrtimer: ...
1460
  static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
432569bb9   Roman Zippel   [PATCH] hrtimers:...
1461
  {
669d7868a   Thomas Gleixner   [PATCH] hrtimer: ...
1462
  	hrtimer_init_sleeper(t, current);
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1463

432569bb9   Roman Zippel   [PATCH] hrtimers:...
1464
1465
  	do {
  		set_current_state(TASK_INTERRUPTIBLE);
cc584b213   Arjan van de Ven   hrtimer: convert ...
1466
  		hrtimer_start_expires(&t->timer, mode);
37bb6cb40   Peter Zijlstra   hrtimer: unlock h...
1467
1468
  		if (!hrtimer_active(&t->timer))
  			t->task = NULL;
432569bb9   Roman Zippel   [PATCH] hrtimers:...
1469

54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1470
  		if (likely(t->task))
b0f8c44f3   Colin Cross   nanosleep: use fr...
1471
  			freezable_schedule();
432569bb9   Roman Zippel   [PATCH] hrtimers:...
1472

669d7868a   Thomas Gleixner   [PATCH] hrtimer: ...
1473
  		hrtimer_cancel(&t->timer);
c9cb2e3d7   Thomas Gleixner   [PATCH] hrtimers:...
1474
  		mode = HRTIMER_MODE_ABS;
669d7868a   Thomas Gleixner   [PATCH] hrtimer: ...
1475
1476
  
  	} while (t->task && !signal_pending(current));
432569bb9   Roman Zippel   [PATCH] hrtimers:...
1477

3588a085c   Peter Zijlstra   hrtimer: fix hrti...
1478
  	__set_current_state(TASK_RUNNING);
669d7868a   Thomas Gleixner   [PATCH] hrtimer: ...
1479
  	return t->task == NULL;
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1480
  }
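  /*
   * Return convention of do_nanosleep() above: nonzero when the sleep ran
   * to completion (hrtimer_wakeup() cleared t->task, or the timer had
   * already expired), zero when a signal cut the sleep short, in which
   * case the callers below update rmtp and set up the restart block.
   */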
080344b98   Oleg Nesterov   hrtimer: fix *rmt...
1481
1482
1483
1484
  static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
  {
  	struct timespec rmt;
  	ktime_t rem;
cc584b213   Arjan van de Ven   hrtimer: convert ...
1485
  	rem = hrtimer_expires_remaining(timer);
080344b98   Oleg Nesterov   hrtimer: fix *rmt...
1486
1487
1488
1489
1490
1491
1492
1493
1494
  	if (rem.tv64 <= 0)
  		return 0;
  	rmt = ktime_to_timespec(rem);
  
  	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
  		return -EFAULT;
  
  	return 1;
  }
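  /*
   * update_rmtp() return convention, relied upon by the "ret <= 0" checks
   * below: 0 means the timer had already expired so there is nothing to
   * report, 1 means the remaining time was copied out to userspace, and
   * -EFAULT means the copy failed.
   */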
1711ef386   Toyo Abe   [PATCH] posix-tim...
1495
  long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1496
  {
669d7868a   Thomas Gleixner   [PATCH] hrtimer: ...
1497
  	struct hrtimer_sleeper t;
080344b98   Oleg Nesterov   hrtimer: fix *rmt...
1498
  	struct timespec __user  *rmtp;
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1499
  	int ret = 0;
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1500

ab8177bc5   Thomas Gleixner   hrtimers: Avoid t...
1501
  	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1502
  				HRTIMER_MODE_ABS);
cc584b213   Arjan van de Ven   hrtimer: convert ...
1503
  	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1504

c9cb2e3d7   Thomas Gleixner   [PATCH] hrtimers:...
1505
  	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1506
  		goto out;
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1507

029a07e03   Thomas Gleixner   hrtimer: use nano...
1508
  	rmtp = restart->nanosleep.rmtp;
432569bb9   Roman Zippel   [PATCH] hrtimers:...
1509
  	if (rmtp) {
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1510
  		ret = update_rmtp(&t.timer, rmtp);
080344b98   Oleg Nesterov   hrtimer: fix *rmt...
1511
  		if (ret <= 0)
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1512
  			goto out;
432569bb9   Roman Zippel   [PATCH] hrtimers:...
1513
  	}
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1514

10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1515
  	/* The other values in restart are already filled in */
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1516
1517
1518
1519
  	ret = -ERESTART_RESTARTBLOCK;
  out:
  	destroy_hrtimer_on_stack(&t.timer);
  	return ret;
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1520
  }
080344b98   Oleg Nesterov   hrtimer: fix *rmt...
1521
  long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1522
1523
1524
  		       const enum hrtimer_mode mode, const clockid_t clockid)
  {
  	struct restart_block *restart;
669d7868a   Thomas Gleixner   [PATCH] hrtimer: ...
1525
  	struct hrtimer_sleeper t;
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1526
  	int ret = 0;
3bd012060   Arjan van de Ven   hrtimer: make the...
1527
1528
1529
  	unsigned long slack;
  
  	slack = current->timer_slack_ns;
aab03e05e   Dario Faggioli   sched/deadline: A...
1530
  	if (dl_task(current) || rt_task(current))
3bd012060   Arjan van de Ven   hrtimer: make the...
1531
  		slack = 0;
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1532

237fc6e7a   Thomas Gleixner   add hrtimer speci...
1533
  	hrtimer_init_on_stack(&t.timer, clockid, mode);
3bd012060   Arjan van de Ven   hrtimer: make the...
1534
  	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
432569bb9   Roman Zippel   [PATCH] hrtimers:...
1535
  	if (do_nanosleep(&t, mode))
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1536
  		goto out;
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1537

7978672c4   George Anzinger   [PATCH] hrtimers:...
1538
  	/* Absolute timers do not update the rmtp value and restart: */
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1539
1540
1541
1542
  	if (mode == HRTIMER_MODE_ABS) {
  		ret = -ERESTARTNOHAND;
  		goto out;
  	}
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1543

432569bb9   Roman Zippel   [PATCH] hrtimers:...
1544
  	if (rmtp) {
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1545
  		ret = update_rmtp(&t.timer, rmtp);
080344b98   Oleg Nesterov   hrtimer: fix *rmt...
1546
  		if (ret <= 0)
237fc6e7a   Thomas Gleixner   add hrtimer speci...
1547
  			goto out;
432569bb9   Roman Zippel   [PATCH] hrtimers:...
1548
  	}
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1549
1550
  
  	restart = &current_thread_info()->restart_block;
1711ef386   Toyo Abe   [PATCH] posix-tim...
1551
  	restart->fn = hrtimer_nanosleep_restart;
ab8177bc5   Thomas Gleixner   hrtimers: Avoid t...
1552
  	restart->nanosleep.clockid = t.timer.base->clockid;
029a07e03   Thomas Gleixner   hrtimer: use nano...
1553
  	restart->nanosleep.rmtp = rmtp;
cc584b213   Arjan van de Ven   hrtimer: convert ...
1554
  	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1555

237fc6e7a   Thomas Gleixner   add hrtimer speci...
1556
1557
1558
1559
  	ret = -ERESTART_RESTARTBLOCK;
  out:
  	destroy_hrtimer_on_stack(&t.timer);
  	return ret;
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1560
  }
58fd3aa28   Heiko Carstens   [CVE-2009-0029] S...
1561
1562
  SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
  		struct timespec __user *, rmtp)
6ba1b9121   Thomas Gleixner   [PATCH] hrtimer: ...
1563
  {
080344b98   Oleg Nesterov   hrtimer: fix *rmt...
1564
  	struct timespec tu;
6ba1b9121   Thomas Gleixner   [PATCH] hrtimer: ...
1565
1566
1567
1568
1569
1570
  
  	if (copy_from_user(&tu, rqtp, sizeof(tu)))
  		return -EFAULT;
  
  	if (!timespec_valid(&tu))
  		return -EINVAL;
080344b98   Oleg Nesterov   hrtimer: fix *rmt...
1571
  	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
6ba1b9121   Thomas Gleixner   [PATCH] hrtimer: ...
1572
  }
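  /*
   * Userspace view of the syscall above, as an illustrative sketch only:
   * nanosleep() sleeps against CLOCK_MONOTONIC and, when a signal
   * interrupts it, reports the unslept time through its second argument.
   */
  #if 0
  #include <time.h>
  #include <errno.h>
  
  static void sleep_half_second(void)
  {
  	struct timespec req = { .tv_sec = 0, .tv_nsec = 500 * 1000 * 1000 };
  	struct timespec rem;
  
  	/* restart with whatever time is left whenever a signal interrupts us */
  	while (nanosleep(&req, &rem) == -1 && errno == EINTR)
  		req = rem;
  }
  #endif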
10c94ec16   Thomas Gleixner   [PATCH] hrtimer: ...
1573
  /*
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1574
1575
   * Functions related to boot-time initialization:
   */
0db0628d9   Paul Gortmaker   kernel: delete __...
1576
  static void init_hrtimers_cpu(int cpu)
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1577
  {
3c8aa39d7   Thomas Gleixner   [PATCH] hrtimers:...
1578
  	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1579
  	int i;
998adc3dd   John Stultz   hrtimers: Convert...
1580
  	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
3c8aa39d7   Thomas Gleixner   [PATCH] hrtimers:...
1581
  		cpu_base->clock_base[i].cpu_base = cpu_base;
998adc3dd   John Stultz   hrtimers: Convert...
1582
1583
  		timerqueue_init_head(&cpu_base->clock_base[i].active);
  	}
3c8aa39d7   Thomas Gleixner   [PATCH] hrtimers:...
1584

54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1585
  	hrtimer_init_hres(cpu_base);
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1586
1587
1588
  }
  
  #ifdef CONFIG_HOTPLUG_CPU
ca109491f   Peter Zijlstra   hrtimer: removing...
1589
  static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
37810659e   Peter Zijlstra   hrtimer: removing...
1590
  				struct hrtimer_clock_base *new_base)
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1591
1592
  {
  	struct hrtimer *timer;
998adc3dd   John Stultz   hrtimers: Convert...
1593
  	struct timerqueue_node *node;
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1594

998adc3dd   John Stultz   hrtimers: Convert...
1595
1596
  	while ((node = timerqueue_getnext(&old_base->active))) {
  		timer = container_of(node, struct hrtimer, node);
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1597
  		BUG_ON(hrtimer_callback_running(timer));
c6a2a1770   Xiao Guangrong   hrtimer: Add trac...
1598
  		debug_deactivate(timer);
b00c1a99e   Thomas Gleixner   hrtimer: mark mig...
1599
1600
1601
1602
1603
1604
1605
  
  		/*
  		 * Mark it as STATE_MIGRATE, not INACTIVE, otherwise the
  		 * timer could be seen as !active and just vanish away
  		 * under us on another CPU.
  		 */
  		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1606
  		timer->base = new_base;
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1607
  		/*
e3f1d8837   Thomas Gleixner   hrtimer: fixup co...
1608
1609
1610
1611
1612
1613
  		 * Enqueue the timers on the new cpu. This does not
  		 * reprogram the event device even if a migrated timer
  		 * expires before the earliest one on this CPU, but we run
  		 * hrtimer_interrupt after we migrated everything to
  		 * sort out already expired timers and reprogram the
  		 * event device.
54cdfdb47   Thomas Gleixner   [PATCH] hrtimers:...
1614
  		 */
a6037b61c   Peter Zijlstra   hrtimer: fix recu...
1615
  		enqueue_hrtimer(timer, new_base);
41e1022ea   Thomas Gleixner   hrtimer: fix migr...
1616

b00c1a99e   Thomas Gleixner   hrtimer: mark mig...
1617
1618
  		/* Clear the migration state bit */
  		timer->state &= ~HRTIMER_STATE_MIGRATE;
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1619
1620
  	}
  }
d5fd43c4a   Thomas Gleixner   hrtimer: fix HOTP...
1621
  static void migrate_hrtimers(int scpu)
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1622
  {
3c8aa39d7   Thomas Gleixner   [PATCH] hrtimers:...
1623
  	struct hrtimer_cpu_base *old_base, *new_base;
731a55ba0   Thomas Gleixner   hrtimer: simplify...
1624
  	int i;
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1625

37810659e   Peter Zijlstra   hrtimer: removing...
1626
  	BUG_ON(cpu_online(scpu));
37810659e   Peter Zijlstra   hrtimer: removing...
1627
  	tick_cancel_sched_timer(scpu);
731a55ba0   Thomas Gleixner   hrtimer: simplify...
1628
1629
1630
1631
  
  	local_irq_disable();
  	old_base = &per_cpu(hrtimer_bases, scpu);
  	new_base = &__get_cpu_var(hrtimer_bases);
d82f0b0f6   Oleg Nesterov   migrate_timers: a...
1632
1633
1634
1635
  	/*
  	 * The caller is globally serialized and nobody else
  	 * takes two locks at once, so deadlock is not possible.
  	 */
ecb49d1a6   Thomas Gleixner   hrtimers: Convert...
1636
1637
  	raw_spin_lock(&new_base->lock);
  	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1638

3c8aa39d7   Thomas Gleixner   [PATCH] hrtimers:...
1639
  	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
ca109491f   Peter Zijlstra   hrtimer: removing...
1640
  		migrate_hrtimer_list(&old_base->clock_base[i],
37810659e   Peter Zijlstra   hrtimer: removing...
1641
  				     &new_base->clock_base[i]);
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1642
  	}
ecb49d1a6   Thomas Gleixner   hrtimers: Convert...
1643
1644
  	raw_spin_unlock(&old_base->lock);
  	raw_spin_unlock(&new_base->lock);
37810659e   Peter Zijlstra   hrtimer: removing...
1645

731a55ba0   Thomas Gleixner   hrtimer: simplify...
1646
1647
1648
  	/* Check if we got expired work to do */
  	__hrtimer_peek_ahead_timers();
  	local_irq_enable();
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1649
  }
37810659e   Peter Zijlstra   hrtimer: removing...
1650

c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1651
  #endif /* CONFIG_HOTPLUG_CPU */
0db0628d9   Paul Gortmaker   kernel: delete __...
1652
  static int hrtimer_cpu_notify(struct notifier_block *self,
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1653
1654
  					unsigned long action, void *hcpu)
  {
b2e3c0ade   Ingo Molnar   hrtimers: fix war...
1655
  	int scpu = (long)hcpu;
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1656
1657
1658
1659
  
  	switch (action) {
  
  	case CPU_UP_PREPARE:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
1660
  	case CPU_UP_PREPARE_FROZEN:
37810659e   Peter Zijlstra   hrtimer: removing...
1661
  		init_hrtimers_cpu(scpu);
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1662
1663
1664
  		break;
  
  #ifdef CONFIG_HOTPLUG_CPU
94df7de02   Sebastien Dugue   hrtimers: allow t...
1665
1666
1667
1668
  	case CPU_DYING:
  	case CPU_DYING_FROZEN:
  		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
  		break;
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1669
  	case CPU_DEAD:
8bb784428   Rafael J. Wysocki   Add suspend-relat...
1670
  	case CPU_DEAD_FROZEN:
b2e3c0ade   Ingo Molnar   hrtimers: fix war...
1671
  	{
37810659e   Peter Zijlstra   hrtimer: removing...
1672
  		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
d5fd43c4a   Thomas Gleixner   hrtimer: fix HOTP...
1673
  		migrate_hrtimers(scpu);
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1674
  		break;
b2e3c0ade   Ingo Molnar   hrtimers: fix war...
1675
  	}
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1676
1677
1678
1679
1680
1681
1682
1683
  #endif
  
  	default:
  		break;
  	}
  
  	return NOTIFY_OK;
  }
0db0628d9   Paul Gortmaker   kernel: delete __...
1684
  static struct notifier_block hrtimers_nb = {
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1685
1686
1687
1688
1689
1690
1691
1692
  	.notifier_call = hrtimer_cpu_notify,
  };
  
  void __init hrtimers_init(void)
  {
  	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
  			  (void *)(long)smp_processor_id());
  	register_cpu_notifier(&hrtimers_nb);
a6037b61c   Peter Zijlstra   hrtimer: fix recu...
1693
1694
1695
  #ifdef CONFIG_HIGH_RES_TIMERS
  	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
  #endif
c0a313296   Thomas Gleixner   [PATCH] hrtimer: ...
1696
  }
7bb67439b   Arjan van de Ven   select: Introduce...
1697
  /**
351b3f7a2   Carsten Emde   hrtimers: Provide...
1698
   * schedule_hrtimeout_range_clock - sleep until timeout
7bb67439b   Arjan van de Ven   select: Introduce...
1699
   * @expires:	timeout value (ktime_t)
654c8e0b1   Arjan van de Ven   hrtimer: turn hrt...
1700
   * @delta:	slack in expires timeout (ktime_t)
7bb67439b   Arjan van de Ven   select: Introduce...
1701
   * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
351b3f7a2   Carsten Emde   hrtimers: Provide...
1702
   * @clock:	timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
7bb67439b   Arjan van de Ven   select: Introduce...
1703
   */
351b3f7a2   Carsten Emde   hrtimers: Provide...
1704
1705
1706
  int __sched
  schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
  			       const enum hrtimer_mode mode, int clock)
7bb67439b   Arjan van de Ven   select: Introduce...
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
  {
  	struct hrtimer_sleeper t;
  
  	/*
  	 * Optimize when a zero timeout value is given. It does not
  	 * matter whether this is an absolute or a relative time.
  	 */
  	if (expires && !expires->tv64) {
  		__set_current_state(TASK_RUNNING);
  		return 0;
  	}
  
  	/*
43b210139   Namhyung Kim   hrtimer: fix a ty...
1720
  	 * A NULL parameter means "infinite"
7bb67439b   Arjan van de Ven   select: Introduce...
1721
1722
1723
1724
1725
1726
  	 */
  	if (!expires) {
  		schedule();
  		__set_current_state(TASK_RUNNING);
  		return -EINTR;
  	}
351b3f7a2   Carsten Emde   hrtimers: Provide...
1727
  	hrtimer_init_on_stack(&t.timer, clock, mode);
654c8e0b1   Arjan van de Ven   hrtimer: turn hrt...
1728
  	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
7bb67439b   Arjan van de Ven   select: Introduce...
1729
1730
  
  	hrtimer_init_sleeper(&t, current);
cc584b213   Arjan van de Ven   hrtimer: convert ...
1731
  	hrtimer_start_expires(&t.timer, mode);
7bb67439b   Arjan van de Ven   select: Introduce...
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
  	if (!hrtimer_active(&t.timer))
  		t.task = NULL;
  
  	if (likely(t.task))
  		schedule();
  
  	hrtimer_cancel(&t.timer);
  	destroy_hrtimer_on_stack(&t.timer);
  
  	__set_current_state(TASK_RUNNING);
  
  	return !t.task ? 0 : -EINTR;
  }
351b3f7a2   Carsten Emde   hrtimers: Provide...
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
  
  /**
   * schedule_hrtimeout_range - sleep until timeout
   * @expires:	timeout value (ktime_t)
   * @delta:	slack in expires timeout (ktime_t)
   * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
   *
   * Make the current task sleep until the given expiry time has
   * elapsed. The routine will return immediately unless
   * the current task state has been set (see set_current_state()).
   *
   * The @delta argument gives the kernel the freedom to schedule the
   * actual wakeup to a time that is both power and performance friendly.
   * The kernel gives the normal best effort behavior for "@expires+@delta",
   * but may decide to fire the timer earlier, though no earlier than @expires.
   *
   * You can set the task state as follows -
   *
   * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
   * pass before the routine returns.
   *
   * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
   * delivered to the current task.
   *
   * The current task state is guaranteed to be TASK_RUNNING when this
   * routine returns.
   *
   * Returns 0 when the timer has expired, otherwise -EINTR
   */
  int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
  				     const enum hrtimer_mode mode)
  {
  	return schedule_hrtimeout_range_clock(expires, delta, mode,
  					      CLOCK_MONOTONIC);
  }
654c8e0b1   Arjan van de Ven   hrtimer: turn hrt...
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
  EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
  
  /**
   * schedule_hrtimeout - sleep until timeout
   * @expires:	timeout value (ktime_t)
   * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
   *
   * Make the current task sleep until the given expiry time has
   * elapsed. The routine will return immediately unless
   * the current task state has been set (see set_current_state()).
   *
   * You can set the task state as follows -
   *
   * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
   * pass before the routine returns.
   *
   * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
   * delivered to the current task.
   *
   * The current task state is guaranteed to be TASK_RUNNING when this
   * routine returns.
   *
   * Returns 0 when the timer has expired, otherwise -EINTR
   */
  int __sched schedule_hrtimeout(ktime_t *expires,
  			       const enum hrtimer_mode mode)
  {
  	return schedule_hrtimeout_range(expires, 0, mode);
  }
7bb67439b   Arjan van de Ven   select: Introduce...
1809
  EXPORT_SYMBOL_GPL(schedule_hrtimeout);
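  /*
   * Illustrative sketch only (not part of hrtimer.c): the usual calling
   * pattern for the helpers above. As the kernel-doc notes, the task state
   * must be set before calling. The helper name and the 2ms timeout are
   * hypothetical.
   */
  #if 0
  static int my_wait_briefly(void)
  {
  	ktime_t timeout = ktime_set(0, 2 * NSEC_PER_MSEC);
  
  	set_current_state(TASK_INTERRUPTIBLE);
  	/* 0: the timeout expired, -EINTR: a signal woke us up early */
  	return schedule_hrtimeout(&timeout, HRTIMER_MODE_REL);
  }
  #endif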