  /*
   *  linux/kernel/time/tick-sched.c
   *
   *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
   *
   *  No idle tick implementation for low and high resolution timers
   *
   *  Started by: Thomas Gleixner and Ingo Molnar
   *
   *  Distribute under GPLv2.
   */
  #include <linux/cpu.h>
  #include <linux/err.h>
  #include <linux/hrtimer.h>
  #include <linux/interrupt.h>
  #include <linux/kernel_stat.h>
  #include <linux/percpu.h>
  #include <linux/profile.h>
  #include <linux/sched.h>
  #include <linux/tick.h>
  #include <linux/module.h>

  #include <asm/irq_regs.h>
  #include "tick-internal.h"
  
  /*
   * Per cpu nohz control structure
   */
  static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
  
  /*
   * The time when the last jiffy update happened. Protected by xtime_lock.
   */
  static ktime_t last_jiffies_update;
  struct tick_sched *tick_get_tick_sched(int cpu)
  {
  	return &per_cpu(tick_cpu_sched, cpu);
  }
  /*
   * Must be called with interrupts disabled !
   */
  static void tick_do_update_jiffies64(ktime_t now)
  {
  	unsigned long ticks = 0;
  	ktime_t delta;
  	/*
  	 * Do a quick check without holding xtime_lock:
  	 */
  	delta = ktime_sub(now, last_jiffies_update);
  	if (delta.tv64 < tick_period.tv64)
  		return;
  	/* Reevaluate with xtime_lock held */
  	write_seqlock(&xtime_lock);
  
  	delta = ktime_sub(now, last_jiffies_update);
  	if (delta.tv64 >= tick_period.tv64) {
  
  		delta = ktime_sub(delta, tick_period);
  		last_jiffies_update = ktime_add(last_jiffies_update,
  						tick_period);
  
  		/* Slow path for long timeouts */
  		if (unlikely(delta.tv64 >= tick_period.tv64)) {
  			s64 incr = ktime_to_ns(tick_period);
  
  			ticks = ktime_divns(delta, incr);
  
  			last_jiffies_update = ktime_add_ns(last_jiffies_update,
  							   incr * ticks);
  		}
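  		/*
  		 * One tick_period was already folded into
  		 * last_jiffies_update before the slow-path check, so the
  		 * increment here accounts for that first period.
  		 */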
  		do_timer(++ticks);
  
  		/* Keep the tick_next_period variable up to date */
  		tick_next_period = ktime_add(last_jiffies_update, tick_period);
  	}
  	write_sequnlock(&xtime_lock);
  }
  
  /*
   * Initialize and return the jiffies update.
   */
  static ktime_t tick_init_jiffy_update(void)
  {
  	ktime_t period;
  
  	write_seqlock(&xtime_lock);
  	/* Did we start the jiffies update yet ? */
  	if (last_jiffies_update.tv64 == 0)
  		last_jiffies_update = tick_next_period;
  	period = last_jiffies_update;
  	write_sequnlock(&xtime_lock);
  	return period;
  }
  
  /*
   * NOHZ - aka dynamic tick functionality
   */
  #ifdef CONFIG_NO_HZ
  /*
   * NO HZ enabled ?
   */
  static int tick_nohz_enabled __read_mostly = 1;
  
  /*
   * Enable / Disable tickless mode
   */
  static int __init setup_tick_nohz(char *str)
  {
  	if (!strcmp(str, "off"))
  		tick_nohz_enabled = 0;
  	else if (!strcmp(str, "on"))
  		tick_nohz_enabled = 1;
  	else
  		return 0;
  	return 1;
  }
  
  __setup("nohz=", setup_tick_nohz);
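  
  /*
   * Example: booting with "nohz=off" on the kernel command line keeps the
   * periodic tick running even when idle; "nohz=on" (the default, see
   * tick_nohz_enabled above) allows switching to tickless operation.
   */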
  
  /**
   * tick_nohz_update_jiffies - update jiffies when idle was interrupted
   *
   * Called from interrupt entry when the CPU was idle
   *
   * In case the sched_tick was stopped on this CPU, we have to check if jiffies
   * must be updated. Otherwise an interrupt handler could use a stale jiffy
   * value. We do this unconditionally on any cpu, as we don't know whether the
   * cpu which has the update task assigned is in a long sleep.
   */
  static void tick_nohz_update_jiffies(ktime_t now)
  {
  	int cpu = smp_processor_id();
  	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  	unsigned long flags;

  	cpumask_clear_cpu(cpu, nohz_cpu_mask);
  	ts->idle_waketime = now;
  
  	local_irq_save(flags);
  	tick_do_update_jiffies64(now);
  	local_irq_restore(flags);
  
  	touch_softlockup_watchdog();
  }
  /*
   * Updates the per cpu time idle statistics counters
   */
  static void
  update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
  {
  	ktime_t delta;

  	if (ts->idle_active) {
  		delta = ktime_sub(now, ts->idle_entrytime);
  		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
  		if (nr_iowait_cpu(cpu) > 0)
  			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
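  		/*
  		 * Note: delta was also added to idle_sleeptime above, so
  		 * the idle time reported by get_cpu_idle_time_us()
  		 * includes iowait time.
  		 */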
  		ts->idle_entrytime = now;
  	}

  	if (last_update_time)
  		*last_update_time = ktime_to_us(now);
  }
  
  static void tick_nohz_stop_idle(int cpu, ktime_t now)
  {
  	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  	update_ts_time_stats(cpu, ts, now, NULL);
  	ts->idle_active = 0;

  	sched_clock_idle_wakeup_event(0);
  }
  static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
  {
  	ktime_t now;
  
  	now = ktime_get();

  	update_ts_time_stats(cpu, ts, now, NULL);

  	ts->idle_entrytime = now;
  	ts->idle_active = 1;
  	sched_clock_idle_sleep_event();
  	return now;
  }
  /**
   * get_cpu_idle_time_us - get the total idle time of a cpu
   * @cpu: CPU number to query
   * @last_update_time: variable to store update time in
   *
   * Return the cumulative idle time (since boot) for a given
   * CPU, in microseconds. The idle time returned includes
   * the iowait time (unlike what "top" and co report).
   *
   * This time is measured via accounting rather than sampling,
   * and is as accurate as ktime_get() is.
   *
   * This function returns -1 if NOHZ is not enabled.
   */
  u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
  {
  	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  	if (!tick_nohz_enabled)
  		return -1;
  	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);

  	return ktime_to_us(ts->idle_sleeptime);
  }
  EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

  /**
   * get_cpu_iowait_time_us - get the total iowait time of a cpu
   * @cpu: CPU number to query
   * @last_update_time: variable to store update time in
   *
   * Return the cumulative iowait time (since boot) for a given
   * CPU, in microseconds.
   *
   * This time is measured via accounting rather than sampling,
   * and is as accurate as ktime_get() is.
   *
   * This function returns -1 if NOHZ is not enabled.
   */
  u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
  {
  	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  
  	if (!tick_nohz_enabled)
  		return -1;
  	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
  
  	return ktime_to_us(ts->iowait_sleeptime);
  }
  EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
  /**
   * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
   *
   * When the next event is more than a tick into the future, stop the idle tick.
   * Called either from the idle loop or from irq_exit() when an idle period was
   * just interrupted by an interrupt which did not cause a reschedule.
   */
  void tick_nohz_stop_sched_tick(int inidle)
  {
  	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
  	struct tick_sched *ts;
  	ktime_t last_update, expires, now;
  	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
  	u64 time_delta;
  	int cpu;
  
  	local_irq_save(flags);
  
  	cpu = smp_processor_id();
  	ts = &per_cpu(tick_cpu_sched, cpu);
  
  	/*
  	 * Call to tick_nohz_start_idle stops the last_update_time from being
  	 * updated. Thus, it must not be called in the event we are called from
  	 * irq_exit() with the prior state different than idle.
  	 */
  	if (!inidle && !ts->inidle)
  		goto end;
  	/*
  	 * Set ts->inidle unconditionally. Even if the system did not
  	 * switch to NOHZ mode the cpu frequency governors rely on the
  	 * update of the idle time accounting in tick_nohz_start_idle().
  	 */
  	ts->inidle = 1;
  	now = tick_nohz_start_idle(cpu, ts);

  	/*
  	 * If this cpu is offline and it is the one which updates
  	 * jiffies, then give up the assignment and let it be taken by
  	 * the cpu which runs the tick timer next. If we don't drop
  	 * this here the jiffies might be stale and do_timer() never
  	 * invoked.
  	 */
  	if (unlikely(!cpu_online(cpu))) {
  		if (cpu == tick_do_timer_cpu)
  			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
  	}
  	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
  		goto end;
  
  	if (need_resched())
  		goto end;
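  	/*
  	 * Stopping the tick with softirqs pending would leave them
  	 * unserviced indefinitely. That points at a bug in the offending
  	 * softirq, so warn (rate limited below) and keep the tick.
  	 */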
  	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
  		static int ratelimit;
  
  		if (ratelimit < 10) {
  			printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
  			       (unsigned int) local_softirq_pending());
  			ratelimit++;
  		}
  		goto end;
  	}

  	ts->idle_calls++;
  	/* Read jiffies and the time when jiffies were updated last */
  	do {
  		seq = read_seqbegin(&xtime_lock);
  		last_update = last_jiffies_update;
  		last_jiffies = jiffies;
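  		/*
  		 * timekeeping_max_deferment() is sampled inside the
  		 * seqlock loop so it is consistent with last_update and
  		 * last_jiffies.
  		 */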
  		time_delta = timekeeping_max_deferment();
  	} while (read_seqretry(&xtime_lock, seq));
  	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) ||
  	    arch_needs_cpu(cpu)) {
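  		/*
  		 * This CPU still has work to do (RCU callbacks, pending
  		 * printk output or arch-specific work): pretend the next
  		 * timer is one jiffy away so the tick is not stopped (see
  		 * the delta_jiffies == 1 check below).
  		 */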
  		next_jiffies = last_jiffies + 1;
  		delta_jiffies = 1;
  	} else {
  		/* Get the next timer wheel timer */
  		next_jiffies = get_next_timer_interrupt(last_jiffies);
  		delta_jiffies = next_jiffies - last_jiffies;
  	}
  	/*
  	 * Do not stop the tick, if we are only one off
  	 * or if the cpu is required for rcu
  	 */
  	if (!ts->tick_stopped && delta_jiffies == 1)
  		goto out;
  
  	/* Schedule the tick, if we are at least one jiffie off */
  	if ((long)delta_jiffies >= 1) {
  		/*
  		 * If this cpu is the one which updates jiffies, then
  		 * give up the assignment and let it be taken by the
  		 * cpu which runs the tick timer next, which might be
  		 * this cpu as well. If we don't drop this here the
  		 * jiffies might be stale and do_timer() never
  		 * invoked. Keep track of the fact that it was the one
  		 * which had the do_timer() duty last. If this cpu is
  		 * the one which had the do_timer() duty last, we
  		 * limit the sleep time to the timekeeping
  		 * max_deferment value which we retrieved
  		 * above. Otherwise we can sleep as long as we want.
  		 */
  		if (cpu == tick_do_timer_cpu) {
  			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
  			ts->do_timer_last = 1;
  		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
  			time_delta = KTIME_MAX;
  			ts->do_timer_last = 0;
  		} else if (!ts->do_timer_last) {
  			time_delta = KTIME_MAX;
  		}
  
  		/*
  		 * calculate the expiry time for the next timer wheel
  		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
  		 * that there is no timer pending or at least extremely
  		 * far into the future (12 days for HZ=1000). In this
  		 * case we set the expiry to the end of time.
  		 */
  		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
  			/*
  			 * Calculate the time delta for the next timer event.
  			 * If the time delta exceeds the maximum time delta
  			 * permitted by the current clocksource then adjust
  			 * the time delta accordingly to ensure the
  			 * clocksource does not wrap.
  			 */
  			time_delta = min_t(u64, time_delta,
  					   tick_period.tv64 * delta_jiffies);
  		}

  		if (time_delta < KTIME_MAX)
  			expires = ktime_add_ns(last_update, time_delta);
  		else
  			expires.tv64 = KTIME_MAX;

  		if (delta_jiffies > 1)
  			cpumask_set_cpu(cpu, nohz_cpu_mask);
  
  		/* Skip reprogram of event if it's not changed */
  		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
  			goto out;
  		/*
  		 * nohz_stop_sched_tick can be called several times before
  		 * the nohz_restart_sched_tick is called. This happens when
  		 * interrupts arrive which do not cause a reschedule. In the
  		 * first call we save the current tick time, so we can restart
  		 * the scheduler tick in nohz_restart_sched_tick.
  		 */
  		if (!ts->tick_stopped) {
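  			/*
  			 * Tell the scheduler's nohz balancing code that this
  			 * CPU goes tickless; undone on restart by the
  			 * select_nohz_load_balancer(0) call in
  			 * tick_nohz_restart_sched_tick().
  			 */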
  			select_nohz_load_balancer(1);

  			ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
  			ts->tick_stopped = 1;
  			ts->idle_jiffies = last_jiffies;
  			rcu_enter_nohz();
  		}

  		ts->idle_sleeps++;
  		/* Mark expires */
  		ts->idle_expires = expires;
  		/*
  		 * If the expiration time == KTIME_MAX, then
  		 * in this case we simply stop the tick timer.
  		 */
  		if (unlikely(expires.tv64 == KTIME_MAX)) {
  			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
  				hrtimer_cancel(&ts->sched_timer);
  			goto out;
  		}
  		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
  			hrtimer_start(&ts->sched_timer, expires,
  				      HRTIMER_MODE_ABS_PINNED);
  			/* Check, if the timer was already in the past */
  			if (hrtimer_active(&ts->sched_timer))
  				goto out;
  		} else if (!tick_program_event(expires, 0))
  			goto out;
  		/*
  		 * We are past the event already. So we crossed a
  		 * jiffie boundary. Update jiffies and raise the
  		 * softirq.
  		 */
  		tick_do_update_jiffies64(ktime_get());
  		cpumask_clear_cpu(cpu, nohz_cpu_mask);
  	}
  	raise_softirq_irqoff(TIMER_SOFTIRQ);
  out:
  	ts->next_jiffies = next_jiffies;
  	ts->last_jiffies = last_jiffies;
  	ts->sleep_length = ktime_sub(dev->next_event, now);
  end:
  	local_irq_restore(flags);
  }
  
  /**
   * tick_nohz_get_sleep_length - return the length of the current sleep
   *
   * Called from power state control code with interrupts disabled
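   *
   * Used by cpuidle governors (e.g. the menu governor) to choose a
   * C-state matching the expected idle period.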
   */
  ktime_t tick_nohz_get_sleep_length(void)
  {
  	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
  
  	return ts->sleep_length;
  }
  static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
  {
  	hrtimer_cancel(&ts->sched_timer);
  	hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
  
  	while (1) {
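  		/*
  		 * If programming the expiry below fails because it is
  		 * already in the past, fold the missed period into jiffies
  		 * and retry with the next period.
  		 */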
  		/* Forward the time to expire in the future */
  		hrtimer_forward(&ts->sched_timer, now, tick_period);
  
  		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
  			hrtimer_start_expires(&ts->sched_timer,
  					      HRTIMER_MODE_ABS_PINNED);
  			/* Check, if the timer was already in the past */
  			if (hrtimer_active(&ts->sched_timer))
  				break;
  		} else {
  			if (!tick_program_event(
  				hrtimer_get_expires(&ts->sched_timer), 0))
  				break;
  		}
  		/* Update jiffies and reread time */
  		tick_do_update_jiffies64(now);
  		now = ktime_get();
  	}
  }
  /**
   * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
   *
   * Restart the idle tick when the CPU is woken up from idle
   */
  void tick_nohz_restart_sched_tick(void)
  {
  	int cpu = smp_processor_id();
  	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  #ifndef CONFIG_VIRT_CPU_ACCOUNTING
  	unsigned long ticks;
  #endif
  	ktime_t now;

  	local_irq_disable();
  	if (ts->idle_active || (ts->inidle && ts->tick_stopped))
  		now = ktime_get();
  
  	if (ts->idle_active)
  		tick_nohz_stop_idle(cpu, now);

  	if (!ts->inidle || !ts->tick_stopped) {
  		ts->inidle = 0;
  		local_irq_enable();
  		return;
  	}

  	ts->inidle = 0;
  	rcu_exit_nohz();
  	/* Update jiffies first */
  	select_nohz_load_balancer(0);
  	tick_do_update_jiffies64(now);
  	cpumask_clear_cpu(cpu, nohz_cpu_mask);

  #ifndef CONFIG_VIRT_CPU_ACCOUNTING
  	/*
  	 * We stopped the tick in idle. Update process times would miss the
  	 * time we slept as update_process_times does only a 1 tick
  	 * accounting. Enforce that this is accounted to idle !
  	 */
  	ticks = jiffies - ts->idle_jiffies;
  	/*
  	 * We might be one off. Do not randomly account a huge number of ticks!
  	 */
  	if (ticks && ticks < LONG_MAX)
  		account_idle_ticks(ticks);
  #endif

  	touch_softlockup_watchdog();
  	/*
  	 * Cancel the scheduled timer and restore the tick
  	 */
  	ts->tick_stopped = 0;
  	ts->idle_exittime = now;

  	tick_nohz_restart(ts, now);

  	local_irq_enable();
  }
  
  static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
  {
  	hrtimer_forward(&ts->sched_timer, now, tick_period);
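  	/*
  	 * tick_program_event() returns nonzero when the expiry is
  	 * already in the past; callers loop on this and retry after
  	 * updating jiffies.
  	 */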
  	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
  }
  
  /*
   * The nohz low res interrupt handler
   */
  static void tick_nohz_handler(struct clock_event_device *dev)
  {
  	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
  	struct pt_regs *regs = get_irq_regs();
  	int cpu = smp_processor_id();
  	ktime_t now = ktime_get();
  
  	dev->next_event.tv64 = KTIME_MAX;
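  	/*
  	 * One-shot mode: no event is armed now; the next one is set up
  	 * by tick_nohz_reprogram() at the end of this handler.
  	 */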
  	/*
  	 * Check if the do_timer duty was dropped. We don't care about
  	 * concurrency: This happens only when the cpu in charge went
  	 * into a long sleep. If two cpus happen to assign themself to
  	 * this duty, then the jiffies update is still serialized by
  	 * xtime_lock.
  	 */
  	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
  		tick_do_timer_cpu = cpu;
  	/* Check, if the jiffies need an update */
  	if (tick_do_timer_cpu == cpu)
  		tick_do_update_jiffies64(now);
  
  	/*
  	 * When we are idle and the tick is stopped, we have to touch
  	 * the watchdog as we might not schedule for a really long
  	 * time. This happens on complete idle SMP systems while
  	 * waiting on the login prompt. We also increment the "start
  	 * of idle" jiffy stamp so the idle accounting adjustment we
  	 * do when we go busy again does not account too much ticks.
  	 */
  	if (ts->tick_stopped) {
  		touch_softlockup_watchdog();
  		ts->idle_jiffies++;
  	}
  
  	update_process_times(user_mode(regs));
  	profile_tick(CPU_PROFILING);
  	while (tick_nohz_reprogram(ts, now)) {
  		now = ktime_get();
  		tick_do_update_jiffies64(now);
  	}
  }
  
  /**
   * tick_nohz_switch_to_nohz - switch to nohz mode
   */
  static void tick_nohz_switch_to_nohz(void)
  {
  	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
  	ktime_t next;
  
  	if (!tick_nohz_enabled)
  		return;
  
  	local_irq_disable();
  	if (tick_switch_to_oneshot(tick_nohz_handler)) {
  		local_irq_enable();
  		return;
  	}
  
  	ts->nohz_mode = NOHZ_MODE_LOWRES;
  
  	/*
  	 * Recycle the hrtimer in ts, so we can share the
  	 * hrtimer_forward with the highres code.
  	 */
  	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
  	/* Get the next period */
  	next = tick_init_jiffy_update();
  
  	for (;;) {
  		hrtimer_set_expires(&ts->sched_timer, next);
  		if (!tick_program_event(next, 0))
  			break;
  		next = ktime_add(next, tick_period);
  	}
  	local_irq_enable();
  
  	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n",
  	       smp_processor_id());
  }
  /*
   * When NOHZ is enabled and the tick is stopped, we need to kick the
   * tick timer from irq_enter() so that the jiffies update is kept
   * alive during long running softirqs. That's ugly as hell, but
   * correctness is key even if we need to fix the offending softirq in
   * the first place.
   *
   * Note, this is different to tick_nohz_restart. We just kick the
   * timer and do not touch the other magic bits which need to be done
   * when idle is left.
   */
  static void tick_nohz_kick_tick(int cpu, ktime_t now)
  {
  #if 0
  	/* Switch back to 2.6.27 behaviour */
  	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  	ktime_t delta;

  	/*
  	 * Do not touch the tick device, when the next expiry is either
  	 * already reached or less/equal than the tick period.
  	 */
  	delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
  	if (delta.tv64 <= tick_period.tv64)
  		return;
  
  	tick_nohz_restart(ts, now);
  #endif
  }
  static inline void tick_check_nohz(int cpu)
  {
  	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  	ktime_t now;
  
  	if (!ts->idle_active && !ts->tick_stopped)
  		return;
  	now = ktime_get();
  	if (ts->idle_active)
  		tick_nohz_stop_idle(cpu, now);
  	if (ts->tick_stopped) {
  		tick_nohz_update_jiffies(now);
  		tick_nohz_kick_tick(cpu, now);
  	}
  }
  #else
  
  static inline void tick_nohz_switch_to_nohz(void) { }
  static inline void tick_check_nohz(int cpu) { }
  
  #endif /* NO_HZ */
  
  /*
   * Called from irq_enter to notify about the possible interruption of idle()
   */
  void tick_check_idle(int cpu)
  {
  	tick_check_oneshot_broadcast(cpu);
  	tick_check_nohz(cpu);
  }
  
  /*
   * High resolution timer specific code
   */
  #ifdef CONFIG_HIGH_RES_TIMERS
  /*
   * We rearm the timer until we get disabled by the idle code.
   * Called with interrupts disabled and timer->base->cpu_base->lock held.
   */
  static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
  {
  	struct tick_sched *ts =
  		container_of(timer, struct tick_sched, sched_timer);
  	struct pt_regs *regs = get_irq_regs();
  	ktime_t now = ktime_get();
  	int cpu = smp_processor_id();
  
  #ifdef CONFIG_NO_HZ
  	/*
  	 * Check if the do_timer duty was dropped. We don't care about
  	 * concurrency: This happens only when the cpu in charge went
  	 * into a long sleep. If two cpus happen to assign themself to
  	 * this duty, then the jiffies update is still serialized by
  	 * xtime_lock.
  	 */
  	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
  		tick_do_timer_cpu = cpu;
  #endif
  
  	/* Check, if the jiffies need an update */
  	if (tick_do_timer_cpu == cpu)
  		tick_do_update_jiffies64(now);
  
  	/*
  	 * Do not call, when we are not in irq context and have
  	 * no valid regs pointer
  	 */
  	if (regs) {
  		/*
  		 * When we are idle and the tick is stopped, we have to touch
  		 * the watchdog as we might not schedule for a really long
  		 * time. This happens on complete idle SMP systems while
  		 * waiting on the login prompt. We also increment the "start of
  		 * idle" jiffy stamp so the idle accounting adjustment we do
  		 * when we go busy again does not account too much ticks.
  		 */
  		if (ts->tick_stopped) {
  			touch_softlockup_watchdog();
  			ts->idle_jiffies++;
  		}
  		update_process_times(user_mode(regs));
  		profile_tick(CPU_PROFILING);
  	}
  	hrtimer_forward(timer, now, tick_period);
  
  	return HRTIMER_RESTART;
  }
  
  /**
   * tick_setup_sched_timer - setup the tick emulation timer
   */
  void tick_setup_sched_timer(void)
  {
  	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
  	ktime_t now = ktime_get();
  
  	/*
  	 * Emulate tick processing via per-CPU hrtimers:
  	 */
  	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
  	ts->sched_timer.function = tick_sched_timer;

  	/* Get the next period (per cpu) */
  	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
  
  	for (;;) {
  		hrtimer_forward(&ts->sched_timer, now, tick_period);
  		hrtimer_start_expires(&ts->sched_timer,
  				      HRTIMER_MODE_ABS_PINNED);
  		/* Check, if the timer was already in the past */
  		if (hrtimer_active(&ts->sched_timer))
  			break;
  		now = ktime_get();
  	}
  
  #ifdef CONFIG_NO_HZ
  	if (tick_nohz_enabled)
  		ts->nohz_mode = NOHZ_MODE_HIGHRES;
  #endif
  }
  #endif /* HIGH_RES_TIMERS */

  #if defined CONFIG_NO_HZ || defined CONFIG_HIGH_RES_TIMERS
  void tick_cancel_sched_timer(int cpu)
  {
  	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
  # ifdef CONFIG_HIGH_RES_TIMERS
  	if (ts->sched_timer.base)
  		hrtimer_cancel(&ts->sched_timer);
  # endif

  	ts->nohz_mode = NOHZ_MODE_INACTIVE;
  }
  #endif
  
  /**
   * Async notification about clocksource changes
   */
  void tick_clock_notify(void)
  {
  	int cpu;
  
  	for_each_possible_cpu(cpu)
  		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
  }
  
  /*
   * Async notification about clock event changes
   */
  void tick_oneshot_notify(void)
  {
  	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
  
  	set_bit(0, &ts->check_clocks);
  }
  
  /**
   * Check, if a change happened, which makes oneshot possible.
   *
   * Called cyclically from the hrtimer softirq (driven by the timer
   * softirq). allow_nohz signals that we can switch into low-res nohz
   * mode, because high resolution timers are disabled (either at
   * compile time or at runtime).
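   *
   * Returns 1 when the caller should switch the system to high
   * resolution mode instead; 0 otherwise.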
   */
  int tick_check_oneshot_change(int allow_nohz)
  {
  	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
  
  	if (!test_and_clear_bit(0, &ts->check_clocks))
  		return 0;
  
  	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
  		return 0;
  	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
  		return 0;
  
  	if (!allow_nohz)
  		return 1;
  
  	tick_nohz_switch_to_nohz();
  	return 0;
  }