Blame view

kernel/time/tick-sched.c 29.1 KB
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
1
2
3
4
5
6
7
8
9
10
11
  /*
   *  linux/kernel/time/tick-sched.c
   *
   *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
   *
   *  No idle tick implementation for low and high resolution timers
   *
   *  Started by: Thomas Gleixner and Ingo Molnar
   *
b10db7f0d   Pavel Machek   time: more timer ...
12
   *  Distribute under GPLv2.
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
13
14
15
16
17
18
19
20
21
   */
  #include <linux/cpu.h>
  #include <linux/err.h>
  #include <linux/hrtimer.h>
  #include <linux/interrupt.h>
  #include <linux/kernel_stat.h>
  #include <linux/percpu.h>
  #include <linux/profile.h>
  #include <linux/sched.h>
8083e4ad9   venkatesh.pallipadi@intel.com   [CPUFREQ][5/6] cp...
22
  #include <linux/module.h>
00b429591   Frederic Weisbecker   irq_work: Don't s...
23
  #include <linux/irq_work.h>
9014c45d9   Frederic Weisbecker   nohz: Implement f...
24
25
  #include <linux/posix-timers.h>
  #include <linux/perf_event.h>
2e7093386   Frederic Weisbecker   nohz: Only enable...
26
  #include <linux/context_tracking.h>
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
27

9e203bcc1   David S. Miller   [TIME] tick-sched...
28
  #include <asm/irq_regs.h>
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
29
  #include "tick-internal.h"
cb41a2907   Frederic Weisbecker   nohz: Add basic t...
30
  #include <trace/events/timer.h>
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
31
32
33
  /*
   * Per cpu nohz control structure
   */
33a5f6261   Frederic Weisbecker   nohz: Add API to ...
34
  DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
35
36
  
  /*
d6ad41876   John Stultz   time: Kill xtime_...
37
   * The time, when the last jiffy update happened. Protected by jiffies_lock.
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
38
39
   */
  static ktime_t last_jiffies_update;
289f480af   Ingo Molnar   [PATCH] Add debug...
40
41
42
43
/*
 * tick_get_tick_sched - return the per-cpu tick_sched structure for @cpu.
 */
struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
44
45
46
47
48
49
50
/*
 * Advance jiffies to account for the time that elapsed since the last
 * update, and keep tick_next_period in sync.
 *
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 0;
	ktime_t delta;

	/*
	 * Do a quick check without holding jiffies_lock:
	 */
	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 < tick_period.tv64)
		return;

	/* Reevaluate with jiffies_lock held */
	write_seqlock(&jiffies_lock);

	delta = ktime_sub(now, last_jiffies_update);
	if (delta.tv64 >= tick_period.tv64) {

		delta = ktime_sub(delta, tick_period);
		last_jiffies_update = ktime_add(last_jiffies_update,
						tick_period);

		/* Slow path for long timeouts */
		if (unlikely(delta.tv64 >= tick_period.tv64)) {
			s64 incr = ktime_to_ns(tick_period);

			/* Fold all whole elapsed periods in at once */
			ticks = ktime_divns(delta, incr);

			last_jiffies_update = ktime_add_ns(last_jiffies_update,
							   incr * ticks);
		}
		/* ++ticks: account for the first period consumed above */
		do_timer(++ticks);

		/* Keep the tick_next_period variable up to date */
		tick_next_period = ktime_add(last_jiffies_update, tick_period);
	}
	write_sequnlock(&jiffies_lock);
	update_wall_time();
}
  
/*
 * Initialize last_jiffies_update if necessary and return its value.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	write_seqlock(&jiffies_lock);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update.tv64 == 0)
		last_jiffies_update = tick_next_period;
	period = last_jiffies_update;
	write_sequnlock(&jiffies_lock);
	return period;
}
5bb962269   Frederic Weisbecker   tick: Consolidate...
99
100
101
102
  
  static void tick_sched_do_timer(ktime_t now)
  {
  	int cpu = smp_processor_id();
3451d0243   Frederic Weisbecker   nohz: Rename CONF...
103
  #ifdef CONFIG_NO_HZ_COMMON
5bb962269   Frederic Weisbecker   tick: Consolidate...
104
105
106
107
108
  	/*
  	 * Check if the do_timer duty was dropped. We don't care about
  	 * concurrency: This happens only when the cpu in charge went
  	 * into a long sleep. If two cpus happen to assign themself to
  	 * this duty, then the jiffies update is still serialized by
9c3f9e281   Thomas Gleixner   Merge branch 'for...
109
  	 * jiffies_lock.
5bb962269   Frederic Weisbecker   tick: Consolidate...
110
  	 */
a382bf934   Frederic Weisbecker   nohz: Assign time...
111
  	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
c5bfece2d   Frederic Weisbecker   nohz: Switch from...
112
  	    && !tick_nohz_full_cpu(cpu))
5bb962269   Frederic Weisbecker   tick: Consolidate...
113
114
115
116
117
118
119
  		tick_do_timer_cpu = cpu;
  #endif
  
  	/* Check, if the jiffies need an update */
  	if (tick_do_timer_cpu == cpu)
  		tick_do_update_jiffies64(now);
  }
9e8f559b0   Frederic Weisbecker   tick: Consolidate...
120
121
  static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
  {
3451d0243   Frederic Weisbecker   nohz: Rename CONF...
122
  #ifdef CONFIG_NO_HZ_COMMON
9e8f559b0   Frederic Weisbecker   tick: Consolidate...
123
124
125
126
127
128
129
130
131
132
133
134
135
  	/*
  	 * When we are idle and the tick is stopped, we have to touch
  	 * the watchdog as we might not schedule for a really long
  	 * time. This happens on complete idle SMP systems while
  	 * waiting on the login prompt. We also increment the "start of
  	 * idle" jiffy stamp so the idle accounting adjustment we do
  	 * when we go busy again does not account too much ticks.
  	 */
  	if (ts->tick_stopped) {
  		touch_softlockup_watchdog();
  		if (is_idle_task(current))
  			ts->idle_jiffies++;
  	}
94a571402   Frederic Weisbecker   tick: Conditional...
136
  #endif
9e8f559b0   Frederic Weisbecker   tick: Consolidate...
137
138
139
  	update_process_times(user_mode(regs));
  	profile_tick(CPU_PROFILING);
  }
c5bfece2d   Frederic Weisbecker   nohz: Switch from...
140
  #ifdef CONFIG_NO_HZ_FULL
460775df4   Frederic Weisbecker   nohz: Optimize fu...
141
  cpumask_var_t tick_nohz_full_mask;
73867dcd0   Frederic Weisbecker   nohz: Rename a fe...
142
  bool tick_nohz_full_running;
a831881be   Frederic Weisbecker   nohz: Basic full ...
143

9014c45d9   Frederic Weisbecker   nohz: Implement f...
144
145
146
  static bool can_stop_full_tick(void)
  {
  	WARN_ON_ONCE(!irqs_disabled());
cb41a2907   Frederic Weisbecker   nohz: Add basic t...
147
148
149
  	if (!sched_can_stop_tick()) {
  		trace_tick_stop(0, "more than 1 task in runqueue
  ");
9014c45d9   Frederic Weisbecker   nohz: Implement f...
150
  		return false;
cb41a2907   Frederic Weisbecker   nohz: Add basic t...
151
  	}
9014c45d9   Frederic Weisbecker   nohz: Implement f...
152

cb41a2907   Frederic Weisbecker   nohz: Add basic t...
153
154
155
  	if (!posix_cpu_timers_can_stop_tick(current)) {
  		trace_tick_stop(0, "posix timers running
  ");
9014c45d9   Frederic Weisbecker   nohz: Implement f...
156
  		return false;
cb41a2907   Frederic Weisbecker   nohz: Add basic t...
157
  	}
9014c45d9   Frederic Weisbecker   nohz: Implement f...
158

cb41a2907   Frederic Weisbecker   nohz: Add basic t...
159
160
161
  	if (!perf_event_can_stop_tick()) {
  		trace_tick_stop(0, "perf events running
  ");
9014c45d9   Frederic Weisbecker   nohz: Implement f...
162
  		return false;
cb41a2907   Frederic Weisbecker   nohz: Add basic t...
163
  	}
9014c45d9   Frederic Weisbecker   nohz: Implement f...
164
165
166
167
168
169
170
  
  	/* sched_clock_tick() needs us? */
  #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  	/*
  	 * TODO: kick full dynticks CPUs when
  	 * sched_clock_stable is set.
  	 */
35af99e64   Peter Zijlstra   sched/clock, x86:...
171
  	if (!sched_clock_stable()) {
cb41a2907   Frederic Weisbecker   nohz: Add basic t...
172
173
  		trace_tick_stop(0, "unstable sched clock
  ");
e12d02717   Steven Rostedt   nohz: Warn if the...
174
175
176
177
  		/*
  		 * Don't allow the user to think they can get
  		 * full NO_HZ with this machine.
  		 */
73867dcd0   Frederic Weisbecker   nohz: Rename a fe...
178
  		WARN_ONCE(tick_nohz_full_running,
543487c7a   Steven Rostedt   nohz: Do not warn...
179
  			  "NO_HZ FULL will not work with unstable sched clock");
9014c45d9   Frederic Weisbecker   nohz: Implement f...
180
  		return false;
cb41a2907   Frederic Weisbecker   nohz: Add basic t...
181
  	}
9014c45d9   Frederic Weisbecker   nohz: Implement f...
182
183
184
185
186
187
  #endif
  
  	return true;
  }
  
  static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
76c24fb05   Frederic Weisbecker   nohz: New APIs to...
188
189
190
191
/*
 * Re-evaluate the need for the tick on the current CPU
 * and restart it if necessary.
 */
void __tick_nohz_full_check(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	/* Only restart when stopped on a full dynticks CPU running a task */
	if (tick_nohz_full_cpu(smp_processor_id())) {
		if (ts->tick_stopped && !is_idle_task(current)) {
			if (!can_stop_full_tick())
				tick_nohz_restart_sched_tick(ts, ktime_get());
		}
	}
}
  
/* irq_work callback: re-check the tick dependency on the local CPU. */
static void nohz_full_kick_work_func(struct irq_work *work)
{
	__tick_nohz_full_check();
}

/* Per-cpu irq_work used by tick_nohz_full_kick() below. */
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
	.func = nohz_full_kick_work_func,
};
  
/*
 * Kick the current CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick(void)
{
	if (tick_nohz_full_cpu(smp_processor_id()))
		irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
}
  
/* IPI handler: re-check the tick dependency on the receiving CPU. */
static void nohz_full_kick_ipi(void *info)
{
	__tick_nohz_full_check();
}
  
/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_all(void)
{
	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	/* IPI the remote full dynticks CPUs ... */
	smp_call_function_many(tick_nohz_full_mask,
			       nohz_full_kick_ipi, NULL, false);
	/* ... and kick the local CPU via irq_work */
	tick_nohz_full_kick();
	preempt_enable();
}
99e5ada94   Frederic Weisbecker   nohz: Re-evaluate...
243
244
245
246
247
/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix cpu timers, ...
 */
void __tick_nohz_task_switch(struct task_struct *tsk)
{
	unsigned long flags;

	/* irqs off: smp_processor_id() and the kick must stay on this CPU */
	local_irq_save(flags);

	if (!tick_nohz_full_cpu(smp_processor_id()))
		goto out;

	if (tick_nohz_tick_stopped() && !can_stop_full_tick())
		tick_nohz_full_kick();

out:
	local_irq_restore(flags);
}
a831881be   Frederic Weisbecker   nohz: Basic full ...
259
  /* Parse the boot-time nohz CPU list from the kernel parameters. */
c5bfece2d   Frederic Weisbecker   nohz: Switch from...
260
  static int __init tick_nohz_full_setup(char *str)
a831881be   Frederic Weisbecker   nohz: Basic full ...
261
  {
0453b435d   Frederic Weisbecker   nohz: Force boot ...
262
  	int cpu;
73867dcd0   Frederic Weisbecker   nohz: Rename a fe...
263
264
  	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
  	if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
c5bfece2d   Frederic Weisbecker   nohz: Switch from...
265
266
  		pr_warning("NOHZ: Incorrect nohz_full cpumask
  ");
0453b435d   Frederic Weisbecker   nohz: Force boot ...
267
268
269
270
  		return 1;
  	}
  
  	cpu = smp_processor_id();
73867dcd0   Frederic Weisbecker   nohz: Rename a fe...
271
  	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
0453b435d   Frederic Weisbecker   nohz: Force boot ...
272
273
  		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping
  ", cpu);
73867dcd0   Frederic Weisbecker   nohz: Rename a fe...
274
  		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
0453b435d   Frederic Weisbecker   nohz: Force boot ...
275
  	}
73867dcd0   Frederic Weisbecker   nohz: Rename a fe...
276
  	tick_nohz_full_running = true;
0453b435d   Frederic Weisbecker   nohz: Force boot ...
277

a831881be   Frederic Weisbecker   nohz: Basic full ...
278
279
  	return 1;
  }
c5bfece2d   Frederic Weisbecker   nohz: Switch from...
280
  __setup("nohz_full=", tick_nohz_full_setup);
a831881be   Frederic Weisbecker   nohz: Basic full ...
281

0db0628d9   Paul Gortmaker   kernel: delete __...
282
/*
 * CPU hotplug notifier: veto offlining of the CPU that carries the
 * timekeeping duty for full dynticks CPUs.
 */
static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		/*
		 * If we handle the timekeeping duty for full dynticks CPUs,
		 * we can't safely shutdown that CPU.
		 */
		if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
			return NOTIFY_BAD;
		break;
	}
	return NOTIFY_OK;
}
1034fc2f4   Frederic Weisbecker   nohz: Print final...
300
301
302
303
304
  /*
   * Worst case string length in chunks of CPU range seems 2 steps
   * separations: 0,2,4,6,...
   * This is NR_CPUS + sizeof('\0')
   */
c5bfece2d   Frederic Weisbecker   nohz: Switch from...
305
  static char __initdata nohz_full_buf[NR_CPUS + 1];
1034fc2f4   Frederic Weisbecker   nohz: Print final...
306

f98823ac7   Frederic Weisbecker   nohz: New option ...
307
308
309
310
311
  static int tick_nohz_init_all(void)
  {
  	int err = -1;
  
  #ifdef CONFIG_NO_HZ_FULL_ALL
73867dcd0   Frederic Weisbecker   nohz: Rename a fe...
312
  	if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
f98823ac7   Frederic Weisbecker   nohz: New option ...
313
314
315
316
317
  		pr_err("NO_HZ: Can't allocate full dynticks cpumask
  ");
  		return err;
  	}
  	err = 0;
73867dcd0   Frederic Weisbecker   nohz: Rename a fe...
318
319
320
  	cpumask_setall(tick_nohz_full_mask);
  	cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask);
  	tick_nohz_full_running = true;
f98823ac7   Frederic Weisbecker   nohz: New option ...
321
322
323
  #endif
  	return err;
  }
d1e43fa5f   Frederic Weisbecker   nohz: Ensure full...
324
  void __init tick_nohz_init(void)
a831881be   Frederic Weisbecker   nohz: Basic full ...
325
  {
d1e43fa5f   Frederic Weisbecker   nohz: Ensure full...
326
  	int cpu;
73867dcd0   Frederic Weisbecker   nohz: Rename a fe...
327
  	if (!tick_nohz_full_running) {
f98823ac7   Frederic Weisbecker   nohz: New option ...
328
329
330
  		if (tick_nohz_init_all() < 0)
  			return;
  	}
d1e43fa5f   Frederic Weisbecker   nohz: Ensure full...
331

73867dcd0   Frederic Weisbecker   nohz: Rename a fe...
332
  	for_each_cpu(cpu, tick_nohz_full_mask)
2e7093386   Frederic Weisbecker   nohz: Only enable...
333
  		context_tracking_cpu_set(cpu);
d1e43fa5f   Frederic Weisbecker   nohz: Ensure full...
334
  	cpu_notifier(tick_nohz_cpu_down_callback, 0);
73867dcd0   Frederic Weisbecker   nohz: Rename a fe...
335
  	cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), tick_nohz_full_mask);
c5bfece2d   Frederic Weisbecker   nohz: Switch from...
336
337
  	pr_info("NO_HZ: Full dynticks CPUs: %s.
  ", nohz_full_buf);
a831881be   Frederic Weisbecker   nohz: Basic full ...
338
  }
a831881be   Frederic Weisbecker   nohz: Basic full ...
339
  #endif
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
340
341
342
  /*
   * NOHZ - aka dynamic tick functionality
   */
3451d0243   Frederic Weisbecker   nohz: Rename CONF...
343
  #ifdef CONFIG_NO_HZ_COMMON
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
344
345
346
  /*
   * NO HZ enabled ?
   */
d689fe222   Thomas Gleixner   NOHZ: Check for n...
347
348
  static int tick_nohz_enabled __read_mostly  = 1;
  int tick_nohz_active  __read_mostly;
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
  /*
   * Enable / Disable tickless mode
   */
  static int __init setup_tick_nohz(char *str)
  {
  	if (!strcmp(str, "off"))
  		tick_nohz_enabled = 0;
  	else if (!strcmp(str, "on"))
  		tick_nohz_enabled = 1;
  	else
  		return 0;
  	return 1;
  }
  
  __setup("nohz=", setup_tick_nohz);
  
/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any cpu, as we don't know whether the
 * cpu, which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	/* Record the wakeup time for idle accounting */
	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog();
}
595aac488   Arjan van de Ven   sched: Introduce ...
387
388
389
/*
 * Updates the per cpu time idle statistics counters.  Time spent idle
 * while tasks wait on I/O is charged to iowait_sleeptime, otherwise to
 * idle_sleeptime.  Optionally reports @now (in us) via @last_update_time.
 */
static void
update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
{
	ktime_t delta;

	if (ts->idle_active) {
		delta = ktime_sub(now, ts->idle_entrytime);
		if (nr_iowait_cpu(cpu) > 0)
			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
		else
			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
		/* Restart the accounting window from now */
		ts->idle_entrytime = now;
	}

	if (last_update_time)
		*last_update_time = ktime_to_us(now);
}
e8fcaa5c5   Frederic Weisbecker   nohz: Convert a f...
407
/* Leave idle: fold the elapsed idle time into the stats counters. */
static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
	ts->idle_active = 0;

	sched_clock_idle_wakeup_event(0);
}
e8fcaa5c5   Frederic Weisbecker   nohz: Convert a f...
414
/* Enter idle: record the entry timestamp and return it. */
static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
{
	ktime_t now = ktime_get();

	ts->idle_entrytime = now;
	ts->idle_active = 1;
	sched_clock_idle_sleep_event();
	return now;
}
b1f724c30   Arjan van de Ven   sched: Add a comm...
423
424
425
/**
 * get_cpu_idle_time_us - get the total idle time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, idle;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		/* Fold the currently running idle period into the counters */
		update_ts_time_stats(cpu, ts, now, last_update_time);
		idle = ts->idle_sleeptime;
	} else {
		/* Read-only path: add the in-flight idle period on the fly */
		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(ts->idle_sleeptime, delta);
		} else {
			idle = ts->idle_sleeptime;
		}
	}

	return ktime_to_us(idle);

}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
6378ddb59   Venki Pallipadi   time: track accur...
462

6beea0cda   Michal Hocko   nohz: Fix update_...
463
/**
 * get_cpu_iowait_time_us - get the total iowait time of a cpu
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
	ktime_t now, iowait;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time) {
		/* Fold the currently running idle period into the counters */
		update_ts_time_stats(cpu, ts, now, last_update_time);
		iowait = ts->iowait_sleeptime;
	} else {
		/* Read-only path: add the in-flight iowait period on the fly */
		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			iowait = ktime_add(ts->iowait_sleeptime, delta);
		} else {
			iowait = ts->iowait_sleeptime;
		}
	}

	return ktime_to_us(iowait);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
84bf1bccc   Frederic Weisbecker   nohz: Move next i...
501
502
  static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
  					 ktime_t now, int cpu)
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
503
  {
280f06774   Frederic Weisbecker   nohz: Separate ou...
504
  	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
84bf1bccc   Frederic Weisbecker   nohz: Move next i...
505
  	ktime_t last_update, expires, ret = { .tv64 = 0 };
aa9b16306   Paul E. McKenney   rcu: Precompute R...
506
  	unsigned long rcu_delta_jiffies;
4f86d3a8e   Len Brown   cpuidle: consolid...
507
  	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
98962465e   Jon Hunter   nohz: Prevent clo...
508
  	u64 time_delta;
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
509

855a0fc30   Frederic Weisbecker   nohz: Get timekee...
510
  	time_delta = timekeeping_max_deferment();
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
511
512
  	/* Read jiffies and the time when jiffies were updated last */
  	do {
d6ad41876   John Stultz   time: Kill xtime_...
513
  		seq = read_seqbegin(&jiffies_lock);
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
514
515
  		last_update = last_jiffies_update;
  		last_jiffies = jiffies;
d6ad41876   John Stultz   time: Kill xtime_...
516
  	} while (read_seqretry(&jiffies_lock, seq));
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
517

74876a98a   Frederic Weisbecker   printk: Wake up k...
518
  	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
00b429591   Frederic Weisbecker   irq_work: Don't s...
519
  	    arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
3c5d92a0c   Martin Schwidefsky   nohz: Introduce a...
520
  		next_jiffies = last_jiffies + 1;
6ba9b346e   Ingo Molnar   [PATCH] NOHZ: Fix...
521
  		delta_jiffies = 1;
3c5d92a0c   Martin Schwidefsky   nohz: Introduce a...
522
523
524
525
  	} else {
  		/* Get the next timer wheel timer */
  		next_jiffies = get_next_timer_interrupt(last_jiffies);
  		delta_jiffies = next_jiffies - last_jiffies;
aa9b16306   Paul E. McKenney   rcu: Precompute R...
526
527
528
529
  		if (rcu_delta_jiffies < delta_jiffies) {
  			next_jiffies = last_jiffies + rcu_delta_jiffies;
  			delta_jiffies = rcu_delta_jiffies;
  		}
3c5d92a0c   Martin Schwidefsky   nohz: Introduce a...
530
  	}
47aa8b6cb   Ingo Molnar   nohz: Reduce over...
531

79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
532
  	/*
47aa8b6cb   Ingo Molnar   nohz: Reduce over...
533
534
  	 * Do not stop the tick, if we are only one off (or less)
  	 * or if the cpu is required for RCU:
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
535
  	 */
47aa8b6cb   Ingo Molnar   nohz: Reduce over...
536
  	if (!ts->tick_stopped && delta_jiffies <= 1)
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
537
538
539
540
  		goto out;
  
  	/* Schedule the tick, if we are at least one jiffie off */
  	if ((long)delta_jiffies >= 1) {
001474491   Woodruff, Richard   nohz: suppress ne...
541
  		/*
001474491   Woodruff, Richard   nohz: suppress ne...
542
543
544
545
546
  		 * If this cpu is the one which updates jiffies, then
  		 * give up the assignment and let it be taken by the
  		 * cpu which runs the tick timer next, which might be
  		 * this cpu as well. If we don't drop this here the
  		 * jiffies might be stale and do_timer() never
27185016b   Thomas Gleixner   nohz: Track last ...
547
548
549
550
551
552
  		 * invoked. Keep track of the fact that it was the one
  		 * which had the do_timer() duty last. If this cpu is
  		 * the one which had the do_timer() duty last, we
  		 * limit the sleep time to the timekeeping
  		 * max_deferement value which we retrieved
  		 * above. Otherwise we can sleep as long as we want.
001474491   Woodruff, Richard   nohz: suppress ne...
553
  		 */
27185016b   Thomas Gleixner   nohz: Track last ...
554
  		if (cpu == tick_do_timer_cpu) {
001474491   Woodruff, Richard   nohz: suppress ne...
555
  			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
27185016b   Thomas Gleixner   nohz: Track last ...
556
557
558
559
560
561
562
  			ts->do_timer_last = 1;
  		} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
  			time_delta = KTIME_MAX;
  			ts->do_timer_last = 0;
  		} else if (!ts->do_timer_last) {
  			time_delta = KTIME_MAX;
  		}
265f22a97   Frederic Weisbecker   sched: Keep at le...
563
564
565
566
567
568
  #ifdef CONFIG_NO_HZ_FULL
  		if (!ts->inidle) {
  			time_delta = min(time_delta,
  					 scheduler_tick_max_deferment());
  		}
  #endif
27185016b   Thomas Gleixner   nohz: Track last ...
569
  		/*
98962465e   Jon Hunter   nohz: Prevent clo...
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
  		 * calculate the expiry time for the next timer wheel
  		 * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
  		 * that there is no timer pending or at least extremely
  		 * far into the future (12 days for HZ=1000). In this
  		 * case we set the expiry to the end of time.
  		 */
  		if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
  			/*
  			 * Calculate the time delta for the next timer event.
  			 * If the time delta exceeds the maximum time delta
  			 * permitted by the current clocksource then adjust
  			 * the time delta accordingly to ensure the
  			 * clocksource does not wrap.
  			 */
  			time_delta = min_t(u64, time_delta,
  					   tick_period.tv64 * delta_jiffies);
98962465e   Jon Hunter   nohz: Prevent clo...
586
  		}
001474491   Woodruff, Richard   nohz: suppress ne...
587

27185016b   Thomas Gleixner   nohz: Track last ...
588
589
590
591
  		if (time_delta < KTIME_MAX)
  			expires = ktime_add_ns(last_update, time_delta);
  		else
  			expires.tv64 = KTIME_MAX;
001474491   Woodruff, Richard   nohz: suppress ne...
592

001474491   Woodruff, Richard   nohz: suppress ne...
593
594
595
  		/* Skip reprogram of event if its not changed */
  		if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
  			goto out;
84bf1bccc   Frederic Weisbecker   nohz: Move next i...
596
  		ret = expires;
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
597
598
599
600
601
602
603
604
  		/*
  		 * nohz_stop_sched_tick can be called several times before
  		 * the nohz_restart_sched_tick is called. This happens when
  		 * interrupts arrive which do not cause a reschedule. In the
  		 * first call we save the current tick time, so we can restart
  		 * the scheduler tick in nohz_restart_sched_tick.
  		 */
  		if (!ts->tick_stopped) {
c1cc017c5   Alex Shi   sched/nohz: Clean...
605
  			nohz_balance_enter_idle(cpu);
5167e8d54   Peter Zijlstra   sched/nohz: Rewri...
606
  			calc_load_enter_idle();
46cb4b7c8   Siddha, Suresh B   sched: dynticks i...
607

f5d411c91   Frederic Weisbecker   nohz: Rename ts->...
608
  			ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
609
  			ts->tick_stopped = 1;
cb41a2907   Frederic Weisbecker   nohz: Add basic t...
610
  			trace_tick_stop(1, " ");
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
611
  		}
d3ed78245   Thomas Gleixner   highres/dyntick: ...
612

eaad084bb   Thomas Gleixner   NOHZ: prevent mul...
613
  		/*
98962465e   Jon Hunter   nohz: Prevent clo...
614
615
  		 * If the expiration time == KTIME_MAX, then
  		 * in this case we simply stop the tick timer.
eaad084bb   Thomas Gleixner   NOHZ: prevent mul...
616
  		 */
98962465e   Jon Hunter   nohz: Prevent clo...
617
  		 if (unlikely(expires.tv64 == KTIME_MAX)) {
eaad084bb   Thomas Gleixner   NOHZ: prevent mul...
618
619
620
621
  			if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
  				hrtimer_cancel(&ts->sched_timer);
  			goto out;
  		}
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
622
623
  		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
  			hrtimer_start(&ts->sched_timer, expires,
5c333864a   Arun R Bharadwaj   timers: Identifyi...
624
  				      HRTIMER_MODE_ABS_PINNED);
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
625
626
627
  			/* Check, if the timer was already in the past */
  			if (hrtimer_active(&ts->sched_timer))
  				goto out;
4c9dc6412   Pavel Machek   time: timer cleanups
628
  		} else if (!tick_program_event(expires, 0))
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
629
630
631
632
633
634
635
  				goto out;
  		/*
  		 * We are past the event already. So we crossed a
  		 * jiffie boundary. Update jiffies and raise the
  		 * softirq.
  		 */
  		tick_do_update_jiffies64(ktime_get());
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
636
637
638
639
640
  	}
  	raise_softirq_irqoff(TIMER_SOFTIRQ);
  out:
  	ts->next_jiffies = next_jiffies;
  	ts->last_jiffies = last_jiffies;
4f86d3a8e   Len Brown   cpuidle: consolid...
641
  	ts->sleep_length = ktime_sub(dev->next_event, now);
84bf1bccc   Frederic Weisbecker   nohz: Move next i...
642
643
  
  	return ret;
280f06774   Frederic Weisbecker   nohz: Separate ou...
644
  }
5811d9963   Frederic Weisbecker   nohz: Prepare to ...
645
646
647
  static void tick_nohz_full_stop_tick(struct tick_sched *ts)
  {
  #ifdef CONFIG_NO_HZ_FULL
e9a2eb403   Alex Shi   nohz_full: fix co...
648
  	int cpu = smp_processor_id();
5811d9963   Frederic Weisbecker   nohz: Prepare to ...
649

e9a2eb403   Alex Shi   nohz_full: fix co...
650
651
  	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
  		return;
5811d9963   Frederic Weisbecker   nohz: Prepare to ...
652

e9a2eb403   Alex Shi   nohz_full: fix co...
653
654
  	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
  		return;
5811d9963   Frederic Weisbecker   nohz: Prepare to ...
655

e9a2eb403   Alex Shi   nohz_full: fix co...
656
657
  	if (!can_stop_full_tick())
  		return;
5811d9963   Frederic Weisbecker   nohz: Prepare to ...
658

e9a2eb403   Alex Shi   nohz_full: fix co...
659
  	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
5811d9963   Frederic Weisbecker   nohz: Prepare to ...
660
661
  #endif
  }
5b39939a4   Frederic Weisbecker   nohz: Move ts->id...
662
663
664
665
666
667
668
669
670
671
672
673
/*
 * Check whether this CPU may stop its tick when entering idle.
 * Returns true when dyntick-idle is permitted right now.
 */
static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this cpu is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the cpu which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		return false;
	}

	/* Nohz mode not active yet: report a one-tick sleep length and bail */
	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
		ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
		return false;
	}

	/* A task wants to run: the tick must keep going */
	if (need_resched())
		return false;

	/* Pending softirqs would be starved with a stopped tick; warn a
	 * limited number of times about the offender. */
	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
		static int ratelimit;

		if (ratelimit < 10 &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ: local_softirq_pending %02x\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
		return false;
	}

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;
		/*
		 * Boot safety: make sure the timekeeping duty has been
		 * assigned before entering dyntick-idle mode,
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
			return false;
	}

	return true;
}
19f5f7364   Frederic Weisbecker   nohz: Separate id...
711
712
/*
 * Idle-enter slow path: start idle time accounting and, when permitted,
 * stop the periodic tick. Called with interrupts disabled.
 */
static void __tick_nohz_idle_enter(struct tick_sched *ts)
{
	ktime_t now, expires;
	int cpu = smp_processor_id();

	/* Record the idle entry timestamp for idle time accounting */
	now = tick_nohz_start_idle(ts);

	if (can_stop_idle_tick(cpu, ts)) {
		int was_stopped = ts->tick_stopped;

		ts->idle_calls++;

		expires = tick_nohz_stop_sched_tick(ts, now, cpu);
		if (expires.tv64 > 0LL) {
			/* The tick was actually deferred: count the sleep */
			ts->idle_sleeps++;
			ts->idle_expires = expires;
		}

		/* Remember the jiffy we stopped at, for tick accounting on exit */
		if (!was_stopped && ts->tick_stopped)
			ts->idle_jiffies = ts->last_jiffies;
	}
}
  
/**
 * tick_nohz_idle_enter - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 * Called when we start the idle loop.
 *
 * The arch is responsible of calling:
 *
 * - rcu_idle_enter() after its last use of RCU before the CPU is put
 *  to sleep.
 * - rcu_idle_exit() before the first use of RCU after the CPU is woken up.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	/* Must be entered with interrupts enabled; we disable them below */
	WARN_ON_ONCE(irqs_disabled());

	/*
	 * Update the idle state in the scheduler domain hierarchy
	 * when tick_nohz_stop_sched_tick() is called from the idle loop.
	 * State will be updated to busy during the first busy tick after
	 * exiting idle.
	 */
	set_cpu_sd_state_idle();

	local_irq_disable();

	ts = &__get_cpu_var(tick_cpu_sched);
	ts->inidle = 1;
	__tick_nohz_idle_enter(ts);

	local_irq_enable();
}
EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
280f06774   Frederic Weisbecker   nohz: Separate ou...
765
766
767
768
769
770
771
772
773
774
775
776
  
/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	/* Idle loop interrupted: redo the full idle-enter evaluation;
	 * otherwise try the full-dynticks stop path. */
	if (ts->inidle)
		__tick_nohz_idle_enter(ts);
	else
		tick_nohz_full_stop_tick(ts);
}
  
  /**
4f86d3a8e   Len Brown   cpuidle: consolid...
784
785
786
787
788
789
790
791
792
793
   * tick_nohz_get_sleep_length - return the length of the current sleep
   *
   * Called from power state control code with interrupts disabled
   */
  ktime_t tick_nohz_get_sleep_length(void)
  {
  	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
  
  	return ts->sleep_length;
  }
c34bec5a4   Thomas Gleixner   NOHZ: split tick_...
794
795
796
/*
 * Re-arm the periodic tick timer after it was stopped. Retries until a
 * future expiry is successfully programmed, updating jiffies on each miss.
 */
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	/* Resume from where the tick was stopped */
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	while (1) {
		/* Forward the time to expire in the future */
		hrtimer_forward(&ts->sched_timer, now, tick_period);

		if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
			hrtimer_start_expires(&ts->sched_timer,
					      HRTIMER_MODE_ABS_PINNED);
			/* Check, if the timer was already in the past */
			if (hrtimer_active(&ts->sched_timer))
				break;
		} else {
			/* Low-res mode: program the clock event device directly */
			if (!tick_program_event(
				hrtimer_get_expires(&ts->sched_timer), 0))
				break;
		}
		/* Reread time and update jiffies */
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}
19f5f7364   Frederic Weisbecker   nohz: Separate id...
819
/*
 * Bring the periodic tick back after a dyntick-idle period: refresh
 * jiffies and load accounting, then restart the tick timer.
 */
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);
	update_cpu_load_nohz();

	calc_load_exit_idle();
	/* The long stop must not be mistaken for a lockup */
	touch_softlockup_watchdog();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped  = 0;
	ts->idle_exittime = now;

	tick_nohz_restart(ts, now);
}
  
/*
 * Account the ticks that were skipped while the tick was stopped as idle
 * time. Not needed when native vtime accounting charges idle precisely.
 */
static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	unsigned long ticks;

	/* Dynamic vtime accounting already handles idle time */
	if (vtime_accounting_enabled())
		return;
	/*
	 * We stopped the tick in idle. Update process times would miss the
	 * time we slept as update_process_times does only a 1 tick
	 * accounting. Enforce that this is accounted to idle !
	 */
	ticks = jiffies - ts->idle_jiffies;
	/*
	 * We might be one off. Do not randomly account a huge number of ticks!
	 */
	if (ticks && ticks < LONG_MAX)
		account_idle_ticks(ticks);
#endif
}
4f86d3a8e   Len Brown   cpuidle: consolid...
856
/**
 * tick_nohz_idle_exit - restart the idle tick from the idle task
 *
 * Restart the idle tick when the CPU is woken up from idle
 * This also exit the RCU extended quiescent state. The CPU
 * can use RCU again after this function is called.
 */
void tick_nohz_idle_exit(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now;

	local_irq_disable();

	WARN_ON_ONCE(!ts->inidle);

	ts->inidle = 0;

	/* Read the clock once for both the accounting and restart paths */
	if (ts->idle_active || ts->tick_stopped)
		now = ktime_get();

	/* Close the idle time accounting interval */
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);

	if (ts->tick_stopped) {
		tick_nohz_restart_sched_tick(ts, now);
		tick_nohz_account_idle_ticks(ts);
	}

	local_irq_enable();
}
EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
888
889
890
891
  
  static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
  {
  	hrtimer_forward(&ts->sched_timer, now, tick_period);
cc584b213   Arjan van de Ven   hrtimer: convert ...
892
  	return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
893
894
895
896
897
898
899
900
901
902
903
904
  }
  
/*
 * The nohz low res interrupt handler
 */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	/* No automatic reprogramming in oneshot mode; we do it below */
	dev->next_event.tv64 = KTIME_MAX;

	tick_sched_do_timer(now);
	tick_sched_handle(ts, regs);

	/* Keep reprogramming until the expiry lands in the future,
	 * updating jiffies for every period we crossed meanwhile. */
	while (tick_nohz_reprogram(ts, now)) {
		now = ktime_get();
		tick_do_update_jiffies64(now);
	}
}
  
/**
 * tick_nohz_switch_to_nohz - switch to nohz mode
 */
static void tick_nohz_switch_to_nohz(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t next;

	/* nohz disabled on the command line / not usable */
	if (!tick_nohz_active)
		return;

	local_irq_disable();
	if (tick_switch_to_oneshot(tick_nohz_handler)) {
		local_irq_enable();
		return;
	}

	tick_nohz_active = 1;
	ts->nohz_mode = NOHZ_MODE_LOWRES;

	/*
	 * Recycle the hrtimer in ts, so we can share the
	 * hrtimer_forward with the highres code.
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	/* Get the next period */
	next = tick_init_jiffy_update();

	/* Program the first event; retry per period if it was in the past */
	for (;;) {
		hrtimer_set_expires(&ts->sched_timer, next);
		if (!tick_program_event(next, 0))
			break;
		next = ktime_add(next, tick_period);
	}
	local_irq_enable();
}
fb02fbc14   Thomas Gleixner   NOHZ: restart tic...
948
949
950
951
952
953
954
955
956
957
958
/*
 * When NOHZ is enabled and the tick is stopped, we need to kick the
 * tick timer from irq_enter() so that the jiffies update is kept
 * alive during long running softirqs. That's ugly as hell, but
 * correctness is key even if we need to fix the offending softirq in
 * the first place.
 *
 * Note, this is different to tick_nohz_restart. We just kick the
 * timer and do not touch the other magic bits which need to be done
 * when idle is left.
 */
static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
{
	/* Deliberately compiled out; kept for reference. */
#if 0
	/* Switch back to 2.6.27 behaviour */
	ktime_t delta;

	/*
	 * Do not touch the tick device, when the next expiry is either
	 * already reached or less/equal than the tick period.
	 */
	delta =	ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
	if (delta.tv64 <= tick_period.tv64)
		return;

	tick_nohz_restart(ts, now);
#endif
}
5acac1be4   Frederic Weisbecker   tick: Rename tick...
976
/*
 * irq_enter() hook: close the idle accounting interval and keep jiffies
 * fresh when an interrupt hits a CPU whose tick is stopped.
 */
static inline void tick_nohz_irq_enter(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now;

	/* Fast path: neither idle accounting nor a stopped tick to handle */
	if (!ts->idle_active && !ts->tick_stopped)
		return;
	now = ktime_get();
	if (ts->idle_active)
		tick_nohz_stop_idle(ts, now);
	if (ts->tick_stopped) {
		/* The interrupt handler may rely on up-to-date jiffies */
		tick_nohz_update_jiffies(now);
		tick_nohz_kick_tick(ts, now);
	}
}
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
991
992
993
#else

/* !CONFIG_NO_HZ_COMMON: nohz is compiled out, provide no-op stubs */
static inline void tick_nohz_switch_to_nohz(void) { }
static inline void tick_nohz_irq_enter(void) { }

#endif /* CONFIG_NO_HZ_COMMON */
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
997
998
  
/*
 * Called from irq_enter to notify about the possible interruption of idle()
 */
void tick_irq_enter(void)
{
	tick_check_oneshot_broadcast_this_cpu();
	tick_nohz_irq_enter();
}
  
  /*
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
1008
1009
1010
1011
   * High resolution timer specific code
   */
  #ifdef CONFIG_HIGH_RES_TIMERS
  /*
4c9dc6412   Pavel Machek   time: timer cleanups
1012
   * We rearm the timer until we get disabled by the idle code.
351f181f9   Chuansheng Liu   timers, sched: Co...
1013
   * Called with interrupts disabled.
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
1014
1015
1016
1017
1018
   */
/*
 * Per-CPU hrtimer callback that emulates the periodic tick in highres
 * mode. Always re-arms itself one tick_period ahead.
 */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	struct tick_sched *ts =
		container_of(timer, struct tick_sched, sched_timer);
	struct pt_regs *regs = get_irq_regs();
	ktime_t now = ktime_get();

	tick_sched_do_timer(now);

	/*
	 * Do not call, when we are not in irq context and have
	 * no valid regs pointer
	 */
	if (regs)
		tick_sched_handle(ts, regs);

	hrtimer_forward(timer, now, tick_period);

	return HRTIMER_RESTART;
}
5307c9556   Mike Galbraith   tick: Add tick sk...
1035
/* Boot-time knob: skew per-CPU ticks to spread jiffies_lock contention */
static int sched_skew_tick;

/* Parse the "skew_tick=" early boot parameter */
static int __init skew_tick(char *str)
{
	get_option(&str, &sched_skew_tick);

	return 0;
}
early_param("skew_tick", skew_tick);
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
/**
 * tick_setup_sched_timer - setup the tick emulation timer
 */
void tick_setup_sched_timer(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
	ktime_t now = ktime_get();

	/*
	 * Emulate tick processing via per-CPU hrtimers:
	 */
	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ts->sched_timer.function = tick_sched_timer;

	/* Get the next period (per cpu) */
	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());

	/* Offset the tick to avert jiffies_lock contention. */
	if (sched_skew_tick) {
		u64 offset = ktime_to_ns(tick_period) >> 1;
		do_div(offset, num_possible_cpus());
		offset *= smp_processor_id();
		hrtimer_add_expires_ns(&ts->sched_timer, offset);
	}

	/* Arm the timer; retry per period until the expiry is in the future */
	for (;;) {
		hrtimer_forward(&ts->sched_timer, now, tick_period);
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED);
		/* Check, if the timer was already in the past */
		if (hrtimer_active(&ts->sched_timer))
			break;
		now = ktime_get();
	}
#ifdef CONFIG_NO_HZ_COMMON
	if (tick_nohz_enabled) {
		ts->nohz_mode = NOHZ_MODE_HIGHRES;
		tick_nohz_active = 1;
	}
#endif
}
3c4fbe5e0   Miao Xie   nohz: fix wrong e...
1083
  #endif /* HIGH_RES_TIMERS */
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
1084

3451d0243   Frederic Weisbecker   nohz: Rename CONF...
1085
  #if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
1086
1087
1088
/*
 * Tear down the tick emulation state of @cpu (e.g. on CPU hotplug) and
 * reset its tick_sched bookkeeping to a pristine state.
 */
void tick_cancel_sched_timer(int cpu)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
# ifdef CONFIG_HIGH_RES_TIMERS
	/* Only cancel if the hrtimer was ever initialized */
	if (ts->sched_timer.base)
		hrtimer_cancel(&ts->sched_timer);
# endif

	memset(ts, 0, sizeof(*ts));
}
3c4fbe5e0   Miao Xie   nohz: fix wrong e...
1096
  #endif
79bf2bb33   Thomas Gleixner   [PATCH] tick-mana...
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
  
/**
 * Async notification about clocksource changes
 */
void tick_clock_notify(void)
{
	int cpu;

	/* Flag every CPU to re-evaluate oneshot capability */
	for_each_possible_cpu(cpu)
		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
}
  
/*
 * Async notification about clock event changes
 */
void tick_oneshot_notify(void)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	/* Flag only the local CPU for re-evaluation */
	set_bit(0, &ts->check_clocks);
}
  
/**
 * Check, if a change happened, which makes oneshot possible.
 *
 * Called cyclic from the hrtimer softirq (driven by the timer
 * softirq) allow_nohz signals, that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either compile
 * or runtime).
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);

	/* Nothing flagged since the last check */
	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	/* Already switched away from the periodic mode */
	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	/* Caller wants highres instead: report that oneshot is possible */
	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}