Blame view
kernel/time/tick-common.c
9.86 KB
906568c9c [PATCH] tick-mana... |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 |
/* * linux/kernel/time/tick-common.c * * This file contains the base functions to manage periodic tick * related events. * * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de> * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner * * This code is licenced under the GPL version 2. For details see * kernel-base/COPYING. */ #include <linux/cpu.h> #include <linux/err.h> #include <linux/hrtimer.h> |
d7b906897 [S390] genirq/clo... |
17 |
#include <linux/interrupt.h> |
906568c9c [PATCH] tick-mana... |
18 19 20 |
#include <linux/percpu.h> #include <linux/profile.h> #include <linux/sched.h> |
ccf33d688 clockevents: Add ... |
21 |
#include <linux/module.h> |
906568c9c [PATCH] tick-mana... |
22 |
|
d7b906897 [S390] genirq/clo... |
23 |
#include <asm/irq_regs.h> |
f8381cba0 [PATCH] tick-mana... |
24 |
#include "tick-internal.h" |
906568c9c [PATCH] tick-mana... |
25 26 27 |
/* * Tick devices */ |
f8381cba0 [PATCH] tick-mana... |
28 |
DEFINE_PER_CPU(struct tick_device, tick_cpu_device); |
906568c9c [PATCH] tick-mana... |
29 30 31 |
/* * Tick next event: keeps track of the tick time */ |
f8381cba0 [PATCH] tick-mana... |
32 33 |
ktime_t tick_next_period; ktime_t tick_period; |
050ded1bb tick: Document ti... |
34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
/* * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This * variable has two functions: * * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the * timekeeping lock all at once. Only the CPU which is assigned to do the * update is handling it. * * 2) Hand off the duty in the NOHZ idle case by setting the value to * TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which looks * at it will take over and keep the time keeping alive. The handover * procedure also covers cpu hotplug. */ |
6441402b1 clockevents: prev... |
49 |
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; |
906568c9c [PATCH] tick-mana... |
50 |
|
289f480af [PATCH] Add debug... |
51 52 53 54 55 56 57 |
/* * Debugging: see timer_list.c */ struct tick_device *tick_get_device(int cpu) { return &per_cpu(tick_cpu_device, cpu); } |
79bf2bb33 [PATCH] tick-mana... |
58 59 60 61 62 |
/** * tick_is_oneshot_available - check for a oneshot capable event device */ int tick_is_oneshot_available(void) { |
909ea9646 core: Replace __g... |
63 |
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
79bf2bb33 [PATCH] tick-mana... |
64 |
|
3a142a067 clockevents: Prev... |
65 66 67 68 69 |
if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT)) return 0; if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) return 1; return tick_broadcast_oneshot_available(); |
79bf2bb33 [PATCH] tick-mana... |
70 |
} |
/*
 * Periodic tick: advance jiffies/timekeeping (on the responsible CPU
 * only) and run the per-cpu periodic housekeeping.
 */
static void tick_periodic(int cpu)
{
	/* Only the designated CPU updates jiffies and wall time */
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&jiffies_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&jiffies_lock);
		/* Wall time update is done outside the jiffies seqlock */
		update_wall_time();
	}

	/* Per-cpu work: process accounting, timers, profiling hook */
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks.
 *
 * For devices programmed in oneshot mode this also re-arms the next
 * period, looping until a future expiry is successfully programmed.
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

	/* Hardware in true periodic mode re-fires by itself */
	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	for (;;) {
		/*
		 * Setup the next period for devices, which do not have
		 * periodic mode:
		 */
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as the tick_periodic() increments jiffies,
		 * which then will increment time, possibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}

/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	/* Install the periodic (or broadcast) event handler */
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		/* Hardware periodic mode available: let it re-fire itself */
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		/* Read tick_next_period consistently vs. the tick update */
		do {
			seq = read_seqbegin(&jiffies_lock);
			next = tick_next_period;
		} while (read_seqretry(&jiffies_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		/* Program the first event; retry with later expiries on failure */
		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}

/*
 * Setup the tick device: install @newdev as the tick device of @cpu,
 * taking over state (handler, next event) from a replaced device and
 * honoring broadcast placeholder devices.
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			/* NOHZ-full CPUs must not own the do_timer duty */
			if (!tick_nohz_full_cpu(cpu))
				tick_do_timer_cpu = cpu;
			else
				tick_do_timer_cpu = TICK_DO_TIMER_NONE;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		/* Preserve state of the replaced device, then neuter it */
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns !=0 when we keep the
	 * current active broadcast state for this CPU.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}
03e13cf5e clockevents: Impl... |
218 219 |
void tick_install_replacement(struct clock_event_device *newdev) { |
22127e93c time: Replace __g... |
220 |
struct tick_device *td = this_cpu_ptr(&tick_cpu_device); |
03e13cf5e clockevents: Impl... |
221 222 223 224 225 226 227 |
int cpu = smp_processor_id(); clockevents_exchange_device(td->evtdev, newdev); tick_setup_device(td, newdev, cpu, cpumask_of(cpu)); if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) tick_oneshot_notify(); } |
45cb8e01b clockevents: Spli... |
228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 |
static bool tick_check_percpu(struct clock_event_device *curdev, struct clock_event_device *newdev, int cpu) { if (!cpumask_test_cpu(cpu, newdev->cpumask)) return false; if (cpumask_equal(newdev->cpumask, cpumask_of(cpu))) return true; /* Check if irq affinity can be set */ if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq)) return false; /* Prefer an existing cpu local device */ if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu))) return false; return true; } static bool tick_check_preferred(struct clock_event_device *curdev, struct clock_event_device *newdev) { /* Prefer oneshot capable device */ if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) { if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT)) return false; if (tick_oneshot_mode_active()) return false; } |
70e5975d3 clockevents: Pref... |
254 255 256 257 258 259 260 |
/* * Use the higher rated one, but prefer a CPU local device with a lower * rating than a non-CPU local device */ return !curdev || newdev->rating > curdev->rating || !cpumask_equal(curdev->cpumask, newdev->cpumask); |
45cb8e01b clockevents: Spli... |
261 |
} |
/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL !
 */
bool tick_check_replacement(struct clock_event_device *curdev,
			    struct clock_event_device *newdev)
{
	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
		return false;

	return tick_check_preferred(curdev, newdev);
}

/*
 * Check, if the new registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		goto out_bc;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	/* cpu local device ? */
	if (!tick_check_percpu(curdev, newdev, cpu))
		goto out_bc;

	/* Preference decision */
	if (!tick_check_preferred(curdev, newdev))
		goto out_bc;

	/* Pin the driver module while the device is in use */
	if (!try_module_get(newdev->owner))
		return;

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	tick_install_broadcast_device(newdev);
}

/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled.
 */
void tick_handover_do_timer(int *cpup)
{
	if (*cpup == tick_do_timer_cpu) {
		/* Hand the duty to the first online CPU, if any is left */
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		/* Detach the handler so a stray interrupt is harmless */
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
}
8c53daf63 clockevents: Move... |
363 |
void tick_suspend(void) |
6321dd60c [PATCH] Save/rest... |
364 |
{ |
22127e93c time: Replace __g... |
365 |
struct tick_device *td = this_cpu_ptr(&tick_cpu_device); |
6321dd60c [PATCH] Save/rest... |
366 |
|
2344abbcb clockevents: make... |
367 |
clockevents_shutdown(td->evtdev); |
6321dd60c [PATCH] Save/rest... |
368 |
} |
8c53daf63 clockevents: Move... |
369 |
void tick_resume(void) |
6321dd60c [PATCH] Save/rest... |
370 |
{ |
22127e93c time: Replace __g... |
371 |
struct tick_device *td = this_cpu_ptr(&tick_cpu_device); |
18de5bc4c clockevents: fix ... |
372 |
int broadcast = tick_resume_broadcast(); |
6321dd60c [PATCH] Save/rest... |
373 |
|
18de5bc4c clockevents: fix ... |
374 375 376 377 378 379 380 381 |
clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); if (!broadcast) { if (td->mode == TICKDEV_MODE_PERIODIC) tick_setup_periodic(td->evtdev, 0); else tick_resume_oneshot(); } |
6321dd60c [PATCH] Save/rest... |
382 |
} |
906568c9c [PATCH] tick-mana... |
383 384 |
/** * tick_init - initialize the tick control |
906568c9c [PATCH] tick-mana... |
385 386 387 |
*/ void __init tick_init(void) { |
b352bc1cb tick: Convert bro... |
388 |
tick_broadcast_init(); |
a80e49e2c nohz: Move nohz f... |
389 |
tick_nohz_init(); |
906568c9c [PATCH] tick-mana... |
390 |
} |