kernel/time/tick-broadcast.c

// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */
static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
static cpumask_var_t tmpmask __cpumask_var_read_mostly;
static int tick_broadcast_forced;

static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
static void tick_broadcast_clear_oneshot(int cpu);
static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
# ifdef CONFIG_HOTPLUG_CPU
static void tick_broadcast_oneshot_offline(unsigned int cpu);
# endif
#else
static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
static inline void tick_broadcast_clear_oneshot(int cpu) { }
static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
# ifdef CONFIG_HOTPLUG_CPU
static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }
# endif
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
static bool tick_check_broadcast_device(struct clock_event_device *curdev,
					struct clock_event_device *newdev)
{
	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
		return false;

	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
		return false;

	return !curdev || newdev->rating > curdev->rating;
}

/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
{
	int ret = -ENODEV;

	if (tick_is_broadcast_device(dev)) {
		raw_spin_lock(&tick_broadcast_lock);
		ret = __clockevents_update_freq(dev, freq);
		raw_spin_unlock(&tick_broadcast_lock);
	}
	return ret;
}

static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device, if
			 * the broadcast device exists and is not
			 * hrtimer based.
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * We only run the local handler, if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *           expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static bool tick_do_periodic_broadcast(void)
{
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	return tick_do_broadcast(tmpmask);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);

	/* Handle spurious interrupts gracefully */
	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
		raw_spin_unlock(&tick_broadcast_lock);
		return;
	}

	bc_local = tick_do_periodic_broadcast();

	if (clockevent_state_oneshot(dev)) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		clockevents_program_event(dev, next, true);
	}
	raw_spin_unlock(&tick_broadcast_lock);

	/*
	 * We run the handler of the local cpu after dropping
	 * tick_broadcast_lock because the handler might deadlock when
	 * trying to switch to oneshot mode.
	 */
	if (bc_local)
		td->evtdev->event_handler(td->evtdev);
}

/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called when the system enters a state where affected tick devices
 * might stop. Note: TICK_BROADCAST_FORCE cannot be undone.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;
	unsigned long flags;

	/* Protects also the local clockevent device. */
	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
		/* fall through */
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * Only shutdown the cpu local device, if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not a hrtimer based one
			 * - the broadcast device is in periodic mode to
			 *   avoid a hiccup during switch to oneshot mode
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (bc) {
		if (cpumask_empty(tick_broadcast_mask)) {
			if (!bc_stopped)
				clockevents_shutdown(bc);
		} else if (bc_stopped) {
			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				tick_broadcast_start_periodic(bc);
			else
				tick_broadcast_setup_oneshot(bc);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
EXPORT_SYMBOL_GPL(tick_broadcast_control);
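
/*
 * Typical usage (rough sketch): code which knows that the local clockevent
 * device stops in deep power states does not normally call
 * tick_broadcast_control() directly but goes through the
 * tick_broadcast_enable()/tick_broadcast_disable() wrappers from
 * include/linux/tick.h, along the lines of:
 *
 *	if (local_timer_stops_in_deep_idle)	// illustrative condition
 *		tick_broadcast_enable();
 */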

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

#ifdef CONFIG_HOTPLUG_CPU
static void tick_shutdown_broadcast(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}
}

/*
 * Remove a CPU from broadcasting
 */
void tick_broadcast_offline(unsigned int cpu)
{
	raw_spin_lock(&tick_broadcast_lock);
	cpumask_clear_cpu(cpu, tick_broadcast_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_on);
	tick_broadcast_oneshot_offline(cpu);
	tick_shutdown_broadcast();
	raw_spin_unlock(&tick_broadcast_lock);
}

#endif

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * This is called from tick_resume_local() on a resuming CPU. That's
 * called from the core resume function, tick_unfreeze() and the magic XEN
 * resume hackery.
 *
 * In none of these cases the broadcast device mode can change and the
 * bit of the resuming CPU in the broadcast mask is safe as well.
 */
bool tick_resume_check_broadcast(void)
{
	if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
		return false;
	else
		return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
}

void tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_tick_resume(bc);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;
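
/*
 * Rough role of the three masks (as used by the oneshot code below):
 *
 * - tick_broadcast_oneshot_mask: CPUs whose local device is shut down in
 *   deep idle and which expect a wakeup from the broadcast device.
 *
 * - tick_broadcast_pending_mask: CPUs whose expired event has already been
 *   handed to the broadcast handler, so they must not reprogram their
 *   local device with an already expired event on idle exit.
 *
 * - tick_broadcast_force_mask: CPUs which are about to receive the
 *   broadcast IPI anyway; tick_check_broadcast_expired() uses this to keep
 *   them from entering a deep idle state.
 */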

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				     ktime_t expires)
{
	if (!clockevent_state_oneshot(bc))
		clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);

	clockevents_program_event(bc, expires, 1);
	tick_broadcast_set_affinity(bc, cpumask_of(cpu));
}

static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast_this_cpu(void)
{
	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

		/*
		 * We might be in the middle of switching over from
		 * periodic to oneshot. If the CPU has not yet
		 * switched over, leave the device alone.
		 */
		if (td->mode == TICKDEV_MODE_ONESHOT) {
			clockevents_switch_state(td->evtdev,
						 CLOCK_EVT_STATE_ONESHOT);
		}
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event = KTIME_MAX;
	next_event = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		/*
		 * Required for !SMP because for_each_cpu() reports
		 * unconditionally CPU0 as set on UP kernels.
		 */
		if (!IS_ENABLED(CONFIG_SMP) &&
		    cpumask_empty(tick_broadcast_oneshot_mask))
			break;

		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event <= now) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event < next_event) {
			next_event = td->evtdev->next_event;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}

static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
{
	if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return 0;
	if (bc->next_event == KTIME_MAX)
		return 0;
	return bc->bound_on == cpu ? -EBUSY : 0;
}

static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcasting we cannot shutdown the cpu
	 * local device if our own event is the first one to expire or
	 * if we own the broadcast timer.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event < bc->next_event)
			return;
	}
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}
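
/*
 * Background (sketch): with CLOCK_EVT_FEAT_HRTIMER the "broadcast device"
 * is not real hardware but a hrtimer driven pseudo device (see
 * kernel/time/tick-broadcast-hrtimer.c). The CPU recorded in bc->bound_on
 * carries that hrtimer and therefore must not shut down its local timer or
 * go deep idle, which is what the two helpers above enforce.
 */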

int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc, *dev;
	int cpu, ret = 0;
	ktime_t now;

	/*
	 * If there is no broadcast device, tell the caller not to go
	 * into deep idle.
	 */
	if (!tick_broadcast_device.evtdev)
		return -EBUSY;

	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();

	if (state == TICK_BROADCAST_ENTER) {
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we do not add
		 * the CPU to the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			goto out;

		/*
		 * If the broadcast device is in periodic mode, we
		 * return.
		 */
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
			/* If it is a hrtimer based broadcast, return busy */
			if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
				ret = -EBUSY;
			goto out;
		}

		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

			/* Conditionally shut down the local timer. */
			broadcast_shutdown_local(bc, dev);

			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away; we return
			 * busy, so the CPU does not try to go deep
			 * idle.
			 */
			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
				ret = -EBUSY;
			} else if (dev->next_event < bc->next_event) {
				tick_broadcast_set_event(bc, cpu, dev->next_event);
				/*
				 * In case of hrtimer broadcasts the
				 * programming might have moved the
				 * timer to this cpu. If yes, remove
				 * us from the broadcast mask and
				 * return busy.
				 */
				ret = broadcast_needs_cpu(bc, cpu);
				if (ret) {
					cpumask_clear_cpu(cpu,
						tick_broadcast_oneshot_mask);
				}
			}
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event == KTIME_MAX)
				goto out;
			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event <= now) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}
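
/*
 * Call path (sketch): idle code uses tick_broadcast_enter() and
 * tick_broadcast_exit() (include/linux/tick.h), which reach the function
 * above via tick_broadcast_oneshot_control() when the local device has
 * CLOCK_EVT_FEAT_C3STOP set. A -EBUSY return tells the caller to fall
 * back to a shallower idle state instead of stopping the local timer.
 */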

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	if (!bc)
		return;

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = clockevent_state_periodic(bc);

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period);
		} else
			bc->next_event = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

#ifdef CONFIG_HOTPLUG_CPU
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU: */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dying CPU from broadcasting
 */
static void tick_broadcast_oneshot_offline(unsigned int cpu)
{
	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
}
#endif

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#else
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return -EBUSY;

	return 0;
}
#endif

void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}
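
/*
 * Boot-time note (sketch): tick_broadcast_init() only allocates the
 * cpumasks above; it is expected to run early from the tick
 * initialization path (tick_init()), before the first clock_event_device
 * is registered and tick_install_broadcast_device() can be called.
 */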