Blame view
kernel/watchdog_hld.c
7.68 KB
b24413180 License cleanup: ... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
73ce0511c kernel/watchdog.c... |
2 3 4 5 6 7 8 9 10 11 12 13 14 15 |
/* * Detect hard lockups on a system * * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. * * Note: Most of this code is borrowed heavily from the original softlockup * detector, so thanks to Ingo for the initial implementation. * Some chunks also taken from the old x86-specific nmi watchdog code, thanks * to those contributors as well. */ #define pr_fmt(fmt) "NMI watchdog: " fmt #include <linux/nmi.h> |
42f930da7 watchdog/hardlock... |
16 |
#include <linux/atomic.h> |
73ce0511c kernel/watchdog.c... |
17 |
#include <linux/module.h> |
b17b01533 sched/headers: Pr... |
18 |
#include <linux/sched/debug.h> |
73ce0511c kernel/watchdog.c... |
19 20 21 22 23 24 |
#include <asm/irq_regs.h> #include <linux/perf_event.h> static DEFINE_PER_CPU(bool, hard_watchdog_warn); static DEFINE_PER_CPU(bool, watchdog_nmi_touch); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); |
9c388a5ed watchdog/harclock... |
25 |
static DEFINE_PER_CPU(struct perf_event *, dead_event); |
941154bd6 watchdog/hardlock... |
26 |
static struct cpumask dead_events_mask; |
73ce0511c kernel/watchdog.c... |
27 |
|
73ce0511c kernel/watchdog.c... |
28 |
static unsigned long hardlockup_allcpu_dumped; |
42f930da7 watchdog/hardlock... |
29 |
static atomic_t watchdog_cpus = ATOMIC_INIT(0); |
73ce0511c kernel/watchdog.c... |
30 |
|
/**
 * arch_touch_nmi_watchdog - Suppress the next hard lockup check on this CPU
 *
 * Sets the per-CPU watchdog_nmi_touch flag; the perf NMI overflow callback
 * consumes the flag and skips one lockup check.  notrace because it can be
 * called from ftrace/NMI context.
 */
notrace void arch_touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled. If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
/* Per-CPU timestamp of the last accepted NMI sample. */
static DEFINE_PER_CPU(ktime_t, last_timestamp);
/* Per-CPU count of consecutive too-early NMI samples that were filtered. */
static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
/* Minimum time between two NMI samples; below this a sample is suspect. */
static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;

void watchdog_update_hrtimer_threshold(u64 period)
{
	/*
	 * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
	 *
	 * So it runs effectively with 2.5 times the rate of the NMI
	 * watchdog. That means the hrtimer should fire 2-3 times before
	 * the NMI watchdog expires. The NMI watchdog on x86 is based on
	 * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
	 * might run way faster than expected and the NMI fires in a
	 * smaller period than the one deduced from the nominal CPU
	 * frequency. Depending on the Turbo-Mode factor this might be fast
	 * enough to get the NMI period smaller than the hrtimer watchdog
	 * period and trigger false positives.
	 *
	 * The sample threshold is used to check in the NMI handler whether
	 * the minimum time between two NMI samples has elapsed. That
	 * prevents false positives.
	 *
	 * Set this to 4/5 of the actual watchdog threshold period so the
	 * hrtimer is guaranteed to fire at least once within the real
	 * watchdog threshold.
	 */
	watchdog_hrtimer_sample_threshold = period * 2;
}

/*
 * Return true when the current NMI sample should be acted upon, false when
 * it arrived too early (e.g. Turbo-Mode sped up the cycle counter) and must
 * be ignored to avoid false hard lockup reports.
 */
static bool watchdog_check_timestamp(void)
{
	ktime_t delta, now = ktime_get_mono_fast_ns();

	delta = now - __this_cpu_read(last_timestamp);
	if (delta < watchdog_hrtimer_sample_threshold) {
		/*
		 * If ktime is jiffies based, a stalled timer would prevent
		 * jiffies from being incremented and the filter would look
		 * at a stale timestamp and never trigger.
		 */
		if (__this_cpu_inc_return(nmi_rearmed) < 10)
			return false;
	}
	__this_cpu_write(nmi_rearmed, 0);
	__this_cpu_write(last_timestamp, now);
	return true;
}
#else
static inline bool watchdog_check_timestamp(void)
{
	return true;
}
#endif
73ce0511c kernel/watchdog.c... |
99 100 101 102 103 104 105 106 107 108 |
static struct perf_event_attr wd_hw_attr = { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES, .size = sizeof(struct perf_event_attr), .pinned = 1, .disabled = 1, }; /* Callback function for perf event subsystem */ static void watchdog_overflow_callback(struct perf_event *event, |
01f0a0270 watchdog/core: Re... |
109 110 |
struct perf_sample_data *data, struct pt_regs *regs) |
73ce0511c kernel/watchdog.c... |
111 112 113 114 115 116 117 118 |
{ /* Ensure the watchdog never gets throttled */ event->hw.interrupts = 0; if (__this_cpu_read(watchdog_nmi_touch) == true) { __this_cpu_write(watchdog_nmi_touch, false); return; } |
7edaeb684 kernel/watchdog: ... |
119 120 |
if (!watchdog_check_timestamp()) return; |
73ce0511c kernel/watchdog.c... |
121 122 123 124 125 126 127 128 129 130 131 132 |
/* check for a hardlockup * This is done by making sure our timer interrupt * is incrementing. The timer interrupt should have * fired multiple times before we overflow'd. If it hasn't * then this is a good indication the cpu is stuck */ if (is_hardlockup()) { int this_cpu = smp_processor_id(); /* only print hardlockups once */ if (__this_cpu_read(hard_watchdog_warn) == true) return; |
8f4a8c12c kernel/watchdog_h... |
133 134 135 |
pr_emerg("Watchdog detected hard LOCKUP on cpu %d ", this_cpu); |
73ce0511c kernel/watchdog.c... |
136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 |
print_modules(); print_irqtrace_events(current); if (regs) show_regs(regs); else dump_stack(); /* * Perform all-CPU dump only once to avoid multiple hardlockups * generating interleaving traces */ if (sysctl_hardlockup_all_cpu_backtrace && !test_and_set_bit(0, &hardlockup_allcpu_dumped)) trigger_allbutself_cpu_backtrace(); if (hardlockup_panic) nmi_panic(regs, "Hard LOCKUP"); __this_cpu_write(hard_watchdog_warn, true); return; } __this_cpu_write(hard_watchdog_warn, false); return; } |
178b9f7a3 watchdog/hardlock... |
161 162 163 164 165 166 167 168 169 170 171 172 173 |
static int hardlockup_detector_event_create(void) { unsigned int cpu = smp_processor_id(); struct perf_event_attr *wd_attr; struct perf_event *evt; wd_attr = &wd_hw_attr; wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); /* Try to register using hardware perf events */ evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); if (IS_ERR(evt)) { |
1b6266ebe watchdog: Reduce ... |
174 175 176 |
pr_debug("Perf event create on CPU %d failed with %ld ", cpu, PTR_ERR(evt)); |
178b9f7a3 watchdog/hardlock... |
177 178 179 180 181 |
return PTR_ERR(evt); } this_cpu_write(watchdog_ev, evt); return 0; } |
941154bd6 watchdog/hardlock... |
182 |
/** |
2a1b8ee4f watchdog/hardlock... |
183 184 185 186 187 188 |
* hardlockup_detector_perf_enable - Enable the local event */ void hardlockup_detector_perf_enable(void) { if (hardlockup_detector_event_create()) return; |
42f930da7 watchdog/hardlock... |
189 190 |
/* use original value for check */ if (!atomic_fetch_inc(&watchdog_cpus)) |
146c9d0e9 watchdog/hardlock... |
191 192 |
pr_info("Enabled. Permanently consumes one hw-PMU counter. "); |
2a1b8ee4f watchdog/hardlock... |
193 194 195 196 |
perf_event_enable(this_cpu_read(watchdog_ev)); } /** |
941154bd6 watchdog/hardlock... |
197 198 199 |
* hardlockup_detector_perf_disable - Disable the local event */ void hardlockup_detector_perf_disable(void) |
73ce0511c kernel/watchdog.c... |
200 |
{ |
941154bd6 watchdog/hardlock... |
201 |
struct perf_event *event = this_cpu_read(watchdog_ev); |
73ce0511c kernel/watchdog.c... |
202 203 204 |
if (event) { perf_event_disable(event); |
9c388a5ed watchdog/harclock... |
205 206 |
this_cpu_write(watchdog_ev, NULL); this_cpu_write(dead_event, event); |
941154bd6 watchdog/hardlock... |
207 |
cpumask_set_cpu(smp_processor_id(), &dead_events_mask); |
42f930da7 watchdog/hardlock... |
208 |
atomic_dec(&watchdog_cpus); |
73ce0511c kernel/watchdog.c... |
209 210 |
} } |
d0b6e0a8e watchdog/hardlock... |
211 212 |
/** |
941154bd6 watchdog/hardlock... |
213 214 215 216 217 218 219 220 221 |
* hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them * * Called from lockup_detector_cleanup(). Serialized by the caller. */ void hardlockup_detector_perf_cleanup(void) { int cpu; for_each_cpu(cpu, &dead_events_mask) { |
9c388a5ed watchdog/harclock... |
222 |
struct perf_event *event = per_cpu(dead_event, cpu); |
941154bd6 watchdog/hardlock... |
223 |
|
115ef3b7e watchdog/hardlock... |
224 225 226 227 228 229 |
/* * Required because for_each_cpu() reports unconditionally * CPU0 as set on UP kernels. Sigh. */ if (event) perf_event_release_kernel(event); |
9c388a5ed watchdog/harclock... |
230 |
per_cpu(dead_event, cpu) = NULL; |
941154bd6 watchdog/hardlock... |
231 232 233 234 235 |
} cpumask_clear(&dead_events_mask); } /** |
/**
 * hardlockup_detector_perf_stop - Globally stop watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_stop(void)
{
	int cpu;

	/* Caller must hold the CPU hotplug lock */
	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_disable(event);
	}
}

/**
 * hardlockup_detector_perf_restart - Globally restart watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_restart(void)
{
	int cpu;

	/* Caller must hold the CPU hotplug lock */
	lockdep_assert_cpus_held();

	/* Nothing to restart if the NMI watchdog is administratively off */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return;

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_enable(event);
	}
}
178b9f7a3 watchdog/hardlock... |
275 276 277 278 279 280 281 282 283 |
/** * hardlockup_detector_perf_init - Probe whether NMI event is available at all */ int __init hardlockup_detector_perf_init(void) { int ret = hardlockup_detector_event_create(); if (ret) { |
77c01d11b watchdog/hardlock... |
284 285 |
pr_info("Perf NMI watchdog permanently disabled "); |
178b9f7a3 watchdog/hardlock... |
286 287 288 289 290 291 |
} else { perf_event_release_kernel(this_cpu_read(watchdog_ev)); this_cpu_write(watchdog_ev, NULL); } return ret; } |