Blame view
kernel/watchdog.c
20.1 KB
b24413180 License cleanup: ... |
1 |
// SPDX-License-Identifier: GPL-2.0 |
58687acba lockup_detector: ... |
2 3 4 5 6 |
/* * Detect hard and soft lockups on a system * * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. * |
86f5e6a7b watchdog: Fix cod... |
7 8 9 |
* Note: Most of this code is borrowed heavily from the original softlockup * detector, so thanks to Ingo for the initial implementation. * Some chunks also taken from the old x86-specific nmi watchdog code, thanks |
58687acba lockup_detector: ... |
10 11 |
* to those contributors as well. */ |
5f92a7b0f kernel/watchdog.c... |
12 |
#define pr_fmt(fmt) "watchdog: " fmt |
4501980aa kernel/watchdog.c... |
13 |
|
58687acba lockup_detector: ... |
14 15 16 17 |
#include <linux/mm.h> #include <linux/cpu.h> #include <linux/nmi.h> #include <linux/init.h> |
58687acba lockup_detector: ... |
18 19 |
#include <linux/module.h> #include <linux/sysctl.h> |
fe4ba3c34 watchdog: add wat... |
20 |
#include <linux/tick.h> |
e60175710 sched/headers: Pr... |
21 |
#include <linux/sched/clock.h> |
b17b01533 sched/headers: Pr... |
22 |
#include <linux/sched/debug.h> |
786340614 sched/isolation: ... |
23 |
#include <linux/sched/isolation.h> |
9cf57731b watchdog/softlock... |
24 |
#include <linux/stop_machine.h> |
58687acba lockup_detector: ... |
25 26 |
#include <asm/irq_regs.h> |
5d1c0f4a8 watchdog: add che... |
27 |
#include <linux/kvm_para.h> |
58687acba lockup_detector: ... |
28 |
|
946d19779 watchdog/core: Re... |
29 |
static DEFINE_MUTEX(watchdog_mutex); |
ab992dc38 watchdog: Fix mer... |
30 |
|
05a4a9527 kernel/watchdog: ... |
31 |
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG) |
091549858 watchdog/core: Ge... |
32 33 |
# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED) # define NMI_WATCHDOG_DEFAULT 1 |
84d56e66b watchdog: new def... |
34 |
#else |
091549858 watchdog/core: Ge... |
35 36 |
# define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED) # define NMI_WATCHDOG_DEFAULT 0 |
84d56e66b watchdog: new def... |
37 |
#endif |
05a4a9527 kernel/watchdog: ... |
38 |
|
091549858 watchdog/core: Ge... |
39 40 41 42 |
unsigned long __read_mostly watchdog_enabled; int __read_mostly watchdog_user_enabled = 1; int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT; int __read_mostly soft_watchdog_user_enabled = 1; |
7feeb9cd4 watchdog/sysctl: ... |
43 |
int __read_mostly watchdog_thresh = 10; |
48084abf2 watchdog/core: Ma... |
44 |
static int __read_mostly nmi_watchdog_available; |
7feeb9cd4 watchdog/sysctl: ... |
45 |
|
48084abf2 watchdog/core: Ma... |
46 |
static struct cpumask watchdog_allowed_mask __read_mostly; |
7feeb9cd4 watchdog/sysctl: ... |
47 48 49 |
struct cpumask watchdog_cpumask __read_mostly; unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); |
05a4a9527 kernel/watchdog: ... |
50 |
#ifdef CONFIG_HARDLOCKUP_DETECTOR |
f117955a2 kernel/watchdog.c... |
51 52 53 54 |
# ifdef CONFIG_SMP int __read_mostly sysctl_hardlockup_all_cpu_backtrace; # endif /* CONFIG_SMP */ |
05a4a9527 kernel/watchdog: ... |
55 56 57 58 59 60 61 62 63 64 65 66 67 |
/* * Should we panic when a soft-lockup or hard-lockup occurs: */ unsigned int __read_mostly hardlockup_panic = CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; /* * We may not want to enable hard lockup detection by default in all cases, * for example when running the kernel as a guest on a hypervisor. In these * cases this function can be called to disable hard lockup detection. This * function should only be executed once by the boot processor before the * kernel command line parameters are parsed, because otherwise it is not * possible to override this in hardlockup_panic_setup(). */ |
7a3558200 watchdog/core: Ma... |
68 |
void __init hardlockup_detector_disable(void) |
05a4a9527 kernel/watchdog: ... |
69 |
{ |
091549858 watchdog/core: Ge... |
70 |
nmi_watchdog_user_enabled = 0; |
05a4a9527 kernel/watchdog: ... |
71 72 73 74 75 76 77 78 79 |
} static int __init hardlockup_panic_setup(char *str) { if (!strncmp(str, "panic", 5)) hardlockup_panic = 1; else if (!strncmp(str, "nopanic", 7)) hardlockup_panic = 0; else if (!strncmp(str, "0", 1)) |
091549858 watchdog/core: Ge... |
80 |
nmi_watchdog_user_enabled = 0; |
05a4a9527 kernel/watchdog: ... |
81 |
else if (!strncmp(str, "1", 1)) |
091549858 watchdog/core: Ge... |
82 |
nmi_watchdog_user_enabled = 1; |
05a4a9527 kernel/watchdog: ... |
83 84 85 |
return 1; } __setup("nmi_watchdog=", hardlockup_panic_setup); |
368a7e2ce watchdog/core: Cl... |
86 |
#endif /* CONFIG_HARDLOCKUP_DETECTOR */ |
05a4a9527 kernel/watchdog: ... |
87 |
|
ec6a90661 watchdog: rename ... |
88 |
/* |
05a4a9527 kernel/watchdog: ... |
89 90 |
* These functions can be overridden if an architecture implements its * own hardlockup detector. |
a10a842ff kernel/watchdog: ... |
91 92 93 94 |
* * watchdog_nmi_enable/disable can be implemented to start and stop when * softlockup watchdog threads start and stop. The arch must select the * SOFTLOCKUP_DETECTOR Kconfig. |
05a4a9527 kernel/watchdog: ... |
95 96 97 |
*/ int __weak watchdog_nmi_enable(unsigned int cpu) { |
146c9d0e9 watchdog/hardlock... |
98 |
hardlockup_detector_perf_enable(); |
05a4a9527 kernel/watchdog: ... |
99 100 |
return 0; } |
941154bd6 watchdog/hardlock... |
101 |
|
05a4a9527 kernel/watchdog: ... |
102 103 |
void __weak watchdog_nmi_disable(unsigned int cpu) { |
941154bd6 watchdog/hardlock... |
104 |
hardlockup_detector_perf_disable(); |
05a4a9527 kernel/watchdog: ... |
105 |
} |
a994a3147 watchdog/hardlock... |
106 107 108 109 110 |
/* Return 0, if a NMI watchdog is available. Error code otherwise */ int __weak __init watchdog_nmi_probe(void) { return hardlockup_detector_perf_init(); } |
6592ad2fc watchdog/core, po... |
111 |
/** |
6b9dc4806 watchdog/core, po... |
112 |
* watchdog_nmi_stop - Stop the watchdog for reconfiguration |
6592ad2fc watchdog/core, po... |
113 |
* |
6b9dc4806 watchdog/core, po... |
114 115 |
* The reconfiguration steps are: * watchdog_nmi_stop(); |
6592ad2fc watchdog/core, po... |
116 |
* update_variables(); |
6b9dc4806 watchdog/core, po... |
117 118 119 120 121 122 |
* watchdog_nmi_start(); */ void __weak watchdog_nmi_stop(void) { } /** * watchdog_nmi_start - Start the watchdog after reconfiguration |
6592ad2fc watchdog/core, po... |
123 |
* |
6b9dc4806 watchdog/core, po... |
124 125 126 127 |
* Counterpart to watchdog_nmi_stop(). * * The following variables have been updated in update_variables() and * contain the currently valid configuration: |
7feeb9cd4 watchdog/sysctl: ... |
128 |
* - watchdog_enabled |
a10a842ff kernel/watchdog: ... |
129 130 |
* - watchdog_thresh * - watchdog_cpumask |
a10a842ff kernel/watchdog: ... |
131 |
*/ |
6b9dc4806 watchdog/core, po... |
132 |
void __weak watchdog_nmi_start(void) { } |
a10a842ff kernel/watchdog: ... |
133 |
|
091549858 watchdog/core: Ge... |
134 135 136 137 138 139 140 141 142 143 144 |
/** * lockup_detector_update_enable - Update the sysctl enable bit * * Caller needs to make sure that the NMI/perf watchdogs are off, so this * can't race with watchdog_nmi_disable(). */ static void lockup_detector_update_enable(void) { watchdog_enabled = 0; if (!watchdog_user_enabled) return; |
a994a3147 watchdog/hardlock... |
145 |
if (nmi_watchdog_available && nmi_watchdog_user_enabled) |
091549858 watchdog/core: Ge... |
146 147 148 149 |
watchdog_enabled |= NMI_WATCHDOG_ENABLED; if (soft_watchdog_user_enabled) watchdog_enabled |= SOFT_WATCHDOG_ENABLED; } |
#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Sentinel timestamp meaning "touched, skip the next softlockup check". */
#define SOFTLOCKUP_RESET	ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

/* Per-CPU softlockup bookkeeping. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
/* Bit 0 claims the right to dump all-CPU backtraces for one report. */
static unsigned long soft_lockup_nmi_warn;

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we couple
 * the thresholds with a factor: we make the soft threshold twice the amount of
 * time the hard threshold is.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}
03e0d4610 watchdog: introdu... |
229 230 231 232 233 234 235 236 |
/** * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls * * Call when the scheduler may have stalled for legitimate reasons * preventing the watchdog task from executing - e.g. the scheduler * entering idle state. This should only be used for scheduler events. * Use touch_softlockup_watchdog() for everything else. */ |
cb9d7fd51 watchdog: Mark wa... |
237 |
notrace void touch_softlockup_watchdog_sched(void) |
58687acba lockup_detector: ... |
238 |
{ |
7861144b8 kernel/watchdog.c... |
239 240 241 242 |
/* * Preemption can be enabled. It doesn't matter which CPU's timestamp * gets zeroed here, so use the raw_ operation. */ |
11e31f608 watchdog/softlock... |
243 |
raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET); |
58687acba lockup_detector: ... |
244 |
} |
03e0d4610 watchdog: introdu... |
245 |
|
cb9d7fd51 watchdog: Mark wa... |
246 |
notrace void touch_softlockup_watchdog(void) |
03e0d4610 watchdog: introdu... |
247 248 |
{ touch_softlockup_watchdog_sched(); |
82607adcf workqueue: implem... |
249 |
wq_watchdog_touch(raw_smp_processor_id()); |
03e0d4610 watchdog: introdu... |
250 |
} |
0167c7819 watchdog: Export ... |
251 |
EXPORT_SYMBOL(touch_softlockup_watchdog); |
58687acba lockup_detector: ... |
252 |
|
332fbdbca lockup_detector: ... |
253 |
void touch_all_softlockup_watchdogs(void) |
58687acba lockup_detector: ... |
254 255 256 257 |
{ int cpu; /* |
d57108d4f watchdog/core: Ge... |
258 259 260 261 262 263 264 |
* watchdog_mutex cannpt be taken here, as this might be called * from (soft)interrupt context, so the access to * watchdog_allowed_cpumask might race with a concurrent update. * * The watchdog time stamp can race against a concurrent real * update as well, the only side effect might be a cycle delay for * the softlockup check. |
58687acba lockup_detector: ... |
265 |
*/ |
d57108d4f watchdog/core: Ge... |
266 |
for_each_cpu(cpu, &watchdog_allowed_mask) |
11e31f608 watchdog/softlock... |
267 |
per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET; |
82607adcf workqueue: implem... |
268 |
wq_watchdog_touch(-1); |
58687acba lockup_detector: ... |
269 |
} |
58687acba lockup_detector: ... |
270 271 |
void touch_softlockup_watchdog_sync(void) { |
f7f66b05a watchdog: Replace... |
272 |
__this_cpu_write(softlockup_touch_sync, true); |
11e31f608 watchdog/softlock... |
273 |
__this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET); |
58687acba lockup_detector: ... |
274 |
} |
26e09c6ee lockup_detector: ... |
275 |
static int is_softlockup(unsigned long touch_ts) |
58687acba lockup_detector: ... |
276 |
{ |
c06b4f194 watchdog: Use loc... |
277 |
unsigned long now = get_timestamp(); |
58687acba lockup_detector: ... |
278 |
|
39d2da216 kernel/watchdog.c... |
279 |
if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh){ |
195daf665 watchdog: enable ... |
280 281 282 283 |
/* Warn about unreasonable delays. */ if (time_after(now, touch_ts + get_softlockup_thresh())) return now - touch_ts; } |
58687acba lockup_detector: ... |
284 285 |
return 0; } |
05a4a9527 kernel/watchdog: ... |
286 287 |
/* watchdog detector functions */ bool is_hardlockup(void) |
58687acba lockup_detector: ... |
288 |
{ |
05a4a9527 kernel/watchdog: ... |
289 |
unsigned long hrint = __this_cpu_read(hrtimer_interrupts); |
bcd951cf1 watchdog: Use hot... |
290 |
|
05a4a9527 kernel/watchdog: ... |
291 292 293 294 295 |
if (__this_cpu_read(hrtimer_interrupts_saved) == hrint) return true; __this_cpu_write(hrtimer_interrupts_saved, hrint); return false; |
73ce0511c kernel/watchdog.c... |
296 |
} |
05a4a9527 kernel/watchdog: ... |
297 298 |
static void watchdog_interrupt_count(void) |
73ce0511c kernel/watchdog.c... |
299 |
{ |
05a4a9527 kernel/watchdog: ... |
300 |
__this_cpu_inc(hrtimer_interrupts); |
73ce0511c kernel/watchdog.c... |
301 |
} |
58687acba lockup_detector: ... |
302 |
|
be45bf539 watchdog/softlock... |
303 304 |
static DEFINE_PER_CPU(struct completion, softlockup_completion); static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work); |
9cf57731b watchdog/softlock... |
305 306 307 308 309 310 311 312 313 314 |
/* * The watchdog thread function - touches the timestamp. * * It only runs once every sample_period seconds (4 seconds by * default) to reset the softlockup timestamp. If this gets delayed * for more than 2*watchdog_thresh seconds then the debug-printout * triggers in watchdog_timer_fn(). */ static int softlockup_fn(void *data) { |
9cf57731b watchdog/softlock... |
315 |
__touch_watchdog(); |
be45bf539 watchdog/softlock... |
316 |
complete(this_cpu_ptr(&softlockup_completion)); |
9cf57731b watchdog/softlock... |
317 318 319 |
return 0; } |
58687acba lockup_detector: ... |
320 321 322 |
/* watchdog kicker functions */ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) { |
909ea9646 core: Replace __g... |
323 |
unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts); |
58687acba lockup_detector: ... |
324 325 |
struct pt_regs *regs = get_irq_regs(); int duration; |
ed235875e kernel/watchdog.c... |
326 |
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; |
58687acba lockup_detector: ... |
327 |
|
01f0a0270 watchdog/core: Re... |
328 |
if (!watchdog_enabled) |
b94f51183 kernel/watchdog: ... |
329 |
return HRTIMER_NORESTART; |
58687acba lockup_detector: ... |
330 331 332 333 |
/* kick the hardlockup detector */ watchdog_interrupt_count(); /* kick the softlockup detector */ |
be45bf539 watchdog/softlock... |
334 335 336 337 338 339 |
if (completion_done(this_cpu_ptr(&softlockup_completion))) { reinit_completion(this_cpu_ptr(&softlockup_completion)); stop_one_cpu_nowait(smp_processor_id(), softlockup_fn, NULL, this_cpu_ptr(&softlockup_stop_work)); } |
58687acba lockup_detector: ... |
340 341 |
/* .. and repeat */ |
0f34c4009 watchdog: store t... |
342 |
hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period)); |
58687acba lockup_detector: ... |
343 |
|
11e31f608 watchdog/softlock... |
344 |
if (touch_ts == SOFTLOCKUP_RESET) { |
909ea9646 core: Replace __g... |
345 |
if (unlikely(__this_cpu_read(softlockup_touch_sync))) { |
58687acba lockup_detector: ... |
346 347 348 349 |
/* * If the time stamp was touched atomically * make sure the scheduler tick is up to date. */ |
909ea9646 core: Replace __g... |
350 |
__this_cpu_write(softlockup_touch_sync, false); |
58687acba lockup_detector: ... |
351 352 |
sched_clock_tick(); } |
5d1c0f4a8 watchdog: add che... |
353 354 355 |
/* Clear the guest paused flag on watchdog reset */ kvm_check_and_clear_guest_paused(); |
58687acba lockup_detector: ... |
356 357 358 359 360 361 362 363 364 365 |
__touch_watchdog(); return HRTIMER_RESTART; } /* check for a softlockup * This is done by making sure a high priority task is * being scheduled. The task touches the watchdog to * indicate it is getting cpu time. If it hasn't then * this is a good indication some task is hogging the cpu */ |
26e09c6ee lockup_detector: ... |
366 |
duration = is_softlockup(touch_ts); |
58687acba lockup_detector: ... |
367 |
if (unlikely(duration)) { |
5d1c0f4a8 watchdog: add che... |
368 369 370 371 372 373 374 |
/* * If a virtual machine is stopped by the host it can look to * the watchdog like a soft lockup, check to see if the host * stopped the vm before we issue the warning */ if (kvm_check_and_clear_guest_paused()) return HRTIMER_RESTART; |
58687acba lockup_detector: ... |
375 |
/* only warn once */ |
3a51449b7 watchdog/softlock... |
376 |
if (__this_cpu_read(soft_watchdog_warn) == true) |
58687acba lockup_detector: ... |
377 |
return HRTIMER_RESTART; |
ed235875e kernel/watchdog.c... |
378 379 380 381 382 383 384 385 386 387 |
if (softlockup_all_cpu_backtrace) { /* Prevent multiple soft-lockup reports if one cpu is already * engaged in dumping cpu back traces */ if (test_and_set_bit(0, &soft_lockup_nmi_warn)) { /* Someone else will report us. Let's give up */ __this_cpu_write(soft_watchdog_warn, true); return HRTIMER_RESTART; } } |
656c3b79f kernel/watchdog.c... |
388 389 |
pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d] ", |
26e09c6ee lockup_detector: ... |
390 |
smp_processor_id(), duration, |
58687acba lockup_detector: ... |
391 392 393 394 395 396 397 |
current->comm, task_pid_nr(current)); print_modules(); print_irqtrace_events(current); if (regs) show_regs(regs); else dump_stack(); |
ed235875e kernel/watchdog.c... |
398 399 400 401 402 403 404 405 406 407 |
if (softlockup_all_cpu_backtrace) { /* Avoid generating two back traces for current * given that one is already made above */ trigger_allbutself_cpu_backtrace(); clear_bit(0, &soft_lockup_nmi_warn); /* Barrier to sync with other cpus */ smp_mb__after_atomic(); } |
69361eef9 panic: add TAINT_... |
408 |
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); |
58687acba lockup_detector: ... |
409 410 |
if (softlockup_panic) panic("softlockup: hung tasks"); |
909ea9646 core: Replace __g... |
411 |
__this_cpu_write(soft_watchdog_warn, true); |
58687acba lockup_detector: ... |
412 |
} else |
909ea9646 core: Replace __g... |
413 |
__this_cpu_write(soft_watchdog_warn, false); |
58687acba lockup_detector: ... |
414 415 416 |
return HRTIMER_RESTART; } |
bcd951cf1 watchdog: Use hot... |
417 |
static void watchdog_enable(unsigned int cpu) |
58687acba lockup_detector: ... |
418 |
{ |
01f0a0270 watchdog/core: Re... |
419 |
struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer); |
be45bf539 watchdog/softlock... |
420 |
struct completion *done = this_cpu_ptr(&softlockup_completion); |
58687acba lockup_detector: ... |
421 |
|
9cf57731b watchdog/softlock... |
422 |
WARN_ON_ONCE(cpu != smp_processor_id()); |
be45bf539 watchdog/softlock... |
423 424 |
init_completion(done); complete(done); |
01f0a0270 watchdog/core: Re... |
425 426 427 428 |
/* * Start the timer first to prevent the NMI watchdog triggering * before the timer has a chance to fire. */ |
d2ab4cf49 watchdog: Mark wa... |
429 |
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); |
3935e8950 watchdog: Fix dis... |
430 |
hrtimer->function = watchdog_timer_fn; |
01f0a0270 watchdog/core: Re... |
431 |
hrtimer_start(hrtimer, ns_to_ktime(sample_period), |
d2ab4cf49 watchdog: Mark wa... |
432 |
HRTIMER_MODE_REL_PINNED_HARD); |
3935e8950 watchdog: Fix dis... |
433 |
|
01f0a0270 watchdog/core: Re... |
434 435 |
/* Initialize timestamp */ __touch_watchdog(); |
bcd951cf1 watchdog: Use hot... |
436 |
/* Enable the perf event */ |
146c9d0e9 watchdog/hardlock... |
437 438 |
if (watchdog_enabled & NMI_WATCHDOG_ENABLED) watchdog_nmi_enable(cpu); |
bcd951cf1 watchdog: Use hot... |
439 |
} |
58687acba lockup_detector: ... |
440 |
|
bcd951cf1 watchdog: Use hot... |
441 442 |
static void watchdog_disable(unsigned int cpu) { |
01f0a0270 watchdog/core: Re... |
443 |
struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer); |
58687acba lockup_detector: ... |
444 |
|
9cf57731b watchdog/softlock... |
445 |
WARN_ON_ONCE(cpu != smp_processor_id()); |
01f0a0270 watchdog/core: Re... |
446 447 448 449 450 |
/* * Disable the perf event first. That prevents that a large delay * between disabling the timer and disabling the perf event causes * the perf NMI to detect a false positive. */ |
bcd951cf1 watchdog: Use hot... |
451 |
watchdog_nmi_disable(cpu); |
01f0a0270 watchdog/core: Re... |
452 |
hrtimer_cancel(hrtimer); |
be45bf539 watchdog/softlock... |
453 |
wait_for_completion(this_cpu_ptr(&softlockup_completion)); |
58687acba lockup_detector: ... |
454 |
} |
9cf57731b watchdog/softlock... |
455 |
static int softlockup_stop_fn(void *data) |
b8900bc02 watchdog: Registe... |
456 |
{ |
9cf57731b watchdog/softlock... |
457 458 |
watchdog_disable(smp_processor_id()); return 0; |
b8900bc02 watchdog: Registe... |
459 |
} |
9cf57731b watchdog/softlock... |
460 |
static void softlockup_stop_all(void) |
bcd951cf1 watchdog: Use hot... |
461 |
{ |
9cf57731b watchdog/softlock... |
462 463 464 465 466 467 468 469 470 |
int cpu; if (!softlockup_initialized) return; for_each_cpu(cpu, &watchdog_allowed_mask) smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false); cpumask_clear(&watchdog_allowed_mask); |
bcd951cf1 watchdog: Use hot... |
471 |
} |
9cf57731b watchdog/softlock... |
472 |
static int softlockup_start_fn(void *data) |
bcd951cf1 watchdog: Use hot... |
473 |
{ |
9cf57731b watchdog/softlock... |
474 475 |
watchdog_enable(smp_processor_id()); return 0; |
bcd951cf1 watchdog: Use hot... |
476 |
} |
58687acba lockup_detector: ... |
477 |
|
9cf57731b watchdog/softlock... |
478 |
static void softlockup_start_all(void) |
2eb2527f8 watchdog/core: Cr... |
479 |
{ |
9cf57731b watchdog/softlock... |
480 |
int cpu; |
2eb2527f8 watchdog/core: Cr... |
481 |
|
9cf57731b watchdog/softlock... |
482 483 484 |
cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask); for_each_cpu(cpu, &watchdog_allowed_mask) smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false); |
2eb2527f8 watchdog/core: Cr... |
485 |
} |
9cf57731b watchdog/softlock... |
486 |
int lockup_detector_online_cpu(unsigned int cpu) |
2eb2527f8 watchdog/core: Cr... |
487 |
{ |
7dd476171 watchdog: Respect... |
488 489 |
if (cpumask_test_cpu(cpu, &watchdog_allowed_mask)) watchdog_enable(cpu); |
9cf57731b watchdog/softlock... |
490 |
return 0; |
2eb2527f8 watchdog/core: Cr... |
491 |
} |
9cf57731b watchdog/softlock... |
492 |
int lockup_detector_offline_cpu(unsigned int cpu) |
2eb2527f8 watchdog/core: Cr... |
493 |
{ |
7dd476171 watchdog: Respect... |
494 495 |
if (cpumask_test_cpu(cpu, &watchdog_allowed_mask)) watchdog_disable(cpu); |
9cf57731b watchdog/softlock... |
496 |
return 0; |
2eb2527f8 watchdog/core: Cr... |
497 |
} |
5587185dd watchdog/core: Re... |
498 |
static void lockup_detector_reconfigure(void) |
2eb2527f8 watchdog/core: Cr... |
499 |
{ |
e31d6883f watchdog/core, po... |
500 |
cpus_read_lock(); |
6b9dc4806 watchdog/core, po... |
501 |
watchdog_nmi_stop(); |
9cf57731b watchdog/softlock... |
502 503 |
softlockup_stop_all(); |
2eb2527f8 watchdog/core: Cr... |
504 |
set_sample_period(); |
091549858 watchdog/core: Ge... |
505 506 |
lockup_detector_update_enable(); if (watchdog_enabled && watchdog_thresh) |
9cf57731b watchdog/softlock... |
507 |
softlockup_start_all(); |
6b9dc4806 watchdog/core, po... |
508 |
watchdog_nmi_start(); |
e31d6883f watchdog/core, po... |
509 510 511 512 513 514 |
cpus_read_unlock(); /* * Must be called outside the cpus locked section to prevent * recursive locking in the perf code. */ __lockup_detector_cleanup(); |
2eb2527f8 watchdog/core: Cr... |
515 516 517 |
} /* |
5587185dd watchdog/core: Re... |
518 |
* Create the watchdog thread infrastructure and configure the detector(s). |
2eb2527f8 watchdog/core: Cr... |
519 520 |
* * The threads are not unparked as watchdog_allowed_mask is empty. When |
76e155246 watchdog: Fix typ... |
521 |
* the threads are successfully initialized, take the proper locks and |
2eb2527f8 watchdog/core: Cr... |
522 523 |
* unpark the threads in the watchdog_cpumask if the watchdog is enabled. */ |
5587185dd watchdog/core: Re... |
524 |
static __init void lockup_detector_setup(void) |
2eb2527f8 watchdog/core: Cr... |
525 |
{ |
2eb2527f8 watchdog/core: Cr... |
526 527 528 529 |
/* * If sysctl is off and watchdog got disabled on the command line, * nothing to do here. */ |
091549858 watchdog/core: Ge... |
530 |
lockup_detector_update_enable(); |
2eb2527f8 watchdog/core: Cr... |
531 532 533 |
if (!IS_ENABLED(CONFIG_SYSCTL) && !(watchdog_enabled && watchdog_thresh)) return; |
2eb2527f8 watchdog/core: Cr... |
534 |
mutex_lock(&watchdog_mutex); |
5587185dd watchdog/core: Re... |
535 |
lockup_detector_reconfigure(); |
9cf57731b watchdog/softlock... |
536 |
softlockup_initialized = true; |
2eb2527f8 watchdog/core: Cr... |
537 538 |
mutex_unlock(&watchdog_mutex); } |
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
/* Without the softlockup detector only the NMI watchdog is (re)configured. */
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
static inline void lockup_detector_setup(void)
{
	lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
05a4a9527 kernel/watchdog: ... |
553 |
|
941154bd6 watchdog/hardlock... |
554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 |
static void __lockup_detector_cleanup(void) { lockdep_assert_held(&watchdog_mutex); hardlockup_detector_perf_cleanup(); } /** * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes * * Caller must not hold the cpu hotplug rwsem. */ void lockup_detector_cleanup(void) { mutex_lock(&watchdog_mutex); __lockup_detector_cleanup(); mutex_unlock(&watchdog_mutex); } |
6554fd8cf watchdog/core: Pr... |
571 572 573 574 575 576 577 578 579 580 |
/** * lockup_detector_soft_poweroff - Interface to stop lockup detector(s) * * Special interface for parisc. It prevents lockup detector warnings from * the default pm_poweroff() function which busy loops forever. */ void lockup_detector_soft_poweroff(void) { watchdog_enabled = 0; } |
#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
58687acba lockup_detector: ... |
657 |
|
83a80a390 watchdog: introdu... |
658 659 660 661 |
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{
	int prev, err;

	mutex_lock(&watchdog_mutex);

	/* Snapshot the threshold so a change can be detected below. */
	prev = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	/* Only a successful write of a different value rearms the detector. */
	if (!err && write && prev != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
fe4ba3c34 watchdog: add wat... |
677 678 679 680 681 682 683 684 |
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on.  This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&watchdog_mutex);

	ret = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	/* A successfully written mask is applied to the running detectors. */
	if (!ret && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return ret;
}
58687acba lockup_detector: ... |
697 |
#endif /* CONFIG_SYSCTL */ |
004417a6d perf, arch: Clean... |
698 |
/*
 * Boot-time initialization of the lockup detectors.
 *
 * Restricts the watchdog to the housekeeping CPUs, probes for a usable
 * hard lockup (NMI) watchdog backend and then sets up the detectors.
 */
void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	/* Run the watchdog only on CPUs doing timer housekeeping. */
	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_FLAG_TIMER));

	/* A zero return from the probe means a hard lockup backend exists. */
	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;

	lockup_detector_setup();
}