include/linux/nmi.h
/*
 *  linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H

#include <linux/sched.h>
#include <asm/irq.h>

/**
 * touch_nmi_watchdog - restart NMI watchdog timeout.
 *
 * If the architecture supports the NMI watchdog, touch_nmi_watchdog()
 * may be used to reset the timeout - for code which intentionally
 * disables interrupts for a long time. This call is stateless.
 */
#if defined(ARCH_HAS_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
#include <asm/nmi.h>
extern void touch_nmi_watchdog(void);
#else
static inline void touch_nmi_watchdog(void)
{
	touch_softlockup_watchdog();
}
#endif

/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism:
 */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace();

	return true;
}
#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
#endif

#ifdef CONFIG_LOCKUP_DETECTOR
int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int watchdog_enabled;
extern int watchdog_thresh;
struct ctl_table;
extern int proc_dowatchdog(struct ctl_table *, int ,
			   void __user *, size_t *, loff_t *);
#endif

#endif
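
The kerneldoc above touch_nmi_watchdog() describes its intended use: code that deliberately runs with interrupts disabled for a long time should poke the watchdog so the hard lockup detector does not flag a false lockup. A minimal sketch of that pattern, assuming a hypothetical driver poll loop (slow_hw_poll() and MY_DEVICE_READY are made up for illustration):

/*
 * Sketch only: slow_hw_poll() and MY_DEVICE_READY are hypothetical.
 * A long busy-wait with interrupts off calls touch_nmi_watchdog()
 * each iteration so the intentional delay is not reported as a
 * hard lockup.
 */
#include <linux/nmi.h>
#include <linux/io.h>

static void slow_hw_poll(void __iomem *status_reg)
{
	/* caller is assumed to have disabled interrupts */
	while (!(readl(status_reg) & MY_DEVICE_READY)) {
		touch_nmi_watchdog();	/* restart the NMI watchdog timeout */
		cpu_relax();
	}
}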
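
trigger_all_cpu_backtrace() returns whether arch support was available so that callers can fall back to some other mechanism, as the comment in the header says. A minimal sketch of such a caller, assuming dump_stack() on the local CPU as the fallback:

/*
 * Sketch only: illustrates the fallback pattern the header comment
 * describes. If the architecture provides
 * arch_trigger_all_cpu_backtrace(), all CPUs are asked to dump their
 * stacks; otherwise only the current CPU's stack is dumped.
 */
#include <linux/nmi.h>
#include <linux/kernel.h>
#include <linux/printk.h>

static void report_stuck_cpus(void)
{
	if (!trigger_all_cpu_backtrace()) {
		pr_warn("no all-CPU backtrace support, dumping current CPU only\n");
		dump_stack();
	}
}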