Blame view
lib/nmi_backtrace.c
2.33 KB
b2c0b2cbb
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 |
/*
 * NMI backtrace support
 *
 * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
 * with the following header:
 *
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 */
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>

#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

/*
 * When raise() is called it will be passed a pointer to the
 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
 * directly from their raise() functions may rely on the mask
 * they are passed being updated as a side effect of this call.
 */
/*
 * nmi_trigger_all_cpu_backtrace - dump a backtrace on every online CPU
 * @include_self: also dump a backtrace on the CPU making this call
 * @raise: arch callback that delivers the backtrace NMI/IPI to the CPUs
 *         remaining in the mask it is passed
 *
 * Serialized by @backtrace_flag: a second caller while a dump is in
 * progress simply returns.  Each target CPU clears its own bit in
 * backtrace_mask from nmi_cpu_backtrace(); we poll for up to 10 seconds
 * waiting for the mask to drain.
 */
void nmi_trigger_all_cpu_backtrace(bool include_self,
				   void (*raise)(cpumask_t *mask))
{
	int i, this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		put_cpu();
		return;
	}

	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
	if (!include_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI to %s CPUs:\n",
			(include_self ? "all" : "other"));
		raise(to_cpumask(backtrace_mask));
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		/* Busy-waiting here would otherwise trip the softlockup detector. */
		touch_softlockup_watchdog();
	}

	/*
	 * Force flush any remote buffers that might be stuck in IRQ context
	 * and therefore could not run their irq_work.
	 */
	printk_nmi_flush();

	/* Release with an unlock so the NMI writes above are visible first. */
	clear_bit_unlock(0, &backtrace_flag);
	put_cpu();
}
73 74 75 76 77 |
bool nmi_cpu_backtrace(struct pt_regs *regs) { int cpu = smp_processor_id(); if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { |
b2c0b2cbb
|
78 79 |
pr_warn("NMI backtrace for cpu %d ", cpu); |
0768330d4
|
80 81 82 83 |
if (regs) show_regs(regs); else dump_stack(); |
b2c0b2cbb
|
84 85 86 87 88 89 90 91 |
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); return true; } return false; } NOKPROBE_SYMBOL(nmi_cpu_backtrace); #endif |