lib/dump_stack.c

/*
 * Provide a default dump_stack() function for architectures
 * which don't implement their own.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/atomic.h>

static void __dump_stack(void)
{
	dump_stack_print_info(KERN_DEFAULT);	/* task, CPU and kernel-version header */
	show_stack(NULL, NULL);			/* stack trace of the current task */
}

/**
 * dump_stack - dump the current task information and its stack trace
 *
 * Architectures can override this implementation by implementing their own.
 */
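/*
 * On SMP, concurrent dumps are serialised through dump_lock below: it
 * stores the id of the CPU currently dumping, or -1 when no dump is in
 * flight.
 */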
#ifdef CONFIG_SMP
static atomic_t dump_lock = ATOMIC_INIT(-1);

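/* asmlinkage/__visible: C linkage for assembly callers, kept visible under LTO. */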
asmlinkage __visible void dump_stack(void)
{
	unsigned long flags;
	int was_locked;
	int old;
	int cpu;

	/*
	 * Permit this cpu to perform nested stack dumps while serialising
	 * against other CPUs
	 */
retry:
	local_irq_save(flags);
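	/*
	 * Claim dump_lock by publishing this CPU's id. Three outcomes: the
	 * lock was free (old == -1) and is now ours; this CPU already holds
	 * it (old == cpu), i.e. a nested dump; or another CPU holds it.
	 */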
	cpu = smp_processor_id();
	old = atomic_cmpxchg(&dump_lock, -1, cpu);
	if (old == -1) {
		was_locked = 0;
	} else if (old == cpu) {
		was_locked = 1;
	} else {
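		/* Another CPU is dumping: re-enable interrupts and retry. */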
		local_irq_restore(flags);
		cpu_relax();
		goto retry;
	}

	__dump_stack();

	if (!was_locked)
		atomic_set(&dump_lock, -1);	/* only the outermost dump releases */

	local_irq_restore(flags);
}
#else
asmlinkage __visible void dump_stack(void)
{
	__dump_stack();
}
#endif
EXPORT_SYMBOL(dump_stack);
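
The owner-id lock above is a general pattern: a critical section that must be exclusive across executors yet re-enterable by whoever currently holds it. As a rough illustration, the hypothetical userspace sketch below reproduces the same logic with C11 atomics and POSIX threads. The names (dump, worker, nested_owner_lock.c) are invented for the example, small integers stand in for CPU ids, and the kernel's local_irq_save()/restore() pairing is dropped because the sketch has no interrupt context.

/* nested_owner_lock.c - hypothetical userspace demo of the dump_lock pattern.
 * Build: cc -std=c11 -pthread nested_owner_lock.c
 */
#include <stdatomic.h>
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>

static atomic_long dump_lock = -1;	/* -1 means unlocked, else owner id */

static void dump(long self, int depth)
{
	long old;
	int was_locked;

retry:
	old = -1;
	if (atomic_compare_exchange_strong(&dump_lock, &old, self)) {
		was_locked = 0;		/* lock was free and is now ours */
	} else if (old == self) {
		was_locked = 1;		/* nested call from the current owner */
	} else {
		sched_yield();		/* another thread owns it: back off */
		goto retry;
	}

	printf("owner %ld dumping at depth %d\n", self, depth);
	if (depth > 0)
		dump(self, depth - 1);	/* nested dumps are permitted */

	if (!was_locked)
		atomic_store(&dump_lock, -1);	/* outermost call releases */
}

static void *worker(void *arg)
{
	dump((long)(intptr_t)arg, 2);	/* a small integer stands in for a CPU id */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)(intptr_t)1);
	pthread_create(&b, NULL, worker, (void *)(intptr_t)2);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

As in the kernel version, only the outermost call by a given owner clears the lock, so nested dumps never self-deadlock while other owners still serialise behind the compare-and-swap.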