Commit 652953b72eea8b9d1bd6b9f67b796c6722bada3a
Merge branch 'oprofile/core' (early part) into oprofile/perf
Conflicts: arch/arm/oprofile/common.c
Signed-off-by: Robert Richter <robert.richter@amd.com>
Showing 2 changed files Side-by-side Diff
arch/x86/oprofile/backtrace.c
... | ... | @@ -14,6 +14,7 @@ |
14 | 14 | #include <asm/ptrace.h> |
15 | 15 | #include <asm/uaccess.h> |
16 | 16 | #include <asm/stacktrace.h> |
17 | +#include <linux/compat.h> | |
17 | 18 | |
18 | 19 | static void backtrace_warning_symbol(void *data, char *msg, |
19 | 20 | unsigned long symbol) |
20 | 21 | |
... | ... | @@ -48,14 +49,12 @@ |
48 | 49 | .walk_stack = print_context_stack, |
49 | 50 | }; |
50 | 51 | |
51 | -struct frame_head { | |
52 | - struct frame_head *bp; | |
53 | - unsigned long ret; | |
54 | -} __attribute__((packed)); | |
55 | - | |
56 | -static struct frame_head *dump_user_backtrace(struct frame_head *head) | |
52 | +#ifdef CONFIG_COMPAT | |
53 | +static struct stack_frame_ia32 * | |
54 | +dump_user_backtrace_32(struct stack_frame_ia32 *head) | |
57 | 55 | { |
58 | - struct frame_head bufhead[2]; | |
56 | + struct stack_frame_ia32 bufhead[2]; | |
57 | + struct stack_frame_ia32 *fp; | |
59 | 58 | |
60 | 59 | /* Also check accessibility of one struct frame_head beyond */ |
61 | 60 | if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) |
62 | 61 | return NULL; |
... | ... | @@ -63,20 +62,66 @@ |
63 | 62 | if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) |
64 | 63 | return NULL; |
65 | 64 | |
66 | - oprofile_add_trace(bufhead[0].ret); | |
65 | + fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); | |
67 | 66 | |
67 | + oprofile_add_trace(bufhead[0].return_address); | |
68 | + | |
68 | 69 | /* frame pointers should strictly progress back up the stack |
70 | + * (towards higher addresses) */ | |
71 | + if (head >= fp) | |
72 | + return NULL; | |
73 | + | |
74 | + return fp; | |
75 | +} | |
76 | + | |
77 | +static inline int | |
78 | +x86_backtrace_32(struct pt_regs * const regs, unsigned int depth) | |
79 | +{ | |
80 | + struct stack_frame_ia32 *head; | |
81 | + | |
82 | + /* User process is 32-bit */ | |
83 | + if (!current || !test_thread_flag(TIF_IA32)) | |
84 | + return 0; | |
85 | + | |
86 | + head = (struct stack_frame_ia32 *) regs->bp; | |
87 | + while (depth-- && head) | |
88 | + head = dump_user_backtrace_32(head); | |
89 | + | |
90 | + return 1; | |
91 | +} | |
92 | + | |
93 | +#else | |
94 | +static inline int | |
95 | +x86_backtrace_32(struct pt_regs * const regs, unsigned int depth) | |
96 | +{ | |
97 | + return 0; | |
98 | +} | |
99 | +#endif /* CONFIG_COMPAT */ | |
100 | + | |
101 | +static struct stack_frame *dump_user_backtrace(struct stack_frame *head) | |
102 | +{ | |
103 | + struct stack_frame bufhead[2]; | |
104 | + | |
105 | + /* Also check accessibility of one struct stack_frame beyond */ | |
106 | + if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) | |
107 | + return NULL; | |
108 | + if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) | |
109 | + return NULL; | |
110 | + | |
111 | + oprofile_add_trace(bufhead[0].return_address); | |
112 | + | |
113 | + /* frame pointers should strictly progress back up the stack | |
69 | 114 | * (towards higher addresses) */ |
70 | - if (head >= bufhead[0].bp) | |
115 | + if (head >= bufhead[0].next_frame) | |
71 | 116 | return NULL; |
72 | 117 | |
73 | - return bufhead[0].bp; | |
118 | + return bufhead[0].next_frame; | |
74 | 119 | } |
75 | 120 | |
76 | 121 | void |
77 | 122 | x86_backtrace(struct pt_regs * const regs, unsigned int depth) |
78 | 123 | { |
79 | - struct frame_head *head = (struct frame_head *)frame_pointer(regs); | |
124 | + struct stack_frame *head = (struct stack_frame *)frame_pointer(regs); | |
80 | 125 | |
81 | 126 | if (!user_mode_vm(regs)) { |
82 | 127 | unsigned long stack = kernel_stack_pointer(regs); |
... | ... | @@ -85,6 +130,9 @@ |
85 | 130 | &backtrace_ops, &depth); |
86 | 131 | return; |
87 | 132 | } |
133 | + | |
134 | + if (x86_backtrace_32(regs, depth)) | |
135 | + return; | |
88 | 136 | |
89 | 137 | while (depth-- && head) |
90 | 138 | head = dump_user_backtrace(head); |
arch/x86/oprofile/nmi_int.c
... | ... | @@ -695,9 +695,6 @@ |
695 | 695 | return 1; |
696 | 696 | } |
697 | 697 | |
698 | -/* in order to get sysfs right */ | |
699 | -static int using_nmi; | |
700 | - | |
701 | 698 | int __init op_nmi_init(struct oprofile_operations *ops) |
702 | 699 | { |
703 | 700 | __u8 vendor = boot_cpu_data.x86_vendor; |
... | ... | @@ -705,8 +702,6 @@ |
705 | 702 | char *cpu_type = NULL; |
706 | 703 | int ret = 0; |
707 | 704 | |
708 | - using_nmi = 0; | |
709 | - | |
710 | 705 | if (!cpu_has_apic) |
711 | 706 | return -ENODEV; |
712 | 707 | |
713 | 708 | |
... | ... | @@ -790,14 +785,12 @@ |
790 | 785 | if (ret) |
791 | 786 | return ret; |
792 | 787 | |
793 | - using_nmi = 1; | |
794 | 788 | printk(KERN_INFO "oprofile: using NMI interrupt.\n"); |
795 | 789 | return 0; |
796 | 790 | } |
797 | 791 | |
798 | 792 | void op_nmi_exit(void) |
799 | 793 | { |
800 | - if (using_nmi) | |
801 | - exit_sysfs(); | |
794 | + exit_sysfs(); | |
802 | 795 | } |