Commit a406ab6d77ea86ba7c713276f30ed7058ca64e31
Merge branch 'oprofile/urgent' into HEAD
Showing 2 changed files (side-by-side diff)
arch/x86/oprofile/backtrace.c
... | ... | @@ -11,10 +11,12 @@ |
11 | 11 | #include <linux/oprofile.h> |
12 | 12 | #include <linux/sched.h> |
13 | 13 | #include <linux/mm.h> |
14 | +#include <linux/compat.h> | |
15 | +#include <linux/highmem.h> | |
16 | + | |
14 | 17 | #include <asm/ptrace.h> |
15 | 18 | #include <asm/uaccess.h> |
16 | 19 | #include <asm/stacktrace.h> |
17 | -#include <linux/compat.h> | |
18 | 20 | |
19 | 21 | static int backtrace_stack(void *data, char *name) |
20 | 22 | { |
21 | 23 | |
22 | 24 | |
23 | 25 | |
24 | 26 | |
... | ... | @@ -36,18 +38,54 @@ |
36 | 38 | .walk_stack = print_context_stack, |
37 | 39 | }; |
38 | 40 | |
41 | +/* from arch/x86/kernel/cpu/perf_event.c: */ | |
42 | + | |
43 | +/* | |
44 | + * best effort, GUP based copy_from_user() that assumes IRQ or NMI context | |
45 | + */ | |
46 | +static unsigned long | |
47 | +copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |
48 | +{ | |
49 | + unsigned long offset, addr = (unsigned long)from; | |
50 | + unsigned long size, len = 0; | |
51 | + struct page *page; | |
52 | + void *map; | |
53 | + int ret; | |
54 | + | |
55 | + do { | |
56 | + ret = __get_user_pages_fast(addr, 1, 0, &page); | |
57 | + if (!ret) | |
58 | + break; | |
59 | + | |
60 | + offset = addr & (PAGE_SIZE - 1); | |
61 | + size = min(PAGE_SIZE - offset, n - len); | |
62 | + | |
63 | + map = kmap_atomic(page); | |
64 | + memcpy(to, map+offset, size); | |
65 | + kunmap_atomic(map); | |
66 | + put_page(page); | |
67 | + | |
68 | + len += size; | |
69 | + to += size; | |
70 | + addr += size; | |
71 | + | |
72 | + } while (len < n); | |
73 | + | |
74 | + return len; | |
75 | +} | |
76 | + | |
39 | 77 | #ifdef CONFIG_COMPAT |
40 | 78 | static struct stack_frame_ia32 * |
41 | 79 | dump_user_backtrace_32(struct stack_frame_ia32 *head) |
42 | 80 | { |
81 | + /* Also check accessibility of one struct frame_head beyond: */ | |
43 | 82 | struct stack_frame_ia32 bufhead[2]; |
44 | 83 | struct stack_frame_ia32 *fp; |
84 | + unsigned long bytes; | |
45 | 85 | |
46 | - /* Also check accessibility of one struct frame_head beyond */ | |
47 | - if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) | |
86 | + bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); | |
87 | + if (bytes != sizeof(bufhead)) | |
48 | 88 | return NULL; |
49 | - if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) | |
50 | - return NULL; | |
51 | 89 | |
52 | 90 | fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame); |
53 | 91 | |
54 | 92 | |
55 | 93 | |
... | ... | @@ -87,12 +125,12 @@ |
87 | 125 | |
88 | 126 | static struct stack_frame *dump_user_backtrace(struct stack_frame *head) |
89 | 127 | { |
128 | + /* Also check accessibility of one struct frame_head beyond: */ | |
90 | 129 | struct stack_frame bufhead[2]; |
130 | + unsigned long bytes; | |
91 | 131 | |
92 | - /* Also check accessibility of one struct stack_frame beyond */ | |
93 | - if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) | |
94 | - return NULL; | |
95 | - if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) | |
132 | + bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead)); | |
133 | + if (bytes != sizeof(bufhead)) | |
96 | 134 | return NULL; |
97 | 135 | |
98 | 136 | oprofile_add_trace(bufhead[0].return_address); |
arch/x86/oprofile/nmi_int.c
... | ... | @@ -112,8 +112,10 @@ |
112 | 112 | static int nmi_start(void) |
113 | 113 | { |
114 | 114 | get_online_cpus(); |
115 | - on_each_cpu(nmi_cpu_start, NULL, 1); | |
116 | 115 | ctr_running = 1; |
116 | + /* make ctr_running visible to the nmi handler: */ | |
117 | + smp_mb(); | |
118 | + on_each_cpu(nmi_cpu_start, NULL, 1); | |
117 | 119 | put_online_cpus(); |
118 | 120 | return 0; |
119 | 121 | } |
120 | 122 | |
121 | 123 | |
... | ... | @@ -504,15 +506,18 @@ |
504 | 506 | |
505 | 507 | nmi_enabled = 0; |
506 | 508 | ctr_running = 0; |
507 | - barrier(); | |
509 | + /* make variables visible to the nmi handler: */ | |
510 | + smp_mb(); | |
508 | 511 | err = register_die_notifier(&profile_exceptions_nb); |
509 | 512 | if (err) |
510 | 513 | goto fail; |
511 | 514 | |
512 | 515 | get_online_cpus(); |
513 | 516 | register_cpu_notifier(&oprofile_cpu_nb); |
514 | - on_each_cpu(nmi_cpu_setup, NULL, 1); | |
515 | 517 | nmi_enabled = 1; |
518 | + /* make nmi_enabled visible to the nmi handler: */ | |
519 | + smp_mb(); | |
520 | + on_each_cpu(nmi_cpu_setup, NULL, 1); | |
516 | 521 | put_online_cpus(); |
517 | 522 | |
518 | 523 | return 0; |
... | ... | @@ -531,7 +536,8 @@ |
531 | 536 | nmi_enabled = 0; |
532 | 537 | ctr_running = 0; |
533 | 538 | put_online_cpus(); |
534 | - barrier(); | |
539 | + /* make variables visible to the nmi handler: */ | |
540 | + smp_mb(); | |
535 | 541 | unregister_die_notifier(&profile_exceptions_nb); |
536 | 542 | msrs = &get_cpu_var(cpu_msrs); |
537 | 543 | model->shutdown(msrs); |