Commit f92f6e6ee35d2779aa62e70f78ad8e1cd417eb52

Authored by Ingo Molnar

Merge branch 'core' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile into perf/core

Showing 18 changed files Side-by-side Diff

arch/arm/kernel/perf_event.c
... ... @@ -123,6 +123,12 @@
123 123 }
124 124 EXPORT_SYMBOL_GPL(armpmu_get_max_events);
125 125  
  126 +int perf_num_counters(void)
  127 +{
  128 + return armpmu_get_max_events();
  129 +}
  130 +EXPORT_SYMBOL_GPL(perf_num_counters);
  131 +
126 132 #define HW_OP_UNSUPPORTED 0xFFFF
127 133  
128 134 #define C(_x) \
arch/arm/oprofile/Makefile
... ... @@ -6,5 +6,9 @@
6 6 oprofilefs.o oprofile_stats.o \
7 7 timer_int.o )
8 8  
  9 +ifeq ($(CONFIG_HW_PERF_EVENTS),y)
  10 +DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
  11 +endif
  12 +
9 13 oprofile-y := $(DRIVER_OBJS) common.o
arch/arm/oprofile/common.c
... ... @@ -25,139 +25,10 @@
25 25 #include <asm/ptrace.h>
26 26  
27 27 #ifdef CONFIG_HW_PERF_EVENTS
28   -/*
29   - * Per performance monitor configuration as set via oprofilefs.
30   - */
31   -struct op_counter_config {
32   - unsigned long count;
33   - unsigned long enabled;
34   - unsigned long event;
35   - unsigned long unit_mask;
36   - unsigned long kernel;
37   - unsigned long user;
38   - struct perf_event_attr attr;
39   -};
40   -
41   -static int op_arm_enabled;
42   -static DEFINE_MUTEX(op_arm_mutex);
43   -
44   -static struct op_counter_config *counter_config;
45   -static struct perf_event **perf_events[nr_cpumask_bits];
46   -static int perf_num_counters;
47   -
48   -/*
49   - * Overflow callback for oprofile.
50   - */
51   -static void op_overflow_handler(struct perf_event *event, int unused,
52   - struct perf_sample_data *data, struct pt_regs *regs)
  28 +char *op_name_from_perf_id(void)
53 29 {
54   - int id;
55   - u32 cpu = smp_processor_id();
  30 + enum arm_perf_pmu_ids id = armpmu_get_pmu_id();
56 31  
57   - for (id = 0; id < perf_num_counters; ++id)
58   - if (perf_events[cpu][id] == event)
59   - break;
60   -
61   - if (id != perf_num_counters)
62   - oprofile_add_sample(regs, id);
63   - else
64   - pr_warning("oprofile: ignoring spurious overflow "
65   - "on cpu %u\n", cpu);
66   -}
67   -
68   -/*
69   - * Called by op_arm_setup to create perf attributes to mirror the oprofile
70   - * settings in counter_config. Attributes are created as `pinned' events and
71   - * so are permanently scheduled on the PMU.
72   - */
73   -static void op_perf_setup(void)
74   -{
75   - int i;
76   - u32 size = sizeof(struct perf_event_attr);
77   - struct perf_event_attr *attr;
78   -
79   - for (i = 0; i < perf_num_counters; ++i) {
80   - attr = &counter_config[i].attr;
81   - memset(attr, 0, size);
82   - attr->type = PERF_TYPE_RAW;
83   - attr->size = size;
84   - attr->config = counter_config[i].event;
85   - attr->sample_period = counter_config[i].count;
86   - attr->pinned = 1;
87   - }
88   -}
89   -
90   -static int op_create_counter(int cpu, int event)
91   -{
92   - int ret = 0;
93   - struct perf_event *pevent;
94   -
95   - if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
96   - return ret;
97   -
98   - pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
99   - cpu, NULL,
100   - op_overflow_handler);
101   -
102   - if (IS_ERR(pevent)) {
103   - ret = PTR_ERR(pevent);
104   - } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
105   - perf_event_release_kernel(pevent);
106   - pr_warning("oprofile: failed to enable event %d "
107   - "on CPU %d\n", event, cpu);
108   - ret = -EBUSY;
109   - } else {
110   - perf_events[cpu][event] = pevent;
111   - }
112   -
113   - return ret;
114   -}
115   -
116   -static void op_destroy_counter(int cpu, int event)
117   -{
118   - struct perf_event *pevent = perf_events[cpu][event];
119   -
120   - if (pevent) {
121   - perf_event_release_kernel(pevent);
122   - perf_events[cpu][event] = NULL;
123   - }
124   -}
125   -
126   -/*
127   - * Called by op_arm_start to create active perf events based on the
128   - * previously configured attributes.
129   - */
130   -static int op_perf_start(void)
131   -{
132   - int cpu, event, ret = 0;
133   -
134   - for_each_online_cpu(cpu) {
135   - for (event = 0; event < perf_num_counters; ++event) {
136   - ret = op_create_counter(cpu, event);
137   - if (ret)
138   - goto out;
139   - }
140   - }
141   -
142   -out:
143   - return ret;
144   -}
145   -
146   -/*
147   - * Called by op_arm_stop at the end of a profiling run.
148   - */
149   -static void op_perf_stop(void)
150   -{
151   - int cpu, event;
152   -
153   - for_each_online_cpu(cpu)
154   - for (event = 0; event < perf_num_counters; ++event)
155   - op_destroy_counter(cpu, event);
156   -}
157   -
158   -
159   -static char *op_name_from_perf_id(enum arm_perf_pmu_ids id)
160   -{
161 32 switch (id) {
162 33 case ARM_PERF_PMU_ID_XSCALE1:
163 34 return "arm/xscale1";
... ... @@ -176,116 +47,6 @@
176 47 }
177 48 }
178 49  
179   -static int op_arm_create_files(struct super_block *sb, struct dentry *root)
180   -{
181   - unsigned int i;
182   -
183   - for (i = 0; i < perf_num_counters; i++) {
184   - struct dentry *dir;
185   - char buf[4];
186   -
187   - snprintf(buf, sizeof buf, "%d", i);
188   - dir = oprofilefs_mkdir(sb, root, buf);
189   - oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
190   - oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
191   - oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
192   - oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
193   - oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
194   - oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
195   - }
196   -
197   - return 0;
198   -}
199   -
200   -static int op_arm_setup(void)
201   -{
202   - spin_lock(&oprofilefs_lock);
203   - op_perf_setup();
204   - spin_unlock(&oprofilefs_lock);
205   - return 0;
206   -}
207   -
208   -static int op_arm_start(void)
209   -{
210   - int ret = -EBUSY;
211   -
212   - mutex_lock(&op_arm_mutex);
213   - if (!op_arm_enabled) {
214   - ret = 0;
215   - op_perf_start();
216   - op_arm_enabled = 1;
217   - }
218   - mutex_unlock(&op_arm_mutex);
219   - return ret;
220   -}
221   -
222   -static void op_arm_stop(void)
223   -{
224   - mutex_lock(&op_arm_mutex);
225   - if (op_arm_enabled)
226   - op_perf_stop();
227   - op_arm_enabled = 0;
228   - mutex_unlock(&op_arm_mutex);
229   -}
230   -
231   -#ifdef CONFIG_PM
232   -static int op_arm_suspend(struct platform_device *dev, pm_message_t state)
233   -{
234   - mutex_lock(&op_arm_mutex);
235   - if (op_arm_enabled)
236   - op_perf_stop();
237   - mutex_unlock(&op_arm_mutex);
238   - return 0;
239   -}
240   -
241   -static int op_arm_resume(struct platform_device *dev)
242   -{
243   - mutex_lock(&op_arm_mutex);
244   - if (op_arm_enabled && op_perf_start())
245   - op_arm_enabled = 0;
246   - mutex_unlock(&op_arm_mutex);
247   - return 0;
248   -}
249   -
250   -static struct platform_driver oprofile_driver = {
251   - .driver = {
252   - .name = "arm-oprofile",
253   - },
254   - .resume = op_arm_resume,
255   - .suspend = op_arm_suspend,
256   -};
257   -
258   -static struct platform_device *oprofile_pdev;
259   -
260   -static int __init init_driverfs(void)
261   -{
262   - int ret;
263   -
264   - ret = platform_driver_register(&oprofile_driver);
265   - if (ret)
266   - goto out;
267   -
268   - oprofile_pdev = platform_device_register_simple(
269   - oprofile_driver.driver.name, 0, NULL, 0);
270   - if (IS_ERR(oprofile_pdev)) {
271   - ret = PTR_ERR(oprofile_pdev);
272   - platform_driver_unregister(&oprofile_driver);
273   - }
274   -
275   -out:
276   - return ret;
277   -}
278   -
279   -static void exit_driverfs(void)
280   -{
281   - platform_device_unregister(oprofile_pdev);
282   - platform_driver_unregister(&oprofile_driver);
283   -}
284   -#else
285   -static int __init init_driverfs(void) { return 0; }
286   -#define exit_driverfs() do { } while (0)
287   -#endif /* CONFIG_PM */
288   -
289 50 static int report_trace(struct stackframe *frame, void *d)
290 51 {
291 52 unsigned int *depth = d;
292 53  
293 54  
294 55  
295 56  
... ... @@ -350,74 +111,14 @@
350 111  
351 112 int __init oprofile_arch_init(struct oprofile_operations *ops)
352 113 {
353   - int cpu, ret = 0;
354   -
355   - perf_num_counters = armpmu_get_max_events();
356   -
357   - counter_config = kcalloc(perf_num_counters,
358   - sizeof(struct op_counter_config), GFP_KERNEL);
359   -
360   - if (!counter_config) {
361   - pr_info("oprofile: failed to allocate %d "
362   - "counters\n", perf_num_counters);
363   - return -ENOMEM;
364   - }
365   -
366   - ret = init_driverfs();
367   - if (ret) {
368   - kfree(counter_config);
369   - counter_config = NULL;
370   - return ret;
371   - }
372   -
373   - for_each_possible_cpu(cpu) {
374   - perf_events[cpu] = kcalloc(perf_num_counters,
375   - sizeof(struct perf_event *), GFP_KERNEL);
376   - if (!perf_events[cpu]) {
377   - pr_info("oprofile: failed to allocate %d perf events "
378   - "for cpu %d\n", perf_num_counters, cpu);
379   - while (--cpu >= 0)
380   - kfree(perf_events[cpu]);
381   - return -ENOMEM;
382   - }
383   - }
384   -
385 114 ops->backtrace = arm_backtrace;
386   - ops->create_files = op_arm_create_files;
387   - ops->setup = op_arm_setup;
388   - ops->start = op_arm_start;
389   - ops->stop = op_arm_stop;
390   - ops->shutdown = op_arm_stop;
391   - ops->cpu_type = op_name_from_perf_id(armpmu_get_pmu_id());
392 115  
393   - if (!ops->cpu_type)
394   - ret = -ENODEV;
395   - else
396   - pr_info("oprofile: using %s\n", ops->cpu_type);
397   -
398   - return ret;
  116 + return oprofile_perf_init(ops);
399 117 }
400 118  
401   -void oprofile_arch_exit(void)
  119 +void __exit oprofile_arch_exit(void)
402 120 {
403   - int cpu, id;
404   - struct perf_event *event;
405   -
406   - if (*perf_events) {
407   - for_each_possible_cpu(cpu) {
408   - for (id = 0; id < perf_num_counters; ++id) {
409   - event = perf_events[cpu][id];
410   - if (event != NULL)
411   - perf_event_release_kernel(event);
412   - }
413   - kfree(perf_events[cpu]);
414   - }
415   - }
416   -
417   - if (counter_config) {
418   - kfree(counter_config);
419   - exit_driverfs();
420   - }
  121 + oprofile_perf_exit();
421 122 }
422 123 #else
423 124 int __init oprofile_arch_init(struct oprofile_operations *ops)
... ... @@ -425,6 +126,6 @@
425 126 pr_info("oprofile: hardware counters not available\n");
426 127 return -ENODEV;
427 128 }
428   -void oprofile_arch_exit(void) {}
  129 +void __exit oprofile_arch_exit(void) {}
429 130 #endif /* CONFIG_HW_PERF_EVENTS */
... ... @@ -249,6 +249,11 @@
249 249 select PM
250 250 select PM_RUNTIME
251 251  
  252 +config CPU_HAS_PMU
  253 + depends on CPU_SH4 || CPU_SH4A
  254 + default y
  255 + bool
  256 +
252 257 if SUPERH32
253 258  
254 259 choice
... ... @@ -737,6 +742,14 @@
737 742 store-conditional (LLSC). On machines which do not have hardware
738 743 LLSC, this should be more efficient than the other alternative of
739 744 disabling interrupts around the atomic sequence.
  745 +
  746 +config HW_PERF_EVENTS
  747 + bool "Enable hardware performance counter support for perf events"
  748 + depends on PERF_EVENTS && CPU_HAS_PMU
  749 + default y
  750 + help
  751 + Enable hardware performance counter support for perf events. If
  752 + disabled, perf events will use software events only.
740 753  
741 754 source "drivers/sh/Kconfig"
742 755  
arch/sh/kernel/perf_event.c
... ... @@ -59,6 +59,24 @@
59 59 return !!sh_pmu;
60 60 }
61 61  
  62 +const char *perf_pmu_name(void)
  63 +{
  64 + if (!sh_pmu)
  65 + return NULL;
  66 +
  67 + return sh_pmu->name;
  68 +}
  69 +EXPORT_SYMBOL_GPL(perf_pmu_name);
  70 +
  71 +int perf_num_counters(void)
  72 +{
  73 + if (!sh_pmu)
  74 + return 0;
  75 +
  76 + return sh_pmu->num_events;
  77 +}
  78 +EXPORT_SYMBOL_GPL(perf_num_counters);
  79 +
62 80 /*
63 81 * Release the PMU if this is the last perf_event.
64 82 */
arch/sh/oprofile/Makefile
... ... @@ -6,5 +6,9 @@
6 6 oprofilefs.o oprofile_stats.o \
7 7 timer_int.o )
8 8  
  9 +ifeq ($(CONFIG_HW_PERF_EVENTS),y)
  10 +DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
  11 +endif
  12 +
9 13 oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
arch/sh/oprofile/common.c
... ... @@ -17,115 +17,46 @@
17 17 #include <linux/init.h>
18 18 #include <linux/errno.h>
19 19 #include <linux/smp.h>
  20 +#include <linux/perf_event.h>
20 21 #include <asm/processor.h>
21   -#include "op_impl.h"
22 22  
23   -static struct op_sh_model *model;
24   -
25   -static struct op_counter_config ctr[20];
26   -
  23 +#ifdef CONFIG_HW_PERF_EVENTS
27 24 extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
28 25  
29   -static int op_sh_setup(void)
  26 +char *op_name_from_perf_id(void)
30 27 {
31   - /* Pre-compute the values to stuff in the hardware registers. */
32   - model->reg_setup(ctr);
  28 + const char *pmu;
  29 + char buf[20];
  30 + int size;
33 31  
34   - /* Configure the registers on all cpus. */
35   - on_each_cpu(model->cpu_setup, NULL, 1);
  32 + pmu = perf_pmu_name();
  33 + if (!pmu)
  34 + return NULL;
36 35  
37   - return 0;
38   -}
  36 + size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
  37 + if (size > -1 && size < sizeof(buf))
  38 + return buf;
39 39  
40   -static int op_sh_create_files(struct super_block *sb, struct dentry *root)
41   -{
42   - int i, ret = 0;
43   -
44   - for (i = 0; i < model->num_counters; i++) {
45   - struct dentry *dir;
46   - char buf[4];
47   -
48   - snprintf(buf, sizeof(buf), "%d", i);
49   - dir = oprofilefs_mkdir(sb, root, buf);
50   -
51   - ret |= oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
52   - ret |= oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
53   - ret |= oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
54   - ret |= oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
55   -
56   - if (model->create_files)
57   - ret |= model->create_files(sb, dir);
58   - else
59   - ret |= oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
60   -
61   - /* Dummy entries */
62   - ret |= oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
63   - }
64   -
65   - return ret;
  40 + return NULL;
66 41 }
67 42  
68   -static int op_sh_start(void)
  43 +int __init oprofile_arch_init(struct oprofile_operations *ops)
69 44 {
70   - /* Enable performance monitoring for all counters. */
71   - on_each_cpu(model->cpu_start, NULL, 1);
  45 + ops->backtrace = sh_backtrace;
72 46  
73   - return 0;
  47 + return oprofile_perf_init(ops);
74 48 }
75 49  
76   -static void op_sh_stop(void)
  50 +void __exit oprofile_arch_exit(void)
77 51 {
78   - /* Disable performance monitoring for all counters. */
79   - on_each_cpu(model->cpu_stop, NULL, 1);
  52 + oprofile_perf_exit();
80 53 }
81   -
  54 +#else
82 55 int __init oprofile_arch_init(struct oprofile_operations *ops)
83 56 {
84   - struct op_sh_model *lmodel = NULL;
85   - int ret;
86   -
87   - /*
88   - * Always assign the backtrace op. If the counter initialization
89   - * fails, we fall back to the timer which will still make use of
90   - * this.
91   - */
92   - ops->backtrace = sh_backtrace;
93   -
94   - /*
95   - * XXX
96   - *
97   - * All of the SH7750/SH-4A counters have been converted to perf,
98   - * this infrastructure hook is left for other users until they've
99   - * had a chance to convert over, at which point all of this
100   - * will be deleted.
101   - */
102   -
103   - if (!lmodel)
104   - return -ENODEV;
105   - if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
106   - return -ENODEV;
107   -
108   - ret = lmodel->init();
109   - if (unlikely(ret != 0))
110   - return ret;
111   -
112   - model = lmodel;
113   -
114   - ops->setup = op_sh_setup;
115   - ops->create_files = op_sh_create_files;
116   - ops->start = op_sh_start;
117   - ops->stop = op_sh_stop;
118   - ops->cpu_type = lmodel->cpu_type;
119   -
120   - printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
121   - lmodel->cpu_type);
122   -
123   - return 0;
  57 + pr_info("oprofile: hardware counters not available\n");
  58 + return -ENODEV;
124 59 }
125   -
126   -void oprofile_arch_exit(void)
127   -{
128   - if (model && model->exit)
129   - model->exit();
130   -}
  60 +void __exit oprofile_arch_exit(void) {}
  61 +#endif /* CONFIG_HW_PERF_EVENTS */
arch/sh/oprofile/op_impl.h
1   -#ifndef __OP_IMPL_H
2   -#define __OP_IMPL_H
3   -
4   -/* Per-counter configuration as set via oprofilefs. */
5   -struct op_counter_config {
6   - unsigned long enabled;
7   - unsigned long event;
8   -
9   - unsigned long count;
10   -
11   - /* Dummy values for userspace tool compliance */
12   - unsigned long kernel;
13   - unsigned long user;
14   - unsigned long unit_mask;
15   -};
16   -
17   -/* Per-architecture configury and hooks. */
18   -struct op_sh_model {
19   - void (*reg_setup)(struct op_counter_config *);
20   - int (*create_files)(struct super_block *sb, struct dentry *dir);
21   - void (*cpu_setup)(void *dummy);
22   - int (*init)(void);
23   - void (*exit)(void);
24   - void (*cpu_start)(void *args);
25   - void (*cpu_stop)(void *args);
26   - char *cpu_type;
27   - unsigned char num_counters;
28   -};
29   -
30   -/* arch/sh/oprofile/common.c */
31   -extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
32   -
33   -#endif /* __OP_IMPL_H */
arch/x86/oprofile/backtrace.c
... ... @@ -14,6 +14,7 @@
14 14 #include <asm/ptrace.h>
15 15 #include <asm/uaccess.h>
16 16 #include <asm/stacktrace.h>
  17 +#include <linux/compat.h>
17 18  
18 19 static void backtrace_warning_symbol(void *data, char *msg,
19 20 unsigned long symbol)
20 21  
... ... @@ -48,14 +49,12 @@
48 49 .walk_stack = print_context_stack,
49 50 };
50 51  
51   -struct frame_head {
52   - struct frame_head *bp;
53   - unsigned long ret;
54   -} __attribute__((packed));
55   -
56   -static struct frame_head *dump_user_backtrace(struct frame_head *head)
  52 +#ifdef CONFIG_COMPAT
  53 +static struct stack_frame_ia32 *
  54 +dump_user_backtrace_32(struct stack_frame_ia32 *head)
57 55 {
58   - struct frame_head bufhead[2];
  56 + struct stack_frame_ia32 bufhead[2];
  57 + struct stack_frame_ia32 *fp;
59 58  
60 59 /* Also check accessibility of one struct frame_head beyond */
61 60 if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
62 61  
63 62  
64 63  
65 64  
66 65  
... ... @@ -63,20 +62,66 @@
63 62 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
64 63 return NULL;
65 64  
66   - oprofile_add_trace(bufhead[0].ret);
  65 + fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
67 66  
  67 + oprofile_add_trace(bufhead[0].return_address);
  68 +
68 69 /* frame pointers should strictly progress back up the stack
  70 + * (towards higher addresses) */
  71 + if (head >= fp)
  72 + return NULL;
  73 +
  74 + return fp;
  75 +}
  76 +
  77 +static inline int
  78 +x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
  79 +{
  80 + struct stack_frame_ia32 *head;
  81 +
  82 + /* User process is 32-bit */
  83 + if (!current || !test_thread_flag(TIF_IA32))
  84 + return 0;
  85 +
  86 + head = (struct stack_frame_ia32 *) regs->bp;
  87 + while (depth-- && head)
  88 + head = dump_user_backtrace_32(head);
  89 +
  90 + return 1;
  91 +}
  92 +
  93 +#else
  94 +static inline int
  95 +x86_backtrace_32(struct pt_regs * const regs, unsigned int depth)
  96 +{
  97 + return 0;
  98 +}
  99 +#endif /* CONFIG_COMPAT */
  100 +
  101 +static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
  102 +{
  103 + struct stack_frame bufhead[2];
  104 +
  105 + /* Also check accessibility of one struct stack_frame beyond */
  106 + if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
  107 + return NULL;
  108 + if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
  109 + return NULL;
  110 +
  111 + oprofile_add_trace(bufhead[0].return_address);
  112 +
  113 + /* frame pointers should strictly progress back up the stack
69 114 * (towards higher addresses) */
70   - if (head >= bufhead[0].bp)
  115 + if (head >= bufhead[0].next_frame)
71 116 return NULL;
72 117  
73   - return bufhead[0].bp;
  118 + return bufhead[0].next_frame;
74 119 }
75 120  
76 121 void
77 122 x86_backtrace(struct pt_regs * const regs, unsigned int depth)
78 123 {
79   - struct frame_head *head = (struct frame_head *)frame_pointer(regs);
  124 + struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
80 125  
81 126 if (!user_mode_vm(regs)) {
82 127 unsigned long stack = kernel_stack_pointer(regs);
... ... @@ -85,6 +130,9 @@
85 130 &backtrace_ops, &depth);
86 131 return;
87 132 }
  133 +
  134 + if (x86_backtrace_32(regs, depth))
  135 + return;
88 136  
89 137 while (depth-- && head)
90 138 head = dump_user_backtrace(head);
arch/x86/oprofile/nmi_int.c
... ... @@ -695,9 +695,6 @@
695 695 return 1;
696 696 }
697 697  
698   -/* in order to get sysfs right */
699   -static int using_nmi;
700   -
701 698 int __init op_nmi_init(struct oprofile_operations *ops)
702 699 {
703 700 __u8 vendor = boot_cpu_data.x86_vendor;
... ... @@ -705,8 +702,6 @@
705 702 char *cpu_type = NULL;
706 703 int ret = 0;
707 704  
708   - using_nmi = 0;
709   -
710 705 if (!cpu_has_apic)
711 706 return -ENODEV;
712 707  
713 708  
... ... @@ -790,14 +785,12 @@
790 785 if (ret)
791 786 return ret;
792 787  
793   - using_nmi = 1;
794 788 printk(KERN_INFO "oprofile: using NMI interrupt.\n");
795 789 return 0;
796 790 }
797 791  
798 792 void op_nmi_exit(void)
799 793 {
800   - if (using_nmi)
801   - exit_sysfs();
  794 + exit_sysfs();
802 795 }
drivers/oprofile/oprof.c
... ... @@ -225,26 +225,17 @@
225 225 mutex_unlock(&start_mutex);
226 226 }
227 227  
228   -int oprofile_set_backtrace(unsigned long val)
  228 +int oprofile_set_ulong(unsigned long *addr, unsigned long val)
229 229 {
230   - int err = 0;
  230 + int err = -EBUSY;
231 231  
232 232 mutex_lock(&start_mutex);
233   -
234   - if (oprofile_started) {
235   - err = -EBUSY;
236   - goto out;
  233 + if (!oprofile_started) {
  234 + *addr = val;
  235 + err = 0;
237 236 }
238   -
239   - if (!oprofile_ops.backtrace) {
240   - err = -EINVAL;
241   - goto out;
242   - }
243   -
244   - oprofile_backtrace_depth = val;
245   -
246   -out:
247 237 mutex_unlock(&start_mutex);
  238 +
248 239 return err;
249 240 }
250 241  
251 242  
... ... @@ -257,16 +248,9 @@
257 248 printk(KERN_INFO "oprofile: using timer interrupt.\n");
258 249 err = oprofile_timer_init(&oprofile_ops);
259 250 if (err)
260   - goto out_arch;
  251 + return err;
261 252 }
262   - err = oprofilefs_register();
263   - if (err)
264   - goto out_arch;
265   - return 0;
266   -
267   -out_arch:
268   - oprofile_arch_exit();
269   - return err;
  253 + return oprofilefs_register();
270 254 }
271 255  
272 256  
drivers/oprofile/oprof.h
... ... @@ -37,7 +37,7 @@
37 37 int oprofile_timer_init(struct oprofile_operations *ops);
38 38 void oprofile_timer_exit(void);
39 39  
40   -int oprofile_set_backtrace(unsigned long depth);
  40 +int oprofile_set_ulong(unsigned long *addr, unsigned long val);
41 41 int oprofile_set_timeout(unsigned long time);
42 42  
43 43 #endif /* OPROF_H */
drivers/oprofile/oprofile_files.c
... ... @@ -79,14 +79,17 @@
79 79 if (*offset)
80 80 return -EINVAL;
81 81  
  82 + if (!oprofile_ops.backtrace)
  83 + return -EINVAL;
  84 +
82 85 retval = oprofilefs_ulong_from_user(&val, buf, count);
83 86 if (retval)
84 87 return retval;
85 88  
86   - retval = oprofile_set_backtrace(val);
87   -
  89 + retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
88 90 if (retval)
89 91 return retval;
  92 +
90 93 return count;
91 94 }
92 95  
drivers/oprofile/oprofile_perf.c
  1 +/*
  2 + * Copyright 2010 ARM Ltd.
  3 + *
  4 + * Perf-events backend for OProfile.
  5 + */
  6 +#include <linux/perf_event.h>
  7 +#include <linux/platform_device.h>
  8 +#include <linux/oprofile.h>
  9 +#include <linux/slab.h>
  10 +
  11 +/*
  12 + * Per performance monitor configuration as set via oprofilefs.
  13 + */
  14 +struct op_counter_config {
  15 + unsigned long count;
  16 + unsigned long enabled;
  17 + unsigned long event;
  18 + unsigned long unit_mask;
  19 + unsigned long kernel;
  20 + unsigned long user;
  21 + struct perf_event_attr attr;
  22 +};
  23 +
  24 +static int oprofile_perf_enabled;
  25 +static DEFINE_MUTEX(oprofile_perf_mutex);
  26 +
  27 +static struct op_counter_config *counter_config;
  28 +static struct perf_event **perf_events[nr_cpumask_bits];
  29 +static int num_counters;
  30 +
  31 +/*
  32 + * Overflow callback for oprofile.
  33 + */
  34 +static void op_overflow_handler(struct perf_event *event, int unused,
  35 + struct perf_sample_data *data, struct pt_regs *regs)
  36 +{
  37 + int id;
  38 + u32 cpu = smp_processor_id();
  39 +
  40 + for (id = 0; id < num_counters; ++id)
  41 + if (perf_events[cpu][id] == event)
  42 + break;
  43 +
  44 + if (id != num_counters)
  45 + oprofile_add_sample(regs, id);
  46 + else
  47 + pr_warning("oprofile: ignoring spurious overflow "
  48 + "on cpu %u\n", cpu);
  49 +}
  50 +
  51 +/*
  52 + * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile
  53 + * settings in counter_config. Attributes are created as `pinned' events and
  54 + * so are permanently scheduled on the PMU.
  55 + */
  56 +static void op_perf_setup(void)
  57 +{
  58 + int i;
  59 + u32 size = sizeof(struct perf_event_attr);
  60 + struct perf_event_attr *attr;
  61 +
  62 + for (i = 0; i < num_counters; ++i) {
  63 + attr = &counter_config[i].attr;
  64 + memset(attr, 0, size);
  65 + attr->type = PERF_TYPE_RAW;
  66 + attr->size = size;
  67 + attr->config = counter_config[i].event;
  68 + attr->sample_period = counter_config[i].count;
  69 + attr->pinned = 1;
  70 + }
  71 +}
  72 +
  73 +static int op_create_counter(int cpu, int event)
  74 +{
  75 + struct perf_event *pevent;
  76 +
  77 + if (!counter_config[event].enabled || perf_events[cpu][event])
  78 + return 0;
  79 +
  80 + pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
  81 + cpu, NULL,
  82 + op_overflow_handler);
  83 +
  84 + if (IS_ERR(pevent))
  85 + return PTR_ERR(pevent);
  86 +
  87 + if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
  88 + perf_event_release_kernel(pevent);
  89 + pr_warning("oprofile: failed to enable event %d "
  90 + "on CPU %d\n", event, cpu);
  91 + return -EBUSY;
  92 + }
  93 +
  94 + perf_events[cpu][event] = pevent;
  95 +
  96 + return 0;
  97 +}
  98 +
  99 +static void op_destroy_counter(int cpu, int event)
  100 +{
  101 + struct perf_event *pevent = perf_events[cpu][event];
  102 +
  103 + if (pevent) {
  104 + perf_event_release_kernel(pevent);
  105 + perf_events[cpu][event] = NULL;
  106 + }
  107 +}
  108 +
  109 +/*
  110 + * Called by oprofile_perf_start to create active perf events based on the
  111 + * previously configured attributes.
  112 + */
  113 +static int op_perf_start(void)
  114 +{
  115 + int cpu, event, ret = 0;
  116 +
  117 + for_each_online_cpu(cpu) {
  118 + for (event = 0; event < num_counters; ++event) {
  119 + ret = op_create_counter(cpu, event);
  120 + if (ret)
  121 + return ret;
  122 + }
  123 + }
  124 +
  125 + return ret;
  126 +}
  127 +
  128 +/*
  129 + * Called by oprofile_perf_stop at the end of a profiling run.
  130 + */
  131 +static void op_perf_stop(void)
  132 +{
  133 + int cpu, event;
  134 +
  135 + for_each_online_cpu(cpu)
  136 + for (event = 0; event < num_counters; ++event)
  137 + op_destroy_counter(cpu, event);
  138 +}
  139 +
  140 +static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
  141 +{
  142 + unsigned int i;
  143 +
  144 + for (i = 0; i < num_counters; i++) {
  145 + struct dentry *dir;
  146 + char buf[4];
  147 +
  148 + snprintf(buf, sizeof buf, "%d", i);
  149 + dir = oprofilefs_mkdir(sb, root, buf);
  150 + oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
  151 + oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
  152 + oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
  153 + oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
  154 + oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
  155 + oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
  156 + }
  157 +
  158 + return 0;
  159 +}
  160 +
  161 +static int oprofile_perf_setup(void)
  162 +{
  163 + spin_lock(&oprofilefs_lock);
  164 + op_perf_setup();
  165 + spin_unlock(&oprofilefs_lock);
  166 + return 0;
  167 +}
  168 +
  169 +static int oprofile_perf_start(void)
  170 +{
  171 + int ret = -EBUSY;
  172 +
  173 + mutex_lock(&oprofile_perf_mutex);
  174 + if (!oprofile_perf_enabled) {
  175 + ret = 0;
  176 + op_perf_start();
  177 + oprofile_perf_enabled = 1;
  178 + }
  179 + mutex_unlock(&oprofile_perf_mutex);
  180 + return ret;
  181 +}
  182 +
  183 +static void oprofile_perf_stop(void)
  184 +{
  185 + mutex_lock(&oprofile_perf_mutex);
  186 + if (oprofile_perf_enabled)
  187 + op_perf_stop();
  188 + oprofile_perf_enabled = 0;
  189 + mutex_unlock(&oprofile_perf_mutex);
  190 +}
  191 +
  192 +#ifdef CONFIG_PM
  193 +
  194 +static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
  195 +{
  196 + mutex_lock(&oprofile_perf_mutex);
  197 + if (oprofile_perf_enabled)
  198 + op_perf_stop();
  199 + mutex_unlock(&oprofile_perf_mutex);
  200 + return 0;
  201 +}
  202 +
  203 +static int oprofile_perf_resume(struct platform_device *dev)
  204 +{
  205 + mutex_lock(&oprofile_perf_mutex);
  206 + if (oprofile_perf_enabled && op_perf_start())
  207 + oprofile_perf_enabled = 0;
  208 + mutex_unlock(&oprofile_perf_mutex);
  209 + return 0;
  210 +}
  211 +
  212 +static struct platform_driver oprofile_driver = {
  213 + .driver = {
  214 + .name = "oprofile-perf",
  215 + },
  216 + .resume = oprofile_perf_resume,
  217 + .suspend = oprofile_perf_suspend,
  218 +};
  219 +
  220 +static struct platform_device *oprofile_pdev;
  221 +
  222 +static int __init init_driverfs(void)
  223 +{
  224 + int ret;
  225 +
  226 + ret = platform_driver_register(&oprofile_driver);
  227 + if (ret)
  228 + return ret;
  229 +
  230 + oprofile_pdev = platform_device_register_simple(
  231 + oprofile_driver.driver.name, 0, NULL, 0);
  232 + if (IS_ERR(oprofile_pdev)) {
  233 + ret = PTR_ERR(oprofile_pdev);
  234 + platform_driver_unregister(&oprofile_driver);
  235 + }
  236 +
  237 + return ret;
  238 +}
  239 +
  240 +static void exit_driverfs(void)
  241 +{
  242 + platform_device_unregister(oprofile_pdev);
  243 + platform_driver_unregister(&oprofile_driver);
  244 +}
  245 +
  246 +#else
  247 +
  248 +static inline int init_driverfs(void) { return 0; }
  249 +static inline void exit_driverfs(void) { }
  250 +
  251 +#endif /* CONFIG_PM */
  252 +
  253 +void oprofile_perf_exit(void)
  254 +{
  255 + int cpu, id;
  256 + struct perf_event *event;
  257 +
  258 + for_each_possible_cpu(cpu) {
  259 + for (id = 0; id < num_counters; ++id) {
  260 + event = perf_events[cpu][id];
  261 + if (event)
  262 + perf_event_release_kernel(event);
  263 + }
  264 +
  265 + kfree(perf_events[cpu]);
  266 + }
  267 +
  268 + kfree(counter_config);
  269 + exit_driverfs();
  270 +}
  271 +
/*
 * Hook oprofile up to the perf-events layer: allocate the counter
 * configuration array and one perf_event pointer table per possible
 * CPU, then install the oprofile_operations callbacks.
 *
 * Returns 0 on success or a negative errno.  On any failure the partial
 * state is released through oprofile_perf_exit().
 */
int __init oprofile_perf_init(struct oprofile_operations *ops)
{
	int cpu, ret = 0;

	/* Register the PM hook first (a no-op stub without CONFIG_PM). */
	ret = init_driverfs();
	if (ret)
		return ret;

	memset(&perf_events, 0, sizeof(perf_events));

	num_counters = perf_num_counters();
	if (num_counters <= 0) {
		pr_info("oprofile: no performance counters\n");
		ret = -ENODEV;
		goto out;
	}

	counter_config = kcalloc(num_counters,
			sizeof(struct op_counter_config), GFP_KERNEL);

	if (!counter_config) {
		pr_info("oprofile: failed to allocate %d "
				"counters\n", num_counters);
		ret = -ENOMEM;
		/* Zero it so the exit path does not walk unallocated tables. */
		num_counters = 0;
		goto out;
	}

	for_each_possible_cpu(cpu) {
		perf_events[cpu] = kcalloc(num_counters,
				sizeof(struct perf_event *), GFP_KERNEL);
		if (!perf_events[cpu]) {
			pr_info("oprofile: failed to allocate %d perf events "
					"for cpu %d\n", num_counters, cpu);
			ret = -ENOMEM;
			goto out;
		}
	}

	ops->create_files = oprofile_perf_create_files;
	ops->setup	= oprofile_perf_setup;
	ops->start	= oprofile_perf_start;
	ops->stop	= oprofile_perf_stop;
	/* No separate shutdown path: shutting down is the same as stopping. */
	ops->shutdown	= oprofile_perf_stop;
	ops->cpu_type	= op_name_from_perf_id();

	if (!ops->cpu_type)
		ret = -ENODEV;
	else
		pr_info("oprofile: using %s\n", ops->cpu_type);

out:
	if (ret)
		oprofile_perf_exit();	/* error-path cleanup */

	return ret;
}
drivers/oprofile/oprofilefs.c
... ... @@ -91,16 +91,20 @@
91 91  
/*
 * Write handler for oprofilefs "ulong" files: parse the user-supplied
 * text into an unsigned long and hand it to oprofile_set_ulong(), which
 * validates and stores it.  Only writes at offset 0 are accepted.
 * Returns @count on success or a negative errno.
 */
static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
{
	unsigned long value;
	int retval;

	if (*offset)
		return -EINVAL;

	retval = oprofilefs_ulong_from_user(&value, buf, count);
	if (retval)
		return retval;

	/* file->private_data points at the backing unsigned long. */
	retval = oprofile_set_ulong(file->private_data, value);
	if (retval)
		return retval;

	return count;
}
106 110  
107 111  
108 112  
109 113  
110 114  
111 115  
112 116  
... ... @@ -126,50 +130,41 @@
126 130 };
127 131  
128 132  
129   -static struct dentry *__oprofilefs_create_file(struct super_block *sb,
  133 +static int __oprofilefs_create_file(struct super_block *sb,
130 134 struct dentry *root, char const *name, const struct file_operations *fops,
131   - int perm)
  135 + int perm, void *priv)
132 136 {
133 137 struct dentry *dentry;
134 138 struct inode *inode;
135 139  
136 140 dentry = d_alloc_name(root, name);
137 141 if (!dentry)
138   - return NULL;
  142 + return -ENOMEM;
139 143 inode = oprofilefs_get_inode(sb, S_IFREG | perm);
140 144 if (!inode) {
141 145 dput(dentry);
142   - return NULL;
  146 + return -ENOMEM;
143 147 }
144 148 inode->i_fop = fops;
145 149 d_add(dentry, inode);
146   - return dentry;
  150 + dentry->d_inode->i_private = priv;
  151 + return 0;
147 152 }
148 153  
149 154  
/*
 * Create a read/write (0644) oprofilefs file exposing the unsigned long
 * at @val; writes are routed through oprofile_set_ulong() by ulong_fops.
 * Returns 0 on success or a negative errno.
 */
int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
	char const *name, unsigned long *val)
{
	return __oprofilefs_create_file(sb, root, name,
					&ulong_fops, 0644, val);
}
161 161  
162 162  
/*
 * Create a read-only (0444) oprofilefs file exposing the unsigned long
 * at @val.  Returns 0 on success or a negative errno.
 */
int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
	char const *name, unsigned long *val)
{
	return __oprofilefs_create_file(sb, root, name,
					&ulong_ro_fops, 0444, val);
}
174 169  
175 170  
176 171  
177 172  
... ... @@ -189,31 +184,22 @@
/*
 * Create a read-only (0444) oprofilefs file exposing the atomic_t at
 * @val.  Returns 0 on success or a negative errno.
 */
int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
	char const *name, atomic_t *val)
{
	return __oprofilefs_create_file(sb, root, name,
					&atomic_ro_fops, 0444, val);
}
200 190  
201 191  
/*
 * Create a 0644 oprofilefs file driven by caller-supplied @fops, with
 * no private data.  Returns 0 on success or a negative errno.
 */
int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
	char const *name, const struct file_operations *fops)
{
	return __oprofilefs_create_file(sb, root, name, fops, 0644, NULL);
}
209 197  
210 198  
/*
 * Like oprofilefs_create_file() but with caller-chosen permissions
 * @perm.  Returns 0 on success or a negative errno.
 */
int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
	char const *name, const struct file_operations *fops, int perm)
{
	return __oprofilefs_create_file(sb, root, name, fops, perm, NULL);
}
218 204  
219 205  
include/linux/oprofile.h
... ... @@ -15,6 +15,7 @@
15 15  
16 16 #include <linux/types.h>
17 17 #include <linux/spinlock.h>
  18 +#include <linux/init.h>
18 19 #include <asm/atomic.h>
19 20  
20 21 /* Each escaped entry is prefixed by ESCAPE_CODE
... ... @@ -184,6 +185,12 @@
184 185 int oprofile_add_data(struct op_entry *entry, unsigned long val);
185 186 int oprofile_add_data64(struct op_entry *entry, u64 val);
186 187 int oprofile_write_commit(struct op_entry *entry);
  188 +
/*
 * oprofile/perf glue.  oprofile_perf.o is only built when
 * CONFIG_HW_PERF_EVENTS=y (see the arch oprofile Makefiles), so guard
 * the declarations on the same symbol; guarding on CONFIG_PERF_EVENTS
 * would expose prototypes for functions that may not be linked in.
 */
#ifdef CONFIG_HW_PERF_EVENTS
int __init oprofile_perf_init(struct oprofile_operations *ops);
void oprofile_perf_exit(void);
char *op_name_from_perf_id(void);
#endif /* CONFIG_HW_PERF_EVENTS */
187 194  
188 195 #endif /* OPROFILE_H */
include/linux/perf_event.h
... ... @@ -890,6 +890,8 @@
890 890 extern int perf_pmu_register(struct pmu *pmu);
891 891 extern void perf_pmu_unregister(struct pmu *pmu);
892 892  
  893 +extern int perf_num_counters(void);
  894 +extern const char *perf_pmu_name(void);
893 895 extern void perf_event_task_sched_in(struct task_struct *task);
894 896 extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
895 897 extern int perf_event_init_task(struct task_struct *child);
... ... @@ -63,6 +63,11 @@
63 63  
64 64 void __weak perf_event_print_debug(void) { }
65 65  
  66 +extern __weak const char *perf_pmu_name(void)
  67 +{
  68 + return "pmu";
  69 +}
  70 +
66 71 void perf_pmu_disable(struct pmu *pmu)
67 72 {
68 73 int *count = this_cpu_ptr(pmu->pmu_disable_count);