Commit 3bf101ba42a1c89b5afbc7492e7647dae5e18735
Committed by: Robert Richter
Parent: 4cbe75be5c
perf: Add helper function to return number of counters
The number of counters for the registered pmu is needed in a few places so provide a helper function that returns this number.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Tested-by: Will Deacon <will.deacon@arm.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Showing 4 changed files with 34 additions and 13 deletions.
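The new interface is a single query that callers use to size per-counter data structures. A minimal usage sketch follows (a hypothetical caller, not part of this commit; alloc_event_slots() and its shape are illustrative, only perf_num_counters() comes from the patch):

/*
 * Hypothetical caller, mirroring the ARM oprofile glue changed below:
 * size a per-CPU array of perf_event pointers from the PMU's counter count.
 */
#include <linux/perf_event.h>
#include <linux/slab.h>

static struct perf_event **alloc_event_slots(void)
{
	int n = perf_num_counters();	/* counters of the registered PMU */

	if (n <= 0)			/* no PMU, or a PMU with no counters */
		return NULL;

	return kcalloc(n, sizeof(struct perf_event *), GFP_KERNEL);
}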
arch/arm/kernel/perf_event.c
@@ -123,6 +123,12 @@
 }
 EXPORT_SYMBOL_GPL(armpmu_get_max_events);
 
+int perf_num_counters(void)
+{
+	return armpmu_get_max_events();
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 #define HW_OP_UNSUPPORTED 0xFFFF
 
 #define C(_x) \
arch/arm/oprofile/common.c
@@ -43,7 +43,7 @@
 
 static struct op_counter_config *counter_config;
 static struct perf_event **perf_events[nr_cpumask_bits];
-static int perf_num_counters;
+static int num_counters;
 
 /*
  * Overflow callback for oprofile.
@@ -54,11 +54,11 @@
 	int id;
 	u32 cpu = smp_processor_id();
 
-	for (id = 0; id < perf_num_counters; ++id)
+	for (id = 0; id < num_counters; ++id)
 		if (perf_events[cpu][id] == event)
 			break;
 
-	if (id != perf_num_counters)
+	if (id != num_counters)
 		oprofile_add_sample(regs, id);
 	else
 		pr_warning("oprofile: ignoring spurious overflow "
@@ -76,7 +76,7 @@
 	u32 size = sizeof(struct perf_event_attr);
 	struct perf_event_attr *attr;
 
-	for (i = 0; i < perf_num_counters; ++i) {
+	for (i = 0; i < num_counters; ++i) {
 		attr = &counter_config[i].attr;
 		memset(attr, 0, size);
 		attr->type = PERF_TYPE_RAW;
@@ -131,7 +131,7 @@
 	int cpu, event, ret = 0;
 
 	for_each_online_cpu(cpu) {
-		for (event = 0; event < perf_num_counters; ++event) {
+		for (event = 0; event < num_counters; ++event) {
 			ret = op_create_counter(cpu, event);
 			if (ret)
 				goto out;
@@ -150,7 +150,7 @@
 	int cpu, event;
 
 	for_each_online_cpu(cpu)
-		for (event = 0; event < perf_num_counters; ++event)
+		for (event = 0; event < num_counters; ++event)
 			op_destroy_counter(cpu, event);
 }
 
@@ -179,7 +179,7 @@
 {
 	unsigned int i;
 
-	for (i = 0; i < perf_num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		struct dentry *dir;
 		char buf[4];
 
@@ -353,14 +353,19 @@
 
 	memset(&perf_events, 0, sizeof(perf_events));
 
-	perf_num_counters = armpmu_get_max_events();
+	num_counters = perf_num_counters();
+	if (num_counters <= 0) {
+		pr_info("oprofile: no performance counters\n");
+		ret = -ENODEV;
+		goto out;
+	}
 
-	counter_config = kcalloc(perf_num_counters,
+	counter_config = kcalloc(num_counters,
 			sizeof(struct op_counter_config), GFP_KERNEL);
 
 	if (!counter_config) {
 		pr_info("oprofile: failed to allocate %d "
-			"counters\n", perf_num_counters);
+			"counters\n", num_counters);
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -370,11 +375,11 @@
 		goto out;
 
 	for_each_possible_cpu(cpu) {
-		perf_events[cpu] = kcalloc(perf_num_counters,
+		perf_events[cpu] = kcalloc(num_counters,
 			sizeof(struct perf_event *), GFP_KERNEL);
 		if (!perf_events[cpu]) {
 			pr_info("oprofile: failed to allocate %d perf events "
-				"for cpu %d\n", perf_num_counters, cpu);
+				"for cpu %d\n", num_counters, cpu);
 			ret = -ENOMEM;
 			goto out;
 		}
@@ -409,7 +414,7 @@
 	struct perf_event *event;
 
 	for_each_possible_cpu(cpu) {
-		for (id = 0; id < perf_num_counters; ++id) {
+		for (id = 0; id < num_counters; ++id) {
 			event = perf_events[cpu][id];
 			if (event)
 				perf_event_release_kernel(event);
arch/sh/kernel/perf_event.c
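(The hunks for this file are not reproduced above. Going by the ARM variant and the header declaration below, the SH side presumably just reports the event count of its registered PMU. The following is only a sketch of that shape, assuming the SH driver keeps its PMU description in an sh_pmu structure with a num_events field; it is not the actual hunk.)

int perf_num_counters(void)
{
	/* Assumed layout: sh_pmu is the driver's registered PMU descriptor. */
	if (!sh_pmu)
		return 0;

	return sh_pmu->num_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);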
include/linux/perf_event.h
@@ -849,6 +849,7 @@
 
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
+extern int perf_num_counters(void);
 extern void perf_event_task_sched_in(struct task_struct *task);
 extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
 extern void perf_event_task_tick(struct task_struct *task);