Commit f8bbfd7d28303967ca4e8597de9bdc9bf8b197e7
1 parent
e734568b67
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
oprofile, perf: Use per-cpu framework
This changes oprofile_perf.c to use the per-cpu framework. Using the per-cpu framework should avoid errors like the following:

arch/arm/oprofile/../../../drivers/oprofile/oprofile_perf.c:28:28: error: variably modified 'perf_events' at file scope

Reported-by: William Cohen <wcohen@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Showing 1 changed file with 11 additions and 12 deletions Side-by-side Diff
drivers/oprofile/oprofile_perf.c
1 | 1 | /* |
2 | 2 | * Copyright 2010 ARM Ltd. |
3 | + * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter | |
3 | 4 | * |
4 | 5 | * Perf-events backend for OProfile. |
5 | 6 | */ |
... | ... | @@ -25,7 +26,7 @@ |
25 | 26 | static DEFINE_MUTEX(oprofile_perf_mutex); |
26 | 27 | |
27 | 28 | static struct op_counter_config *counter_config; |
28 | -static struct perf_event **perf_events[NR_CPUS]; | |
29 | +static DEFINE_PER_CPU(struct perf_event **, perf_events); | |
29 | 30 | static int num_counters; |
30 | 31 | |
31 | 32 | /* |
... | ... | @@ -38,7 +39,7 @@ |
38 | 39 | u32 cpu = smp_processor_id(); |
39 | 40 | |
40 | 41 | for (id = 0; id < num_counters; ++id) |
41 | - if (perf_events[cpu][id] == event) | |
42 | + if (per_cpu(perf_events, cpu)[id] == event) | |
42 | 43 | break; |
43 | 44 | |
44 | 45 | if (id != num_counters) |
... | ... | @@ -74,7 +75,7 @@ |
74 | 75 | { |
75 | 76 | struct perf_event *pevent; |
76 | 77 | |
77 | - if (!counter_config[event].enabled || perf_events[cpu][event]) | |
78 | + if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) | |
78 | 79 | return 0; |
79 | 80 | |
80 | 81 | pevent = perf_event_create_kernel_counter(&counter_config[event].attr, |
81 | 82 | |
82 | 83 | |
... | ... | @@ -91,18 +92,18 @@ |
91 | 92 | return -EBUSY; |
92 | 93 | } |
93 | 94 | |
94 | - perf_events[cpu][event] = pevent; | |
95 | + per_cpu(perf_events, cpu)[event] = pevent; | |
95 | 96 | |
96 | 97 | return 0; |
97 | 98 | } |
98 | 99 | |
99 | 100 | static void op_destroy_counter(int cpu, int event) |
100 | 101 | { |
101 | - struct perf_event *pevent = perf_events[cpu][event]; | |
102 | + struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; | |
102 | 103 | |
103 | 104 | if (pevent) { |
104 | 105 | perf_event_release_kernel(pevent); |
105 | - perf_events[cpu][event] = NULL; | |
106 | + per_cpu(perf_events, cpu)[event] = NULL; | |
106 | 107 | } |
107 | 108 | } |
108 | 109 | |
109 | 110 | |
... | ... | @@ -257,12 +258,12 @@ |
257 | 258 | |
258 | 259 | for_each_possible_cpu(cpu) { |
259 | 260 | for (id = 0; id < num_counters; ++id) { |
260 | - event = perf_events[cpu][id]; | |
261 | + event = per_cpu(perf_events, cpu)[id]; | |
261 | 262 | if (event) |
262 | 263 | perf_event_release_kernel(event); |
263 | 264 | } |
264 | 265 | |
265 | - kfree(perf_events[cpu]); | |
266 | + kfree(per_cpu(perf_events, cpu)); | |
266 | 267 | } |
267 | 268 | |
268 | 269 | kfree(counter_config); |
... | ... | @@ -277,8 +278,6 @@ |
277 | 278 | if (ret) |
278 | 279 | return ret; |
279 | 280 | |
280 | - memset(&perf_events, 0, sizeof(perf_events)); | |
281 | - | |
282 | 281 | num_counters = perf_num_counters(); |
283 | 282 | if (num_counters <= 0) { |
284 | 283 | pr_info("oprofile: no performance counters\n"); |
285 | 284 | |
... | ... | @@ -298,9 +297,9 @@ |
298 | 297 | } |
299 | 298 | |
300 | 299 | for_each_possible_cpu(cpu) { |
301 | - perf_events[cpu] = kcalloc(num_counters, | |
300 | + per_cpu(perf_events, cpu) = kcalloc(num_counters, | |
302 | 301 | sizeof(struct perf_event *), GFP_KERNEL); |
303 | - if (!perf_events[cpu]) { | |
302 | + if (!per_cpu(perf_events, cpu)) { | |
304 | 303 | pr_info("oprofile: failed to allocate %d perf events " |
305 | 304 | "for cpu %d\n", num_counters, cpu); |
306 | 305 | ret = -ENOMEM; |