Commit b3e9f672b6cd0f4c2982c1bcc0b3c3fb39d3b0fe
1 parent: 9705f69ed0
Exists in
master
and in
39 other branches
percpu: make percpu symbols in oprofile unique
This patch updates percpu related symbols in oprofile such that percpu symbols are unique and don't clash with local symbols. This serves two purposes of decreasing the possibility of global percpu symbol collision and allowing dropping the per_cpu__ prefix from percpu symbols.

* drivers/oprofile/cpu_buffer.c: s/cpu_buffer/op_cpu_buffer/

Partly based on Rusty Russell's "alloc_percpu: rename percpu vars which cause name clashes" patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Robert Richter <robert.richter@amd.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Showing 3 changed files with 13 additions and 14 deletions Side-by-side Diff
drivers/oprofile/cpu_buffer.c
... | ... | @@ -47,7 +47,7 @@ |
47 | 47 | */ |
48 | 48 | static struct ring_buffer *op_ring_buffer_read; |
49 | 49 | static struct ring_buffer *op_ring_buffer_write; |
50 | -DEFINE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); | |
50 | +DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); | |
51 | 51 | |
52 | 52 | static void wq_sync_buffer(struct work_struct *work); |
53 | 53 | |
... | ... | @@ -61,8 +61,7 @@ |
61 | 61 | |
62 | 62 | void oprofile_cpu_buffer_inc_smpl_lost(void) |
63 | 63 | { |
64 | - struct oprofile_cpu_buffer *cpu_buf | |
65 | - = &__get_cpu_var(cpu_buffer); | |
64 | + struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); | |
66 | 65 | |
67 | 66 | cpu_buf->sample_lost_overflow++; |
68 | 67 | } |
... | ... | @@ -95,7 +94,7 @@ |
95 | 94 | goto fail; |
96 | 95 | |
97 | 96 | for_each_possible_cpu(i) { |
98 | - struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); | |
97 | + struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); | |
99 | 98 | |
100 | 99 | b->last_task = NULL; |
101 | 100 | b->last_is_kernel = -1; |
... | ... | @@ -122,7 +121,7 @@ |
122 | 121 | work_enabled = 1; |
123 | 122 | |
124 | 123 | for_each_online_cpu(i) { |
125 | - struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); | |
124 | + struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); | |
126 | 125 | |
127 | 126 | /* |
128 | 127 | * Spread the work by 1 jiffy per cpu so they dont all |
... | ... | @@ -139,7 +138,7 @@ |
139 | 138 | work_enabled = 0; |
140 | 139 | |
141 | 140 | for_each_online_cpu(i) { |
142 | - struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i); | |
141 | + struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); | |
143 | 142 | |
144 | 143 | cancel_delayed_work(&b->work); |
145 | 144 | } |
... | ... | @@ -330,7 +329,7 @@ |
330 | 329 | __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, |
331 | 330 | unsigned long event, int is_kernel) |
332 | 331 | { |
333 | - struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | |
332 | + struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); | |
334 | 333 | unsigned long backtrace = oprofile_backtrace_depth; |
335 | 334 | |
336 | 335 | /* |
... | ... | @@ -375,7 +374,7 @@ |
375 | 374 | { |
376 | 375 | struct op_sample *sample; |
377 | 376 | int is_kernel = !user_mode(regs); |
378 | - struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | |
377 | + struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); | |
379 | 378 | |
380 | 379 | cpu_buf->sample_received++; |
381 | 380 | |
382 | 381 | |
... | ... | @@ -430,13 +429,13 @@ |
430 | 429 | |
431 | 430 | void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) |
432 | 431 | { |
433 | - struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | |
432 | + struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); | |
434 | 433 | log_sample(cpu_buf, pc, 0, is_kernel, event); |
435 | 434 | } |
436 | 435 | |
437 | 436 | void oprofile_add_trace(unsigned long pc) |
438 | 437 | { |
439 | - struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); | |
438 | + struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); | |
440 | 439 | |
441 | 440 | if (!cpu_buf->tracing) |
442 | 441 | return; |
drivers/oprofile/cpu_buffer.h
... | ... | @@ -50,7 +50,7 @@ |
50 | 50 | struct delayed_work work; |
51 | 51 | }; |
52 | 52 | |
53 | -DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer); | |
53 | +DECLARE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); | |
54 | 54 | |
55 | 55 | /* |
56 | 56 | * Resets the cpu buffer to a sane state. |
... | ... | @@ -60,7 +60,7 @@ |
60 | 60 | */ |
61 | 61 | static inline void op_cpu_buffer_reset(int cpu) |
62 | 62 | { |
63 | - struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu); | |
63 | + struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu); | |
64 | 64 | |
65 | 65 | cpu_buf->last_is_kernel = -1; |
66 | 66 | cpu_buf->last_task = NULL; |
drivers/oprofile/oprofile_stats.c
... | ... | @@ -23,7 +23,7 @@ |
23 | 23 | int i; |
24 | 24 | |
25 | 25 | for_each_possible_cpu(i) { |
26 | - cpu_buf = &per_cpu(cpu_buffer, i); | |
26 | + cpu_buf = &per_cpu(op_cpu_buffer, i); | |
27 | 27 | cpu_buf->sample_received = 0; |
28 | 28 | cpu_buf->sample_lost_overflow = 0; |
29 | 29 | cpu_buf->backtrace_aborted = 0; |
... | ... | @@ -51,7 +51,7 @@ |
51 | 51 | return; |
52 | 52 | |
53 | 53 | for_each_possible_cpu(i) { |
54 | - cpu_buf = &per_cpu(cpu_buffer, i); | |
54 | + cpu_buf = &per_cpu(op_cpu_buffer, i); | |
55 | 55 | snprintf(buf, 10, "cpu%d", i); |
56 | 56 | cpudir = oprofilefs_mkdir(sb, dir, buf); |
57 | 57 |