Commit f16250669d78a32bdfb27cec4d791e85141e11e2

Authored by Tejun Heo
1 parent b3e9f672b6

percpu: make percpu symbols in cpufreq unique

This patch updates percpu related symbols in cpufreq such that percpu
symbols are unique and don't clash with local symbols.  This serves
two purposes of decreasing the possibility of global percpu symbol
collision and allowing dropping per_cpu__ prefix from percpu symbols.

* drivers/cpufreq/cpufreq.c: s/policy_cpu/cpufreq_policy_cpu/
* drivers/cpufreq/freq_table.c: s/show_table/cpufreq_show_table/
* arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c: s/drv_data/acfreq_data/
                                              s/old_perf/acfreq_old_perf/

Partly based on Rusty Russell's "alloc_percpu: rename percpu vars
which cause name clashes" patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>

Showing 3 changed files with 28 additions and 28 deletions (side-by-side diff).

arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
... ... @@ -68,9 +68,9 @@
68 68 unsigned int cpu_feature;
69 69 };
70 70  
71   -static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
  71 +static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);
72 72  
73   -static DEFINE_PER_CPU(struct aperfmperf, old_perf);
  73 +static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
74 74  
75 75 /* acpi_perf_data is a pointer to percpu data. */
76 76 static struct acpi_processor_performance *acpi_perf_data;
77 77  
... ... @@ -214,14 +214,14 @@
214 214 if (unlikely(cpumask_empty(mask)))
215 215 return 0;
216 216  
217   - switch (per_cpu(drv_data, cpumask_first(mask))->cpu_feature) {
  217 + switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
218 218 case SYSTEM_INTEL_MSR_CAPABLE:
219 219 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
220 220 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
221 221 break;
222 222 case SYSTEM_IO_CAPABLE:
223 223 cmd.type = SYSTEM_IO_CAPABLE;
224   - perf = per_cpu(drv_data, cpumask_first(mask))->acpi_data;
  224 + perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
225 225 cmd.addr.io.port = perf->control_register.address;
226 226 cmd.addr.io.bit_width = perf->control_register.bit_width;
227 227 break;
... ... @@ -268,8 +268,8 @@
268 268 if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
269 269 return 0;
270 270  
271   - ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
272   - per_cpu(old_perf, cpu) = perf;
  271 + ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
  272 + per_cpu(acfreq_old_perf, cpu) = perf;
273 273  
274 274 retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
275 275  
... ... @@ -278,7 +278,7 @@
278 278  
279 279 static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
280 280 {
281   - struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
  281 + struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
282 282 unsigned int freq;
283 283 unsigned int cached_freq;
284 284  
... ... @@ -322,7 +322,7 @@
322 322 static int acpi_cpufreq_target(struct cpufreq_policy *policy,
323 323 unsigned int target_freq, unsigned int relation)
324 324 {
325   - struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
  325 + struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
326 326 struct acpi_processor_performance *perf;
327 327 struct cpufreq_freqs freqs;
328 328 struct drv_cmd cmd;
... ... @@ -416,7 +416,7 @@
416 416  
417 417 static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
418 418 {
419   - struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
  419 + struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
420 420  
421 421 dprintk("acpi_cpufreq_verify\n");
422 422  
... ... @@ -563,7 +563,7 @@
563 563 return -ENOMEM;
564 564  
565 565 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
566   - per_cpu(drv_data, cpu) = data;
  566 + per_cpu(acfreq_data, cpu) = data;
567 567  
568 568 if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
569 569 acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
570 570  
571 571  
... ... @@ -714,20 +714,20 @@
714 714 acpi_processor_unregister_performance(perf, cpu);
715 715 err_free:
716 716 kfree(data);
717   - per_cpu(drv_data, cpu) = NULL;
  717 + per_cpu(acfreq_data, cpu) = NULL;
718 718  
719 719 return result;
720 720 }
721 721  
722 722 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
723 723 {
724   - struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
  724 + struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
725 725  
726 726 dprintk("acpi_cpufreq_cpu_exit\n");
727 727  
728 728 if (data) {
729 729 cpufreq_frequency_table_put_attr(policy->cpu);
730   - per_cpu(drv_data, policy->cpu) = NULL;
  730 + per_cpu(acfreq_data, policy->cpu) = NULL;
731 731 acpi_processor_unregister_performance(data->acpi_data,
732 732 policy->cpu);
733 733 kfree(data);
... ... @@ -738,7 +738,7 @@
738 738  
739 739 static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
740 740 {
741   - struct acpi_cpufreq_data *data = per_cpu(drv_data, policy->cpu);
  741 + struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
742 742  
743 743 dprintk("acpi_cpufreq_resume\n");
744 744  
drivers/cpufreq/cpufreq.c
... ... @@ -64,14 +64,14 @@
64 64 * - Lock should not be held across
65 65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
66 66 */
67   -static DEFINE_PER_CPU(int, policy_cpu);
  67 +static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
68 68 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
69 69  
70 70 #define lock_policy_rwsem(mode, cpu) \
71 71 int lock_policy_rwsem_##mode \
72 72 (int cpu) \
73 73 { \
74   - int policy_cpu = per_cpu(policy_cpu, cpu); \
  74 + int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
75 75 BUG_ON(policy_cpu == -1); \
76 76 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
77 77 if (unlikely(!cpu_online(cpu))) { \
... ... @@ -90,7 +90,7 @@
90 90  
91 91 void unlock_policy_rwsem_read(int cpu)
92 92 {
93   - int policy_cpu = per_cpu(policy_cpu, cpu);
  93 + int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
94 94 BUG_ON(policy_cpu == -1);
95 95 up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
96 96 }
... ... @@ -98,7 +98,7 @@
98 98  
99 99 void unlock_policy_rwsem_write(int cpu)
100 100 {
101   - int policy_cpu = per_cpu(policy_cpu, cpu);
  101 + int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
102 102 BUG_ON(policy_cpu == -1);
103 103 up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
104 104 }
... ... @@ -799,7 +799,7 @@
799 799  
800 800 /* Set proper policy_cpu */
801 801 unlock_policy_rwsem_write(cpu);
802   - per_cpu(policy_cpu, cpu) = managed_policy->cpu;
  802 + per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
803 803  
804 804 if (lock_policy_rwsem_write(cpu) < 0) {
805 805 /* Should not go through policy unlock path */
... ... @@ -906,7 +906,7 @@
906 906 if (!cpu_online(j))
907 907 continue;
908 908 per_cpu(cpufreq_cpu_data, j) = policy;
909   - per_cpu(policy_cpu, j) = policy->cpu;
  909 + per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
910 910 }
911 911 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
912 912  
... ... @@ -991,7 +991,7 @@
991 991 cpumask_copy(policy->cpus, cpumask_of(cpu));
992 992  
993 993 /* Initially set CPU itself as the policy_cpu */
994   - per_cpu(policy_cpu, cpu) = cpu;
  994 + per_cpu(cpufreq_policy_cpu, cpu) = cpu;
995 995 ret = (lock_policy_rwsem_write(cpu) < 0);
996 996 WARN_ON(ret);
997 997  
... ... @@ -1946,7 +1946,7 @@
1946 1946 int cpu;
1947 1947  
1948 1948 for_each_possible_cpu(cpu) {
1949   - per_cpu(policy_cpu, cpu) = -1;
  1949 + per_cpu(cpufreq_policy_cpu, cpu) = -1;
1950 1950 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
1951 1951 }
1952 1952  
drivers/cpufreq/freq_table.c
... ... @@ -174,7 +174,7 @@
174 174 }
175 175 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
176 176  
177   -static DEFINE_PER_CPU(struct cpufreq_frequency_table *, show_table);
  177 +static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
178 178 /**
179 179 * show_available_freqs - show available frequencies for the specified CPU
180 180 */
181 181  
... ... @@ -185,10 +185,10 @@
185 185 ssize_t count = 0;
186 186 struct cpufreq_frequency_table *table;
187 187  
188   - if (!per_cpu(show_table, cpu))
  188 + if (!per_cpu(cpufreq_show_table, cpu))
189 189 return -ENODEV;
190 190  
191   - table = per_cpu(show_table, cpu);
  191 + table = per_cpu(cpufreq_show_table, cpu);
192 192  
193 193 for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
194 194 if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
195 195  
196 196  
... ... @@ -217,20 +217,20 @@
217 217 unsigned int cpu)
218 218 {
219 219 dprintk("setting show_table for cpu %u to %p\n", cpu, table);
220   - per_cpu(show_table, cpu) = table;
  220 + per_cpu(cpufreq_show_table, cpu) = table;
221 221 }
222 222 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr);
223 223  
224 224 void cpufreq_frequency_table_put_attr(unsigned int cpu)
225 225 {
226 226 dprintk("clearing show_table for cpu %u\n", cpu);
227   - per_cpu(show_table, cpu) = NULL;
  227 + per_cpu(cpufreq_show_table, cpu) = NULL;
228 228 }
229 229 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
230 230  
231 231 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
232 232 {
233   - return per_cpu(show_table, cpu);
  233 + return per_cpu(cpufreq_show_table, cpu);
234 234 }
235 235 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
236 236