Commit 796aadeb1b2db9b5d463946766c5bbfd7717158c
Exists in
master
and in
39 other branches
Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ][2/2] preregister support for powernow-k8
  [CPUFREQ][1/2] whitespace fix for powernow-k8
  [CPUFREQ] Update MAINTAINERS to reflect new mailing list.
  [CPUFREQ] Fix warning in elanfreq
  [CPUFREQ] Fix -Wshadow warning in conservative governor.
  [CPUFREQ] Remove EXPERIMENTAL annotation from VIA C7 powersaver kconfig.
Showing 6 changed files Side-by-side Diff
MAINTAINERS
... | ... | @@ -1249,7 +1249,7 @@ |
1249 | 1249 | CPU FREQUENCY DRIVERS |
1250 | 1250 | P: Dave Jones |
1251 | 1251 | M: davej@codemonkey.org.uk |
1252 | -L: cpufreq@lists.linux.org.uk | |
1252 | +L: cpufreq@vger.kernel.org | |
1253 | 1253 | W: http://www.codemonkey.org.uk/projects/cpufreq/ |
1254 | 1254 | T: git kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git |
1255 | 1255 | S: Maintained |
arch/x86/kernel/cpu/cpufreq/Kconfig
... | ... | @@ -235,9 +235,9 @@ |
235 | 235 | If in doubt, say N. |
236 | 236 | |
237 | 237 | config X86_E_POWERSAVER |
238 | - tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)" | |
238 | + tristate "VIA C7 Enhanced PowerSaver" | |
239 | 239 | select CPU_FREQ_TABLE |
240 | - depends on X86_32 && EXPERIMENTAL | |
240 | + depends on X86_32 | |
241 | 241 | help |
242 | 242 | This adds the CPUFreq driver for VIA C7 processors. |
243 | 243 |
arch/x86/kernel/cpu/cpufreq/elanfreq.c
... | ... | @@ -44,7 +44,7 @@ |
44 | 44 | * It is important that the frequencies |
45 | 45 | * are listed in ascending order here! |
46 | 46 | */ |
47 | -struct s_elan_multiplier elan_multiplier[] = { | |
47 | +static struct s_elan_multiplier elan_multiplier[] = { | |
48 | 48 | {1000, 0x02, 0x18}, |
49 | 49 | {2000, 0x02, 0x10}, |
50 | 50 | {4000, 0x02, 0x08}, |
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
... | ... | @@ -66,7 +66,6 @@ |
66 | 66 | return 800 + (fid * 100); |
67 | 67 | } |
68 | 68 | |
69 | - | |
70 | 69 | /* Return a frequency in KHz, given an input fid */ |
71 | 70 | static u32 find_khz_freq_from_fid(u32 fid) |
72 | 71 | { |
... | ... | @@ -78,7 +77,6 @@ |
78 | 77 | return data[pstate].frequency; |
79 | 78 | } |
80 | 79 | |
81 | - | |
82 | 80 | /* Return the vco fid for an input fid |
83 | 81 | * |
84 | 82 | * Each "low" fid has corresponding "high" fid, and you can get to "low" fids |
... | ... | @@ -166,7 +164,6 @@ |
166 | 164 | wrmsr(MSR_FIDVID_CTL, lo, hi); |
167 | 165 | } |
168 | 166 | |
169 | - | |
170 | 167 | /* write the new fid value along with the other control fields to the msr */ |
171 | 168 | static int write_new_fid(struct powernow_k8_data *data, u32 fid) |
172 | 169 | { |
173 | 170 | |
174 | 171 | |
175 | 172 | |
176 | 173 | |
177 | 174 | |
178 | 175 | |
179 | 176 | |
180 | 177 | |
... | ... | @@ -740,44 +737,63 @@ |
740 | 737 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI |
741 | 738 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) |
742 | 739 | { |
743 | - if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) | |
740 | + if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE)) | |
744 | 741 | return; |
745 | 742 | |
746 | - data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK; | |
747 | - data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK; | |
748 | - data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; | |
749 | - data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; | |
750 | - data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK); | |
751 | - data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK; | |
743 | + data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK; | |
744 | + data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK; | |
745 | + data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; | |
746 | + data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; | |
747 | + data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK); | |
748 | + data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK; | |
752 | 749 | } |
753 | 750 | |
751 | + | |
752 | +static struct acpi_processor_performance *acpi_perf_data; | |
753 | +static int preregister_valid; | |
754 | + | |
755 | +static int powernow_k8_cpu_preinit_acpi(void) | |
756 | +{ | |
757 | + acpi_perf_data = alloc_percpu(struct acpi_processor_performance); | |
758 | + if (!acpi_perf_data) | |
759 | + return -ENODEV; | |
760 | + | |
761 | + if (acpi_processor_preregister_performance(acpi_perf_data)) | |
762 | + return -ENODEV; | |
763 | + else | |
764 | + preregister_valid = 1; | |
765 | + return 0; | |
766 | +} | |
767 | + | |
754 | 768 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) |
755 | 769 | { |
756 | 770 | struct cpufreq_frequency_table *powernow_table; |
757 | 771 | int ret_val; |
772 | + int cpu = 0; | |
758 | 773 | |
759 | - if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { | |
774 | + data->acpi_data = percpu_ptr(acpi_perf_data, cpu); | |
775 | + if (acpi_processor_register_performance(data->acpi_data, data->cpu)) { | |
760 | 776 | dprintk("register performance failed: bad ACPI data\n"); |
761 | 777 | return -EIO; |
762 | 778 | } |
763 | 779 | |
764 | 780 | /* verify the data contained in the ACPI structures */ |
765 | - if (data->acpi_data.state_count <= 1) { | |
781 | + if (data->acpi_data->state_count <= 1) { | |
766 | 782 | dprintk("No ACPI P-States\n"); |
767 | 783 | goto err_out; |
768 | 784 | } |
769 | 785 | |
770 | - if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || | |
771 | - (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { | |
786 | + if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || | |
787 | + (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { | |
772 | 788 | dprintk("Invalid control/status registers (%x - %x)\n", |
773 | - data->acpi_data.control_register.space_id, | |
774 | - data->acpi_data.status_register.space_id); | |
789 | + data->acpi_data->control_register.space_id, | |
790 | + data->acpi_data->status_register.space_id); | |
775 | 791 | goto err_out; |
776 | 792 | } |
777 | 793 | |
778 | 794 | /* fill in data->powernow_table */ |
779 | 795 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) |
780 | - * (data->acpi_data.state_count + 1)), GFP_KERNEL); | |
796 | + * (data->acpi_data->state_count + 1)), GFP_KERNEL); | |
781 | 797 | if (!powernow_table) { |
782 | 798 | dprintk("powernow_table memory alloc failure\n"); |
783 | 799 | goto err_out; |
784 | 800 | |
... | ... | @@ -790,12 +806,12 @@ |
790 | 806 | if (ret_val) |
791 | 807 | goto err_out_mem; |
792 | 808 | |
793 | - powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; | |
794 | - powernow_table[data->acpi_data.state_count].index = 0; | |
809 | + powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END; | |
810 | + powernow_table[data->acpi_data->state_count].index = 0; | |
795 | 811 | data->powernow_table = powernow_table; |
796 | 812 | |
797 | 813 | /* fill in data */ |
798 | - data->numps = data->acpi_data.state_count; | |
814 | + data->numps = data->acpi_data->state_count; | |
799 | 815 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) |
800 | 816 | print_basics(data); |
801 | 817 | powernow_k8_acpi_pst_values(data, 0); |
802 | 818 | |
803 | 819 | |
... | ... | @@ -803,16 +819,31 @@ |
803 | 819 | /* notify BIOS that we exist */ |
804 | 820 | acpi_processor_notify_smm(THIS_MODULE); |
805 | 821 | |
822 | + /* determine affinity, from ACPI if available */ | |
823 | + if (preregister_valid) { | |
824 | + if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) || | |
825 | + (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY)) | |
826 | + data->starting_core_affinity = data->acpi_data->shared_cpu_map; | |
827 | + else | |
828 | + data->starting_core_affinity = cpumask_of_cpu(data->cpu); | |
829 | + } else { | |
830 | + /* best guess from family if not */ | |
831 | + if (cpu_family == CPU_HW_PSTATE) | |
832 | + data->starting_core_affinity = cpumask_of_cpu(data->cpu); | |
833 | + else | |
834 | + data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu); | |
835 | + } | |
836 | + | |
806 | 837 | return 0; |
807 | 838 | |
808 | 839 | err_out_mem: |
809 | 840 | kfree(powernow_table); |
810 | 841 | |
811 | 842 | err_out: |
812 | - acpi_processor_unregister_performance(&data->acpi_data, data->cpu); | |
843 | + acpi_processor_unregister_performance(data->acpi_data, data->cpu); | |
813 | 844 | |
814 | 845 | /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ |
815 | - data->acpi_data.state_count = 0; | |
846 | + data->acpi_data->state_count = 0; | |
816 | 847 | |
817 | 848 | return -ENODEV; |
818 | 849 | } |
819 | 850 | |
... | ... | @@ -824,10 +855,10 @@ |
824 | 855 | rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); |
825 | 856 | data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; |
826 | 857 | |
827 | - for (i = 0; i < data->acpi_data.state_count; i++) { | |
858 | + for (i = 0; i < data->acpi_data->state_count; i++) { | |
828 | 859 | u32 index; |
829 | 860 | |
830 | - index = data->acpi_data.states[i].control & HW_PSTATE_MASK; | |
861 | + index = data->acpi_data->states[i].control & HW_PSTATE_MASK; | |
831 | 862 | if (index > data->max_hw_pstate) { |
832 | 863 | printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); |
833 | 864 | printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); |
... | ... | @@ -843,7 +874,7 @@ |
843 | 874 | |
844 | 875 | powernow_table[i].index = index; |
845 | 876 | |
846 | - powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; | |
877 | + powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000; | |
847 | 878 | } |
848 | 879 | return 0; |
849 | 880 | } |
850 | 881 | |
851 | 882 | |
... | ... | @@ -852,16 +883,16 @@ |
852 | 883 | { |
853 | 884 | int i; |
854 | 885 | int cntlofreq = 0; |
855 | - for (i = 0; i < data->acpi_data.state_count; i++) { | |
886 | + for (i = 0; i < data->acpi_data->state_count; i++) { | |
856 | 887 | u32 fid; |
857 | 888 | u32 vid; |
858 | 889 | |
859 | 890 | if (data->exttype) { |
860 | - fid = data->acpi_data.states[i].status & EXT_FID_MASK; | |
861 | - vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK; | |
891 | + fid = data->acpi_data->states[i].status & EXT_FID_MASK; | |
892 | + vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK; | |
862 | 893 | } else { |
863 | - fid = data->acpi_data.states[i].control & FID_MASK; | |
864 | - vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK; | |
894 | + fid = data->acpi_data->states[i].control & FID_MASK; | |
895 | + vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK; | |
865 | 896 | } |
866 | 897 | |
867 | 898 | dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); |
868 | 899 | |
... | ... | @@ -902,10 +933,10 @@ |
902 | 933 | cntlofreq = i; |
903 | 934 | } |
904 | 935 | |
905 | - if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { | |
936 | + if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) { | |
906 | 937 | printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", |
907 | 938 | powernow_table[i].frequency, |
908 | - (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); | |
939 | + (unsigned int) (data->acpi_data->states[i].core_frequency * 1000)); | |
909 | 940 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; |
910 | 941 | continue; |
911 | 942 | } |
912 | 943 | |
... | ... | @@ -915,11 +946,12 @@ |
915 | 946 | |
916 | 947 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) |
917 | 948 | { |
918 | - if (data->acpi_data.state_count) | |
919 | - acpi_processor_unregister_performance(&data->acpi_data, data->cpu); | |
949 | + if (data->acpi_data->state_count) | |
950 | + acpi_processor_unregister_performance(data->acpi_data, data->cpu); | |
920 | 951 | } |
921 | 952 | |
922 | 953 | #else |
954 | +static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; } | |
923 | 955 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } |
924 | 956 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } |
925 | 957 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } |
... | ... | @@ -1104,7 +1136,7 @@ |
1104 | 1136 | static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) |
1105 | 1137 | { |
1106 | 1138 | struct powernow_k8_data *data; |
1107 | - cpumask_t oldmask; | |
1139 | + cpumask_t oldmask = CPU_MASK_ALL; | |
1108 | 1140 | int rc; |
1109 | 1141 | |
1110 | 1142 | if (!cpu_online(pol->cpu)) |
... | ... | @@ -1177,10 +1209,7 @@ |
1177 | 1209 | /* run on any CPU again */ |
1178 | 1210 | set_cpus_allowed_ptr(current, &oldmask); |
1179 | 1211 | |
1180 | - if (cpu_family == CPU_HW_PSTATE) | |
1181 | - pol->cpus = cpumask_of_cpu(pol->cpu); | |
1182 | - else | |
1183 | - pol->cpus = per_cpu(cpu_core_map, pol->cpu); | |
1212 | + pol->cpus = data->starting_core_affinity; | |
1184 | 1213 | data->available_cores = &(pol->cpus); |
1185 | 1214 | |
1186 | 1215 | /* Take a crude guess here. |
... | ... | @@ -1303,6 +1332,7 @@ |
1303 | 1332 | } |
1304 | 1333 | |
1305 | 1334 | if (supported_cpus == num_online_cpus()) { |
1335 | + powernow_k8_cpu_preinit_acpi(); | |
1306 | 1336 | printk(KERN_INFO PFX "Found %d %s " |
1307 | 1337 | "processors (%d cpu cores) (" VERSION ")\n", |
1308 | 1338 | num_online_nodes(), |
... | ... | @@ -1319,6 +1349,10 @@ |
1319 | 1349 | dprintk("exit\n"); |
1320 | 1350 | |
1321 | 1351 | cpufreq_unregister_driver(&cpufreq_amd64_driver); |
1352 | + | |
1353 | +#ifdef CONFIG_X86_POWERNOW_K8_ACPI | |
1354 | + free_percpu(acpi_perf_data); | |
1355 | +#endif | |
1322 | 1356 | } |
1323 | 1357 | |
1324 | 1358 | MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>"); |
arch/x86/kernel/cpu/cpufreq/powernow-k8.h
... | ... | @@ -33,12 +33,13 @@ |
33 | 33 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI |
34 | 34 | /* the acpi table needs to be kept. it's only available if ACPI was |
35 | 35 | * used to determine valid frequency/vid/fid states */ |
36 | - struct acpi_processor_performance acpi_data; | |
36 | + struct acpi_processor_performance *acpi_data; | |
37 | 37 | #endif |
38 | 38 | /* we need to keep track of associated cores, but let cpufreq |
39 | 39 | * handle hotplug events - so just point at cpufreq pol->cpus |
40 | 40 | * structure */ |
41 | 41 | cpumask_t *available_cores; |
42 | + cpumask_t starting_core_affinity; | |
42 | 43 | }; |
43 | 44 | |
44 | 45 |
drivers/cpufreq/cpufreq_conservative.c
... | ... | @@ -333,7 +333,7 @@ |
333 | 333 | { |
334 | 334 | unsigned int idle_ticks, up_idle_ticks, down_idle_ticks; |
335 | 335 | unsigned int tmp_idle_ticks, total_idle_ticks; |
336 | - unsigned int freq_step; | |
336 | + unsigned int freq_target; | |
337 | 337 | unsigned int freq_down_sampling_rate; |
338 | 338 | struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu); |
339 | 339 | struct cpufreq_policy *policy; |
340 | 340 | |
341 | 341 | |
... | ... | @@ -383,13 +383,13 @@ |
383 | 383 | if (this_dbs_info->requested_freq == policy->max) |
384 | 384 | return; |
385 | 385 | |
386 | - freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100; | |
386 | + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; | |
387 | 387 | |
388 | 388 | /* max freq cannot be less than 100. But who knows.... */ |
389 | - if (unlikely(freq_step == 0)) | |
390 | - freq_step = 5; | |
389 | + if (unlikely(freq_target == 0)) | |
390 | + freq_target = 5; | |
391 | 391 | |
392 | - this_dbs_info->requested_freq += freq_step; | |
392 | + this_dbs_info->requested_freq += freq_target; | |
393 | 393 | if (this_dbs_info->requested_freq > policy->max) |
394 | 394 | this_dbs_info->requested_freq = policy->max; |
395 | 395 | |
396 | 396 | |
397 | 397 | |
398 | 398 | |
... | ... | @@ -425,19 +425,19 @@ |
425 | 425 | /* |
426 | 426 | * if we are already at the lowest speed then break out early |
427 | 427 | * or if we 'cannot' reduce the speed as the user might want |
428 | - * freq_step to be zero | |
428 | + * freq_target to be zero | |
429 | 429 | */ |
430 | 430 | if (this_dbs_info->requested_freq == policy->min |
431 | 431 | || dbs_tuners_ins.freq_step == 0) |
432 | 432 | return; |
433 | 433 | |
434 | - freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100; | |
434 | + freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; | |
435 | 435 | |
436 | 436 | /* max freq cannot be less than 100. But who knows.... */ |
437 | - if (unlikely(freq_step == 0)) | |
438 | - freq_step = 5; | |
437 | + if (unlikely(freq_target == 0)) | |
438 | + freq_target = 5; | |
439 | 439 | |
440 | - this_dbs_info->requested_freq -= freq_step; | |
440 | + this_dbs_info->requested_freq -= freq_target; | |
441 | 441 | if (this_dbs_info->requested_freq < policy->min) |
442 | 442 | this_dbs_info->requested_freq = policy->min; |
443 | 443 |