Commit bd2a0f6754f18f801ed1e490bf678fc3be013eca
Exists in
ti-lsk-linux-4.1.y
and in
10 other branches
Merge back cpufreq material for 3.19-rc1.
Showing 12 changed files Side-by-side Diff
- Documentation/cpu-freq/intel-pstate.txt
- Documentation/kernel-parameters.txt
- arch/x86/include/asm/cpufeature.h
- arch/x86/include/uapi/asm/msr-index.h
- arch/x86/kernel/cpu/scattered.c
- drivers/cpufreq/Kconfig
- drivers/cpufreq/Makefile
- drivers/cpufreq/cpufreq-dt.c
- drivers/cpufreq/cpufreq.c
- drivers/cpufreq/intel_pstate.c
- drivers/cpufreq/ls1x-cpufreq.c
- drivers/cpufreq/pcc-cpufreq.c
Documentation/cpu-freq/intel-pstate.txt
1 | 1 | Intel P-state driver |
2 | 2 | -------------------- |
3 | 3 | |
4 | -This driver implements a scaling driver with an internal governor for | |
5 | -Intel Core processors. The driver follows the same model as the | |
6 | -Transmeta scaling driver (longrun.c) and implements the setpolicy() | |
7 | -instead of target(). Scaling drivers that implement setpolicy() are | |
8 | -assumed to implement internal governors by the cpufreq core. All the | |
9 | -logic for selecting the current P state is contained within the | |
10 | -driver; no external governor is used by the cpufreq core. | |
4 | +This driver provides an interface to control the P state selection for | |
5 | +SandyBridge+ Intel processors. The driver can operate in two | |
6 | +different modes based on the processor model: legacy and Hardware P | |
7 | +state (HWP) mode. | |
11 | 8 | |
12 | -Intel SandyBridge+ processors are supported. | |
9 | +In legacy mode the driver implements a scaling driver with an internal | |
10 | +governor for Intel Core processors. The driver follows the same model | |
11 | +as the Transmeta scaling driver (longrun.c) and implements the | |
12 | +setpolicy() instead of target(). Scaling drivers that implement | |
13 | +setpolicy() are assumed to implement internal governors by the cpufreq | |
14 | +core. All the logic for selecting the current P state is contained | |
15 | +within the driver; no external governor is used by the cpufreq core. | |
13 | 16 | |
14 | -New sysfs files for controlling P state selection have been added to | |
17 | +In HWP mode P state selection is implemented in the processor | |
18 | +itself. The driver provides the interfaces between the cpufreq core and | |
19 | +the processor to control P state selection based on user preferences | |
20 | +and reporting frequency to the cpufreq core. In this mode the | |
21 | +internal governor code is disabled. | |
22 | + | |
23 | +In addition to the interfaces provided by the cpufreq core for | |
24 | +controlling frequency, the driver provides sysfs files for | |
25 | +controlling P state selection. These files have been added to | |
15 | 26 | /sys/devices/system/cpu/intel_pstate/ |
16 | 27 | |
17 | 28 | max_perf_pct: limits the maximum P state that will be requested by |
... | ... | @@ -33,7 +44,9 @@ |
33 | 44 | driver selects a single P state the actual frequency the processor |
34 | 45 | will run at is selected by the processor itself. |
35 | 46 | |
36 | -New debugfs files have also been added to /sys/kernel/debug/pstate_snb/ | |
47 | +For legacy mode debugfs files have also been added to allow tuning of | |
48 | +the internal governor algorithm. These files are located at | |
49 | +/sys/kernel/debug/pstate_snb/. These files are NOT present in HWP mode. | |
37 | 50 | |
38 | 51 | deadband |
39 | 52 | d_gain_pct |
Documentation/kernel-parameters.txt
... | ... | @@ -1446,6 +1446,9 @@ |
1446 | 1446 | disable |
1447 | 1447 | Do not enable intel_pstate as the default |
1448 | 1448 | scaling driver for the supported processors |
1449 | + no_hwp | |
1450 | + Do not enable hardware P state control (HWP) | |
1451 | + if available. | |
1449 | 1452 | |
1450 | 1453 | intremap= [X86-64, Intel-IOMMU] |
1451 | 1454 | on enable Interrupt Remapping (default) |
arch/x86/include/asm/cpufeature.h
... | ... | @@ -189,6 +189,11 @@ |
189 | 189 | #define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ |
190 | 190 | #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ |
191 | 191 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ |
192 | +#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ | |
193 | +#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ | |
194 | +#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ | |
195 | +#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ | |
196 | +#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ | |
192 | 197 | |
193 | 198 | /* Virtualization flags: Linux defined, word 8 */ |
194 | 199 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
arch/x86/include/uapi/asm/msr-index.h
... | ... | @@ -152,6 +152,45 @@ |
152 | 152 | #define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668 |
153 | 153 | #define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669 |
154 | 154 | |
155 | +/* Hardware P state interface */ | |
156 | +#define MSR_PPERF 0x0000064e | |
157 | +#define MSR_PERF_LIMIT_REASONS 0x0000064f | |
158 | +#define MSR_PM_ENABLE 0x00000770 | |
159 | +#define MSR_HWP_CAPABILITIES 0x00000771 | |
160 | +#define MSR_HWP_REQUEST_PKG 0x00000772 | |
161 | +#define MSR_HWP_INTERRUPT 0x00000773 | |
162 | +#define MSR_HWP_REQUEST 0x00000774 | |
163 | +#define MSR_HWP_STATUS 0x00000777 | |
164 | + | |
165 | +/* CPUID.6.EAX */ | |
166 | +#define HWP_BASE_BIT (1<<7) | |
167 | +#define HWP_NOTIFICATIONS_BIT (1<<8) | |
168 | +#define HWP_ACTIVITY_WINDOW_BIT (1<<9) | |
169 | +#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10) | |
170 | +#define HWP_PACKAGE_LEVEL_REQUEST_BIT (1<<11) | |
171 | + | |
172 | +/* IA32_HWP_CAPABILITIES */ | |
173 | +#define HWP_HIGHEST_PERF(x) (x & 0xff) | |
174 | +#define HWP_GUARANTEED_PERF(x) ((x & (0xff << 8)) >>8) | |
175 | +#define HWP_MOSTEFFICIENT_PERF(x) ((x & (0xff << 16)) >>16) | |
176 | +#define HWP_LOWEST_PERF(x) ((x & (0xff << 24)) >>24) | |
177 | + | |
178 | +/* IA32_HWP_REQUEST */ | |
179 | +#define HWP_MIN_PERF(x) (x & 0xff) | |
180 | +#define HWP_MAX_PERF(x) ((x & 0xff) << 8) | |
181 | +#define HWP_DESIRED_PERF(x) ((x & 0xff) << 16) | |
182 | +#define HWP_ENERGY_PERF_PREFERENCE(x) ((x & 0xff) << 24) | |
183 | +#define HWP_ACTIVITY_WINDOW(x) ((x & 0xff3) << 32) | |
184 | +#define HWP_PACKAGE_CONTROL(x) ((x & 0x1) << 42) | |
185 | + | |
186 | +/* IA32_HWP_STATUS */ | |
187 | +#define HWP_GUARANTEED_CHANGE(x) (x & 0x1) | |
188 | +#define HWP_EXCURSION_TO_MINIMUM(x) (x & 0x4) | |
189 | + | |
190 | +/* IA32_HWP_INTERRUPT */ | |
191 | +#define HWP_CHANGE_TO_GUARANTEED_INT(x) (x & 0x1) | |
192 | +#define HWP_EXCURSION_TO_MINIMUM_INT(x) (x & 0x2) | |
193 | + | |
155 | 194 | #define MSR_AMD64_MC0_MASK 0xc0010044 |
156 | 195 | |
157 | 196 | #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) |
... | ... | @@ -344,6 +383,8 @@ |
344 | 383 | #define MSR_IA32_MISC_ENABLE 0x000001a0 |
345 | 384 | |
346 | 385 | #define MSR_IA32_TEMPERATURE_TARGET 0x000001a2 |
386 | + | |
387 | +#define MSR_MISC_PWR_MGMT 0x000001aa | |
347 | 388 | |
348 | 389 | #define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0 |
349 | 390 | #define ENERGY_PERF_BIAS_PERFORMANCE 0 |
arch/x86/kernel/cpu/scattered.c
... | ... | @@ -36,6 +36,11 @@ |
36 | 36 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, |
37 | 37 | { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, |
38 | 38 | { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, |
39 | + { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, | |
40 | + { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 }, | |
41 | + { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, | |
42 | + { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, | |
43 | + { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, | |
39 | 44 | { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, |
40 | 45 | { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, |
41 | 46 | { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, |
drivers/cpufreq/Kconfig
... | ... | @@ -63,7 +63,6 @@ |
63 | 63 | |
64 | 64 | config CPU_FREQ_DEFAULT_GOV_POWERSAVE |
65 | 65 | bool "powersave" |
66 | - depends on EXPERT | |
67 | 66 | select CPU_FREQ_GOV_POWERSAVE |
68 | 67 | help |
69 | 68 | Use the CPUFreq governor 'powersave' as default. This sets |
... | ... | @@ -245,6 +244,16 @@ |
245 | 244 | support software configurable cpu frequency. |
246 | 245 | |
247 | 246 | Loongson2F and it's successors support this feature. |
247 | + | |
248 | + For details, take a look at <file:Documentation/cpu-freq/>. | |
249 | + | |
250 | + If in doubt, say N. | |
251 | + | |
252 | +config LOONGSON1_CPUFREQ | |
253 | + tristate "Loongson1 CPUFreq Driver" | |
254 | + help | |
255 | + This option adds a CPUFreq driver for loongson1 processors which | |
256 | + support software configurable cpu frequency. | |
248 | 257 | |
249 | 258 | For details, take a look at <file:Documentation/cpu-freq/>. |
250 | 259 |
drivers/cpufreq/Makefile
... | ... | @@ -98,6 +98,7 @@ |
98 | 98 | obj-$(CONFIG_ETRAXFS) += cris-etraxfs-cpufreq.o |
99 | 99 | obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o |
100 | 100 | obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o |
101 | +obj-$(CONFIG_LOONGSON1_CPUFREQ) += ls1x-cpufreq.o | |
101 | 102 | obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o |
102 | 103 | obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o |
103 | 104 | obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o |
drivers/cpufreq/cpufreq-dt.c
... | ... | @@ -58,6 +58,8 @@ |
58 | 58 | old_freq = clk_get_rate(cpu_clk) / 1000; |
59 | 59 | |
60 | 60 | if (!IS_ERR(cpu_reg)) { |
61 | + unsigned long opp_freq; | |
62 | + | |
61 | 63 | rcu_read_lock(); |
62 | 64 | opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz); |
63 | 65 | if (IS_ERR(opp)) { |
64 | 66 | |
65 | 67 | |
... | ... | @@ -67,13 +69,16 @@ |
67 | 69 | return PTR_ERR(opp); |
68 | 70 | } |
69 | 71 | volt = dev_pm_opp_get_voltage(opp); |
72 | + opp_freq = dev_pm_opp_get_freq(opp); | |
70 | 73 | rcu_read_unlock(); |
71 | 74 | tol = volt * priv->voltage_tolerance / 100; |
72 | 75 | volt_old = regulator_get_voltage(cpu_reg); |
76 | + dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n", | |
77 | + opp_freq / 1000, volt); | |
73 | 78 | } |
74 | 79 | |
75 | 80 | dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", |
76 | - old_freq / 1000, volt_old ? volt_old / 1000 : -1, | |
81 | + old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1, | |
77 | 82 | new_freq / 1000, volt ? volt / 1000 : -1); |
78 | 83 | |
79 | 84 | /* scaling up? scale voltage before frequency */ |
... | ... | @@ -89,7 +94,7 @@ |
89 | 94 | ret = clk_set_rate(cpu_clk, freq_exact); |
90 | 95 | if (ret) { |
91 | 96 | dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); |
92 | - if (!IS_ERR(cpu_reg)) | |
97 | + if (!IS_ERR(cpu_reg) && volt_old > 0) | |
93 | 98 | regulator_set_voltage_tol(cpu_reg, volt_old, tol); |
94 | 99 | return ret; |
95 | 100 | } |
drivers/cpufreq/cpufreq.c
... | ... | @@ -535,7 +535,7 @@ |
535 | 535 | static ssize_t store_##file_name \ |
536 | 536 | (struct cpufreq_policy *policy, const char *buf, size_t count) \ |
537 | 537 | { \ |
538 | - int ret; \ | |
538 | + int ret, temp; \ | |
539 | 539 | struct cpufreq_policy new_policy; \ |
540 | 540 | \ |
541 | 541 | ret = cpufreq_get_policy(&new_policy, policy->cpu); \ |
542 | 542 | |
... | ... | @@ -546,8 +546,10 @@ |
546 | 546 | if (ret != 1) \ |
547 | 547 | return -EINVAL; \ |
548 | 548 | \ |
549 | + temp = new_policy.object; \ | |
549 | 550 | ret = cpufreq_set_policy(policy, &new_policy); \ |
550 | - policy->user_policy.object = policy->object; \ | |
551 | + if (!ret) \ | |
552 | + policy->user_policy.object = temp; \ | |
551 | 553 | \ |
552 | 554 | return ret ? ret : count; \ |
553 | 555 | } |
drivers/cpufreq/intel_pstate.c
... | ... | @@ -137,6 +137,7 @@ |
137 | 137 | |
138 | 138 | static struct pstate_adjust_policy pid_params; |
139 | 139 | static struct pstate_funcs pstate_funcs; |
140 | +static int hwp_active; | |
140 | 141 | |
141 | 142 | struct perf_limits { |
142 | 143 | int no_turbo; |
... | ... | @@ -244,6 +245,34 @@ |
244 | 245 | cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); |
245 | 246 | } |
246 | 247 | |
248 | +#define PCT_TO_HWP(x) (x * 255 / 100) | |
249 | +static void intel_pstate_hwp_set(void) | |
250 | +{ | |
251 | + int min, max, cpu; | |
252 | + u64 value, freq; | |
253 | + | |
254 | + get_online_cpus(); | |
255 | + | |
256 | + for_each_online_cpu(cpu) { | |
257 | + rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); | |
258 | + min = PCT_TO_HWP(limits.min_perf_pct); | |
259 | + value &= ~HWP_MIN_PERF(~0L); | |
260 | + value |= HWP_MIN_PERF(min); | |
261 | + | |
262 | + max = PCT_TO_HWP(limits.max_perf_pct); | |
263 | + if (limits.no_turbo) { | |
264 | + rdmsrl( MSR_HWP_CAPABILITIES, freq); | |
265 | + max = HWP_GUARANTEED_PERF(freq); | |
266 | + } | |
267 | + | |
268 | + value &= ~HWP_MAX_PERF(~0L); | |
269 | + value |= HWP_MAX_PERF(max); | |
270 | + wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value); | |
271 | + } | |
272 | + | |
273 | + put_online_cpus(); | |
274 | +} | |
275 | + | |
247 | 276 | /************************** debugfs begin ************************/ |
248 | 277 | static int pid_param_set(void *data, u64 val) |
249 | 278 | { |
... | ... | @@ -279,6 +308,8 @@ |
279 | 308 | struct dentry *debugfs_parent; |
280 | 309 | int i = 0; |
281 | 310 | |
311 | + if (hwp_active) | |
312 | + return; | |
282 | 313 | debugfs_parent = debugfs_create_dir("pstate_snb", NULL); |
283 | 314 | if (IS_ERR_OR_NULL(debugfs_parent)) |
284 | 315 | return; |
285 | 316 | |
... | ... | @@ -329,8 +360,12 @@ |
329 | 360 | pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); |
330 | 361 | return -EPERM; |
331 | 362 | } |
363 | + | |
332 | 364 | limits.no_turbo = clamp_t(int, input, 0, 1); |
333 | 365 | |
366 | + if (hwp_active) | |
367 | + intel_pstate_hwp_set(); | |
368 | + | |
334 | 369 | return count; |
335 | 370 | } |
336 | 371 | |
... | ... | @@ -348,6 +383,8 @@ |
348 | 383 | limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); |
349 | 384 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); |
350 | 385 | |
386 | + if (hwp_active) | |
387 | + intel_pstate_hwp_set(); | |
351 | 388 | return count; |
352 | 389 | } |
353 | 390 | |
... | ... | @@ -363,6 +400,8 @@ |
363 | 400 | limits.min_perf_pct = clamp_t(int, input, 0 , 100); |
364 | 401 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); |
365 | 402 | |
403 | + if (hwp_active) | |
404 | + intel_pstate_hwp_set(); | |
366 | 405 | return count; |
367 | 406 | } |
368 | 407 | |
369 | 408 | |
... | ... | @@ -395,8 +434,16 @@ |
395 | 434 | rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); |
396 | 435 | BUG_ON(rc); |
397 | 436 | } |
398 | - | |
399 | 437 | /************************** sysfs end ************************/ |
438 | + | |
439 | +static void intel_pstate_hwp_enable(void) | |
440 | +{ | |
441 | + hwp_active++; | |
442 | + pr_info("intel_pstate HWP enabled\n"); | |
443 | + | |
444 | + wrmsrl( MSR_PM_ENABLE, 0x1); | |
445 | +} | |
446 | + | |
400 | 447 | static int byt_get_min_pstate(void) |
401 | 448 | { |
402 | 449 | u64 value; |
... | ... | @@ -648,6 +695,14 @@ |
648 | 695 | cpu->prev_mperf = mperf; |
649 | 696 | } |
650 | 697 | |
698 | +static inline void intel_hwp_set_sample_time(struct cpudata *cpu) | |
699 | +{ | |
700 | + int delay; | |
701 | + | |
702 | + delay = msecs_to_jiffies(50); | |
703 | + mod_timer_pinned(&cpu->timer, jiffies + delay); | |
704 | +} | |
705 | + | |
651 | 706 | static inline void intel_pstate_set_sample_time(struct cpudata *cpu) |
652 | 707 | { |
653 | 708 | int delay; |
... | ... | @@ -694,6 +749,14 @@ |
694 | 749 | intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl); |
695 | 750 | } |
696 | 751 | |
752 | +static void intel_hwp_timer_func(unsigned long __data) | |
753 | +{ | |
754 | + struct cpudata *cpu = (struct cpudata *) __data; | |
755 | + | |
756 | + intel_pstate_sample(cpu); | |
757 | + intel_hwp_set_sample_time(cpu); | |
758 | +} | |
759 | + | |
697 | 760 | static void intel_pstate_timer_func(unsigned long __data) |
698 | 761 | { |
699 | 762 | struct cpudata *cpu = (struct cpudata *) __data; |
... | ... | @@ -730,6 +793,7 @@ |
730 | 793 | ICPU(0x3f, core_params), |
731 | 794 | ICPU(0x45, core_params), |
732 | 795 | ICPU(0x46, core_params), |
796 | + ICPU(0x47, core_params), | |
733 | 797 | ICPU(0x4c, byt_params), |
734 | 798 | ICPU(0x4f, core_params), |
735 | 799 | ICPU(0x56, core_params), |
... | ... | @@ -737,6 +801,11 @@ |
737 | 801 | }; |
738 | 802 | MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); |
739 | 803 | |
804 | +static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = { | |
805 | + ICPU(0x56, core_params), | |
806 | + {} | |
807 | +}; | |
808 | + | |
740 | 809 | static int intel_pstate_init_cpu(unsigned int cpunum) |
741 | 810 | { |
742 | 811 | struct cpudata *cpu; |
743 | 812 | |
... | ... | @@ -753,9 +822,14 @@ |
753 | 822 | intel_pstate_get_cpu_pstates(cpu); |
754 | 823 | |
755 | 824 | init_timer_deferrable(&cpu->timer); |
756 | - cpu->timer.function = intel_pstate_timer_func; | |
757 | 825 | cpu->timer.data = (unsigned long)cpu; |
758 | 826 | cpu->timer.expires = jiffies + HZ/100; |
827 | + | |
828 | + if (!hwp_active) | |
829 | + cpu->timer.function = intel_pstate_timer_func; | |
830 | + else | |
831 | + cpu->timer.function = intel_hwp_timer_func; | |
832 | + | |
759 | 833 | intel_pstate_busy_pid_reset(cpu); |
760 | 834 | intel_pstate_sample(cpu); |
761 | 835 | |
... | ... | @@ -792,6 +866,7 @@ |
792 | 866 | limits.no_turbo = 0; |
793 | 867 | return 0; |
794 | 868 | } |
869 | + | |
795 | 870 | limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; |
796 | 871 | limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); |
797 | 872 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); |
... | ... | @@ -801,6 +876,9 @@ |
801 | 876 | limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); |
802 | 877 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); |
803 | 878 | |
879 | + if (hwp_active) | |
880 | + intel_pstate_hwp_set(); | |
881 | + | |
804 | 882 | return 0; |
805 | 883 | } |
806 | 884 | |
... | ... | @@ -823,6 +901,9 @@ |
823 | 901 | pr_info("intel_pstate CPU %d exiting\n", cpu_num); |
824 | 902 | |
825 | 903 | del_timer_sync(&all_cpu_data[cpu_num]->timer); |
904 | + if (hwp_active) | |
905 | + return; | |
906 | + | |
826 | 907 | intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); |
827 | 908 | } |
828 | 909 | |
... | ... | @@ -866,6 +947,7 @@ |
866 | 947 | }; |
867 | 948 | |
868 | 949 | static int __initdata no_load; |
950 | +static int __initdata no_hwp; | |
869 | 951 | |
870 | 952 | static int intel_pstate_msrs_not_valid(void) |
871 | 953 | { |
872 | 954 | |
... | ... | @@ -959,7 +1041,16 @@ |
959 | 1041 | { |
960 | 1042 | struct acpi_table_header hdr; |
961 | 1043 | struct hw_vendor_info *v_info; |
1044 | + const struct x86_cpu_id *id; | |
1045 | + u64 misc_pwr; | |
962 | 1046 | |
1047 | + id = x86_match_cpu(intel_pstate_cpu_oob_ids); | |
1048 | + if (id) { | |
1049 | + rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr); | |
1050 | + if ( misc_pwr & (1 << 8)) | |
1051 | + return true; | |
1052 | + } | |
1053 | + | |
963 | 1054 | if (acpi_disabled || |
964 | 1055 | ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) |
965 | 1056 | return false; |
... | ... | @@ -982,6 +1073,7 @@ |
982 | 1073 | int cpu, rc = 0; |
983 | 1074 | const struct x86_cpu_id *id; |
984 | 1075 | struct cpu_defaults *cpu_info; |
1076 | + struct cpuinfo_x86 *c = &boot_cpu_data; | |
985 | 1077 | |
986 | 1078 | if (no_load) |
987 | 1079 | return -ENODEV; |
... | ... | @@ -1011,6 +1103,9 @@ |
1011 | 1103 | if (!all_cpu_data) |
1012 | 1104 | return -ENOMEM; |
1013 | 1105 | |
1106 | + if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp) | |
1107 | + intel_pstate_hwp_enable(); | |
1108 | + | |
1014 | 1109 | rc = cpufreq_register_driver(&intel_pstate_driver); |
1015 | 1110 | if (rc) |
1016 | 1111 | goto out; |
... | ... | @@ -1041,6 +1136,8 @@ |
1041 | 1136 | |
1042 | 1137 | if (!strcmp(str, "disable")) |
1043 | 1138 | no_load = 1; |
1139 | + if (!strcmp(str, "no_hwp")) | |
1140 | + no_hwp = 1; | |
1044 | 1141 | return 0; |
1045 | 1142 | } |
1046 | 1143 | early_param("intel_pstate", intel_pstate_setup); |
drivers/cpufreq/ls1x-cpufreq.c
1 | +/* | |
2 | + * CPU Frequency Scaling for Loongson 1 SoC | |
3 | + * | |
4 | + * Copyright (C) 2014 Zhang, Keguang <keguang.zhang@gmail.com> | |
5 | + * | |
6 | + * This file is licensed under the terms of the GNU General Public | |
7 | + * License version 2. This program is licensed "as is" without any | |
8 | + * warranty of any kind, whether express or implied. | |
9 | + */ | |
10 | + | |
11 | +#include <linux/clk.h> | |
12 | +#include <linux/clk-provider.h> | |
13 | +#include <linux/cpu.h> | |
14 | +#include <linux/cpufreq.h> | |
15 | +#include <linux/delay.h> | |
16 | +#include <linux/module.h> | |
17 | +#include <linux/platform_device.h> | |
18 | +#include <linux/slab.h> | |
19 | + | |
20 | +#include <asm/mach-loongson1/cpufreq.h> | |
21 | +#include <asm/mach-loongson1/loongson1.h> | |
22 | + | |
23 | +static struct { | |
24 | + struct device *dev; | |
25 | + struct clk *clk; /* CPU clk */ | |
26 | + struct clk *mux_clk; /* MUX of CPU clk */ | |
27 | + struct clk *pll_clk; /* PLL clk */ | |
28 | + struct clk *osc_clk; /* OSC clk */ | |
29 | + unsigned int max_freq; | |
30 | + unsigned int min_freq; | |
31 | +} ls1x_cpufreq; | |
32 | + | |
33 | +static int ls1x_cpufreq_notifier(struct notifier_block *nb, | |
34 | + unsigned long val, void *data) | |
35 | +{ | |
36 | + if (val == CPUFREQ_POSTCHANGE) | |
37 | + current_cpu_data.udelay_val = loops_per_jiffy; | |
38 | + | |
39 | + return NOTIFY_OK; | |
40 | +} | |
41 | + | |
42 | +static struct notifier_block ls1x_cpufreq_notifier_block = { | |
43 | + .notifier_call = ls1x_cpufreq_notifier | |
44 | +}; | |
45 | + | |
46 | +static int ls1x_cpufreq_target(struct cpufreq_policy *policy, | |
47 | + unsigned int index) | |
48 | +{ | |
49 | + unsigned int old_freq, new_freq; | |
50 | + | |
51 | + old_freq = policy->cur; | |
52 | + new_freq = policy->freq_table[index].frequency; | |
53 | + | |
54 | + /* | |
55 | + * The procedure of reconfiguring CPU clk is as below. | |
56 | + * | |
57 | + * - Reparent CPU clk to OSC clk | |
58 | + * - Reset CPU clock (very important) | |
59 | + * - Reconfigure CPU DIV | |
60 | + * - Reparent CPU clk back to CPU DIV clk | |
61 | + */ | |
62 | + | |
63 | + dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq); | |
64 | + clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk); | |
65 | + __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU, | |
66 | + LS1X_CLK_PLL_DIV); | |
67 | + __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU), | |
68 | + LS1X_CLK_PLL_DIV); | |
69 | + clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000); | |
70 | + clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk); | |
71 | + | |
72 | + return 0; | |
73 | +} | |
74 | + | |
75 | +static int ls1x_cpufreq_init(struct cpufreq_policy *policy) | |
76 | +{ | |
77 | + struct cpufreq_frequency_table *freq_tbl; | |
78 | + unsigned int pll_freq, freq; | |
79 | + int steps, i, ret; | |
80 | + | |
81 | + pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000; | |
82 | + | |
83 | + steps = 1 << DIV_CPU_WIDTH; | |
84 | + freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL); | |
85 | + if (!freq_tbl) { | |
86 | + dev_err(ls1x_cpufreq.dev, | |
87 | + "failed to alloc cpufreq_frequency_table\n"); | |
88 | + ret = -ENOMEM; | |
89 | + goto out; | |
90 | + } | |
91 | + | |
92 | + for (i = 0; i < (steps - 1); i++) { | |
93 | + freq = pll_freq / (i + 1); | |
94 | + if ((freq < ls1x_cpufreq.min_freq) || | |
95 | + (freq > ls1x_cpufreq.max_freq)) | |
96 | + freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID; | |
97 | + else | |
98 | + freq_tbl[i].frequency = freq; | |
99 | + dev_dbg(ls1x_cpufreq.dev, | |
100 | + "cpufreq table: index %d: frequency %d\n", i, | |
101 | + freq_tbl[i].frequency); | |
102 | + } | |
103 | + freq_tbl[i].frequency = CPUFREQ_TABLE_END; | |
104 | + | |
105 | + policy->clk = ls1x_cpufreq.clk; | |
106 | + ret = cpufreq_generic_init(policy, freq_tbl, 0); | |
107 | + if (ret) | |
108 | + kfree(freq_tbl); | |
109 | +out: | |
110 | + return ret; | |
111 | +} | |
112 | + | |
113 | +static int ls1x_cpufreq_exit(struct cpufreq_policy *policy) | |
114 | +{ | |
115 | + kfree(policy->freq_table); | |
116 | + return 0; | |
117 | +} | |
118 | + | |
119 | +static struct cpufreq_driver ls1x_cpufreq_driver = { | |
120 | + .name = "cpufreq-ls1x", | |
121 | + .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK, | |
122 | + .verify = cpufreq_generic_frequency_table_verify, | |
123 | + .target_index = ls1x_cpufreq_target, | |
124 | + .get = cpufreq_generic_get, | |
125 | + .init = ls1x_cpufreq_init, | |
126 | + .exit = ls1x_cpufreq_exit, | |
127 | + .attr = cpufreq_generic_attr, | |
128 | +}; | |
129 | + | |
130 | +static int ls1x_cpufreq_remove(struct platform_device *pdev) | |
131 | +{ | |
132 | + cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block, | |
133 | + CPUFREQ_TRANSITION_NOTIFIER); | |
134 | + cpufreq_unregister_driver(&ls1x_cpufreq_driver); | |
135 | + | |
136 | + return 0; | |
137 | +} | |
138 | + | |
139 | +static int ls1x_cpufreq_probe(struct platform_device *pdev) | |
140 | +{ | |
141 | + struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data; | |
142 | + struct clk *clk; | |
143 | + int ret; | |
144 | + | |
145 | + if (!pdata || !pdata->clk_name || !pdata->osc_clk_name) | |
146 | + return -EINVAL; | |
147 | + | |
148 | + ls1x_cpufreq.dev = &pdev->dev; | |
149 | + | |
150 | + clk = devm_clk_get(&pdev->dev, pdata->clk_name); | |
151 | + if (IS_ERR(clk)) { | |
152 | + dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n", | |
153 | + pdata->clk_name); | |
154 | + ret = PTR_ERR(clk); | |
155 | + goto out; | |
156 | + } | |
157 | + ls1x_cpufreq.clk = clk; | |
158 | + | |
159 | + clk = clk_get_parent(clk); | |
160 | + if (IS_ERR(clk)) { | |
161 | + dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n", | |
162 | + __clk_get_name(ls1x_cpufreq.clk)); | |
163 | + ret = PTR_ERR(clk); | |
164 | + goto out; | |
165 | + } | |
166 | + ls1x_cpufreq.mux_clk = clk; | |
167 | + | |
168 | + clk = clk_get_parent(clk); | |
169 | + if (IS_ERR(clk)) { | |
170 | + dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n", | |
171 | + __clk_get_name(ls1x_cpufreq.mux_clk)); | |
172 | + ret = PTR_ERR(clk); | |
173 | + goto out; | |
174 | + } | |
175 | + ls1x_cpufreq.pll_clk = clk; | |
176 | + | |
177 | + clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name); | |
178 | + if (IS_ERR(clk)) { | |
179 | + dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n", | |
180 | + pdata->osc_clk_name); | |
181 | + ret = PTR_ERR(clk); | |
182 | + goto out; | |
183 | + } | |
184 | + ls1x_cpufreq.osc_clk = clk; | |
185 | + | |
186 | + ls1x_cpufreq.max_freq = pdata->max_freq; | |
187 | + ls1x_cpufreq.min_freq = pdata->min_freq; | |
188 | + | |
189 | + ret = cpufreq_register_driver(&ls1x_cpufreq_driver); | |
190 | + if (ret) { | |
191 | + dev_err(ls1x_cpufreq.dev, | |
192 | + "failed to register cpufreq driver: %d\n", ret); | |
193 | + goto out; | |
194 | + } | |
195 | + | |
196 | + ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block, | |
197 | + CPUFREQ_TRANSITION_NOTIFIER); | |
198 | + | |
199 | + if (!ret) | |
200 | + goto out; | |
201 | + | |
202 | + dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n", | |
203 | + ret); | |
204 | + | |
205 | + cpufreq_unregister_driver(&ls1x_cpufreq_driver); | |
206 | +out: | |
207 | + return ret; | |
208 | +} | |
209 | + | |
210 | +static struct platform_driver ls1x_cpufreq_platdrv = { | |
211 | + .driver = { | |
212 | + .name = "ls1x-cpufreq", | |
213 | + .owner = THIS_MODULE, | |
214 | + }, | |
215 | + .probe = ls1x_cpufreq_probe, | |
216 | + .remove = ls1x_cpufreq_remove, | |
217 | +}; | |
218 | + | |
219 | +module_platform_driver(ls1x_cpufreq_platdrv); | |
220 | + | |
221 | +MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>"); | |
222 | +MODULE_DESCRIPTION("Loongson 1 CPUFreq driver"); | |
223 | +MODULE_LICENSE("GPL"); |
drivers/cpufreq/pcc-cpufreq.c
... | ... | @@ -603,6 +603,13 @@ |
603 | 603 | free_percpu(pcc_cpu_info); |
604 | 604 | } |
605 | 605 | |
606 | +static const struct acpi_device_id processor_device_ids[] = { | |
607 | + {ACPI_PROCESSOR_OBJECT_HID, }, | |
608 | + {ACPI_PROCESSOR_DEVICE_HID, }, | |
609 | + {}, | |
610 | +}; | |
611 | +MODULE_DEVICE_TABLE(acpi, processor_device_ids); | |
612 | + | |
606 | 613 | MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar"); |
607 | 614 | MODULE_VERSION(PCC_VERSION); |
608 | 615 | MODULE_DESCRIPTION("Processor Clocking Control interface driver"); |