Commit 1133bfa6dcf034639486982629d29472737d5e96
Exists in smarc-imx_3.14.28_1.0.0_ga and 1 other branch
Merge branch 'pm-cpufreq-ondemand' into pm-cpufreq
* pm-cpufreq:
  cpufreq: Remove unused function __cpufreq_driver_getavg()
  cpufreq: Remove unused APERF/MPERF support
  cpufreq: ondemand: Change the calculation of target frequency
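In substance, the series replaces ondemand's frequency-feedback heuristic with a direct proportional mapping from load. A standalone sketch contrasting the two target-frequency formulas, with made-up numbers (variable names mirror the kernel code in the diff below; all values are illustrative only):

    #include <stdio.h>

    int main(void)
    {
        unsigned int load = 50;              /* % busy in the sample window    */
        unsigned int max_freq = 2400000;     /* policy->cpuinfo.max_freq, kHz  */
        unsigned int freq_avg = 1200000;     /* old getavg() result (example)  */
        unsigned int adj_up_threshold = 70;  /* DEF: 80 minus 10 differential  */

        /* Old path: scale load into frequency terms via the measured
         * average, then pick the lowest sustainable frequency. */
        unsigned int load_freq = load * freq_avg;
        printf("old freq_next = %u kHz\n", load_freq / adj_up_threshold);

        /* New path: next frequency is directly proportional to load. */
        printf("new freq_next = %u kHz\n", load * max_freq / 100);
        return 0;
    }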
Showing 10 changed files
arch/x86/include/asm/processor.h
... | ... | @@ -942,35 +942,6 @@ |
942 | 942 | |
943 | 943 | extern u16 amd_get_nb_id(int cpu); |
944 | 944 | |
945 | -struct aperfmperf { | |
946 | - u64 aperf, mperf; | |
947 | -}; | |
948 | - | |
949 | -static inline void get_aperfmperf(struct aperfmperf *am) | |
950 | -{ | |
951 | - WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF)); | |
952 | - | |
953 | - rdmsrl(MSR_IA32_APERF, am->aperf); | |
954 | - rdmsrl(MSR_IA32_MPERF, am->mperf); | |
955 | -} | |
956 | - | |
957 | -#define APERFMPERF_SHIFT 10 | |
958 | - | |
959 | -static inline | |
960 | -unsigned long calc_aperfmperf_ratio(struct aperfmperf *old, | |
961 | - struct aperfmperf *new) | |
962 | -{ | |
963 | - u64 aperf = new->aperf - old->aperf; | |
964 | - u64 mperf = new->mperf - old->mperf; | |
965 | - unsigned long ratio = aperf; | |
966 | - | |
967 | - mperf >>= APERFMPERF_SHIFT; | |
968 | - if (mperf) | |
969 | - ratio = div64_u64(aperf, mperf); | |
970 | - | |
971 | - return ratio; | |
972 | -} | |
973 | - | |
974 | 945 | extern unsigned long arch_align_stack(unsigned long sp); |
975 | 946 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); |
976 | 947 |
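The helpers removed above computed the APERF/MPERF delta ratio in 2^10 fixed point. A user-space re-creation of that arithmetic, with rdmsrl() and div64_u64() replaced by plain 64-bit math and the MSR values made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define APERFMPERF_SHIFT 10

    /* Mirrors the removed calc_aperfmperf_ratio(): the result is
     * (aperf delta / mperf delta) scaled by 2^APERFMPERF_SHIFT. */
    static uint64_t ratio(uint64_t aperf_old, uint64_t aperf_new,
                          uint64_t mperf_old, uint64_t mperf_new)
    {
        uint64_t aperf = aperf_new - aperf_old;
        uint64_t mperf = (mperf_new - mperf_old) >> APERFMPERF_SHIFT;

        return mperf ? aperf / mperf : aperf;
    }

    int main(void)
    {
        /* The CPU ran at half its advertised maximum: APERF advanced
         * half as far as MPERF, so the ratio is 0.5 * 2^10 = 512. */
        printf("%llu\n", (unsigned long long)ratio(0, 1000000, 0, 2000000));
        return 0;
    }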
drivers/cpufreq/Makefile
... | ... | @@ -23,7 +23,7 @@ |
23 | 23 | # powernow-k8 can load then. ACPI is preferred to all other hardware-specific drivers. |
24 | 24 | # speedstep-* is preferred over p4-clockmod. |
25 | 25 | |
26 | -obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o | |
26 | +obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o | |
27 | 27 | obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o |
28 | 28 | obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o |
29 | 29 | obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o |
drivers/cpufreq/acpi-cpufreq.c
... | ... | @@ -45,7 +45,6 @@ |
45 | 45 | #include <asm/msr.h> |
46 | 46 | #include <asm/processor.h> |
47 | 47 | #include <asm/cpufeature.h> |
48 | -#include "mperf.h" | |
49 | 48 | |
50 | 49 | MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); |
51 | 50 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); |
... | ... | @@ -860,10 +859,6 @@ |
860 | 859 | |
861 | 860 | /* notify BIOS that we exist */ |
862 | 861 | acpi_processor_notify_smm(THIS_MODULE); |
863 | - | |
864 | - /* Check for APERF/MPERF support in hardware */ | |
865 | - if (boot_cpu_has(X86_FEATURE_APERFMPERF)) | |
866 | - acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; | |
867 | 862 | |
868 | 863 | pr_debug("CPU%u - ACPI performance management activated.\n", cpu); |
869 | 864 | for (i = 0; i < perf->state_count; i++) |
drivers/cpufreq/cpufreq.c
... | ... | @@ -1670,18 +1670,6 @@ |
1670 | 1670 | } |
1671 | 1671 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); |
1672 | 1672 | |
1673 | -int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu) | |
1674 | -{ | |
1675 | - if (cpufreq_disabled()) | |
1676 | - return 0; | |
1677 | - | |
1678 | - if (!cpufreq_driver->getavg) | |
1679 | - return 0; | |
1680 | - | |
1681 | - return cpufreq_driver->getavg(policy, cpu); | |
1682 | -} | |
1683 | -EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg); | |
1684 | - | |
1685 | 1673 | /* |
1686 | 1674 | * when "event" is CPUFREQ_GOV_LIMITS |
1687 | 1675 | */ |
drivers/cpufreq/cpufreq_governor.c
... | ... | @@ -53,7 +53,7 @@ |
53 | 53 | |
54 | 54 | policy = cdbs->cur_policy; |
55 | 55 | |
56 | - /* Get Absolute Load (in terms of freq for ondemand gov) */ | |
56 | + /* Get Absolute Load */ | |
57 | 57 | for_each_cpu(j, policy->cpus) { |
58 | 58 | struct cpu_dbs_common_info *j_cdbs; |
59 | 59 | u64 cur_wall_time, cur_idle_time; |
... | ... | @@ -103,14 +103,6 @@ |
103 | 103 | continue; |
104 | 104 | |
105 | 105 | load = 100 * (wall_time - idle_time) / wall_time; |
106 | - | |
107 | - if (dbs_data->cdata->governor == GOV_ONDEMAND) { | |
108 | - int freq_avg = __cpufreq_driver_getavg(policy, j); | |
109 | - if (freq_avg <= 0) | |
110 | - freq_avg = policy->cur; | |
111 | - | |
112 | - load *= freq_avg; | |
113 | - } | |
114 | 106 | |
115 | 107 | if (load > max_load) |
116 | 108 | max_load = load; |
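After this hunk, every governor sees the same percentage load; the ondemand-only multiplication by the measured average frequency is gone. A small sketch of the per-CPU calculation from wall/idle time deltas, using the same formula as the context line above (the time values are illustrative, only the ratio matters):

    #include <stdio.h>

    /* Absolute percentage load over one sampling window, as computed
     * in the governor: 100 * busy_time / wall_time. */
    static unsigned int window_load(unsigned long long wall_time,
                                    unsigned long long idle_time)
    {
        if (!wall_time || wall_time < idle_time)
            return 0;
        return 100 * (wall_time - idle_time) / wall_time;
    }

    int main(void)
    {
        /* 10000-unit window, 7000 units idle -> 30% load. */
        printf("%u%%\n", window_load(10000, 7000));
        return 0;
    }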
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cpufreq_ondemand.c
... | ... | @@ -29,11 +29,9 @@ |
29 | 29 | #include "cpufreq_governor.h" |
30 | 30 | |
31 | 31 | /* On-demand governor macros */ |
32 | -#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) | |
33 | 32 | #define DEF_FREQUENCY_UP_THRESHOLD (80) |
34 | 33 | #define DEF_SAMPLING_DOWN_FACTOR (1) |
35 | 34 | #define MAX_SAMPLING_DOWN_FACTOR (100000) |
36 | -#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3) | |
37 | 35 | #define MICRO_FREQUENCY_UP_THRESHOLD (95) |
38 | 36 | #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) |
39 | 37 | #define MIN_FREQUENCY_UP_THRESHOLD (11) |
40 | 38 | |
... | ... | @@ -161,14 +159,10 @@ |
161 | 159 | |
162 | 160 | /* |
163 | 161 | * Every sampling_rate, we check, if current idle time is less than 20% |
164 | - * (default), then we try to increase frequency. Every sampling_rate, we look | |
165 | - * for the lowest frequency which can sustain the load while keeping idle time | |
166 | - * over 30%. If such a frequency exist, we try to decrease to this frequency. | |
167 | - * | |
168 | - * Any frequency increase takes it to the maximum frequency. Frequency reduction | |
169 | - * happens at minimum steps of 5% (default) of current frequency | |
162 | + * (default), then we try to increase frequency. Else, we adjust the frequency | |
163 | + * proportional to load. | |
170 | 164 | */ |
171 | -static void od_check_cpu(int cpu, unsigned int load_freq) | |
165 | +static void od_check_cpu(int cpu, unsigned int load) | |
172 | 166 | { |
173 | 167 | struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); |
174 | 168 | struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; |
175 | 169 | |
176 | 170 | |
... | ... | @@ -178,29 +172,17 @@ |
178 | 172 | dbs_info->freq_lo = 0; |
179 | 173 | |
180 | 174 | /* Check for frequency increase */ |
181 | - if (load_freq > od_tuners->up_threshold * policy->cur) { | |
175 | + if (load > od_tuners->up_threshold) { | |
182 | 176 | /* If switching to max speed, apply sampling_down_factor */ |
183 | 177 | if (policy->cur < policy->max) |
184 | 178 | dbs_info->rate_mult = |
185 | 179 | od_tuners->sampling_down_factor; |
186 | 180 | dbs_freq_increase(policy, policy->max); |
187 | 181 | return; |
188 | - } | |
189 | - | |
190 | - /* Check for frequency decrease */ | |
191 | - /* if we cannot reduce the frequency anymore, break out early */ | |
192 | - if (policy->cur == policy->min) | |
193 | - return; | |
194 | - | |
195 | - /* | |
196 | - * The optimal frequency is the frequency that is the lowest that can | |
197 | - * support the current CPU usage without triggering the up policy. To be | |
198 | - * safe, we focus 10 points under the threshold. | |
199 | - */ | |
200 | - if (load_freq < od_tuners->adj_up_threshold | |
201 | - * policy->cur) { | |
182 | + } else { | |
183 | + /* Calculate the next frequency proportional to load */ | |
202 | 184 | unsigned int freq_next; |
203 | - freq_next = load_freq / od_tuners->adj_up_threshold; | |
185 | + freq_next = load * policy->cpuinfo.max_freq / 100; | |
204 | 186 | |
205 | 187 | /* No longer fully busy, reset rate_mult */ |
206 | 188 | dbs_info->rate_mult = 1; |
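Taken together, the new od_check_cpu() policy is: jump to policy->max when load exceeds up_threshold, otherwise pick a frequency proportional to load. A condensed user-space restatement; the struct and the clamp to the policy minimum are simplifications for the sketch (the real code hands freq_next to __cpufreq_driver_target() with CPUFREQ_RELATION_L and also handles powersave_bias):

    #include <stdio.h>

    struct policy { unsigned int min, max, cpuinfo_max; };

    /* Condensed form of the new od_check_cpu() decision. */
    static unsigned int od_target(const struct policy *p,
                                  unsigned int load,
                                  unsigned int up_threshold)
    {
        unsigned int freq_next;

        if (load > up_threshold)
            return p->max;                       /* burst to maximum */

        freq_next = load * p->cpuinfo_max / 100; /* proportional to load */
        if (freq_next < p->min)
            freq_next = p->min;
        return freq_next;
    }

    int main(void)
    {
        struct policy p = { 800000, 2400000, 2400000 }; /* kHz, example */

        printf("%u\n", od_target(&p, 96, 95)); /* over threshold -> max */
        printf("%u\n", od_target(&p, 40, 95)); /* 40% -> 960000 kHz     */
        return 0;
    }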
... | ... | @@ -374,9 +356,6 @@ |
374 | 356 | input < MIN_FREQUENCY_UP_THRESHOLD) { |
375 | 357 | return -EINVAL; |
376 | 358 | } |
377 | - /* Calculate the new adj_up_threshold */ | |
378 | - od_tuners->adj_up_threshold += input; | |
379 | - od_tuners->adj_up_threshold -= od_tuners->up_threshold; | |
380 | 359 | |
381 | 360 | od_tuners->up_threshold = input; |
382 | 361 | return count; |
... | ... | @@ -525,8 +504,6 @@ |
525 | 504 | if (idle_time != -1ULL) { |
526 | 505 | /* Idle micro accounting is supported. Use finer thresholds */ |
527 | 506 | tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; |
528 | - tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD - | |
529 | - MICRO_FREQUENCY_DOWN_DIFFERENTIAL; | |
530 | 507 | /* |
531 | 508 | * In nohz/micro accounting case we set the minimum frequency |
532 | 509 | * not depending on HZ, but fixed (very low). The deferred |
... | ... | @@ -535,8 +512,6 @@ |
535 | 512 | dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; |
536 | 513 | } else { |
537 | 514 | tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; |
538 | - tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD - | |
539 | - DEF_FREQUENCY_DOWN_DIFFERENTIAL; | |
540 | 515 | |
541 | 516 | /* For correct statistics, we need 10 ticks for each measure */ |
542 | 517 | dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * |
drivers/cpufreq/mperf.c
1 | -#include <linux/kernel.h> | |
2 | -#include <linux/smp.h> | |
3 | -#include <linux/module.h> | |
4 | -#include <linux/init.h> | |
5 | -#include <linux/cpufreq.h> | |
6 | -#include <linux/slab.h> | |
7 | - | |
8 | -#include "mperf.h" | |
9 | - | |
10 | -static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf); | |
11 | - | |
12 | -/* Called via smp_call_function_single(), on the target CPU */ | |
13 | -static void read_measured_perf_ctrs(void *_cur) | |
14 | -{ | |
15 | - struct aperfmperf *am = _cur; | |
16 | - | |
17 | - get_aperfmperf(am); | |
18 | -} | |
19 | - | |
20 | -/* | |
21 | - * Return the measured active (C0) frequency on this CPU since last call | |
22 | - * to this function. | |
23 | - * Input: cpu number | |
24 | - * Return: Average CPU frequency in terms of max frequency (zero on error) | |
25 | - * | |
26 | - * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance | |
27 | - * over a period of time, while CPU is in C0 state. | |
28 | - * IA32_MPERF counts at the rate of max advertised frequency | |
29 | - * IA32_APERF counts at the rate of actual CPU frequency | |
30 | - * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and | |
31 | - * no meaning should be associated with absolute values of these MSRs. | |
32 | - */ | |
33 | -unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy, | |
34 | - unsigned int cpu) | |
35 | -{ | |
36 | - struct aperfmperf perf; | |
37 | - unsigned long ratio; | |
38 | - unsigned int retval; | |
39 | - | |
40 | - if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1)) | |
41 | - return 0; | |
42 | - | |
43 | - ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf); | |
44 | - per_cpu(acfreq_old_perf, cpu) = perf; | |
45 | - | |
46 | - retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT; | |
47 | - | |
48 | - return retval; | |
49 | -} | |
50 | -EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf); | |
51 | -MODULE_LICENSE("GPL"); |
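The removed cpufreq_get_measured_perf() turned that fixed-point ratio back into kHz by scaling the advertised maximum frequency. A worked continuation of the ratio sketch earlier in this page (values are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define APERFMPERF_SHIFT 10

    int main(void)
    {
        uint64_t ratio = 512;            /* 0.5 in 2^10 fixed point (see above) */
        unsigned int max_freq = 2400000; /* cpuinfo.max_freq in kHz, example    */

        /* As in the removed function: avg = max_freq * ratio >> 10. */
        printf("avg = %u kHz\n",
               (unsigned int)((max_freq * ratio) >> APERFMPERF_SHIFT));
        return 0;
    }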
drivers/cpufreq/mperf.h
1 | -/* | |
2 | - * (c) 2010 Advanced Micro Devices, Inc. | |
3 | - * Your use of this code is subject to the terms and conditions of the | |
4 | - * GNU general public license version 2. See "COPYING" or | |
5 | - * http://www.gnu.org/licenses/gpl.html | |
6 | - */ | |
7 | - | |
8 | -unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy, | |
9 | - unsigned int cpu); |
include/linux/cpufreq.h
... | ... | @@ -216,10 +216,6 @@ |
216 | 216 | extern int __cpufreq_driver_target(struct cpufreq_policy *policy, |
217 | 217 | unsigned int target_freq, |
218 | 218 | unsigned int relation); |
219 | - | |
220 | -extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy, | |
221 | - unsigned int cpu); | |
222 | - | |
223 | 219 | int cpufreq_register_governor(struct cpufreq_governor *governor); |
224 | 220 | void cpufreq_unregister_governor(struct cpufreq_governor *governor); |
225 | 221 | |
... | ... | @@ -258,8 +254,6 @@ |
258 | 254 | unsigned int (*get) (unsigned int cpu); |
259 | 255 | |
260 | 256 | /* optional */ |
261 | - unsigned int (*getavg) (struct cpufreq_policy *policy, | |
262 | - unsigned int cpu); | |
263 | 257 | int (*bios_limit) (int cpu, unsigned int *limit); |
264 | 258 | |
265 | 259 | int (*exit) (struct cpufreq_policy *policy); |
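With getavg gone, a driver fills in only the remaining callbacks. A hedged skeleton of a cpufreq_driver registration against this 3.x-era interface; the "demo" names are hypothetical and the callback bodies are placeholders, not a real driver:

    #include <linux/module.h>
    #include <linux/cpufreq.h>

    /* Placeholder callbacks: a real driver would program hardware here. */
    static int demo_cpufreq_init(struct cpufreq_policy *policy)
    {
        return -ENODEV;   /* sketch only: refuse to actually manage CPUs */
    }

    static int demo_cpufreq_verify(struct cpufreq_policy *policy)
    {
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     policy->cpuinfo.max_freq);
        return 0;
    }

    static int demo_cpufreq_target(struct cpufreq_policy *policy,
                                   unsigned int target_freq,
                                   unsigned int relation)
    {
        return 0;
    }

    static unsigned int demo_cpufreq_get(unsigned int cpu)
    {
        return 0;
    }

    static struct cpufreq_driver demo_cpufreq_driver = {
        .name   = "demo",
        .init   = demo_cpufreq_init,
        .verify = demo_cpufreq_verify,
        .target = demo_cpufreq_target,
        .get    = demo_cpufreq_get,
        /* no .getavg: the callback is removed by this commit */
    };

    static int __init demo_cpufreq_module_init(void)
    {
        return cpufreq_register_driver(&demo_cpufreq_driver);
    }

    static void __exit demo_cpufreq_module_exit(void)
    {
        cpufreq_unregister_driver(&demo_cpufreq_driver);
    }

    module_init(demo_cpufreq_module_init);
    module_exit(demo_cpufreq_module_exit);
    MODULE_LICENSE("GPL");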